diff --git a/.circleci/.gitattributes b/.circleci/.gitattributes deleted file mode 100644 index 2dd06ee5f7cd..000000000000 --- a/.circleci/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -config.yml linguist-generated diff --git a/.circleci/.gitignore b/.circleci/.gitignore deleted file mode 100644 index 3018b3a68132..000000000000 --- a/.circleci/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.tmp/ diff --git a/.circleci/Makefile b/.circleci/Makefile deleted file mode 100644 index dc75ea5f1f19..000000000000 --- a/.circleci/Makefile +++ /dev/null @@ -1,100 +0,0 @@ -# Set SHELL to 'strict mode' without using .SHELLFLAGS for max compatibility. -# See https://fieldnotes.tech/how-to-shell-for-compatible-makefiles/ -SHELL := /usr/bin/env bash -euo pipefail -c - -# CONFIG is the name of the make target someone -# would invoke to update the main config file (config.yml). -CONFIG ?= ci-config -# VERIFY is the name of the make target someone -# would invoke to verify the config file. -VERIFY ?= ci-verify - -CIRCLECI := circleci --skip-update-check -ifeq ($(DEBUG_CIRCLECI_CLI),YES) -CIRCLECI += --debug -endif - -# For config processing, always refer to circleci.com not self-hosted circleci, -# because self-hosted does not currently support the necessary API. -CIRCLECI_CLI_HOST := https://circleci.com -export CIRCLECI_CLI_HOST - -# Set up some documentation/help message variables. -# We do not attempt to install the CircleCI CLI from this Makefile. -CCI_INSTALL_LINK := https://circleci.com/docs/2.0/local-cli/\#installation -CCI_INSTALL_MSG := Please install CircleCI CLI. See $(CCI_INSTALL_LINK) -CCI_VERSION := $(shell $(CIRCLECI) version 2> /dev/null) -ifeq ($(CCI_VERSION),) -# Attempting to use the CLI fails with installation instructions. -CIRCLECI := echo '$(CCI_INSTALL_MSG)'; exit 1; \# -endif - -SOURCE_DIR := config -SOURCE_YML := $(shell [ ! -d $(SOURCE_DIR) ] || find $(SOURCE_DIR) -name '*.yml') -CONFIG_SOURCE := Makefile $(SOURCE_YML) | $(SOURCE_DIR) -OUT := config.yml -TMP := .tmp/config-processed -CONFIG_PACKED := .tmp/config-packed -GO_VERSION_FILE := ../.go-version -GO_VERSION := $(shell cat $(GO_VERSION_FILE)) - -default: help - -help: - @echo "Usage:" - @echo " make $(CONFIG): recompile config.yml from $(SOURCE_DIR)/" - @echo " make $(VERIFY): verify that config.yml is a true mapping from $(SOURCE_DIR)/" - @echo - @echo "Diagnostics:" - @[ -z "$(CCI_VERSION)" ] || echo " circleci-cli version $(CCI_VERSION)" - @[ -n "$(CCI_VERSION)" ] || echo " $(CCI_INSTALL_MSG)" - -$(SOURCE_DIR): - @echo No source directory $(SOURCE_DIR) found.; exit 1 - -# Make sure our .tmp dir exists. -$(shell [ -d .tmp ] || mkdir .tmp) - -.PHONY: $(CONFIG) -$(CONFIG): $(OUT) $(GO_VERSION_FILE) - -.PHONY: $(VERIFY) -$(VERIFY): config-up-to-date - @$(CIRCLECI) config validate $(OUT) - -define GENERATED_FILE_HEADER -### *** -### WARNING: DO NOT manually EDIT or MERGE this file, it is generated by 'make $(CONFIG)'. -### INSTEAD: Edit or merge the source in $(SOURCE_DIR)/ then run 'make $(CONFIG)'. -### *** -endef -export GENERATED_FILE_HEADER - -# GEN_CONFIG writes the config to a temporary file. If the whole process succeeds, -# it then moves that file to $@. This makes it an atomic operation, so if it fails -# make doesn't consider a half-baked file up to date.
-define GEN_CONFIG - @yq -i ".references.environment.GO_IMAGE = \"docker.mirror.hashicorp.services/cimg/go:$(GO_VERSION)\"" $(SOURCE_DIR)/executors/\@executors.yml - - @$(CIRCLECI) config pack $(SOURCE_DIR) > $(CONFIG_PACKED) - @echo "$$GENERATED_FILE_HEADER" > $@.tmp || { rm -f $@; exit 1; } - @$(CIRCLECI) config process $(CONFIG_PACKED) >> $@.tmp || { rm -f $@.tmp; exit 1; } - @mv -f $@.tmp $@ -endef - -.PHONY: $(OUT) -$(OUT): $(CONFIG_SOURCE) - $(GEN_CONFIG) - @echo "$@ updated" - -$(TMP): $(CONFIG_SOURCE) - $(GEN_CONFIG) - -.PHONY: config-up-to-date -config-up-to-date: $(TMP) # Note this must not depend on $(OUT)! - @if diff -w $(OUT) $<; then \ - echo "Generated $(OUT) is up to date!"; \ - else \ - echo "Generated $(OUT) is out of date, run make $(CONFIG) to update."; \ - exit 1; \ - fi diff --git a/.circleci/README.md b/.circleci/README.md deleted file mode 100644 index 1ec75cafade9..000000000000 --- a/.circleci/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# How to use CircleCI multi-file config - -This README and the Makefile should be in your `.circleci` directory, -in the root of your repository. -All path references in this README assume we are in this `.circleci` directory. - -The `Makefile` in this directory generates `./config.yml` in CircleCI 2.0 syntax, -from the tree rooted at `./config/`, which contains files in CircleCI 2.0 or 2.1 syntax. - - -## Quickstart - -The basic workflow is: - -- Edit source files in `./config/` -- When you are done, run `make ci-config` to update `./config.yml` -- Commit this entire `.circleci` directory, including the generated file, together. -- Run `make ci-verify` to ensure the current `./config.yml` is up to date with the source. - -When merging this `.circleci` directory: - -- Do not merge the generated `./config.yml` file, instead: -- Merge the source files under `./config/`, and then -- Run `make ci-config` to re-generate the merged `./config.yml` - -And that's it! For more detail, read on. - - -## How does it work, roughly? - -CircleCI supports [generating a single config file from many], -using the `$ circleci config pack` command. -It also supports [expanding 2.1 syntax to 2.0 syntax] -using the `$ circleci config process` command. -We use these two commands, stitched together using the `Makefile`, -to implement the workflow. - -[generating a single config file from many]: https://circleci.com/docs/2.0/local-cli/#packing-a-config -[expanding 2.1 syntax to 2.0 syntax]: https://circleci.com/docs/2.0/local-cli/#processing-a-config - - -## Prerequisites - -You will need the [CircleCI CLI tool] installed and working, -at least version `0.1.5607`. -You can [download this tool directly from GitHub Releases]. - -``` -$ circleci version -0.1.5607+f705856 -``` - -[CircleCI CLI tool]: https://circleci.com/docs/2.0/local-cli/ -[download this tool directly from GitHub Releases]: https://github.com/CircleCI-Public/circleci-cli/releases - - -## Updating the config source - -Before making changes, be sure to understand the layout -of the `./config/` file tree, as well as CircleCI 2.1 syntax. -See the [Syntax and layout] section below. - -To update the config, you should edit, add or remove files -in the `./config/` directory, -and then run `make ci-config`. -If that's successful, -you should then commit every `*.yml` file in the tree rooted in this directory. -That is: you should commit both the source under `./config/` -and the generated file `./config.yml` at the same time, in the same commit. -The included git pre-commit hook will help with this.
-Do not edit the `./config.yml` file directly, as you will lose your changes -next time `make ci-config` is run. - -[Syntax and layout]: #syntax-and-layout - - -### Verifying `./config.yml` - -To check whether or not the current `./config.yml` is up to date with the source -and valid, run `$ make ci-verify`. -Note that `$ make ci-verify` should be run in CI, -in case not everyone has the git pre-commit hook set up correctly. - - -#### Example shell session - -```sh -$ make ci-config -config.yml updated -$ git add -A . # The -A makes sure to include deletions/renames etc. -$ git commit -m "ci: blah blah blah" -Changes detected in .circleci/, running 'make -C .circleci ci-verify' ---> Generated config.yml is up to date! ---> Config file at config.yml is valid. -``` - - -### Syntax and layout - -It is important to understand the layout of the config directory. -Read the documentation on [packing a config] for a full understanding -of how multiple YAML files are merged by the circleci CLI tool. - -[packing a config]: https://circleci.com/docs/2.0/local-cli/#packing-a-config - -Here is an example file tree (with comments added afterwards): - -```sh -$ tree . -. -├── Makefile -├── README.md # This file. -├── config # The source code for config.yml is rooted here. -│   ├── @config.yml # Files beginning with @ are treated specially by `circleci config pack` -│   ├── commands # Subdirectories of config become top-level keys. -│   │   └── go_test.yml # Filenames (minus .yml) become top-level keys under -│   │   └── go_build.yml # their parent (in this case "commands"). -│ │ # The contents of go_test.yml therefore are placed at: .commands.go_test: -│   └── jobs # jobs also becomes a top-level key under config... -│   ├── build.yml # ...and likewise filenames become keys under their parent. -│   └── test.yml -└── config.yml # The generated file in 2.0 syntax. -``` - -About those `@` files... Preceding a filename with `@` -indicates to `$ circleci config pack` that the contents of this YAML file -should be at the top-level, rather than underneath a key named after its filename. -This naming convention is unfortunate as it breaks autocompletion in bash, -but there we go. - diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 3ae52c52e856..000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,1216 +0,0 @@ -### *** -### WARNING: DO NOT manually EDIT or MERGE this file, it is generated by 'make ci-config'. -### INSTEAD: Edit or merge the source in config/ then run 'make ci-config'.
-### *** -# Orb 'circleci/slack@3.2.0' resolved to 'circleci/slack@3.2.0' -version: 2 -jobs: - install-ui-dependencies: - docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - restore_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Restore yarn cache - - run: - command: | - cd ui - yarn install - npm rebuild node-sass - name: Install UI dependencies - - save_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Save yarn cache - paths: - - ui/node_modules - test-ui: - docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - resource_class: xlarge - steps: - - run: - command: | - case "$CIRCLE_BRANCH" in - main|ui/*|backport/ui/*|release/*|merge*) ;; - *) # If the branch being tested doesn't match one of the above patterns, - # we don't need to run test-ui and can abort the job. - circleci-agent step halt - ;; - esac - - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - restore_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Restore yarn cache - - attach_workspace: - at: . - - run: - command: | - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}/bin:${PATH}" - - # Run Ember tests - cd ui - mkdir -p test-results/qunit - yarn test:oss - name: Test UI - - store_artifacts: - path: ui/test-results - - store_test_results: - path: ui/test-results - build-go-dev: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=off" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - attach_workspace: - at: . - - run: - command: | - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make ci-bootstrap dev - name: Build dev binary - - persist_to_workspace: - paths: - - bin - root: .
- environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-remote-docker: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20.1 - resource_class: medium - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - setup_remote_docker: - docker_layer_caching: true - version: 20.10.17 - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - USE_DOCKER=1 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running the test-split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM.
- - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container.
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - run: - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache - name: Copy test results - when: always - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - fmt: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=https://proxy.golang.org,direct" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - run: - command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" - go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. Run 'make fmt' to fix" - exit 1 - fi - name: make fmt - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-race: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20.1 - resource_class: xlarge - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "-race" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running the test-split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM.
- - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container.
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20.1 - resource_class: large - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't.
- # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running the test-split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future.
- # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - semgrep: - docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 - shell: /bin/sh - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - attach_workspace: - at: . - - run: - command: "# Alpine images can't run the Makefile due to a bash requirement. Run\n# semgrep explicitly here. \nexport PATH=\"$HOME/.local/bin:$PATH\" \necho -n 'Semgrep Version: '\nsemgrep --version\nsemgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .\n" - name: Run Semgrep Rules - pre-flight-checks: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=https://proxy.golang.org,direct" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - run: - command: | - export CCI_PATH=/tmp/circleci-cli/$CIRCLECI_CLI_VERSION - mkdir -p $CCI_PATH - NAME=circleci-cli_${CIRCLECI_CLI_VERSION}_${ARCH} - URL=$BASE/v${CIRCLECI_CLI_VERSION}/${NAME}.tar.gz - curl -sSL $URL \ - | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" - # Add circleci to the path for subsequent steps. - echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV - # Done, print some debug info. - set -x - . $BASH_ENV - which circleci - circleci version - environment: - ARCH: linux_amd64 - BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download - name: Install CircleCI CLI - - run: - command: | - set -x - . $BASH_ENV - make ci-verify - name: Verify CircleCI - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}} - - v1.5-{{checksum "go.sum"}} - name: Restore closest matching go modules cache - - run: - command: | - # set GOPATH explicitly to download to the right cache - export GOPATH=$HOME/go - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go mod download -json - ( cd sdk && go mod download -json; ) - ( cd api && go mod download -json; ) - name: go mod download - - run: - command: | - git --no-pager diff --exit-code || { - echo "ERROR: Files modified by go mod download, see above."
- exit 1 - } - name: Verify downloading modules did not modify any files - - save_cache: - key: v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Save go modules cache - paths: - - /home/circleci/go/pkg/mod - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-race-remote-docker: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20.1 - resource_class: medium - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - setup_remote_docker: - docker_layer_caching: true - version: 20.10.17 - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "-race" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - USE_DOCKER=1 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running the test-split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one.
- if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - run: - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache - name: Copy test results - when: always - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20.1 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 -workflows: - ci: - jobs: - - pre-flight-checks - - fmt - - install-ui-dependencies: - requires: - - pre-flight-checks - - build-go-dev: - requires: - - pre-flight-checks - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - - test-go: - requires: - - pre-flight-checks - - test-go-remote-docker: - requires: - - pre-flight-checks - - test-go-race: - requires: - - pre-flight-checks - - test-go-race-remote-docker: - requires: - - pre-flight-checks - - semgrep: - requires: - - pre-flight-checks - version: 2 diff --git a/.circleci/config/@config.yml b/.circleci/config/@config.yml deleted file mode 100644 index 89ec6dcfbe93..000000000000 --- a/.circleci/config/@config.yml +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - ---- -version: 2.1 - -orbs: - slack: circleci/slack@3.2.0 diff --git a/.circleci/config/commands/@caches.yml b/.circleci/config/commands/@caches.yml deleted file mode 100644 index c7d0aec4a0b5..000000000000 --- a/.circleci/config/commands/@caches.yml +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0 - -restore_yarn_cache: - steps: - - restore_cache: - name: Restore yarn cache - key: &YARN_LOCK_CACHE_KEY yarn-lock-v7-{{ checksum "ui/yarn.lock" }} -save_yarn_cache: - steps: - - save_cache: - name: Save yarn cache - key: *YARN_LOCK_CACHE_KEY - paths: - - ui/node_modules -# allows restoring go mod caches by incomplete prefix. This is useful when re-generating -# cache, but not when running builds and tests that require an exact match. -# TODO should we be including arch in cache key? -restore_go_mod_cache_permissive: - steps: - - restore_cache: - name: Restore closest matching go modules cache - keys: - - &gocachekey v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}} - - v1.5-{{checksum "go.sum"}} -restore_go_mod_cache: - steps: - - restore_cache: - name: Restore exact go modules cache - keys: - - *gocachekey -save_go_mod_cache: - steps: - - save_cache: - name: Save go modules cache - key: *gocachekey - paths: - - /home/circleci/go/pkg/mod -refresh_go_mod_cache: - steps: - - restore_go_mod_cache_permissive - - run: - name: go mod download - command: | - # set GOPATH explicitly to download to the right cache - export GOPATH=$HOME/go - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go mod download -json - ( cd sdk && go mod download -json; ) - ( cd api && go mod download -json; ) - - run: - name: Verify downloading modules did not modify any files - command: | - git --no-pager diff --exit-code || { - echo "ERROR: Files modified by go mod download, see above." - exit 1 - } - - save_go_mod_cache diff --git a/.circleci/config/commands/configure-git.yml b/.circleci/config/commands/configure-git.yml deleted file mode 100644 index b6649864ee03..000000000000 --- a/.circleci/config/commands/configure-git.yml +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -steps: - - add_ssh_keys: - fingerprints: - # "CircleCI Additional SSH Key" associated with hc-github-team-secure-vault-core GitHub user - - "b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9" - - run: | - git config --global url."git@github.com:".insteadOf https://github.com/ diff --git a/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml b/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml deleted file mode 100644 index b4e3f136bdfb..000000000000 --- a/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -description: > - Check if the branch needs to run test-ui and if not, exit. -steps: - - run: - working_directory: ~/ - name: Check branch name - command: | - case "$CIRCLE_BRANCH" in - main|ui/*|backport/ui/*|release/*|merge*) ;; - *) # If the branch being tested doesn't match one of the above patterns, - # we don't need to run test-ui and can abort the job.
- circleci-agent step halt - ;; - esac - - # exit with success either way - exit 0 diff --git a/.circleci/config/commands/exit-if-ui-or-docs-branch.yml b/.circleci/config/commands/exit-if-ui-or-docs-branch.yml deleted file mode 100644 index d50633b4e38e..000000000000 --- a/.circleci/config/commands/exit-if-ui-or-docs-branch.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -description: > - Check if branch name starts with ui/ or docs/ and if so, exit. -steps: - - run: - working_directory: ~/ - name: Check branch name - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 diff --git a/.circleci/config/commands/go_test.yml b/.circleci/config/commands/go_test.yml deleted file mode 100644 index 38d0751f1f77..000000000000 --- a/.circleci/config/commands/go_test.yml +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -description: run go tests -parameters: - extra_flags: - type: string - default: "" - log_dir: - type: string - default: "/tmp/testlogs" - cache_dir: - type: string - default: /tmp/go-cache - save_cache: - type: boolean - default: false - use_docker: - type: boolean - default: false - arch: - type: string - # Only supported for use_docker=false, and only other value allowed is 386 - default: amd64 # must be 386 or amd64 -steps: - - configure-git - - run: - name: Compute test cache key - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_go_mod_cache - - run: - name: Run Go tests - no_output_timeout: 60m - environment: - GOPRIVATE: 'github.com/hashicorp/*' - command: | - set -exo pipefail - - EXTRA_TAGS= - case "<< parameters.extra_flags >>" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - <<# parameters.use_docker >> - USE_DOCKER=1 - <</ parameters.use_docker >> - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running the test-split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=<< parameters.log_dir >> \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one.
- if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d << parameters.cache_dir >> && docker cp << parameters.cache_dir >> ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=<< parameters.arch >> \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - << parameters.extra_flags >> \ - ${all_package_names} - else - GOARCH=<< parameters.arch >> \ - GOCACHE=<< parameters.cache_dir >> \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - << parameters.extra_flags >> \ - ${all_package_names} - fi - - - when: - condition: << parameters.use_docker >> - steps: - - run: - name: Copy test results - when: always - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache << parameters.cache_dir >> - - when: - condition: << parameters.save_cache >> - steps: - - save_cache: - when: always - key: go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - paths: - - << parameters.cache_dir >> diff --git a/.circleci/config/commands/setup-go.yml b/.circleci/config/commands/setup-go.yml deleted file mode 100644 index c773eea82c71..000000000000 --- a/.circleci/config/commands/setup-go.yml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - ---- -description: > - Ensure the right version of Go is installed and set GOPATH to $HOME/go. -parameters: - GOPROXY: - description: > - Set GOPROXY. By default this is set to "off" meaning you have to have all modules pre-downloaded. 
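# For example (a sketch): with GOPROXY=off the Go toolchain will not reach out
# to a module proxy at all, so a cold module cache fails fast:
#   GOPROXY=off go build ./...   # fails if any required module is missing locally
#   go mod download              # pre-populate the module cache first
#   GOPROXY=off go build ./...   # now succeeds entirely offline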
- type: string - default: "off" - GOPRIVATE: - description: Set GOPRIVATE, defaults to github.com/hashicorp/* - type: string - default: github.com/hashicorp/* -steps: - - run: - name: Setup Go - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=<>" >> "$BASH_ENV" - echo "export GOPRIVATE=<>" >> "$BASH_ENV" - - echo "$ go version" - go version diff --git a/.circleci/config/executors/@executors.yml b/.circleci/config/executors/@executors.yml deleted file mode 100644 index 767276bd7b84..000000000000 --- a/.circleci/config/executors/@executors.yml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 -references: - environment: &ENVIRONMENT - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) - GOTESTSUM_VERSION: 0.5.2 # Pin gotestsum to patch version (ex: 1.2.3) - GOFUMPT_VERSION: 0.3.1 # Pin gofumpt to patch version (ex: 1.2.3) - GO_TAGS: "" - GO_IMAGE: &GO_IMAGE "docker.mirror.hashicorp.services/cimg/go:1.20.1" -go-machine: - machine: - image: ubuntu-2004:2022.10.1 - environment: *ENVIRONMENT - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -node: - docker: - - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - environment: - # See https://git.io/vdao3 for details. - JOBS: 2 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -python: - docker: - - image: docker.mirror.hashicorp.services/python:3-alpine - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -semgrep: - docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 - shell: /bin/sh - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test-remote-docker: - resource_class: medium - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test: - resource_class: large - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test-race: - resource_class: xlarge - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault diff --git a/.circleci/config/jobs/build-go-dev.yml b/.circleci/config/jobs/build-go-dev.yml deleted file mode 100644 index 0900c7f74e55..000000000000 --- a/.circleci/config/jobs/build-go-dev.yml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: go-machine -steps: - - checkout - - setup-go - - restore_go_mod_cache - - attach_workspace: - at: . 
- - run: - name: Build dev binary - command: | - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make ci-bootstrap dev - - persist_to_workspace: - root: . - paths: - - bin diff --git a/.circleci/config/jobs/fmt.yml b/.circleci/config/jobs/fmt.yml deleted file mode 100644 index 0a13e0418052..000000000000 --- a/.circleci/config/jobs/fmt.yml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -description: Ensure go formatting is correct. -executor: go-machine -steps: - - checkout - # Setup Go enabling the proxy for downloading modules. - - setup-go: - GOPROXY: https://proxy.golang.org,direct - - run: - name: make fmt - command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" - go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. Run 'make fmt' to fix" - exit 1 - fi diff --git a/.circleci/config/jobs/install-ui-dependencies.yml b/.circleci/config/jobs/install-ui-dependencies.yml deleted file mode 100644 index 48e14380b6c9..000000000000 --- a/.circleci/config/jobs/install-ui-dependencies.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: node -steps: - - checkout - - restore_yarn_cache - - run: - name: Install UI dependencies - command: | - cd ui - yarn install - npm rebuild node-sass - - save_yarn_cache diff --git a/.circleci/config/jobs/pre-flight-checks.yml b/.circleci/config/jobs/pre-flight-checks.yml deleted file mode 100644 index 1b309b4868e0..000000000000 --- a/.circleci/config/jobs/pre-flight-checks.yml +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -description: Ensure nothing obvious is broken, and pre-cache Go modules. -executor: go-machine -steps: - - checkout - # Setup Go enabling the proxy for downloading modules. - - setup-go: - GOPROXY: https://proxy.golang.org,direct - - run: - name: Install CircleCI CLI - environment: - ARCH: linux_amd64 - BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download - command: | - export CCI_PATH=/tmp/circleci-cli/$CIRCLECI_CLI_VERSION - mkdir -p $CCI_PATH - NAME=circleci-cli_${CIRCLECI_CLI_VERSION}_${ARCH} - URL=$BASE/v${CIRCLECI_CLI_VERSION}/${NAME}.tar.gz - curl -sSL $URL \ - | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" - # Add circleci to the path for subsequent steps. - echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV - # Done, print some debug info. - set -x - . $BASH_ENV - which circleci - circleci version - - run: - name: Verify CircleCI - command: | - set -x - . $BASH_ENV - make ci-verify - - configure-git - - refresh_go_mod_cache diff --git a/.circleci/config/jobs/semgrep.yml b/.circleci/config/jobs/semgrep.yml deleted file mode 100644 index 45529bd95f60..000000000000 --- a/.circleci/config/jobs/semgrep.yml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - ---- -executor: semgrep -steps: - - checkout - - attach_workspace: - at: . - - run: - name: Run Semgrep Rules - command: | - # Alpine images can't run the make file due to a bash requirement. Run - # semgrep explicitly here. - export PATH="$HOME/.local/bin:$PATH" - echo -n 'Semgrep Version: ' - semgrep --version - semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci . 
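# To reproduce this check locally, something like the following should be
# roughly equivalent (a sketch; the version and flags mirror the job above):
#   python3 -m pip install 'semgrep==0.113.0'
#   semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .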
diff --git a/.circleci/config/jobs/test-go-nightly.yml b/.circleci/config/jobs/test-go-nightly.yml deleted file mode 100644 index 3b84dc63e5a7..000000000000 --- a/.circleci/config/jobs/test-go-nightly.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: go-machine -steps: - - checkout - - setup-go - - restore_go_mod_cache - - go_test: - log_dir: "/tmp/testlogs" - save_cache: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-race-remote-docker.yml b/.circleci/config/jobs/test-go-race-remote-docker.yml deleted file mode 100644 index 3aafe4487963..000000000000 --- a/.circleci/config/jobs/test-go-race-remote-docker.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: docker-env-go-test-remote-docker -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - setup_remote_docker: - version: 20.10.17 - docker_layer_caching: true - - go_test: - extra_flags: "-race" - log_dir: "/tmp/testlogs" - use_docker: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-race.yml b/.circleci/config/jobs/test-go-race.yml deleted file mode 100644 index 82b358d28ed1..000000000000 --- a/.circleci/config/jobs/test-go-race.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: docker-env-go-test-race -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - go_test: - extra_flags: "-race" - log_dir: "/tmp/testlogs" - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-remote-docker.yml b/.circleci/config/jobs/test-go-remote-docker.yml deleted file mode 100644 index a33473f5ebdc..000000000000 --- a/.circleci/config/jobs/test-go-remote-docker.yml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: docker-env-go-test-remote-docker -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - setup_remote_docker: - version: 20.10.17 - docker_layer_caching: true - - go_test: - log_dir: "/tmp/testlogs" - use_docker: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go.yml b/.circleci/config/jobs/test-go.yml deleted file mode 100644 index b97f2c7b4251..000000000000 --- a/.circleci/config/jobs/test-go.yml +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -executor: docker-env-go-test -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - go_test: - log_dir: "/tmp/testlogs" - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-ui.yml b/.circleci/config/jobs/test-ui.yml deleted file mode 100644 index 33c3706cd09a..000000000000 --- a/.circleci/config/jobs/test-ui.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -executor: node -resource_class: xlarge -steps: - - exit-if-branch-does-not-need-test-ui - - checkout - - restore_yarn_cache - - attach_workspace: - at: . - - run: - name: Test UI - command: | - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}/bin:${PATH}" - - # Run Ember tests - cd ui - mkdir -p test-results/qunit - yarn test:oss - - store_artifacts: - path: ui/test-results - - store_test_results: - path: ui/test-results diff --git a/.circleci/config/workflows/ci.yml b/.circleci/config/workflows/ci.yml deleted file mode 100644 index 433fc7298d51..000000000000 --- a/.circleci/config/workflows/ci.yml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -jobs: - - pre-flight-checks - - fmt - - install-ui-dependencies: - requires: - - pre-flight-checks - - build-go-dev: - requires: - - pre-flight-checks - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - # Only main, UI, release and merge branches need to run UI tests. - # We don't filter here however because test-ui is configured in github as - # required so it must run, instead we short-circuit within test-ui. - - test-go: - requires: - - pre-flight-checks - # We don't filter here because this is a required CI check; - # instead we short-circuit within the test command so it ends quickly. - - test-go-remote-docker: - requires: - - pre-flight-checks - # We don't filter here because this is a required CI check; - # instead we short-circuit within the test command so it ends quickly. - - test-go-race: - requires: - - pre-flight-checks - - test-go-race-remote-docker: - requires: - - pre-flight-checks - - semgrep: - requires: - - pre-flight-checks diff --git a/.copywrite.hcl b/.copywrite.hcl index df52f5c5a299..de148843b44e 100644 --- a/.copywrite.hcl +++ b/.copywrite.hcl @@ -1,15 +1,16 @@ schema_version = 1 project { - license = "MPL-2.0" - copyright_year = 2015 + license = "BUSL-1.1" + copyright_year = 2024 # (OPTIONAL) A list of globs that should not have copyright/license headers. # Supports doublestar glob patterns for more flexibility in defining which # files or folders should be ignored header_ignore = [ - "builtin/credential/aws/pkcs7/**", + "helper/pkcs7/**", "ui/node_modules/**", "enos/modules/k8s_deploy_vault/raft-config.hcl", + "plugins/database/postgresql/scram/**", ] } diff --git a/.github/.secret_scanning.yml b/.github/.secret_scanning.yml new file mode 100644 index 000000000000..470059630471 --- /dev/null +++ b/.github/.secret_scanning.yml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +paths-ignore: + - '**/*.mdx' # any file ending in .mdx + - '**/*.md' # any file ending in .md + - '**/*_test.go' # any file ending in _test.go diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 23958d835280..d313527dcd8b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 contact_links: - name: Ask a question diff --git a/.github/ISSUE_TEMPLATE/plugin-submission.md b/.github/ISSUE_TEMPLATE/plugin-submission.md index 8bed55a04c66..54becc1c9e79 100644 --- a/.github/ISSUE_TEMPLATE/plugin-submission.md +++ b/.github/ISSUE_TEMPLATE/plugin-submission.md @@ -7,7 +7,7 @@ assignees: '' --- -Please provide details for the plugin to be listed. 
All fields are required for a submission to be included in the [Plugin Portal](https://www.vaultproject.io/docs/plugin-portal) page. +Please provide details for the plugin to be listed. All fields are required for a submission to be included in the [Vault Integrations](https://developer.hashicorp.com/vault/integrations) page. **Plugin Information** Name as it would appear listed: diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 000000000000..281951c170bc --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +self-hosted-runner: + # Labels of self-hosted runners as an array of strings + labels: + - small + - medium + - large + - ondemand + - disk_gb=64 + - os=linux + - type=m5.2xlarge + - type=c6a.xlarge + - type=c6a.4xlarge + - ubuntu-20.04 + - custom-linux-small-vault-latest + - custom-linux-medium-vault-latest + - custom-linux-xl-vault-latest diff --git a/.github/actions/build-vault/action.yml b/.github/actions/build-vault/action.yml new file mode 100644 index 000000000000..5e2641344208 --- /dev/null +++ b/.github/actions/build-vault/action.yml @@ -0,0 +1,202 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Build Vault +description: | + Build various Vault binaries and package them into Zip bundles, Deb and RPM packages, + and various container images. Upload the resulting artifacts to Github Actions artifact storage. + This composite action is used across both CE and Ent, thus it should maintain compatibility with + both repositories. + +inputs: + github-token: + type: string + description: An elevated Github token to access private Go modules if necessary. + default: "" + cgo-enabled: + type: number + description: Enable or disable CGO during the build. + default: 0 + create-docker-container: + type: boolean + description: Package the binary into a Docker/AWS container. + default: true + create-redhat-container: + type: boolean + description: Package the binary into a Redhat container. + default: false + create-packages: + type: boolean + description: Package the binaries into deb and rpm formats. + default: true + goos: + type: string + description: The Go GOOS value environment variable to set during the build. + goarch: + type: string + description: The Go GOARCH value environment variable to set during the build. + goarm: + type: string + description: The Go GOARM value environment variable to set during the build. + default: "" + goexperiment: + type: string + description: Which Go experiments to enable. + default: "" + go-tags: + type: string + description: A comma separated list of tags to pass to the Go compiler during build. + default: "" + package-name: + type: string + description: The name to use for the linux packages. + default: ${{ github.event.repository.name }} + vault-binary-name: + type: string + description: The name of the vault binary. + default: vault + vault-edition: + type: string + description: The edition of vault to build. + vault-version: + type: string + description: The version metadata to inject into the build via the linker. + web-ui-cache-key: + type: string + description: The cache key for restoring the pre-built web UI artifact. + +outputs: + vault-binary-path: + description: The location of the built binary. + value: ${{ steps.containerize.outputs.vault-binary-path != '' && steps.containerize.outputs.vault-binary-path || steps.metadata.outputs.binary-path }}
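# The value expression just above emulates a ternary with GitHub Actions'
# `&&`/`||` operators: prefer the containerize output when it is non-empty,
# otherwise fall back to the metadata path. A rough bash analogue of the same
# fallback (a sketch; the variable names are illustrative, not part of the action):
#   vault_binary_path="${containerize_path:-${metadata_path}}"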
+ +runs: + using: composite + steps: + - name: Ensure zstd is available for actions/cache + # actions/cache restores based on cache key and "cache version", the former is unique to the + # build job or web UI, the latter is a hash which is based on the runner OS, the paths being + # cached, and the program used to compress it. Most of our workflows will use zstd to compress + # the cached artifact so we have to have it around for our machines to get both a version match + # and to decompress it. Most runners include zstd by default but there are exceptions, like + # our Ubuntu 20.04 compatibility runners, which do not. + shell: bash + run: which zstd || (sudo apt update && sudo apt install -y zstd) + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ inputs.github-token }} + - uses: ./.github/actions/install-external-tools + - if: inputs.vault-edition != 'ce' + name: Configure Git + shell: bash + run: git config --global url."https://${{ inputs.github-token }}:@github.com".insteadOf "https://github.com" + - name: Restore UI from cache + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + # Restore the UI asset from the UI build workflow. Never use a partial restore key. + enableCrossOsArchive: true + fail-on-cache-miss: true + path: http/web_ui + key: ${{ inputs.web-ui-cache-key }} + - name: Metadata + id: metadata + env: + # We need these for the artifact basename helper + GOARCH: ${{ inputs.goarch }} + GOOS: ${{ inputs.goos }} + VERSION: ${{ inputs.vault-version }} + VERSION_METADATA: ${{ inputs.vault-edition != 'ce' && inputs.vault-edition || '' }} + shell: bash + run: | + if [[ '${{ inputs.vault-edition }}' =~ 'ce' ]]; then + build_step_name='Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }}' + package_version='${{ inputs.vault-version }}' + else + build_step_name='Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + package_version='${{ inputs.vault-version }}+ent' # this should always be +ent here regardless of enterprise edition + fi + { + echo "artifact-basename=$(make ci-get-artifact-basename)" + echo "binary-path=dist/${{ inputs.vault-binary-name }}" + echo "build-step-name=${build_step_name}" + echo "package-version=${package_version}" + } | tee -a "$GITHUB_OUTPUT" + - name: ${{ steps.metadata.outputs.build-step-name }} + env: + CGO_ENABLED: ${{ inputs.cgo-enabled }} + GO_TAGS: ${{ inputs.go-tags }} + GOARCH: ${{ inputs.goarch }} + GOARM: ${{ inputs.goarm }} + GOOS: ${{ inputs.goos }} + GOEXPERIMENT: ${{ inputs.goexperiment }} + GOPRIVATE: github.com/hashicorp + VERSION: ${{ inputs.vault-version }} + VERSION_METADATA: ${{ inputs.vault-edition != 'ce' && inputs.vault-edition || '' }} + shell: bash + run: make ci-build + - if: inputs.vault-edition != 'ce' + shell: bash + run: make ci-prepare-ent-legal + - if: inputs.vault-edition == 'ce' + shell: bash + run: make ci-prepare-ce-legal + - name: Bundle Vault + env: + BUNDLE_PATH: out/${{ steps.metadata.outputs.artifact-basename }}.zip + shell: bash + run: make ci-bundle + - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + name: ${{ steps.metadata.outputs.artifact-basename }}.zip + path: out/${{ steps.metadata.outputs.artifact-basename }}.zip + if-no-files-found: error
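# The packaging steps below consume steps.metadata.outputs.package-version,
# which the Metadata step above normalizes to a single '+ent' suffix for every
# enterprise edition. A plain-bash sketch of that mangling (values illustrative):
#   vault_version=1.15.0 vault_edition=ent.hsm.fips1402
#   if [[ "$vault_edition" =~ ce ]]; then package_version="$vault_version"
#   else package_version="${vault_version}+ent"; fi
#   echo "$package_version"   # -> 1.15.0+ent for every enterprise edition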
+ - if: inputs.create-packages == 'true' + uses: hashicorp/actions-packaging-linux@33f7d23b14f24e6a7b7d9948cb7f5caca2045ee3 + with: + name: ${{ inputs.package-name }} + description: Vault is a tool for secrets management, encryption as a service, and privileged access management. + arch: ${{ inputs.goarch }} + version: ${{ steps.metadata.outputs.package-version }} + maintainer: HashiCorp + homepage: https://github.com/hashicorp/vault + license: BUSL-1.1 + binary: ${{ steps.metadata.outputs.binary-path }} + deb_depends: openssl + rpm_depends: openssl + config_dir: .release/linux/package/ + preinstall: .release/linux/preinst + postinstall: .release/linux/postinst + postremove: .release/linux/postrm + - if: inputs.create-packages == 'true' + id: package-files + name: Determine package file names + shell: bash + run: | + { + echo "rpm-files=$(basename out/*.rpm)" + echo "deb-files=$(basename out/*.deb)" + } | tee -a "$GITHUB_OUTPUT" + - if: inputs.create-packages == 'true' + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + name: ${{ steps.package-files.outputs.rpm-files }} + path: out/${{ steps.package-files.outputs.rpm-files }} + if-no-files-found: error + - if: inputs.create-packages == 'true' + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + name: ${{ steps.package-files.outputs.deb-files }} + path: out/${{ steps.package-files.outputs.deb-files }} + if-no-files-found: error + # Do our containerization last as it will move the binary location if we create containers. + - uses: ./.github/actions/containerize + id: containerize + with: + docker: ${{ inputs.create-docker-container }} + redhat: ${{ inputs.create-redhat-container }} + goarch: ${{ inputs.goarch }} + goos: ${{ inputs.goos }} + vault-binary-path: ${{ steps.metadata.outputs.binary-path }} + vault-edition: ${{ inputs.vault-edition }} + vault-version: ${{ inputs.vault-version }} diff --git a/.github/actions/changed-files/action.yml b/.github/actions/changed-files/action.yml new file mode 100644 index 000000000000..d36c450877f6 --- /dev/null +++ b/.github/actions/changed-files/action.yml @@ -0,0 +1,73 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Determine what files changed between two git references. +description: | + Determine what files have changed between two git references. If the github.event_name is + pull_request we'll compare the github.base_ref (merge target) and pull request head SHA. + For other event types we'll gather the changed files from the most recent commit. This allows + us to support PR and merge workflows. + +outputs: + app-changed: + description: Whether or not the vault Go app was modified. + value: ${{ steps.changed-files.outputs.app-changed }} + docs-changed: + description: Whether or not the documentation was modified. + value: ${{ steps.changed-files.outputs.docs-changed }} + ui-changed: + description: Whether or not the web UI was modified. + value: ${{ steps.changed-files.outputs.ui-changed }} + files: + description: All of the file names that changed. + value: ${{ steps.changed-files.outputs.files }} + +runs: + using: composite + steps: + - id: ref + shell: bash + name: ref + run: | + # Determine our desired checkout ref. + # + # * If the trigger event is pull_request we will default to a magical merge SHA that Github + # creates. This SHA is the product of merging our PR into the merge target branch + # at the point in time when we created the PR.
When you push a change to a PR branch + # Github updates this branch if it can. When you rebase a PR it updates this branch. + # + # * If the trigger event is pull_request and a `checkout-head` tag is present or the + # checkout-head input is set, we'll use HEAD of the PR branch instead of the magical + # merge SHA. + # + # * If the trigger event is a push (merge) then we'll get the latest commit that was pushed. + # + # * For any other event type we'll default to whatever is the default in Github. + if [ '${{ github.event_name }}' = 'pull_request' ]; then + checkout_ref='${{ github.event.pull_request.head.sha }}' + elif [ '${{ github.event_name }}' = 'push' ]; then + # For push (merge) events use the SHA of the commit that was just pushed. + checkout_ref='${{ github.event.after && github.event.after || github.event.push.after }}' + else + checkout_ref='${{ github.ref }}' + fi + echo "ref=${checkout_ref}" | tee -a "$GITHUB_OUTPUT" + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + repository: ${{ github.repository }} + path: "changed-files" + # The fetch-depth could probably be optimized at some point. It's currently set to zero to + # ensure that we have a successful diff, regardless of how many commits might be + # present between the two references we're comparing. It would be nice to change this + # depending on the number of commits by using the push.commits and/or pull_request.commits + # payload fields, however, they have different behavior and limitations. For now we'll do + # the slow but sure thing of getting the whole repository. + fetch-depth: 0 + ref: ${{ steps.ref.outputs.ref }} + - id: changed-files + name: changed-files + # This script writes output values to $GITHUB_OUTPUT and STDOUT + shell: bash + run: ./.github/scripts/changed-files.sh ${{ github.event_name }} ${{ github.ref_name }} ${{ github.base_ref }} + working-directory: changed-files
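# changed-files.sh itself is not included in this diff. Conceptually it diffs
# the two references and classifies the result; a hypothetical sketch of that
# core logic follows (not the actual script; the ui/ and website/ path prefixes
# are assumptions for illustration):
#   event="$1"; ref_name="$2"; base_ref="$3"
#   if [ "$event" = "pull_request" ]; then base="origin/${base_ref}"; else base="HEAD~1"; fi
#   files="$(git diff --name-only "$base" HEAD | tr '\n' ' ')"
#   ui_changed=false; docs_changed=false; app_changed=false
#   for f in $files; do
#     case "$f" in
#       ui/*) ui_changed=true ;;
#       website/*) docs_changed=true ;;
#       *) app_changed=true ;;
#     esac
#   done
#   { echo "app-changed=$app_changed"; echo "docs-changed=$docs_changed";
#     echo "ui-changed=$ui_changed"; echo "files=$files"; } | tee -a "$GITHUB_OUTPUT"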
diff --git a/.github/actions/checkout/action.yml b/.github/actions/checkout/action.yml new file mode 100644 index 000000000000..8b92628f9ba2 --- /dev/null +++ b/.github/actions/checkout/action.yml @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Check out the correct git reference. +description: | + Determine and checkout the correct Git reference depending on the actions event type and tags. + +inputs: + checkout-head: + description: | + Whether or not to check out HEAD on a pull request. This can also be triggered with a + `checkout-head` tag. + default: 'false' + path: + description: Relative path to $GITHUB_WORKSPACE to check out to + default: "" + +outputs: + ref: + description: The git reference that was checked out. + value: ${{ steps.ref.outputs.ref }} + depth: + description: The fetch depth that was checked out. + value: ${{ steps.ref.outputs.depth }} + +runs: + using: composite + steps: + - id: ref + shell: bash + run: | + # Determine our desired checkout ref and fetch depth. Depending on our workflow event + # trigger, inputs, and tags, we'll check out different references at different depths. + # + # * If the trigger event is a pull request we will default to a magical merge SHA that Github + # creates. Essentially, this SHA is the product of merging our PR into the merge target + # branch at some point in time. When you push a change to a PR branch Github updates this + # branch if it can. + # * If the trigger event is a pull request and a `checkout-head` tag is present or the + # checkout-head input is set, we'll use HEAD of the PR branch instead of the magical + # merge SHA. + # * If the trigger event is a push (merge) then we'll get the latest commit that was pushed. + # * For any other event type we'll default to whatever is the default in Github. + # + # Our fetch depth varies depending on which SHA we choose. We normally want to do + # the most shallow clone possible for speed, but we also need to support getting history + # for determining what files have changed, etc. We'll always check out one level deep for + # merges or standard pull requests. If checking out HEAD is requested we'll fetch a deeper + # history because we need all commits on the branch. + # + if [ '${{ github.event_name }}' = 'pull_request' ]; then + if [ '${{ contains(github.event.pull_request.labels.*.name, 'checkout-head') || inputs.checkout-head == 'true' }}' = 'true' ]; then + checkout_ref='${{ github.event.pull_request.head.sha }}' + fetch_depth=0 + else + checkout_ref='${{ github.ref }}' + fetch_depth=1 + fi + elif [ '${{ github.event_name }}' = 'push' ]; then + # For push (merge) events use the SHA of the commit that was just pushed. + checkout_ref='${{ github.event.push.after }}' + fetch_depth=1 + else + checkout_ref='${{ github.ref }}' + fetch_depth=0 + fi + + { + echo "ref=${checkout_ref}" + echo "depth=${fetch_depth}" + } | tee -a "$GITHUB_OUTPUT" + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + path: ${{ inputs.path }} + fetch-depth: ${{ steps.ref.outputs.depth }} + ref: ${{ steps.ref.outputs.ref }} diff --git a/.github/actions/containerize/action.yml b/.github/actions/containerize/action.yml new file mode 100644 index 000000000000..c0809d3afd6f --- /dev/null +++ b/.github/actions/containerize/action.yml @@ -0,0 +1,123 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Containerize Binary +description: | + Containerize vault binaries and annotate them with the correct registry tags. Artifacts will be + uploaded to the Github artifact store. This action is used for both CE and Ent and thus needs to + stay compatible with both repository contexts. + +inputs: + docker: + description: | + Package the binary into a Docker container suitable for the Docker and AWS registries. We'll + automatically determine the correct tags and target depending on the vault edition. + default: 'true' + goarch: + description: The Go GOARCH value environment variable to set during the build. + goos: + description: The Go GOOS value environment variable to set during the build. + redhat: + description: Package the binary into a UBI container suitable for the Redhat Quay registry. + default: 'false' + vault-binary-path: + description: The path to the vault binary. + default: dist/vault + vault-edition: + description: The edition of vault to build. + default: ce + vault-version: + description: The vault version.
+ +outputs: + vault-binary-path: + description: The location of the binary after containerization + value: ${{ inputs.vault-binary-path }} + +runs: + using: composite + steps: + - id: vars + shell: bash + run: | + case '${{ inputs.vault-edition }}' in + "ce") + container_version='${{ inputs.vault-version }}' + docker_container_tags='docker.io/hashicorp/vault:${{ inputs.vault-version }} public.ecr.aws/hashicorp/vault:${{ inputs.vault-version }}' + docker_container_target='default' + redhat_container_tags='quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a:${{ inputs.vault-version }}-ubi' + redhat_container_target='ubi' + ;; + "ent") + container_version='${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + docker_container_tags='docker.io/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition}} public.ecr.aws/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition }}' + docker_container_target='default' + redhat_container_tags='quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2:${{ inputs.vault-version }}-${{ inputs.vault-edition }}-ubi' + redhat_container_target='ubi' + ;; + "ent.hsm") + container_version='${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + docker_container_tags='docker.io/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition}} public.ecr.aws/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition }}' + docker_container_target='ubi-hsm' + redhat_container_tags='quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2:${{ inputs.vault-version }}-${{ inputs.vault-edition }}-ubi' + redhat_container_target='ubi-hsm' + ;; + "ent.hsm.fips1402") + container_version='${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + docker_container_tags='docker.io/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition}} public.ecr.aws/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition }}' + docker_container_target='ubi-hsm-fips' + redhat_container_tags='quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2:${{ inputs.vault-version }}-${{ inputs.vault-edition }}-ubi' + redhat_container_target='ubi-hsm-fips' + ;; + "ent.fips1402") + # NOTE: For compatibility we still publish the ent.fips1402 containers to different + # namespaces. All ent, ent.hsm, and ent.hsm.fips1402 containers are released in the + # enterprise namespaces. After we've updated the upstream docker action to support + # multiple tags we can start to tag images with both namespaces, publish to both, and + # eventually sunset the fips1402 specific namespaces. 
+ container_version='${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + docker_container_tags='docker.io/hashicorp/vault-enterprise-fips:${{ inputs.vault-version }}-${{ inputs.vault-edition }} public.ecr.aws/hashicorp/vault-enterprise-fips:${{ inputs.vault-version }}-${{ inputs.vault-edition }}' + docker_container_target='ubi-fips' + redhat_container_tags='quay.io/redhat-isv-containers/6283f645d02c6b16d9caeb8e:${{ inputs.vault-version }}-${{ inputs.vault-edition }}-ubi' + redhat_container_target='ubi-fips' + ;; + *) + echo "Cannot generate container tags for unknown vault edition: ${{ inputs.vault-edition }}" 1>&2 + exit 1 + ;; + esac + { + echo "container-version=${container_version}" + echo "docker-container-tags=${docker_container_tags}" + echo "docker-container-target=${docker_container_target}" + echo "redhat-container-tags=${redhat_container_tags}" + echo "redhat-container-target=${redhat_container_target}" + echo "revision=$(make ci-get-revision)" + } | tee -a "$GITHUB_OUTPUT" + - if: inputs.docker == 'true' || inputs.redhat == 'true' + id: copy-binary + shell: bash + run: | + dest_path='dist/${{ inputs.goos }}/${{ inputs.goarch }}/vault' + dest_dir=$(dirname "$dest_path") + [[ ! -d "$dest_dir" ]] && mkdir -p "$dest_dir" + [[ ! -f "$dest_path" ]] && cp ${{ inputs.vault-binary-path }} "${dest_path}" + - if: inputs.docker == 'true' + uses: hashicorp/actions-docker-build@f22d5ac7d36868afaa4be1cc1203ec1b5865cadd + with: + arch: ${{ inputs.goarch }} + do_zip_extract_step: 'false' # Don't download and extract an already present binary + target: ${{ steps.vars.outputs.docker-container-target }} + tags: ${{ steps.vars.outputs.docker-container-tags }} + revision: ${{ steps.vars.outputs.revision }} + version: ${{ steps.vars.outputs.container-version }} + - if: inputs.redhat == 'true' + uses: hashicorp/actions-docker-build@f22d5ac7d36868afaa4be1cc1203ec1b5865cadd + with: + arch: ${{ inputs.goarch }} + do_zip_extract_step: 'false' # Don't download and extract an already present binary + redhat_tag: ${{ steps.vars.outputs.redhat-container-tags }} + target: ${{ steps.vars.outputs.redhat-container-target }} + revision: ${{ steps.vars.outputs.revision }} + version: ${{ steps.vars.outputs.container-version }} diff --git a/.github/actions/install-external-tools/action.yml b/.github/actions/install-external-tools/action.yml new file mode 100644 index 000000000000..1357618ce350 --- /dev/null +++ b/.github/actions/install-external-tools/action.yml @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Install external tools for CI +description: Install external tools for CI + +# When possible, prefer installing pre-built external tools for speed. This allows us to avoid +# downloading modules and compiling external tools on CI runners. + +runs: + using: composite + steps: + - uses: ./.github/actions/set-up-buf + with: + version: v1.25.0 # This should match the version in tools/tool.sh + - uses: ./.github/actions/set-up-gofumpt + - uses: ./.github/actions/set-up-gosimports + - uses: ./.github/actions/set-up-gotestsum + - uses: ./.github/actions/set-up-misspell + - uses: ./.github/actions/set-up-shfmt + - uses: ./.github/actions/set-up-staticcheck + # We assume that the Go toolchain will be managed by the caller workflow so we don't set one + # up here.
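# retry-command.sh is referenced throughout these actions but not shown in this
# diff; a typical implementation is a small wrapper along these lines (a sketch,
# not the actual script):
#   #!/usr/bin/env bash
#   tries=0
#   until "$@"; do
#     tries=$((tries + 1))
#     if [ "$tries" -ge 3 ]; then
#       echo "command failed after ${tries} attempts: $*" 1>&2
#       exit 1
#     fi
#     sleep $((tries * 5))
#   done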
+ - run: ./.github/scripts/retry-command.sh go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + shell: bash + - run: ./.github/scripts/retry-command.sh go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4.0 + shell: bash + - run: ./.github/scripts/retry-command.sh go install github.com/favadi/protoc-go-inject-tag@latest + shell: bash + - run: ./.github/scripts/retry-command.sh go install golang.org/x/tools/cmd/goimports@latest + shell: bash + - run: ./.github/scripts/retry-command.sh go install github.com/golangci/revgrep/cmd/revgrep@latest + shell: bash + - run: ./.github/scripts/retry-command.sh go install github.com/loggerhead/enumer@latest + shell: bash diff --git a/.github/actions/metadata/action.yml b/.github/actions/metadata/action.yml new file mode 100644 index 000000000000..f88c7a305aa4 --- /dev/null +++ b/.github/actions/metadata/action.yml @@ -0,0 +1,157 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Gather and export useful workflow metadata information. +description: | + Gather and export metadata about the repository, Github, and any other variable information we + might want for variables or flow control in our various workflows. We centralize it here so as + to have a single point of truth. This workflow also handles checking out the correct Git reference + depending on workflow trigger and tags. This workflow is used in both CE and Ent and thus needs + to maintain compatibility in both execution contexts. + +inputs: + vault-version: + description: | + The version of vault from hashicorp/action-set-product-version. If set we'll utilize this + base version of vault to output complex vault version metadata. If unset those outputs will + not be populated. + default: "" + +outputs: + compute-build: + description: A JSON encoded "runs-on" for app build workflows. + value: ${{ steps.workflow-metadata.outputs.compute-build }} + compute-build-compat: + description: A JSON encoded "runs-on" for app build workflows that need an older glibc to link against. + value: ${{ steps.workflow-metadata.outputs.compute-build-compat }} + compute-build-ui: + description: A JSON encoded "runs-on" for web UI build workflows. + value: ${{ steps.workflow-metadata.outputs.compute-build-ui }} + compute-test-go: + description: A JSON encoded "runs-on" for Go test workflows. + value: ${{ steps.workflow-metadata.outputs.compute-test-go }} + compute-test-ui: + description: A JSON encoded "runs-on" for web UI test workflows. + value: ${{ steps.workflow-metadata.outputs.compute-test-ui }} + compute-small: + description: A JSON encoded "runs-on" for workflows that don't require optimized runners for resource usage. + value: ${{ steps.workflow-metadata.outputs.compute-small }} + go-tags: + description: The minimal set of Go tags required to build the correct edition of Vault. + value: ${{ steps.workflow-metadata.outputs.go-tags }} + is-draft: + description: Whether or not the workflow is executing in the context of a pull request draft. + value: ${{ steps.workflow-metadata.outputs.is-draft }} + is-enterprise: + description: Whether or not the workflow is executing in the context of Vault enterprise. + value: ${{ steps.workflow-metadata.outputs.is-enterprise }} + is-fork: + description: Whether or not the workflow is being triggered on a pull request that is a fork. + value: ${{ steps.workflow-metadata.outputs.is-fork }} + labels: + description: | + A JSON encoded array of pull request label names associated with a commit SHA.
If the workflow + is triggered by a pull_request event then we'll get the label names of the pull request. If + it's triggered by any other event type we'll search for a pull request associated with the + commit SHA and return its label names. + value: ${{ steps.workflow-metadata.outputs.labels }} + vault-build-date: + description: The most recent Git commit date. + value: ${{ steps.vault-metadata.outputs.build-date }} + vault-binary-name: + description: The name of the Vault binary. + value: vault + vault-revision: + description: The most recent Git commit SHA. + value: ${{ steps.vault-metadata.outputs.vault-revision }} + vault-version: + description: The version of vault. + value: ${{ inputs.vault-version }} + vault-version-metadata: + description: The version of vault including edition and other metadata. + value: ${{ steps.workflow-metadata.outputs.vault-version-metadata }} + vault-version-package: + description: The version of vault formatted for Linux distro packages. + value: ${{ steps.vault-metadata.outputs.vault-version-package }} + workflow-trigger: + description: The github event type that triggered the workflow. + value: ${{ steps.workflow-metadata.outputs.workflow-trigger }} + +runs: + using: composite + steps: + - if: inputs.vault-version != '' + id: vault-metadata + name: vault-metadata + env: + VAULT_VERSION: ${{ inputs.vault-version }} + shell: bash + run: | + { + echo "build-date=$(make ci-get-date)" + echo "vault-revision=$(make ci-get-revision)" + echo "vault-version-package=$(make ci-get-version-package)" + } | tee -a "$GITHUB_OUTPUT" + - id: workflow-metadata + name: workflow-metadata + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + if [ '${{ github.event_name }}' = 'pull_request' ]; then + is_draft='${{ github.event.pull_request.draft }}' + + # Determine our pull request labels. We specifically look them up via the pulls API + # because at some point they stopped being reliable in the + # github.event.pull_request.labels.*.name context. + + labels=$(gh api "/repos/${{ github.repository }}/issues/${{ github.event.number }}/labels" | jq -erc '. | map(.name)') + else + is_draft='false' + + # Look up the labels for the pull request that is associated with the last commit. If + # there are none set it as a JSON encoded empty array. + if pr_number=$(gh api "/repos/${{ github.repository }}/commits/${{ github.ref }}/pulls" | jq -erc '.[0].number'); then + if ! labels=$(gh api "/repos/${{ github.repository }}/issues/${pr_number}/labels" | jq -erc '.
| map(.name)'); then + labels='[]' + fi + else + labels='[]' + fi + fi + + { + echo "is-draft=${is_draft}" + echo 'is-fork=${{ github.event.pull_request.head.repo.fork && 'true' || 'false' }}' + echo "labels=${labels}" + echo "workflow-trigger=${{ github.event_name }}" + } | tee -a "$GITHUB_OUTPUT" + + # Set CE and Ent specific workflow metadata + is_enterprise='${{ contains(github.repository, 'vault-enterprise') }}' + if [ "$is_enterprise" = 'true' ]; then + { + echo 'compute-build=["self-hosted","ondemand","os=linux","disk_gb=64","type=c6a.4xlarge"]' + echo 'compute-build-compat=["self-hosted","ubuntu-20.04"]' # for older glibc compatibility, m6a.4xlarge + echo 'compute-build-ui=["self-hosted","ondemand","os=linux", "disk_gb=64", "type=c6a.2xlarge"]' + echo 'compute-test-go=["self-hosted","ondemand","os=linux","disk_gb=64","type=c6a.2xlarge"]' + echo 'compute-test-ui=["self-hosted","ondemand","os=linux","type=m6a.2xlarge"]' + echo 'compute-small=["self-hosted","linux","small"]' + echo 'go-tags=ent,enterprise' + echo 'is-enterprise=true' + echo 'vault-version-metadata=${{ inputs.vault-version }}+ent' + } | tee -a "$GITHUB_OUTPUT" + else + { + echo 'compute-build="custom-linux-medium-vault-latest"' + echo 'compute-build-compat="custom-linux-medium-vault-latest"' + echo 'compute-build-ui="custom-linux-xl-vault-latest"' + echo 'compute-test-go="custom-linux-medium-vault-latest"' + echo 'compute-test-ui="custom-linux-medium-vault-latest"' + echo 'compute-small="ubuntu-latest"' + echo 'go-tags=' + echo 'is-enterprise=false' + echo 'vault-version-metadata=${{ inputs.vault-version }}' + } | tee -a "$GITHUB_OUTPUT" + fi diff --git a/.github/actions/set-up-buf/action.yml b/.github/actions/set-up-buf/action.yml new file mode 100644 index 000000000000..229f5704b6fd --- /dev/null +++ b/.github/actions/set-up-buf/action.yml @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up buf from Github releases +description: Set up buf from Github releases + +inputs: + destination: + description: "Where to install the buf binary (default: $HOME/bin/buf)" + type: string + default: "$HOME/bin/buf" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed buf binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed buf binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of buf + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R bufbuild/buf --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$RUNNER_OS" + if [ "$ARCH" = "x64" ]; then + export ARCH="x86_64" + fi + if [ "$ARCH" = "arm64" ] && [ "$OS" = "Linux" ]; then + export ARCH="aarch64" + fi + if [ "$OS" = "macOS" ]; then + export OS="Darwin" + fi + + mkdir -p tmp + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "buf-${OS}-${ARCH}.tar.gz" -O tmp/buf.tgz -R bufbuild/buf + pushd tmp && tar -xvf buf.tgz && popd + mv tmp/buf/bin/buf "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-go/action.yml b/.github/actions/set-up-go/action.yml new file mode 100644 index 000000000000..548555d26693 --- /dev/null +++ b/.github/actions/set-up-go/action.yml @@ -0,0 +1,82 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up Go with a shared module cache. +description: Set up Go with a shared module cache. + +inputs: + github-token: + description: An elevated Github token to access private modules if necessary.
+ type: string + no-restore: + description: Whether or not to skip restoring the Go module cache on a cache hit (lookup only) + type: boolean + default: false + go-version: + description: "Override .go-version" + type: string + default: "" + +outputs: + cache-key: + description: The Go modules cache key + value: ${{ steps.metadata.outputs.cache-key }} + cache-path: + description: The GOMODCACHE path + value: ${{ steps.metadata.outputs.cache-path }} + go-version: + description: "The version of Go used" + value: ${{ steps.go-version.outputs.go-version }} + +runs: + using: composite + steps: + - id: go-version + shell: bash + run: | + if [ "${{ inputs.go-version }}" = "" ]; then + echo "go-version=$(cat ./.go-version)" | tee -a "$GITHUB_OUTPUT" + else + echo "go-version=${{ inputs.go-version }}" | tee -a "$GITHUB_OUTPUT" + fi + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version: ${{ steps.go-version.outputs.go-version }} + cache: false # We use our own caching strategy + - id: metadata + shell: bash + run: | + { + echo "cache-path=$(go env GOMODCACHE)" + echo "cache-key=go-modules-${{ hashFiles('**/go.sum') }}" + } | tee -a "$GITHUB_OUTPUT" + - id: cache-modules + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + enableCrossOsArchive: true + lookup-only: ${{ inputs.no-restore }} + # We need to be very considerate of our caching strategy because Github only allows 10gb + # of caches per repository before it starts to evict older caches. This is usually fine + # if you only use the actions cache for caching, but we also use it for Go test time results. + # These results are used to balance our Go test groups, without which we could have + # painfully unbalanced Go test execution times. We have to ensure current caches for all + # active release branches and main do not exceed 10gb. Ideally we'd cache Go modules + # and Go build cache on a per version/platform/architecture/tag/module basis, but that + # would result in several hundred gb over all of our build workflows and release branches. + # Instead, we've chosen a middle ground approach where we share Go modules between build + # workflows but lose the Go build cache. + # We intentionally do not use partial restore keys. If we don't get an exact cache hit + # we only want to download the latest modules, not append them to a prior cache. This + # keeps cache upload time, download time, and storage size to a minimum. + path: ${{ steps.metadata.outputs.cache-path }} + key: ${{ steps.metadata.outputs.cache-key }} + - if: steps.cache-modules.outputs.cache-hit != 'true' + name: Download go modules + shell: bash + env: + GOPRIVATE: github.com/hashicorp/* + run: | + git config --global url."https://${{ inputs.github-token }}@github.com".insteadOf https://github.com + make go-mod-download + du -h -d 1 ${{ steps.metadata.outputs.cache-path }}
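# For reference, the cache path computed above is Go's module download cache;
# on a hosted Linux runner it typically resolves like this (a sketch):
#   go env GOMODCACHE                    # e.g. /home/runner/go/pkg/mod
#   du -h -d 1 "$(go env GOMODCACHE)"    # same size summary the step prints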
diff --git a/.github/actions/set-up-gofumpt/action.yml b/.github/actions/set-up-gofumpt/action.yml new file mode 100644 index 000000000000..1e307f763384 --- /dev/null +++ b/.github/actions/set-up-gofumpt/action.yml @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up gofumpt from Github releases +description: Set up gofumpt from Github releases + +inputs: + destination: + description: "Where to install the gofumpt binary (default: $HOME/bin/gofumpt)" + type: string + default: "$HOME/bin/gofumpt" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed gofumpt binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed gofumpt binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of gofumpt + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R mvdan/gofumpt --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "gofumpt_*_${OS}_${ARCH}" -O gofumpt -R mvdan/gofumpt + chmod +x gofumpt + mv gofumpt "$DESTINATION" diff --git a/.github/actions/set-up-gosimports/action.yml b/.github/actions/set-up-gosimports/action.yml new file mode 100644 index 000000000000..7563c2e3b36c --- /dev/null +++ b/.github/actions/set-up-gosimports/action.yml @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up gosimports from Github releases +description: Set up gosimports from Github releases + +inputs: + destination: + description: "Where to install the gosimports binary (default: $HOME/bin/gosimports)" + type: boolean + default: "$HOME/bin/gosimports" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed gosimports binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed gosimports binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of gosimports + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R rinchsan/gosimports --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "version=$VERSION" + echo "destination-dir=$DESTINATION_DIR" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + mkdir -p tmp + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "gosimports_*_${OS}_${ARCH}.tar.gz" -O tmp/gosimports.tgz -R rinchsan/gosimports + pushd tmp && tar -xvf gosimports.tgz && popd + mv tmp/gosimports "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-gotestsum/action.yml b/.github/actions/set-up-gotestsum/action.yml new file mode 100644 index 000000000000..97ceb91742a3 --- /dev/null +++ b/.github/actions/set-up-gotestsum/action.yml @@ -0,0 +1,60 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up gotestsum from Github releases +description: Set up gotestsum from Github releases + +inputs: + destination: + description: "Where to install the gotestsum binary (default: $HOME/bin/gotestsum)" + type: boolean + default: "$HOME/bin/gotestsum" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed gotestsum binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed gotestsum binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of gotestsum + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R gotestyourself/gotestsum --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + + mkdir -p tmp + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "*${OS}_${ARCH}.tar.gz" -O tmp/gotestsum.tgz -R gotestyourself/gotestsum + pushd tmp && tar -xvf gotestsum.tgz && popd + mv tmp/gotestsum "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-misspell/action.yml b/.github/actions/set-up-misspell/action.yml new file mode 100644 index 000000000000..f57ab97f31ad --- /dev/null +++ b/.github/actions/set-up-misspell/action.yml @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up misspell from Github releases +description: Set up misspell from Github releases + +inputs: + destination: + description: "Where to install the misspell binary (default: $HOME/bin/misspell)" + type: boolean + default: "$HOME/bin/misspell" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed misspell binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed misspell binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of misspell + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R golangci/misspell --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "version=$VERSION" + echo "destination-dir=$DESTINATION_DIR" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + mkdir -p tmp + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "misspell_*_${OS}_${ARCH}.tar.gz" -O tmp/misspell.tgz -R golangci/misspell + pushd tmp && tar -xvf misspell.tgz && popd + mv tmp/misspell_"$(echo "$VERSION" | tr -d v)"_${OS}_${ARCH}/misspell "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-shfmt/action.yml b/.github/actions/set-up-shfmt/action.yml new file mode 100644 index 000000000000..c550a3e27ef9 --- /dev/null +++ b/.github/actions/set-up-shfmt/action.yml @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up shfmt from Github releases +description: Set up shfmt from Github releases + +inputs: + destination: + description: "Where to install the shfmt binary (default: $HOME/bin/shfmt)" + type: boolean + default: "$HOME/bin/shfmt" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed shfmt binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed shfmt binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of shfmt + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R mvdan/sh --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "shfmt_*_${OS}_${ARCH}" -O shfmt -R mvdan/sh + chmod +x shfmt + mv shfmt "$DESTINATION" diff --git a/.github/actions/set-up-staticcheck/action.yml b/.github/actions/set-up-staticcheck/action.yml new file mode 100644 index 000000000000..3ec7ac2ae616 --- /dev/null +++ b/.github/actions/set-up-staticcheck/action.yml @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up staticcheck from Github releases +description: Set up staticcheck from Github releases + +inputs: + destination: + description: "Where to install the staticcheck binary (default: $HOME/bin/staticcheck)" + type: boolean + default: "$HOME/bin/staticcheck" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed staticcheck binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed staticcheck binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of staticcheck + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(./.github/scripts/retry-command.sh gh release list -R dominikh/go-tools --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -d " " -f2) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + mkdir -p tmp + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "staticcheck_${OS}_${ARCH}.tar.gz" -O tmp/staticcheck.tgz -R dominikh/go-tools + pushd tmp && tar -xvf staticcheck.tgz && popd + mv tmp/staticcheck/staticcheck "$DESTINATION" + rm -rf tmp diff --git a/.github/configs/milestone-check.json b/.github/configs/milestone-check.json deleted file mode 100644 index a06049b15398..000000000000 --- a/.github/configs/milestone-check.json +++ /dev/null @@ -1,8 +0,0 @@ -[ - { - "type": "check-milestone", - "title": "Milestone Check", - "success": "Milestone set", - "failure": "Milestone not set" - } - ] \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..81bae9acd600 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 + +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/docs/pull_request_template.md b/.github/docs/pull_request_template.md new file mode 100644 index 000000000000..6b9d11b9a109 --- /dev/null +++ b/.github/docs/pull_request_template.md @@ -0,0 +1,8 @@ +### Description +Why is this docs change needed? + +### TODO +- [ ] Preview the changes you made either locally or in the Vercel deployment + and make sure it looks correct. +- [ ] If you've added a new link to the sidebar navigation, make sure it's + sorted correctly. 
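All six set-up-* actions above share the same install flow: resolve a release tag with `gh release list`, normalize the runner's OS and architecture to the Go-style names used in asset filenames, fetch the matching asset with `gh release download`, and put the binary's directory on `$GITHUB_PATH`. A condensed sketch of that shared pattern as a standalone script (not one of the actions themselves; the repository, asset pattern, and destination below are illustrative placeholders, and `uname` stands in for `RUNNER_OS`/`RUNNER_ARCH`):

```sh
#!/usr/bin/env bash
# Sketch of the shared set-up-* install pattern under stated assumptions.
set -euo pipefail

REPO="mvdan/gofumpt"      # hypothetical example repository
VERSION="${1:-Latest}"    # "Latest" matches the marker column in `gh release list`
DEST="${2:-$HOME/bin/tool}"

# Resolve the tag from the (tab-separated) release list.
TAG="$(gh release list -R "$REPO" --exclude-drafts --exclude-pre-releases | grep "$VERSION" | cut -f1)"

# Normalize platform names to the GOOS/GOARCH values used in asset filenames.
OS="$(uname -s | tr '[:upper:]' '[:lower:]')"   # e.g. linux, darwin
ARCH="$(uname -m)"
if [ "$ARCH" = "x86_64" ]; then ARCH="amd64"; fi
if [ "$ARCH" = "aarch64" ]; then ARCH="arm64"; fi

mkdir -p "$(dirname "$DEST")"
gh release download "$TAG" -R "$REPO" --clobber -p "*_${OS}_${ARCH}" -O "$DEST"
chmod +x "$DEST"
```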
diff --git a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json deleted file mode 100644 index ab09a413bad3..000000000000 --- a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 3 - }, - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 4 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 5 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 3 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 5 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 4 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - } - ] -} diff --git a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json deleted file mode 100644 index ec951fdd0a18..000000000000 --- a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 3 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 4 - }, - { - "scenario": "smoke 
backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 5 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 3 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 4 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 5 - } - ] -} diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json deleted file mode 100644 index 70e5ea1c3c24..000000000000 --- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade 
backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - } - ] -} diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json deleted file mode 100644 index e6e9edb10f28..000000000000 --- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 2 - } - ] -} diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000000..4257adb67ed2 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,17 @@ +### Description +What does this PR do? + +### TODO only if you're a HashiCorp employee +- [ ] **Backport Labels:** If this PR is in the ENT repo and needs to be backported, backport + to N, N-1, and N-2, using the `backport/ent/x.x.x+ent` labels. If this PR is in the CE repo, you should only backport to N, using the `backport/x.x.x` label, not the enterprise labels. 
+  - [ ] If this fixes a critical security vulnerability or [severity 1](https://www.hashicorp.com/customer-success/enterprise-support) bug, it will also need to be backported to the current [LTS versions](https://developer.hashicorp.com/vault/docs/enterprise/lts#why-is-there-a-risk-to-updating-to-a-non-lts-vault-enterprise-version) of Vault. To ensure this, use **all** available enterprise labels.
+- [ ] **ENT Breakage:** If this PR either 1) removes a public function OR 2) changes the signature
+  of a public function, even if that change is in a CE file, _double check_ that
+  applying the patch for this PR to the ENT repo and running tests doesn't
+  break any tests. Sometimes ENT only tests rely on public functions in CE
+  files.
+- [ ] **Jira:** If this change has an associated Jira, it's referenced either
+  in the PR description, commit message, or branch name.
+- [ ] **RFC:** If this change has an associated RFC, please link it in the description.
+- [ ] **ENT PR:** If this change has an associated ENT PR, please link it in the
+  description. Also, make sure the changelog is in this PR, _not_ in your ENT PR.
diff --git a/.github/scripts/changed-files.sh b/.github/scripts/changed-files.sh
new file mode 100755
index 000000000000..f44c6fc26b9d
--- /dev/null
+++ b/.github/scripts/changed-files.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+# Determine what files have changed between two git references.
+#
+# * For pull_request event types we compare the merge target (base_ref) with the pull request's
+#   reference (ref_name), which is usually a branch name.
+# * For other event types (push, workflow_call) we don't have a base_ref to merge into, so
+#   instead we compare the last commit with the one before it.
+#
+# Write the resulting metadata to STDOUT and $GITHUB_OUTPUT if it's defined.
+
+event_type=$1 # GH event type (pull_request, push, workflow_call)
+ref_name=$2   # branch reference that triggered the workflow
+base_ref=$3   # PR branch base ref
+
+if [[ "$event_type" == "pull_request" ]]; then
+  git fetch --no-tags --prune origin "$base_ref"
+  head_commit="HEAD"
+  base_commit="origin/$base_ref"
+else
+  git fetch --no-tags --prune origin "$ref_name"
+  head_commit=$(git log "origin/$ref_name" --oneline | head -1 | awk '{print $1}')
+  base_commit=$(git log "origin/$ref_name" --oneline | head -2 | awk 'NR==2 {print $1}')
+fi
+
+docs_changed=false
+ui_changed=false
+app_changed=false
+
+if ! files="$(git diff "${base_commit}...${head_commit}" --name-only)"; then
+  echo "failed to get changed files from git"
+  exit 1
+fi
+
+for file in $(awk -F "/" '{ print $1}' <<< "$files" | uniq); do
+  if [[ "$file" == "changelog" ]]; then
+    continue
+  fi
+
+  if [[ "$file" == "website" ]]; then
+    docs_changed=true
+    continue
+  fi
+
+  if [[ "$file" == "ui" ]]; then
+    ui_changed=true
+    continue
+  fi
+
+  # Anything that isn't either a changelog, ui, or docs change we'll consider an app change.
+  app_changed=true
+done
+
+echo "app-changed=${app_changed}"
+echo "docs-changed=${docs_changed}"
+echo "ui-changed=${ui_changed}"
+echo "files='${files}'"
+[ -n "$GITHUB_OUTPUT" ] && {
+  echo "app-changed=${app_changed}"
+  echo "docs-changed=${docs_changed}"
+  echo "ui-changed=${ui_changed}"
+  # Use a random delimiter for multiline strings.
+ # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings + delimiter="$(openssl rand -hex 8)" + echo "files<<${delimiter}" + echo "${files}" + echo "${delimiter}" +} >> "$GITHUB_OUTPUT" diff --git a/.github/scripts/generate-test-package-lists.sh b/.github/scripts/generate-test-package-lists.sh deleted file mode 100755 index 493a92c8c87c..000000000000 --- a/.github/scripts/generate-test-package-lists.sh +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -# This script is meant to be sourced into the shell running in a Github -# workflow. - -# This script is a temporary measure until we implement a dynamic test-splitting -# solution. It distributes the entire set of test packages into 16 sublists, -# which should roughly take an equal amount of time to complete. - -test_packages=() - -base="github.com/hashicorp/vault" - -# Total time: 526 -test_packages[1]+=" $base/api" -test_packages[1]+=" $base/command" -test_packages[1]+=" $base/sdk/helper/keysutil" - -# Total time: 1160 -test_packages[2]+=" $base/sdk/helper/ocsp" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[2]+=" $base/vault/external_tests/replication-perf" -fi - -# Total time: 1009 -test_packages[3]+=" $base/builtin/credential/approle" -test_packages[3]+=" $base/command/agent/sink/file" -test_packages[3]+=" $base/command/agent/template" -test_packages[3]+=" $base/helper/random" -test_packages[3]+=" $base/helper/storagepacker" -test_packages[3]+=" $base/sdk/helper/certutil" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[3]+=" $base/vault/external_tests/entropy" -fi -test_packages[3]+=" $base/vault/external_tests/raft" - -# Total time: 830 -test_packages[4]+=" $base/builtin/plugin" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[4]+=" $base/enthelpers/fsm" -fi -test_packages[4]+=" $base/http" -test_packages[4]+=" $base/sdk/helper/pluginutil" -test_packages[4]+=" $base/serviceregistration/kubernetes" -test_packages[4]+=" $base/tools/godoctests/pkg/analyzer" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[4]+=" $base/vault/external_tests/apilock" - test_packages[4]+=" $base/vault/external_tests/filteredpaths" - test_packages[4]+=" $base/vault/external_tests/perfstandby" - test_packages[4]+=" $base/vault/external_tests/replication-dr" -fi - - -# Total time: 258 -test_packages[5]+=" $base/builtin/credential/aws" -test_packages[5]+=" $base/builtin/credential/cert" -test_packages[5]+=" $base/builtin/logical/aws" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[5]+=" $base/enthelpers/logshipper" - test_packages[5]+=" $base/enthelpers/merkle" -fi -test_packages[5]+=" $base/helper/hostutil" -test_packages[5]+=" $base/helper/pgpkeys" -test_packages[5]+=" $base/sdk/physical/inmem" -test_packages[5]+=" $base/vault/activity" -test_packages[5]+=" $base/vault/diagnose" -test_packages[5]+=" $base/vault/external_tests/pprof" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[5]+=" $base/vault/external_tests/resolver" -fi -test_packages[5]+=" $base/vault/external_tests/response" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[5]+=" $base/vault/external_tests/seal" -fi -test_packages[5]+=" $base/vault/external_tests/sealmigration" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[5]+=" $base/vault/external_tests/transform" -fi - -# Total time: 588 -test_packages[6]+=" $base" -test_packages[6]+=" $base/audit" -test_packages[6]+=" $base/builtin/audit/file" -test_packages[6]+=" 
$base/builtin/credential/github" -test_packages[6]+=" $base/builtin/credential/okta" -test_packages[6]+=" $base/builtin/logical/database/dbplugin" -test_packages[6]+=" $base/command/agent/auth/cert" -test_packages[6]+=" $base/command/agent/auth/jwt" -test_packages[6]+=" $base/command/agent/auth/kerberos" -test_packages[6]+=" $base/command/agent/auth/kubernetes" -test_packages[6]+=" $base/command/agent/auth/token-file" -test_packages[6]+=" $base/command/agent/cache" -test_packages[6]+=" $base/command/agent/cache/cacheboltdb" -test_packages[6]+=" $base/command/agent/cache/cachememdb" -test_packages[6]+=" $base/command/agent/cache/keymanager" -test_packages[6]+=" $base/command/agent/config" -test_packages[6]+=" $base/command/config" -test_packages[6]+=" $base/command/token" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[6]+=" $base/enthelpers/namespace" - test_packages[6]+=" $base/enthelpers/replicatedpaths" - test_packages[6]+=" $base/enthelpers/sealrewrap" -fi -test_packages[6]+=" $base/helper/builtinplugins" -test_packages[6]+=" $base/helper/dhutil" -test_packages[6]+=" $base/helper/fairshare" -test_packages[6]+=" $base/helper/flag-kv" -test_packages[6]+=" $base/helper/flag-slice" -test_packages[6]+=" $base/helper/forwarding" -test_packages[6]+=" $base/helper/logging" -test_packages[6]+=" $base/helper/metricsutil" -test_packages[6]+=" $base/helper/namespace" -test_packages[6]+=" $base/helper/osutil" -test_packages[6]+=" $base/helper/parseip" -test_packages[6]+=" $base/helper/policies" -test_packages[6]+=" $base/helper/testhelpers/logical" -test_packages[6]+=" $base/helper/timeutil" -test_packages[6]+=" $base/helper/useragent" -test_packages[6]+=" $base/helper/versions" -test_packages[6]+=" $base/internalshared/configutil" -test_packages[6]+=" $base/internalshared/listenerutil" -test_packages[6]+=" $base/physical/alicloudoss" -test_packages[6]+=" $base/physical/gcs" -test_packages[6]+=" $base/physical/manta" -test_packages[6]+=" $base/physical/mssql" -test_packages[6]+=" $base/physical/oci" -test_packages[6]+=" $base/physical/s3" -test_packages[6]+=" $base/physical/spanner" -test_packages[6]+=" $base/physical/swift" -test_packages[6]+=" $base/physical/zookeeper" -test_packages[6]+=" $base/plugins/database/hana" -test_packages[6]+=" $base/plugins/database/redshift" -test_packages[6]+=" $base/sdk/database/dbplugin/v5" -test_packages[6]+=" $base/sdk/database/helper/credsutil" -test_packages[6]+=" $base/sdk/helper/authmetadata" -test_packages[6]+=" $base/sdk/helper/compressutil" -test_packages[6]+=" $base/sdk/helper/cryptoutil" -test_packages[6]+=" $base/sdk/helper/identitytpl" -test_packages[6]+=" $base/sdk/helper/kdf" -test_packages[6]+=" $base/sdk/helper/locksutil" -test_packages[6]+=" $base/sdk/helper/pathmanager" -test_packages[6]+=" $base/sdk/helper/roottoken" -test_packages[6]+=" $base/sdk/helper/testhelpers/schema" -test_packages[6]+=" $base/sdk/helper/xor" -test_packages[6]+=" $base/sdk/physical/file" -test_packages[6]+=" $base/sdk/plugin/pb" -test_packages[6]+=" $base/serviceregistration/kubernetes/client" -test_packages[6]+=" $base/shamir" -test_packages[6]+=" $base/vault/cluster" -test_packages[6]+=" $base/vault/eventbus" -test_packages[6]+=" $base/vault/external_tests/api" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[6]+=" $base/vault/external_tests/consistencyheaders" -fi -test_packages[6]+=" $base/vault/external_tests/expiration" -test_packages[6]+=" $base/vault/external_tests/hcp_link" -test_packages[6]+=" $base/vault/external_tests/kv" -if [ 
"${ENTERPRISE:+x}" == "x" ] ; then - test_packages[6]+=" $base/vault/external_tests/plugins" -fi -test_packages[6]+=" $base/vault/external_tests/quotas" -test_packages[6]+=" $base/vault/seal" - -# Total time: 389 -test_packages[7]+=" $base/builtin/credential/userpass" -test_packages[7]+=" $base/builtin/logical/pki" -test_packages[7]+=" $base/builtin/logical/transit" -test_packages[7]+=" $base/command/agent" -test_packages[7]+=" $base/helper/monitor" -test_packages[7]+=" $base/sdk/database/helper/connutil" -test_packages[7]+=" $base/sdk/database/helper/dbutil" -test_packages[7]+=" $base/sdk/helper/cidrutil" -test_packages[7]+=" $base/sdk/helper/custommetadata" -test_packages[7]+=" $base/sdk/helper/jsonutil" -test_packages[7]+=" $base/sdk/helper/ldaputil" -test_packages[7]+=" $base/sdk/helper/logging" -test_packages[7]+=" $base/sdk/helper/policyutil" -test_packages[7]+=" $base/sdk/helper/salt" -test_packages[7]+=" $base/sdk/helper/template" -test_packages[7]+=" $base/sdk/helper/useragent" -test_packages[7]+=" $base/sdk/logical" -test_packages[7]+=" $base/sdk/plugin/mock" -test_packages[7]+=" $base/sdk/queue" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[7]+=" $base/vault/autosnapshots" - test_packages[7]+=" $base/vault/external_tests/activity" -fi -test_packages[7]+=" $base/vault/external_tests/approle" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[7]+=" $base/vault/external_tests/kmip" -fi -test_packages[7]+=" $base/vault/external_tests/mfa" -test_packages[7]+=" $base/vault/external_tests/misc" -test_packages[7]+=" $base/vault/quotas" - -# Total time: 779 -test_packages[8]+=" $base/builtin/credential/aws/pkcs7" -test_packages[8]+=" $base/builtin/logical/totp" -test_packages[8]+=" $base/command/agent/auth" -test_packages[8]+=" $base/physical/raft" -test_packages[8]+=" $base/sdk/framework" -test_packages[8]+=" $base/sdk/plugin" -test_packages[8]+=" $base/vault" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[8]+=" $base/vault/external_tests/barrier" - test_packages[8]+=" $base/vault/external_tests/cubbyholes" -fi -test_packages[8]+=" $base/vault/external_tests/metrics" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[8]+=" $base/vault/external_tests/replication" -fi -test_packages[8]+=" $base/vault/external_tests/router" -if [ "${ENTERPRISE:+x}" == "x" ] ; then - test_packages[8]+=" $base/vault/external_tests/system" - test_packages[8]+=" $base/vault/managed_key" -fi - -# Total time: 310 -test_packages[9]+=" $base/vault/hcp_link/capabilities/api_capability" -test_packages[9]+=" $base/vault/external_tests/plugin" - -# Total time: 925 -test_packages[10]+=" $base/builtin/credential/ldap" -test_packages[10]+=" $base/builtin/logical/database" -test_packages[10]+=" $base/physical/etcd" -test_packages[10]+=" $base/physical/postgresql" - -# Total time: 851 -test_packages[11]+=" $base/builtin/logical/rabbitmq" -test_packages[11]+=" $base/physical/dynamodb" -test_packages[11]+=" $base/plugins/database/influxdb" -test_packages[11]+=" $base/vault/external_tests/identity" -test_packages[11]+=" $base/vault/external_tests/token" - -# Total time: 340 -test_packages[12]+=" $base/builtin/logical/consul" -test_packages[12]+=" $base/physical/couchdb" -test_packages[12]+=" $base/plugins/database/mongodb" -test_packages[12]+=" $base/plugins/database/mssql" -test_packages[12]+=" $base/plugins/database/mysql" - -# Total time: 704 -test_packages[13]+=" $base/builtin/logical/pkiext" -test_packages[13]+=" $base/command/server" -test_packages[13]+=" $base/physical/aerospike" 
-test_packages[13]+=" $base/physical/cockroachdb"
-test_packages[13]+=" $base/plugins/database/postgresql"
-if [ "${ENTERPRISE:+x}" == "x" ] ; then
-    test_packages[13]+=" $base/vault/external_tests/filteredpathsext"
-fi
-test_packages[13]+=" $base/vault/external_tests/policy"
-
-# Total time: 374
-test_packages[14]+=" $base/builtin/credential/radius"
-test_packages[14]+=" $base/builtin/logical/ssh"
-if [ "${ENTERPRISE:+x}" == "x" ] ; then
-    test_packages[14]+=" $base/enthelpers/wal"
-fi
-test_packages[14]+=" $base/physical/azure"
-test_packages[14]+=" $base/serviceregistration/consul"
-if [ "${ENTERPRISE:+x}" == "x" ] ; then
-    test_packages[14]+=" $base/vault/external_tests/quotas-docker"
-fi
-test_packages[14]+=" $base/vault/external_tests/raftha"
-
-# Total time: 362
-test_packages[15]+=" $base/builtin/logical/nomad"
-test_packages[15]+=" $base/physical/mysql"
-test_packages[15]+=" $base/plugins/database/cassandra"
-if [ "${ENTERPRISE:+x}" == "x" ] ; then
-    test_packages[15]+=" $base/vault/external_tests/namespaces"
-fi
-test_packages[15]+=" $base/vault/external_tests/sealmigrationext"
-
-# Total time: 635
-test_packages[16]+=" $base/physical/cassandra"
-test_packages[16]+=" $base/physical/consul"
-if [ "${ENTERPRISE:+x}" == "x" ] ; then
-    test_packages[16]+=" $base/vault/external_tests/autosnapshots"
-    test_packages[16]+=" $base/vault/external_tests/replicationext"
-    test_packages[16]+=" $base/vault/external_tests/sealext"
-fi
diff --git a/.github/scripts/gh-comment.sh b/.github/scripts/gh-comment.sh
new file mode 100644
index 000000000000..111ed97e0306
--- /dev/null
+++ b/.github/scripts/gh-comment.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+function update_or_create_comment {
+  REPO=$1
+  PR_NUMBER=$2
+  SEARCH_KEY=$3
+  BODY=$4
+
+  # We only want the GH bot to place one comment to report build failures,
+  # and if we rerun a job, that comment needs to be updated.
+  # Let's try to find out whether the GH bot has already placed a similar comment.
+  comment_id=$(gh api \
+    -H "Accept: application/vnd.github+json" \
+    -H "X-GitHub-Api-Version: 2022-11-28" \
+    --paginate \
+    /repos/hashicorp/"$REPO"/issues/"$PR_NUMBER"/comments |
+    jq -r --arg SEARCH_KEY "$SEARCH_KEY" '.[] | select (.body | startswith($SEARCH_KEY)) | .id')
+
+  if [[ "$comment_id" != "" ]]; then
+    # update the comment with the new body
+    gh api \
+      --method PATCH \
+      -H "Accept: application/vnd.github+json" \
+      -H "X-GitHub-Api-Version: 2022-11-28" \
+      /repos/hashicorp/"$REPO"/issues/comments/"$comment_id" \
+      -f body="$BODY"
+  else
+    # create a comment with the new body
+    gh api \
+      --method POST \
+      -H "Accept: application/vnd.github+json" \
+      -H "X-GitHub-Api-Version: 2022-11-28" \
+      /repos/hashicorp/"$REPO"/issues/"$PR_NUMBER"/comments \
+      -f body="$BODY"
+  fi
+}
diff --git a/.github/scripts/report-build-status.sh b/.github/scripts/report-build-status.sh
new file mode 100755
index 000000000000..8b92534a2c9d
--- /dev/null
+++ b/.github/scripts/report-build-status.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+
+set -e
+
+# All of these environment variables are required or an error will be returned.
+[ "${GITHUB_TOKEN:?}" ]
+[ "${PR_NUMBER:?}" ]
+[ "${REPO:?}" ]
+[ "${RUN_ID:?}" ]
+
+# list of build jobs
+[ "${ARTIFACTS:?}" ]
+[ "${TEST:?}" ]
+[ "${TEST_CONTAINERS:?}" ]
+[ "${UI:?}" ]
+
+# Build jobs
+jobs=("artifacts:$ARTIFACTS" "test:$TEST" "test-containers:$TEST_CONTAINERS" "ui:$UI")
+
+# Sometimes failed jobs can have a result of "cancelled". Handle both.
+failed_jobs=()
+for job in "${jobs[@]}";do
+  if [[ "$job" == *"failure"* || "$job" == *"cancelled"* ]]; then
+    failed_jobs+=("$job")
+  fi
+done
+
+# Create a comment body to set on the pull request which reports failed jobs with a url to the
+# failed workflow.
+if [ ${#failed_jobs[@]} -eq 0 ]; then
+  new_body="Build Results:
+All builds succeeded! :white_check_mark:"
+else
+  new_body="Build Results:
+Build failed for these jobs: ${failed_jobs[*]}. Please refer to this workflow to learn more: https://github.com/hashicorp/vault/actions/runs/$RUN_ID"
+fi
+
+source ./.github/scripts/gh-comment.sh
+
+update_or_create_comment "$REPO" "$PR_NUMBER" "Build Results:" "$new_body"
diff --git a/.github/scripts/report-ci-status.sh b/.github/scripts/report-ci-status.sh
new file mode 100755
index 000000000000..39a9ca7aee87
--- /dev/null
+++ b/.github/scripts/report-ci-status.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+MAX_TESTS=10
+
+# All of these environment variables are required or an error will be returned.
+[ "${GITHUB_TOKEN:?}" ]
+[ "${RUN_ID:?}" ]
+[ "${REPO:?}" ]
+[ "${PR_NUMBER:?}" ]
+[ "${RESULT:?}" ]
+
+table_data() {
+  if [ -z "$TABLE_DATA" ]; then
+    return 0
+  fi
+
+  # Remove any rows that don't have a test name
+  # Only keep the test type, test package, test name, and logs column
+  # Remove the scroll emoji
+  # Remove "github.com/hashicorp/vault" from the package name
+  TABLE_DATA=$(echo "$TABLE_DATA" | awk -F\| '{if ($4 != " - ") { print "|" $2 "|" $3 "|" $4 "|" $7 }}' | sed -r 's/ :scroll://' | sed -r 's/github.com\/hashicorp\/vault\///')
+  NUM_FAILURES=$(wc -l <<< "$TABLE_DATA")
+
+  # Check if the number of failures is greater than the maximum tests to display
+  # If so, limit the table to MAX_TESTS number of results
+  if [ "$NUM_FAILURES" -gt "$MAX_TESTS" ]; then
+    TABLE_DATA=$(echo "$TABLE_DATA" | head -n "$MAX_TESTS")
+    NUM_OTHER=$((NUM_FAILURES - MAX_TESTS))
+    TABLE_DATA="${TABLE_DATA}
+
+and ${NUM_OTHER} other tests"
+  fi
+
+  # Add the header for the table
+  printf "%s" "Failures:
+| Test Type | Package | Test | Logs |
+| --------- | ------- | ---- | ---- |
+${TABLE_DATA}"
+}
+
+td="$(table_data)"
+
+case "$RESULT" in
+  success)
+    if [ -z "$td" ]; then
+      BODY="CI Results:
+All Go tests succeeded! :white_check_mark:"
+    else
+      BODY="CI Results:
+All required Go tests succeeded but failures were detected :warning:
+${td}"
+    fi
+    ;;
+  *)
+    BODY="CI Results: ${RESULT} :x:
+${td}"
+    ;;
+esac
+
+source ./.github/scripts/gh-comment.sh
+
+update_or_create_comment "$REPO" "$PR_NUMBER" "CI Results:" "$BODY"
+# SPDX-License-Identifier: BUSL-1.1 +set -uo pipefail + +tries=5 +count=0 + +until "$@" +do + if [ $count -eq $tries ]; then + echo "tried $count times, exiting" + exit 1 + fi + ((count++)) + echo "trying again, attempt $count" + sleep $count +done diff --git a/.github/scripts/test-generate-test-package-lists.sh b/.github/scripts/test-generate-test-package-lists.sh deleted file mode 100755 index c3d1cb60670b..000000000000 --- a/.github/scripts/test-generate-test-package-lists.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -set -e${DEBUG+x}o pipefail - -# -# This script is run to make sure that every package returned by -# go list -test ./... (when run from the repo root, api/, and sdk/ directory) -# appear in the test_packages array defined in the sibling file -# generate-test-package-lists.sh -# -# This script is executed as part of the ci workflow triggered by pull_requests -# events. In the event that the job that runs this script fails, examine the -# output of the 'test' step in that job to obtain the list of test packages that -# are missing in the test_packages array or that should be removed from it. -# - -dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - -source generate-test-package-lists.sh - -get_module_packages() { - local package_list=($(go list -test -json ./... | jq -r '.ForTest | select(.!=null)' | grep -v vault/integ | grep '^github.com/hashicorp/')) - - for package in "${package_list[@]}" ; do - # Check if the current package already exists in all_packages - if ! grep "\b$package\b" <<< "${all_packages[@]}" &> /dev/null; then - all_packages+=($package) - fi - done -} - -find_packages() { - for package in "${all_packages[@]}" ; do - if ! grep "\b${package}\b" <<< "${test_packages[@]}" &> /dev/null ; then - echo "Error: package ${package} is not present in test_packages" - exit 1 - fi - done -} - -count_test_packages() { - count=0 - for test_package in "${test_packages[@]}" ; do - count=$((${count}+$(wc -w <<< "${test_package}"))) - done - - echo $count -} - -all_packages=() - -cd "$dir/../.." 
-get_module_packages - -cd "$dir/../../sdk" -get_module_packages - -cd "$dir/../../api" -get_module_packages - -find_packages - -test_package_count=$(count_test_packages) -if (( ${#all_packages[@]} != $test_package_count )) ; then - echo "Error: there are currently ${#all_packages[@]} packages in the repository but $test_package_count packages in test_packages" - - unused_packages="${test_packages[@]} " - for ap in ${all_packages[@]} ; do - unused_packages="$(echo "$unused_packages" | sed -r "s~$ap ~ ~" )" - done - - echo "Packages in test_packages that aren't used: ${unused_packages// /}" -fi diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml new file mode 100644 index 000000000000..1b8dcd89af04 --- /dev/null +++ b/.github/workflows/actionlint.yml @@ -0,0 +1,22 @@ +name: Lint GitHub Actions Workflows +on: + pull_request: + paths: + - '.github/**' + types: [opened, synchronize, reopened, ready_for_review] + +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + actionlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: "Check workflow files" + uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint@sha256:93834930f56ca380be3e9a3377670d7aa5921be251b9c774891a39b3629b83b8 + with: + # milestoned and demilestoned work (https://github.com/github/docs/issues/23909) but they aren't listed in the github documentation, so actionlint complains about them + args: "-ignore=\"invalid activity type \\\"demilestoned\\\" for \\\"pull_request\\\" Webhook event\" -ignore=\"invalid activity type \\\"milestoned\\\" for \\\"pull_request\\\" Webhook event\"" diff --git a/.github/workflows/add-hashicorp-contributed-label.yml b/.github/workflows/add-hashicorp-contributed-label.yml new file mode 100644 index 000000000000..379b8cc9c8ca --- /dev/null +++ b/.github/workflows/add-hashicorp-contributed-label.yml @@ -0,0 +1,26 @@ +name: Add HashiCorp contributed label + +# The purpose of this job is to label all HashiCorp contributed PRs, so that +# we can more easily identify community contributed PRs (anything that doesn't +# have this label). +# While it might seem like this is the 'reverse' of what we should do, GitHub +# (rightly) does not allow branches from forks to have write permissions, so +# making PRs from forks self-label themselves as community-contributed is not +# possible. 
+ +on: + # On every pull request, on every branch + pull_request: + types: [opened, synchronize, reopened] + +jobs: + add-hashicorp-contributed-label: + # Only run if this is NOT coming from a fork of hashicorp/vault (if this is not true, it's community contributed) + if: ${{ github.repository == 'hashicorp/vault' && (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name) }} + runs-on: ubuntu-latest + steps: + - name: "Add label to PR" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR: ${{ github.event.pull_request.html_url }} + run: gh pr edit "$PR" --add-label 'hashicorp-contributed-pr' diff --git a/.github/workflows/backport-ce.yml b/.github/workflows/backport-ce.yml new file mode 100644 index 000000000000..ef684918fa36 --- /dev/null +++ b/.github/workflows/backport-ce.yml @@ -0,0 +1,37 @@ +--- +name: Backport Assistant Runner (for OSS & ENT) + +on: + pull_request_target: + types: + - closed + - labeled + repository_dispatch: + types: [ent-backport] + +jobs: + backport-targeted-release-branch: + if: github.event.pull_request.merged && github.repository == 'hashicorp/vault' + runs-on: ubuntu-latest + container: hashicorpdev/backport-assistant:0.4.3 + steps: + - name: Backport changes to targeted release branch + run: | + backport-assistant backport -merge-method=squash -gh-automerge + env: + BACKPORT_LABEL_REGEXP: "backport/(?P\\d+\\.\\d+\\.[+\\w]+)" + BACKPORT_TARGET_TEMPLATE: "release/{{.target}}" + BACKPORT_MERGE_COMMIT: true + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + ENABLE_VERSION_MANIFESTS: true + backport-ent: + if: github.event.pull_request.merged && contains(join(github.event.pull_request.labels.*.name), 'backport/ent') + runs-on: ubuntu-latest + steps: + - name: Trigger backport for Enterprise + uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0 + with: + token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + repository: hashicorp/vault-enterprise + event-type: ent-backport + client-payload: ${{ toJson(github.event) }} diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml deleted file mode 100644 index c2f347e57081..000000000000 --- a/.github/workflows/backport.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -name: Backport Assistant Runner (for OSS & ENT) - -on: - pull_request_target: - types: - - closed - - labeled - -jobs: - backport-targeted-release-branch: - if: github.event.pull_request.merged - runs-on: ubuntu-latest - container: hashicorpdev/backport-assistant:0.3.0 - steps: - - name: Backport changes to targeted release branch - run: | - backport-assistant backport -merge-method=squash -gh-automerge - env: - BACKPORT_LABEL_REGEXP: "backport/(?P\\d+\\.\\d+\\.[+\\w]+)" - BACKPORT_TARGET_TEMPLATE: "release/{{.target}}" - BACKPORT_MERGE_COMMIT: true - GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} diff --git a/.github/workflows/build-artifacts-ce.yml b/.github/workflows/build-artifacts-ce.yml new file mode 100644 index 000000000000..0d0a0731bc50 --- /dev/null +++ b/.github/workflows/build-artifacts-ce.yml @@ -0,0 +1,267 @@ +name: ce + +# The inputs and outputs for this workflow have been carefully defined as a sort of workflow +# interface as defined in the build.yml workflow. The inputs and outputs here must be consistent +# across the build-artifacts-ce workflow and the build-artifacts-ent workflow. 
+
+on:
+  workflow_dispatch:
+    inputs:
+      build-all:
+        type: boolean
+        description: Build all extended artifacts
+        default: false
+      build-date:
+        type: string
+        description: The date associated with the revision SHA
+        required: true
+      checkout-ref:
+        type: string
+        description: The repo Git SHA to checkout
+        default: ""
+      compute-build:
+        type: string # JSON encoded to support passing arrays
+        description: A JSON encoded "runs-on" for build workflows
+        required: true
+      compute-build-compat:
+        type: string # JSON encoded to support passing arrays
+        description: A JSON encoded "runs-on" for build workflows that need older glibc
+        required: true
+      compute-small:
+        type: string # JSON encoded to support passing arrays
+        description: A JSON encoded "runs-on" for non-resource-intensive workflows
+        required: true
+      vault-revision:
+        type: string
+        description: The revision SHA of vault
+        required: true
+      vault-version:
+        type: string
+        description: The version of vault
+        required: true
+      vault-version-package:
+        type: string
+        description: The version of vault formatted for the Debian and RPM packages
+        required: true
+      web-ui-cache-key:
+        type: string
+        description: The UI asset cache key
+        required: true
+  workflow_call:
+    inputs:
+      build-all:
+        type: boolean
+        default: false
+      build-date:
+        type: string
+        required: true
+      checkout-ref:
+        type: string
+        default: ""
+      compute-build:
+        type: string # JSON encoded to support passing arrays
+        description: A JSON encoded "runs-on" for build workflows
+        required: true
+      compute-build-compat:
+        type: string # JSON encoded to support passing arrays
+        description: A JSON encoded "runs-on" for build workflows that need older glibc
+        required: true
+      compute-small:
+        type: string # JSON encoded to support passing arrays
+        description: A JSON encoded "runs-on" for non-resource-intensive workflows
+        required: true
+      vault-revision:
+        type: string
+        required: true
+      vault-version:
+        type: string
+        required: true
+      vault-version-package:
+        type: string
+        required: true
+      web-ui-cache-key:
+        type: string
+        required: true
+    outputs:
+      testable-containers:
+        value: ${{ jobs.core.outputs.testable-containers }}
+      testable-packages:
+        value: ${{ jobs.core.outputs.testable-packages }}
+
+jobs:
+  # Core are the Linux builds that are officially supported and tested as part of the normal
+  # CI/CD pipeline.
+  core:
+    strategy:
+      matrix:
+        include:
+          - goos: linux
+            goarch: amd64
+            redhat: true
+          - goos: linux
+            goarch: arm64
+            redhat: true
+      fail-fast: true
+    runs-on: ${{ fromJSON(inputs.compute-build) }}
+    name: (${{ matrix.goos }}, ${{ matrix.goarch }})
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        with:
+          ref: ${{ inputs.checkout-ref }}
+      - uses: ./.github/actions/build-vault
+        with:
+          cgo-enabled: 0
+          create-docker-container: true
+          create-packages: true
+          create-redhat-container: ${{ matrix.redhat }}
+          github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
+          goarch: ${{ matrix.goarch }}
+          goos: ${{ matrix.goos }}
+          go-tags: ui
+          vault-binary-name: vault
+          vault-edition: ce
+          vault-version: ${{ inputs.vault-version }}
+          web-ui-cache-key: ${{ inputs.web-ui-cache-key }}
+    outputs:
+      # Outputs are strings so we need to encode our collection outputs as JSON.
+      testable-containers: |
+        [
+          {
+            "sample": "ce_default_linux_amd64_ce_docker",
+            "artifact": "${{ github.event.repository.name }}_default_linux_amd64_${{ inputs.vault-version }}_${{ inputs.vault-revision }}.docker.tar",
+            "edition": "ce"
+          },
+          {
+            "sample": "ce_default_linux_arm64_ce_docker",
+            "artifact": "${{ github.event.repository.name }}_default_linux_arm64_${{ inputs.vault-version }}_${{ inputs.vault-revision }}.docker.tar",
+            "edition": "ce"
+          },
+          {
+            "sample": "ce_ubi_linux_amd64_ce_redhat",
+            "artifact": "${{ github.event.repository.name}}_ubi_linux_amd64_${{ inputs.vault-version}}_${{ inputs.vault-revision }}.docker.redhat.tar",
+            "edition": "ce"
+          },
+          {
+            "sample": "ce_ubi_linux_arm64_ce_redhat",
+            "artifact": "${{ github.event.repository.name}}_ubi_linux_arm64_${{ inputs.vault-version}}_${{ inputs.vault-revision }}.docker.redhat.tar",
+            "edition": "ce"
+          }
+        ]
+      testable-packages: |
+        [
+          { "sample": "build_ce_linux_amd64_deb",
+            "artifact": "vault_${{ inputs.vault-version-package }}-1_amd64.deb",
+            "edition": "ce"
+          },
+          { "sample": "build_ce_linux_arm64_deb",
+            "artifact": "vault_${{ inputs.vault-version-package }}-1_arm64.deb",
+            "edition": "ce"
+          },
+          { "sample": "build_ce_linux_amd64_rpm",
+            "artifact": "vault-${{ inputs.vault-version-package }}-1.x86_64.rpm",
+            "edition": "ce"
+          },
+          { "sample": "build_ce_linux_arm64_rpm",
+            "artifact": "vault-${{ inputs.vault-version-package }}-1.aarch64.rpm",
+            "edition": "ce"
+          },
+          { "sample": "build_ce_linux_amd64_zip",
+            "artifact": "vault_${{ inputs.vault-version }}_linux_amd64.zip",
+            "edition": "ce"
+          },
+          { "sample": "build_ce_linux_arm64_zip",
+            "artifact": "vault_${{ inputs.vault-version }}_linux_arm64.zip",
+            "edition": "ce"
+          }
+        ]
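Downstream jobs fan out over these JSON-encoded outputs; inside a workflow the decoding is normally done with fromJSON() feeding a matrix. A minimal sketch of what a consumer sees after decoding, using a hypothetical single-entry value with a placeholder version number (the jq call is just for illustration):

```sh
# Illustrative only: decode a testable-packages-style output and print the
# sample/artifact pairs a downstream test matrix would iterate over.
TESTABLE_PACKAGES='[{"sample":"build_ce_linux_amd64_zip","artifact":"vault_0.0.0_linux_amd64.zip","edition":"ce"}]'
jq -r '.[] | "\(.sample) -> \(.artifact)"' <<< "$TESTABLE_PACKAGES"
# Output: build_ce_linux_amd64_zip -> vault_0.0.0_linux_amd64.zip
```

+
+  # Extended build targets are best-case builds for non-Linux platforms that we create for
+  # convenience but are not built or tested as part of our normal CI pipeline.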
+ extended: + if: inputs.build-all == true + strategy: + matrix: + docker: + - false + packages: + - false + goos: + - freebsd + - netbsd + - openbsd + - solaris + - windows + goarch: + - 386 + - amd64 + - arm + exclude: + - goos: solaris + goarch: 386 + - goos: solaris + goarch: arm + - goos: windows + goarch: arm + include: + - goos: darwin + goarch: amd64 + go-tags: ui netcgo + docker: false + packages: false + - goos: darwin + goarch: arm64 + go-tags: ui netcgo + docker: false + packages: false + - goos: linux + goarch: 386 + docker: true + packages: true + - goos: linux + docker: true + goarch: arm + goarm: 6 + packages: true + fail-fast: true + name: (${{ matrix.goos }}, ${{ matrix.goarch }}${{ matrix.goarm && ' ' || '' }}${{ matrix.goarm }}) + runs-on: ${{ fromJSON(inputs.compute-build) }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/build-vault + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + create-docker-container: ${{ matrix.docker }} + create-packages: ${{ matrix.packages }} + create-redhat-container: false + goarch: ${{ matrix.goarch }} + goos: ${{ matrix.goos }} + goarm: ${{ matrix.goarm }} + go-tags: ${{ matrix.go-tags != '' && matrix.go-tags || 'ui' }} + vault-binary-name: vault + vault-edition: ce + vault-version: ${{ inputs.vault-version }} + web-ui-cache-key: ${{ inputs.web-ui-cache-key }} + + status: + if: always() + runs-on: ${{ fromJSON(inputs.compute-small) }} + permissions: + id-token: write + contents: read + needs: + - core + - extended + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ inputs.checkout-ref }} + - name: Determine status + run: | + results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}') + if ! grep -q -v -E '(failure|cancelled)' <<< "$results"; then + echo "One or more required build workflows failed: ${results}" + exit 1 + fi + exit 0 diff --git a/.github/workflows/build-vault-oss.yml b/.github/workflows/build-vault-oss.yml deleted file mode 100644 index c7d8dc1e6d7a..000000000000 --- a/.github/workflows/build-vault-oss.yml +++ /dev/null @@ -1,109 +0,0 @@ ---- -name: build_vault - -# This workflow is intended to be called by the build workflow for each Vault -# binary that needs to be built and packaged. The ci make targets that are -# utilized automatically determine build metadata and handle building and -# packing vault. 
- -on: - workflow_call: - inputs: - bundle-path: - required: false - type: string - cgo-enabled: - type: string - default: 0 - create-packages: - type: boolean - default: true - goos: - required: true - type: string - goarch: - required: true - type: string - go-tags: - type: string - go-version: - type: string - package-name: - type: string - default: vault - vault-version: - type: string - required: true - -jobs: - build: - runs-on: ubuntu-latest - name: Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }} - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: ${{ inputs.go-version }} - - name: Set up node and yarn - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: yarn - cache-dependency-path: ui/yarn.lock - - name: Build UI - run: make ci-build-ui - - name: Build Vault - env: - CGO_ENABLED: ${{ inputs.cgo-enabled }} - GOARCH: ${{ inputs.goarch }} - GOOS: ${{ inputs.goos }} - GO_TAGS: ${{ inputs.go-tags }} - run: make ci-build - - name: Determine artifact basename - env: - GOARCH: ${{ inputs.goarch }} - GOOS: ${{ inputs.goos }} - run: echo "ARTIFACT_BASENAME=$(make ci-get-artifact-basename)" >> $GITHUB_ENV - - name: Bundle Vault - env: - BUNDLE_PATH: out/${{ env.ARTIFACT_BASENAME }}.zip - run: make ci-bundle - - uses: actions/upload-artifact@v3 - with: - name: ${{ env.ARTIFACT_BASENAME }}.zip - path: out/${{ env.ARTIFACT_BASENAME }}.zip - if-no-files-found: error - - if: ${{ inputs.create-packages }} - uses: hashicorp/actions-packaging-linux@v1 - with: - name: ${{ github.event.repository.name }} - description: Vault is a tool for secrets management, encryption as a service, and privileged access management. - arch: ${{ inputs.goarch }} - version: ${{ inputs.vault-version }} - maintainer: HashiCorp - homepage: https://github.com/hashicorp/vault - license: MPL-2.0 - binary: dist/${{ inputs.package-name }} - deb_depends: openssl - rpm_depends: openssl - config_dir: .release/linux/package/ - preinstall: .release/linux/preinst - postinstall: .release/linux/postinst - postremove: .release/linux/postrm - - if: ${{ inputs.create-packages }} - name: Determine package file names - run: | - echo "RPM_PACKAGE=$(basename out/*.rpm)" >> $GITHUB_ENV - echo "DEB_PACKAGE=$(basename out/*.deb)" >> $GITHUB_ENV - - if: ${{ inputs.create-packages }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.RPM_PACKAGE }} - path: out/${{ env.RPM_PACKAGE }} - if-no-files-found: error - - if: ${{ inputs.create-packages }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.DEB_PACKAGE }} - path: out/${{ env.DEB_PACKAGE }} - if-no-files-found: error diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0561661b0969..e6e142905110 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,216 +1,402 @@ ---- name: build +# Some words of caution before modifying this workflow: + +# This file and workflow have been carefully architected to meet the following requirements: +# * Builds and tests the correct artifacts in both CE and Ent while maintaining a merge-conflict-free +# build.yml between the two repos +# * Supports multiple Github event triggers +# * Is highly optimized for cost and speed +# * Supports a variety of complex use cases + +# If you wish to modify this file/workflow, please consider: +# * That the workflow must work when triggered by pull_request, push, schedule, and +# workflow_dispatch events. +# * Merge-conflict-free compatibility between CE and Ent.
Any changes that you make here must work +# in both repository contexts. +# * There are many workflow flow control modifiers. Further details below. +# * The total number of workers and the runner size. Further details below. + +# Further details: +# * The workflow is used by the CRT system for building, notarizing, signing, and releasing +# artifacts. Whatever we do in this workflow must support building all artifacts and uploading +# them to Github in order to fulfill the CRT requirement, while also maintaining a smaller +# default build matrix for the pull requests. +# * CRT is designed to trigger a workflow called build in a workflow file called build.yml. This +# file must build the correct artifacts in CE and Ent, depending on the repository context. +# We've gone to great lengths to architect this file and workflow so that we can build and test +# the correct artifacts in each context while maintaining a merge-conflict-free file between CE +# and Ent. Any changes that you make here must work in both repository contexts. +# * The workflow must support multiple event triggers, all of which have varying event payloads +# which must be considered. If you make changes, you must ensure that the workflow still works +# under normal pull_request, push, schedule, and workflow_dispatch trigger events. +# * The workflow has been highly optimized for cost and speed. If possible, it's better to add a +# step to an existing job than create another job. Over a long time horizon a new job is often +# much more expensive than a single step in an existing job; they also take up a limited number +# of our available runners. +# * Flow control in the workflow is complex in order to support many use cases, including: +# * Only building our tier 1 supported "core" artifacts by default. +# * Only building the UI if the Go application or UI has been modified. +# * Skipping builds entirely if the commit or PR only modifies changelog or website documentation. +# * The ability to check out the HEAD reference instead of a Github merge branch reference. +# * The ability to control building all of our tier 2 supported "extended" artifacts via a +# build/all label, even if the event trigger is pull_request or, more importantly, a push. +# It's important to note that we must maintain support for building all artifacts on push +# via a pull request, even though push events aren't directly tied to pull requests. Our +# label metadata helpers are designed to handle this complexity. +# * The ability to build all of our artifacts on a scheduled cadence to ensure we don't +# accidentally regress. +# * All of these considerations, and many others, have led to the modular design we have here. +# * If you're doing something in more than one place, try to use small composite actions +# whenever possible. + on: workflow_dispatch: pull_request: + types: + - opened + - ready_for_review + - reopened + - synchronize push: branches: - main - release/** + schedule: + - cron: '05 02 * * *' # * is a special character in YAML so you have to quote this string + +concurrency: + group: ${{ github.head_ref || github.run_id }}-build + cancel-in-progress: true jobs: - product-metadata: - runs-on: ubuntu-latest + setup: + # Setup is our entrypoint into the entire workflow. Here we gather metadata and export useful + # outputs for further use as inputs or for flow control. + # + # Trigger the setup workflow if any of the following conditions are true: + # * The workflow was triggered by a push (merge) to the main or release branch.
+ # * The workflow was triggered by a pull request and the pull request is not a draft. + # * The workflow was triggered on a schedule to test building all artifacts. + if: | + github.event_name == 'push' || + github.event_name == 'schedule' || + (github.event_name == 'pull_request' && github.event.pull_request.draft == false) + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} outputs: - build-date: ${{ steps.get-metadata.outputs.build-date }} - filepath: ${{ steps.generate-metadata-file.outputs.filepath }} - go-version: ${{ steps.get-metadata.outputs.go-version }} - matrix-test-group: ${{ steps.get-metadata.outputs.matrix-test-group }} - package-name: ${{ steps.get-metadata.outputs.package-name }} - vault-revision: ${{ steps.get-metadata.outputs.vault-revision }} - vault-version: ${{ steps.get-metadata.outputs.vault-version }} - vault-base-version: ${{ steps.get-metadata.outputs.vault-base-version }} + app-changed: ${{ steps.changed-files.outputs.app-changed }} + build-date: ${{ steps.metadata.outputs.vault-build-date }} + checkout-ref: ${{ steps.checkout.outputs.ref }} + compute-build: ${{ steps.metadata.outputs.compute-build }} + compute-build-compat: ${{ steps.metadata.outputs.compute-build-compat }} + compute-build-ui: ${{ steps.metadata.outputs.compute-build-ui }} + compute-small: ${{ steps.metadata.outputs.compute-small }} + docs-changed: ${{ steps.changed-files.outputs.docs-changed }} + is-draft: ${{ steps.metadata.outputs.is-draft }} + is-enterprise: ${{ steps.metadata.outputs.is-enterprise }} + is-fork: ${{ steps.metadata.outputs.is-fork }} + labels: ${{ steps.metadata.outputs.labels }} + ui-changed: ${{ steps.changed-files.outputs.ui-changed }} + vault-binary-name: ${{ steps.metadata.outputs.vault-binary-name }} + vault-revision: ${{ steps.metadata.outputs.vault-revision }} + vault-version: ${{ steps.metadata.outputs.vault-version }} + vault-version-metadata: ${{ steps.metadata.outputs.vault-version-metadata }} + vault-version-package: ${{ steps.metadata.outputs.vault-version-package }} + workflow-trigger: ${{ steps.metadata.outputs.workflow-trigger }} steps: - - uses: actions/checkout@v3 - - name: Get metadata - id: get-metadata - env: - # MATRIX_MAX_TEST_GROUPS is required to determine the randomly selected - # test group. It should be set to the highest test_group used in the - # enos-run-matrices.
- MATRIX_MAX_TEST_GROUPS: 5 - run: | - echo "build-date=$(make ci-get-date)" >> $GITHUB_OUTPUT - echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> $GITHUB_OUTPUT - echo "package-name=vault" >> $GITHUB_OUTPUT - echo "vault-base-version=$(make ci-get-version-base)" >> $GITHUB_OUTPUT - echo "vault-revision=$(make ci-get-revision)" >> $GITHUB_OUTPUT - echo "vault-version=$(make ci-get-version)" >> $GITHUB_OUTPUT - - uses: hashicorp/actions-generate-metadata@v1 - id: generate-metadata-file + # Run the changed-files action to determine what Git reference we should check out + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/changed-files + id: changed-files + - uses: ./.github/actions/checkout + id: checkout # Make sure we check out correct ref after checking changed files + # Get the vault version metadata + - uses: hashicorp/actions-set-product-version@v2 + id: set-product-version with: - version: ${{ steps.get-metadata.outputs.vault-version }} - product: ${{ steps.get-metadata.outputs.package-name }} - - uses: actions/upload-artifact@v3 + checkout: false # don't override the reference we've checked out + # Gather additional metadata about our execution context + - uses: ./.github/actions/metadata + id: metadata with: - name: metadata.json - path: ${{ steps.generate-metadata-file.outputs.filepath }} - if-no-files-found: error - - build-other: - name: Other - needs: product-metadata - strategy: - matrix: - goos: [freebsd, windows, netbsd, openbsd, solaris] - goarch: [386, amd64, arm] - exclude: - - goos: solaris - goarch: 386 - - goos: solaris - goarch: arm - - goos: windows - goarch: arm - fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml - with: - create-packages: false - goarch: ${{ matrix.goarch }} - goos: ${{ matrix.goos }} - go-tags: ui - go-version: ${{ needs.product-metadata.outputs.go-version }} - package-name: ${{ needs.product-metadata.outputs.package-name }} - vault-version: ${{ needs.product-metadata.outputs.vault-version }} - secrets: inherit - - build-linux: - name: Linux - needs: product-metadata - strategy: - matrix: - goos: [linux] - goarch: [arm, arm64, 386, amd64] - fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml - with: - goarch: ${{ matrix.goarch }} - goos: ${{ matrix.goos }} - go-tags: ui - go-version: ${{ needs.product-metadata.outputs.go-version }} - package-name: ${{ needs.product-metadata.outputs.package-name }} - vault-version: ${{ needs.product-metadata.outputs.vault-version }} - secrets: inherit - - build-darwin: - name: Darwin - needs: product-metadata - strategy: - matrix: - goos: [darwin] - goarch: [amd64, arm64] - fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml - with: - create-packages: false - goarch: ${{ matrix.goarch }} - goos: ${{ matrix.goos }} - go-tags: ui netcgo - go-version: ${{ needs.product-metadata.outputs.go-version }} - package-name: ${{ needs.product-metadata.outputs.package-name }} - vault-version: ${{ needs.product-metadata.outputs.vault-version }} - secrets: inherit + vault-version: ${{ steps.set-product-version.outputs.product-version }} + - uses: ./.github/actions/set-up-go + # Make sure all required Go modules are cached at this point. We don't want all of the Go + # tests and build jobs to download modules and race to upload them to the cache. 
+ name: Ensure Go modules are cached + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Don't download them on a cache hit during setup, just make sure they're cached before + # subsequent workflows are run. + no-restore: true - build-docker: - name: Docker image - needs: - - product-metadata - - build-linux - runs-on: ubuntu-latest - strategy: - matrix: - arch: [arm, arm64, 386, amd64] + ui: + # The Web UI workflow is a prerequisite workflow for building our artifacts. If the application + # or UI changes, we'll trigger this workflow but only build it if we don't already have the asset + # in our Github cache. + # + # Ensure the Web UI is built if any of the following conditions are true: + # * The workflow was triggered by a push (merge) to the main or release branch. + # * The workflow was triggered on a schedule to test building all artifacts. + # * The `build/all` label is present on either a pull request or on the pull request that created + # a merge. + # * The workflow was triggered by a pull request, the pull request is not a draft, and the UI + # or app changed. + if: | + needs.setup.outputs.workflow-trigger == 'push' || + needs.setup.outputs.workflow-trigger == 'schedule' || + contains(fromJSON(needs.setup.outputs.labels), 'build/all') || + ( + needs.setup.outputs.workflow-trigger == 'pull_request' && + needs.setup.outputs.is-draft == 'false' && + ( + needs.setup.outputs.ui-changed == 'true' || + needs.setup.outputs.app-changed == 'true' + ) + ) + needs: setup + runs-on: ${{ fromJSON(needs.setup.outputs.compute-build-ui) }} + outputs: + cache-key: ui-${{ steps.ui-hash.outputs.ui-hash }} steps: - - uses: actions/checkout@v3 - - uses: hashicorp/actions-docker-build@v1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: - version: ${{ needs.product-metadata.outputs.vault-version }} - target: default - arch: ${{ matrix.arch }} - zip_artifact_name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_${{ matrix.arch }}.zip - tags: | - docker.io/hashicorp/${{ github.event.repository.name }}:${{ needs.product-metadata.outputs.vault-version }} - public.ecr.aws/hashicorp/${{ github.event.repository.name }}:${{ needs.product-metadata.outputs.vault-version }} + ref: ${{ needs.setup.outputs.checkout-ref }} + - name: Get UI hash + id: ui-hash + run: echo "ui-hash=$(git ls-tree HEAD ui --object-only)" | tee -a "$GITHUB_OUTPUT" + - name: Set up UI asset cache + id: cache-ui-assets + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + enableCrossOsArchive: true + lookup-only: true + path: http/web_ui + # Only restore the UI asset cache if we haven't modified anything in the ui directory. + # Never do a partial restore of the web_ui if we don't get a cache hit. + key: ui-${{ steps.ui-hash.outputs.ui-hash }} + - if: steps.cache-ui-assets.outputs.cache-hit != 'true' + name: Set up node and yarn + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version-file: ui/package.json + cache: yarn + cache-dependency-path: ui/yarn.lock + - if: steps.cache-ui-assets.outputs.cache-hit != 'true' + name: Build UI + run: make ci-build-ui - build-ubi: - name: UBI image + artifacts: + # Artifacts is where we'll build the various Vault binaries and package them into their respective + # Zip bundles, RPM and Deb packages, and container images. After we've packaged them, we upload + # them to the Github Actions artifacts storage and execute our Enos test scenarios.
If the + workflow is triggered by a push to main, CRT will take these artifacts from Github and perform + all of the necessary notarizing and signing before uploading them to Artifactory. + # + # Trigger the artifacts workflow if any of the following conditions are true: + # + # * The workflow was triggered on a schedule to test building all artifacts. + # * The Go app was changed. + # * The build/all label is present on a pull request or push. + if: | + needs.setup.outputs.workflow-trigger == 'schedule' || + needs.setup.outputs.app-changed == 'true' || + contains(fromJSON(needs.setup.outputs.labels), 'build/all') needs: - - product-metadata - - build-linux - runs-on: ubuntu-latest - strategy: - matrix: - arch: [amd64] - steps: - - uses: actions/checkout@v2 - - uses: hashicorp/actions-docker-build@v1 - with: - version: ${{ needs.product-metadata.outputs.vault-version }} - target: ubi - arch: ${{ matrix.arch }} - zip_artifact_name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_${{ matrix.arch }}.zip - redhat_tag: quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a:${{ needs.product-metadata.outputs.vault-version }}-ubi + - setup + - ui # Don't build and test artifacts unless the UI build was triggered. + # The following is the only line that should be different between CE and Ent. + uses: ./.github/workflows/build-artifacts-ce.yml # Make sure we use the correct workflow. + with: + # The inputs defined here must be supported in both the build-artifacts-ce and + # build-artifacts-ent workflows. The implementations should seek to keep a compatible interface. + build-all: ${{ contains(fromJSON(needs.setup.outputs.labels), 'build/all') || needs.setup.outputs.workflow-trigger == 'schedule' }} + build-date: ${{ needs.setup.outputs.build-date }} + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + compute-build: ${{ needs.setup.outputs.compute-build }} + compute-build-compat: ${{ needs.setup.outputs.compute-build-compat }} + compute-small: ${{ needs.setup.outputs.compute-small }} + vault-revision: ${{ needs.setup.outputs.vault-revision }} + vault-version: ${{ needs.setup.outputs.vault-version }} + vault-version-package: ${{ needs.setup.outputs.vault-version-package }} + web-ui-cache-key: ${{ needs.ui.outputs.cache-key }} + secrets: inherit test: - name: Test ${{ matrix.build-artifact-name }} - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" + # Test all of the testable artifacts if our repo isn't a fork. We don't test when the PR is + # created from a fork because secrets are not passed in and they are required.
+ if: ${{ needs.setup.outputs.is-fork == 'false' }} + name: test ${{ matrix.artifact }} needs: - - product-metadata - - build-linux + - setup + - ui + - artifacts uses: ./.github/workflows/test-run-enos-scenario-matrix.yml strategy: fail-fast: false matrix: - include: - - matrix-file-name: build-github-oss-linux-amd64-zip - build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip - - matrix-file-name: build-github-oss-linux-arm64-zip - build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip + include: ${{ fromJSON(needs.artifacts.outputs.testable-packages) }} with: - build-artifact-name: ${{ matrix.build-artifact-name }} - matrix-file-name: ${{ matrix.matrix-file-name }} - matrix-test-group: ${{ needs.product-metadata.outputs.matrix-test-group }} - vault-edition: oss - vault-revision: ${{ needs.product-metadata.outputs.vault-revision }} + build-artifact-name: ${{ matrix.artifact }} + sample-max: 1 + sample-name: ${{ matrix.sample }} ssh-key-name: ${{ github.event.repository.name }}-ci-ssh-key + vault-edition: ${{ matrix.edition }} + vault-revision: ${{ needs.setup.outputs.vault-revision }} + vault-version: ${{ needs.setup.outputs.vault-version-metadata }} secrets: inherit - test-docker-k8s: - name: Test Docker K8s - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" + test-containers: + # Test all of the testable containers if our repo isn't a fork. We don't test when the PR is + # created from a fork because secrets are not passed in and they are required (for now). + if: ${{ needs.setup.outputs.is-fork == 'false' }} + name: test ${{ matrix.artifact }} needs: - - product-metadata - - build-docker - uses: ./.github/workflows/enos-run-k8s.yml + - setup + - ui + - artifacts + uses: ./.github/workflows/test-run-enos-scenario-containers.yml + strategy: + fail-fast: false + matrix: + include: ${{ fromJSON(needs.artifacts.outputs.testable-containers) }} with: - artifact-build-date: ${{ needs.product-metadata.outputs.build-date }} - artifact-name: ${{ github.event.repository.name }}_default_linux_amd64_${{ needs.product-metadata.outputs.vault-version }}_${{ needs.product-metadata.outputs.vault-revision }}.docker.tar - artifact-revision: ${{ needs.product-metadata.outputs.vault-revision }} - artifact-version: ${{ needs.product-metadata.outputs.vault-version }} + build-artifact-name: ${{ matrix.artifact }} + sample-max: 1 + sample-name: ${{ matrix.sample }} + vault-edition: ${{ matrix.edition }} + vault-revision: ${{ needs.setup.outputs.vault-revision }} + vault-version: ${{ needs.setup.outputs.vault-version-metadata }} secrets: inherit completed-successfully: - runs-on: ubuntu-latest + # build/completed-successfully is the only build workflow that must pass in order to merge + # a pull request. This workflow is used to determine the overall status of all the prior + # workflows and to notify various channels of success or failure. As part of this + # workflow, we create the necessary build metadata that is required for the CRT build system.
+ # + # Our trigger logic here mirrors that of setup, as these are the only two jobs that must + # always be triggered together. + if: | + always() && + ( + github.event_name == 'push' || + github.event_name == 'schedule' || + (github.event_name == 'pull_request' && github.event.pull_request.draft == false) + ) + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} + permissions: write-all # Ensure we have id-token:write access for vault-auth. needs: - - build-other - - build-linux - - build-darwin - - build-docker - - build-ubi + - setup + - ui + - artifacts - test - - test-docker-k8s + - test-containers steps: - - run: echo "All required build and test workflows have succeeded!" + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - id: status + name: Determine status + run: | + results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}') + if ! grep -q -v -E '(failure|cancelled)' <<< "$results"; then + result="failed" + else + result="success" + fi + { + echo "result=${result}" + echo "results=${results}" + } | tee -a "$GITHUB_OUTPUT" + - if: needs.setup.outputs.is-enterprise == 'true' + id: vault-auth + name: Vault Authenticate + run: vault-auth + - if: needs.setup.outputs.is-enterprise == 'true' + id: secrets + name: Fetch Vault Secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/github_actions_notifications_bot token | SLACK_BOT_TOKEN; + - id: slackbot-token + run: + echo "slackbot-token=${{ needs.setup.outputs.is-enterprise != 'true' && secrets.SLACK_BOT_TOKEN || steps.secrets.outputs.SLACK_BOT_TOKEN }}" >> "$GITHUB_OUTPUT" + - if: | + needs.setup.outputs.workflow-trigger == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name && + (github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') + name: Create or update a build status comment on the pull request + env: + ARTIFACTS: ${{ needs.artifacts.result }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + REPO: ${{ github.event.repository.name }} + RUN_ID: ${{ github.run_id }} + TEST: ${{ needs.test.result }} + TEST_CONTAINERS: ${{ needs.test-containers.result }} + UI: ${{ needs.ui.result }} + run: ./.github/scripts/report-build-status.sh + - name: Notify build failures in Slack + if: | + always() && + steps.status.outputs.result != 'success' && + (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) + uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 + env: + SLACK_BOT_TOKEN: ${{ steps.slackbot-token.outputs.slackbot-token }} + with: + channel-id: "C05AABYEA9Y" # Notify #feed-vault-ci-official + # channel-id: "C05Q4D5V89W" # Notify #test-vault-ci-slack-integration + payload: | + { + "text": "${{ github.repository }} build failures on ${{ github.ref_name }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":rotating_light: ${{ github.repository }} build failures on ${{ github.ref_name }} :rotating_light:", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ needs.setup.result != 'failure' && ':white_check_mark:' || ':x:' }} Setup\n${{
needs.ui.result != 'failure' && ':white_check_mark:' || ':x:' }} Build UI\n${{ needs.artifacts.result != 'failure' && ':white_check_mark:' || ':x:' }} Build Vault Artifacts\n${{ needs.test.result != 'failure' && ':white_check_mark:' || ':x:' }} Enos package test scenarios\n${{ needs.test-containers.result != 'failure' && ':white_check_mark:' || ':x:' }} Enos container test scenarios" + }, + "accessory": { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Failing Workflow", + "emoji": true + }, + "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - uses: hashicorp/actions-generate-metadata@v1 + if: needs.artifacts.result == 'success' # create build metadata if we successfully created artifacts + id: generate-metadata-file + with: + version: ${{ needs.setup.outputs.vault-version-metadata }} + product: ${{ needs.setup.outputs.vault-binary-name }} + - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + if: steps.generate-metadata-file.outcome == 'success' # upload our metadata if we created it + with: + name: metadata.json + path: ${{ steps.generate-metadata-file.outputs.filepath }} + if-no-files-found: error + - if: always() && steps.status.outputs.result != 'success' + name: Check for failed status + run: | + echo "One or more required build workflows failed: ${{ steps.status.outputs.results }}" + exit 1 diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index d8a380270b26..23ab8246a710 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches @@ -48,17 +48,17 @@ jobs: echo "Not found." echo "" echo "Did not find a changelog entry named ${expected_changelog_file}" - echo "If your changelog file is correct, skip this check with the 'pr/no-changelog' label" - echo "Reference - https://github.com/hashicorp/vault/pull/10363 and https://github.com/hashicorp/vault/pull/11894" + echo "If your changelog file is correct, or this change does not need a changelog, skip this check with the 'pr/no-changelog' label" + echo "Reference - https://github.com/hashicorp/vault/blob/main/CONTRIBUTING.md#changelog-entries" exit 1 fi # Else, we found some toolchain files. Let's make sure the contents are correct. if ! grep -q 'release-note:change' "$toolchain_files" || ! grep -q '^core: Bump Go version to' "$toolchain_files"; then echo "Invalid format for changelog. Expected format:" - echo "```release-note:change" + echo '```release-note:change' echo "core: Bump Go version to x.y.z." - echo "```" + echo '```' exit 1 else echo "Found Go toolchain changelog entry in PR!" @@ -76,6 +76,12 @@ jobs: elif grep -q ':fix$' "$changelog_files"; then echo "Found invalid type (fix) in changelog - did you mean bug?" exit 1 + elif grep -q ':feature$' "$changelog_files"; then + if ! grep -q '^\*\*' "$changelog_files"; then + echo "Feature changelogs must be formatted like the following:" + echo "**Feature Name**: Feature description" + exit 1 + fi elif ! grep -q '```release-note:' "$changelog_files"; then # People often make changelog files like ```changelog:, which is incorrect. 
echo "Changelog file did not contain 'release-note' heading - check formatting." diff --git a/.github/workflows/check-legacy-links-format.yml b/.github/workflows/check-legacy-links-format.yml deleted file mode 100644 index 1330579dc0d4..000000000000 --- a/.github/workflows/check-legacy-links-format.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Legacy Link Format Checker - -on: - push: - paths: - - "website/content/**/*.mdx" - - "website/data/*-nav-data.json" - -jobs: - check-links: - uses: hashicorp/dev-portal/.github/workflows/docs-content-check-legacy-links-format.yml@475289345d312552b745224b46895f51cc5fc490 - with: - repo-owner: "hashicorp" - repo-name: "vault" - commit-sha: ${{ github.sha }} - mdx-directory: "website/content" - nav-data-directory: "website/data" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a56634faf151..2419ef3b2fc9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,83 +1,416 @@ name: CI on: + pull_request: + # The default types for pull_request are [opened, synchronize, reopened]. This is insufficient + # for our needs, since we're skipping stuff on PRs in draft mode.By adding the ready_for_review + # type, when a draft pr is marked ready, we run everything, including the stuff we'd have + # skipped up until now. + types: [opened, synchronize, reopened, ready_for_review] push: + branches: + - main + - release/** + workflow_dispatch: + +concurrency: + group: ${{ github.head_ref || github.run_id }}-ci + cancel-in-progress: true jobs: setup: - name: Setup - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} outputs: - runs-on: ${{ steps.setup-outputs.outputs.runs-on }} - enterprise: ${{ steps.setup-outputs.outputs.enterprise }} - go-tags: ${{ steps.setup-outputs.outputs.go-tags }} + app-changed: ${{ steps.changed-files.outputs.app-changed }} + checkout-ref: ${{ steps.checkout.outputs.ref }} + compute-small: ${{ steps.metadata.outputs.compute-small }} + compute-test-go: ${{ steps.metadata.outputs.compute-test-go }} + compute-test-ui: ${{ steps.metadata.outputs.compute-test-ui }} + go-tags: ${{ steps.metadata.outputs.go-tags }} + is-draft: ${{ steps.metadata.outputs.is-draft }} + is-enterprise: ${{ steps.metadata.outputs.is-enterprise }} + is-fork: ${{ steps.metadata.outputs.is-fork }} + labels: ${{ steps.metadata.outputs.labels }} + ui-changed: ${{ steps.changed-files.outputs.ui-changed }} + workflow-trigger: ${{ steps.metadata.outputs.workflow-trigger }} steps: - - id: setup-outputs - name: Setup outputs - run: | - github_repository="${{ github.repository }}" + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/changed-files + id: changed-files + - uses: ./.github/actions/checkout + id: checkout # make sure we check out correct ref after checking changed files + - uses: ./.github/actions/metadata + id: metadata + - name: Ensure Go modules are cached + uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + no-restore: true # don't download them on a cache hit - if [ "${github_repository##*/}" == "vault-enterprise" ] ; then - echo 'runs-on=["self-hosted","ondemand","linux","type=c5.2xlarge"]' >> $GITHUB_OUTPUT - echo 'enterprise=1' >> $GITHUB_OUTPUT - echo 'go-tags=ent enterprise' >> $GITHUB_OUTPUT - else - echo 'runs-on="ubuntu-latest"' >> $GITHUB_OUTPUT - echo 'enterprise=' >> $GITHUB_OUTPUT - echo 'go-tags=' >> $GITHUB_OUTPUT - fi - 
semgrep: - name: Semgrep - needs: - - setup - runs-on: ${{ fromJSON(needs.setup.outputs.runs-on) }} - container: - image: returntocorp/semgrep@sha256:ffc6f3567654f9431456d49fd059dfe548f007c494a7eb6cd5a1a3e50d813fb3 - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - name: Run Semgrep Rules - id: semgrep - run: semgrep ci --include '*.go' --config 'tools/semgrep/ci' - setup-go-cache: - name: Go Caches - needs: - - setup - uses: ./.github/workflows/setup-go-cache.yml - with: - runs-on: ${{ needs.setup.outputs.runs-on }} - secrets: inherit - fmt: - name: Check Format - needs: - - setup - runs-on: ${{ fromJSON(needs.setup.outputs.runs-on) }} - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 - with: - go-version-file: ./.go-version - cache: true - - id: format - run: | - echo "Using gofumpt version $(go run mvdan.cc/gofumpt -version)" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. Run 'make fmt' to fix" - exit 1 - fi test-go: + # Run Go tests if the vault app changed + if: needs.setup.outputs.app-changed == 'true' name: Run Go tests - needs: - - setup - - setup-go-cache - # Don't run this job for branches starting with 'ui/', 'docs/', or 'backport/docs/' - if: ${{ ! (startsWith( github.ref_name, 'ui/' ) || startsWith( github.ref_name, 'docs/' ) || startsWith( github.ref_name, 'backport/docs/') ) }} + needs: setup + uses: ./.github/workflows/test-go.yml + with: + # The regular Go tests use an extra runner to execute the binary-dependent tests. We isolate + # them there so that the other tests aren't slowed down waiting for a binary build. + binary-tests: true + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock' + name: standard + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + test-timing-cache-key: go-test-timing-standard + total-runners: 16 + secrets: inherit + + test-go-testonly: + # Run Go tests tagged with "testonly" if the vault app changed + if: needs.setup.outputs.app-changed == 'true' + name: Run Go tests tagged with testonly + needs: setup + uses: ./.github/workflows/test-go.yml + with: + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,testonly' + name: testonly + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + testonly: true + test-timing-cache-enabled: false + total-runners: 2 # test runners cannot be less than 2 + secrets: inherit + + test-go-race: + # Run Go test with the data race detector enabled if the vault app changed and we're out of + # drafts mode. + if: needs.setup.outputs.app-changed == 'true' && needs.setup.outputs.is-draft == 'false' + name: Run Go tests with data race detection + needs: setup uses: ./.github/workflows/test-go.yml with: - # The example inputs below are just here to get the workflow to run during the migration. - # In the future, they will be substituted - possibly with references to values coming from a testing matrix. 
+ checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + env-vars: | + { + "VAULT_CI_GO_TEST_RACE": 1 + } + extra-flags: '-race' + name: race go-arch: amd64 go-tags: ${{ needs.setup.outputs.go-tags }} - extra-tags: deadlock - runs-on: ${{ needs.setup.outputs.runs-on }} - enterprise: ${{ needs.setup.outputs.enterprise }} + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + test-timing-cache-key: go-test-timing-race + total-runners: 16 secrets: inherit + + test-go-fips: + name: Run Go tests with FIPS configuration + # Run the Go tests with fips if the vault app changed, we're in the Vault Enterprise context, + # and our trigger is a merge to main or releases/*, or if the 'fips' label is present on a PR. + if: | + needs.setup.outputs.app-changed == 'true' && + needs.setup.outputs.is-enterprise == 'true' && + (needs.setup.outputs.workflow-trigger == 'push' || contains(needs.setup.outputs.labels, 'fips')) + needs: setup + uses: ./.github/workflows/test-go.yml + with: + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + env-vars: | + { + "GOEXPERIMENT": "boringcrypto" + } + name: fips + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,cgo,fips,fips_140_2' + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + test-timing-cache-key: go-test-timing-fips + total-runners: 16 secrets: inherit + + test-ui: + name: Test UI + # Run the UI tests if our UI has changed, or a 'ui' label is present, or the workflow + # was triggered by a merge to main or releases/*. + if: | + needs.setup.outputs.ui-changed == 'true' || + needs.setup.outputs.workflow-trigger == 'push' || + contains(github.event.pull_request.labels.*.name, 'ui') + needs: setup + permissions: + id-token: write + contents: read + runs-on: ${{ fromJSON(needs.setup.outputs.compute-test-ui) }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + name: status + with: + ref: ${{ needs.setup.outputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Setup node.js without caching to allow running npm install -g yarn (next step) + - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version-file: './ui/package.json' + - run: npm install -g yarn + # Setup node.js with caching using the yarn.lock file + - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version-file: './ui/package.json' + cache: yarn + cache-dependency-path: ui/yarn.lock + - uses: browser-actions/setup-chrome@facf10a55b9caf92e0cc749b4f82bf8220989148 # v1.7.2 + with: + # Temporarily pin our Chrome version while we sort out a broken test on latest + chrome-version: 1314712 + - name: ui-dependencies + working-directory: ./ui + run: | + yarn install --frozen-lockfile + npm rebuild node-sass + - if: needs.setup.outputs.is-enterprise != 'true' + name: Rebuild font cache on Github hosted runner + # Fix `Fontconfig error: No writable cache directories` error on Github hosted runners + # This seems to have been introduced with this runner image: https://github.com/actions/runner-images/releases/tag/ubuntu22%2F20240818.1 + # Hopefully this will resolve itself at some point with a newer image and we can remove it + run: fc-cache -f -v + - if: needs.setup.outputs.is-enterprise == 'true' + id: vault-auth + name: Authenticate to Vault + run: vault-auth + - if:
needs.setup.outputs.is-enterprise == 'true' + id: secrets + name: Fetch secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/hashicorp/vault-enterprise/github-token username-and-token | PRIVATE_REPO_GITHUB_TOKEN; + kv/data/github/hashicorp/vault-enterprise/license license_1 | VAULT_LICENSE; + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + - if: needs.setup.outputs.is-enterprise == 'true' + name: Set up Git + run: git config --global url."https://${{ steps.secrets.outputs.PRIVATE_REPO_GITHUB_TOKEN }}@github.com".insteadOf https://github.com + - uses: ./.github/actions/install-external-tools + - name: build-go-dev + run: | + rm -rf ./pkg + mkdir ./pkg + make prep dev + - name: test-ui + env: + VAULT_LICENSE: ${{ steps.secrets.outputs.VAULT_LICENSE }} + run: | + export PATH="${PWD}/bin:${PATH}" + # Run Ember tests + cd ui + mkdir -p test-results/qunit + yarn ${{ needs.setup.outputs.is-enterprise == 'true' && 'test' || 'test:oss' }} + - if: always() + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + name: test-results-ui + path: ui/test-results + - name: Prepare datadog-ci + if: (github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') && (success() || failure()) + continue-on-error: true + run: | + if type datadog-ci > /dev/null 2>&1; then + exit 0 + fi + # Curl does not always exit 1 if things go wrong. To determine if this is successful + # we'll silence all non-error output and check the results to determine success. + if ! out="$(curl -sSL --fail https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64 --output /usr/local/bin/datadog-ci 2>&1)"; then + printf "failed to download datadog-ci: %s" "$out" + fi + if [[ -n "$out" ]]; then + printf "failed to download datadog-ci: %s" "$out" + fi + chmod +x /usr/local/bin/datadog-ci + - name: Upload test results to DataDog + if: success() || failure() + continue-on-error: true + env: + DD_ENV: ci + run: | + if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then + export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }} + fi + datadog-ci junit upload --service "$GITHUB_REPOSITORY" 'ui/test-results/qunit/results.xml' + - if: always() + uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2.4 + with: + paths: "ui/test-results/qunit/results.xml" + show: "fail" + + tests-completed: + needs: + - setup + - test-go + - test-go-testonly + - test-go-race + - test-go-fips + - test-ui + if: always() + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} + permissions: write-all # Ensure we have id-token:write access for vault-auth. + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + # Determine the overall status of our required test jobs. + - name: Determine status + id: status + run: | + # Determine the overall status of the job. We allow fips and race tests to fail so we + # don't consider their result here. + # + # Encode the needs context into JSON, filter out unrequired workflows, shape the result + # into a more useful schema. Determine the overall status by comparing the total number of + # successful results with the number of required jobs. 
+ if results=$(jq -rec 'del(.["test-go-fips"], .["test-go-race"]) as $required + | $required | keys as $jobs + | reduce $jobs[] as $job ([]; . + [{job: $job}+$required[$job]])' <<< '${{ toJSON(needs) }}' + ); then + # Determine if all of our required jobs have succeeded. + if jq -rec 'length as $expected + | [.[] | select((.result == "success") or (.result == "skipped"))] | length as $got + | $expected == $got' <<< "$results"; then + msg="All required test jobs succeeded!" + result="success" + else + msg="One or more required test jobs failed!" + result="failed" + fi + else + msg="Failed to decode and filter test results" + result="failed" + results="''" + fi + { + echo "msg=${msg}" + echo "result=${result}" + echo "results<<EOF" + echo "${results}" + echo "EOF" + } >> "$GITHUB_OUTPUT" + - if: | + always() && + needs.setup.outputs.workflow-trigger == 'push' && + ( + needs.test-go.result == 'failure' || + needs.test-go-race.result == 'failure' || + needs.test-go-race.outputs.data-race-result == 'failure' || + needs.test-go-testonly.result == 'failure' || + needs.test-ui.result == 'failure' + ) + name: Notify build failures in Slack + uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 + env: + SLACK_BOT_TOKEN: ${{ steps.slackbot-token.outputs.slackbot-token }} + with: + channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing + payload: | + { + "text": "${{ github.repository }} test failures on ${{ github.ref_name }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":rotating_light: ${{ github.repository }} test failures on ${{ github.ref_name }} :rotating_light:", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ needs.test-go.result != 'failure' && ':white_check_mark:' || ':x:' }} Go tests\n${{ needs.test-go-race.result != 'failure' && ':white_check_mark:' || ':x:' }} Go race tests\n\t\t${{ needs.test-go-race.outputs.data-race-result != 'success' && ':x: Data race detected' || ':white_check_mark: No race detected' }}\n${{ needs.test-go-testonly.result != 'failure' && ':white_check_mark:' || ':x:' }} Go testonly tests\n${{ needs.test-ui.result != 'failure' && ':white_check_mark:' || ':x:' }} UI tests" + }, + "accessory": { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Failing Workflow", + "emoji": true + }, + "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + # Only create the PR summary if it's a pull request and it is not a fork, as we need access + # to secrets. + - if: ${{ needs.setup.outputs.is-fork == 'false' }} + name: Download failure summaries + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + pattern: failure-summary-*.md + path: failure-summaries + merge-multiple: true + - if: ${{ needs.setup.outputs.is-fork == 'false' }} + id: prepare-failure-summary + name: Prepare failure summary + run: | + # Sort all of the summary table rows and push them to a temp file. + temp_file_name=temp-$(date +%s) + cat failure-summaries/*.md | sort >> "$temp_file_name" + + # If there are test failures, present them in the format of a GitHub Markdown table.
+ if [ -s "$temp_file_name" ]; then + # Here we create the headings for the summary table + { + echo "| Test Type | Package | Test | Elapsed | Runner Index | Logs |" + echo "| --------- | ------- | ---- | ------- | ------------ | ---- |" + cat "$temp_file_name" + } >> "$GITHUB_STEP_SUMMARY" + else + if [ "${{ steps.status.outputs.result }}" == 'success' ]; then + echo "### All required Go tests passed! :white_check_mark:" >> "$GITHUB_STEP_SUMMARY" + fi + fi + { + echo 'table-test-results<> "$GITHUB_OUTPUT" + github_repository="${{ github.repository }}" + if [ "${github_repository##*/}" == "vault-enterprise" ] ; then + echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT" + else + echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" + fi + + lint: + needs: metadata + runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }} + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version + - uses: ./.github/actions/install-external-tools + - uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Ensure shellcheck is available for linting + run: which shellcheck || (sudo apt update && sudo apt install -y shellcheck) + - name: lint + working-directory: ./enos + env: + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} + run: make lint diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml index 5fe50e8da061..c648d5d2a9e3 100644 --- a/.github/workflows/enos-release-testing-oss.yml +++ b/.github/workflows/enos-release-testing-oss.yml @@ -11,25 +11,28 @@ jobs: if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }} runs-on: ubuntu-latest outputs: - matrix-test-group: ${{ steps.get-metadata.outputs.matrix-test-group }} - vault-revision: ${{ steps.get-metadata.outputs.vault-revision }} - vault-version: ${{ steps.get-metadata.outputs.vault-version }} + vault-revision: ${{ github.event.client_payload.payload.sha }} + vault-version: ${{ github.event.client_payload.payload.version }} + vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: # Check out the repository at the same Git SHA that was used to create # the artifacts to get the correct metadata. ref: ${{ github.event.client_payload.payload.sha }} - id: get-metadata env: - # MATRIX_MAX_TEST_GROUPS is required to determine the randomly selected - # test group. It should be set to the highest test_group used in the - # enos-run-matrices. 
- MATRIX_MAX_TEST_GROUPS: 2 + VAULT_VERSION: ${{ github.event.client_payload.payload.version }} run: | - echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> $GITHUB_OUTPUT - echo "vault-revision=$(make ci-get-revision)" >> $GITHUB_OUTPUT - echo "vault-version=$(make ci-get-version)" >> $GITHUB_OUTPUT + echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT" + - name: Release Artifact Info + run: | + # shellcheck disable=SC2129 + echo "__Product:__ ${{ github.event.client_payload.payload.product }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Version:__ ${{ github.event.client_payload.payload.version }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Commit:__ ${{ github.event.client_payload.payload.sha }}" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "[Build Workflow](https://github.com/${{github.event.client_payload.payload.org}}/${{github.event.client_payload.payload.repo}}/actions/runs/${{github.event.client_payload.payload.buildworkflowid}})" >> "$GITHUB_STEP_SUMMARY" test: name: Test ${{ matrix.build-artifact-name }} @@ -40,21 +43,30 @@ jobs: fail-fast: false matrix: include: - - matrix-file-name: enos_release_testing_oss-artifactory-oss-linux-amd64-zip + - sample-name: release_ce_linux_amd64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb + - sample-name: release_ce_linux_arm64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb + - sample-name: release_ce_linux_amd64_rpm + build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm + - sample-name: release_ce_linux_arm64_rpm + build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm + - sample-name: release_ce_linux_amd64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip - - matrix-file-name: enos_release_testing_oss-artifactory-oss-linux-arm64-zip + - sample-name: release_ce_linux_arm64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip with: build-artifact-name: ${{ matrix.build-artifact-name }} - matrix-file-name: ${{ matrix.matrix-file-name }} - matrix-test-group: ${{ needs.product-metadata.outputs.matrix-test-group }} - vault-edition: oss + sample-max: 2 + sample-name: ${{ matrix.sample-name }} vault-revision: ${{ needs.product-metadata.outputs.vault-revision }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} secrets: inherit save-metadata: - runs-on: linux + runs-on: ubuntu-latest + if: always() needs: test steps: - name: Persist metadata - uses: hashicorp/actions-persist-metadata@v1 + uses: hashicorp/actions-persist-metadata@v2 diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml deleted file mode 100644 index 50630b46ab21..000000000000 --- a/.github/workflows/enos-run-k8s.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -name: enos-k8s - -on: - workflow_call: - inputs: - artifact-build-date: - required: false - type: string - artifact-name: - required: true - type: string - artifact-revision: - required: true - type: string - artifact-version: - required: true - type: string - -env: - ARTIFACT_BUILD_DATE: ${{ inputs.artifact-build-date }} - ARTIFACT_NAME: ${{ inputs.artifact-name }} - ARTIFACT_REVISION: ${{ inputs.artifact-revision }} - ARTIFACT_VERSION: ${{ inputs.artifact-version }} - -jobs: - enos: - name: Integration - runs-on: ubuntu-latest - env: -
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Set up Terraform - uses: hashicorp/setup-terraform@v2 - with: - # the Terraform wrapper will break Terraform execution in Enos because - # it changes the output to text when we expect it to be JSON. - terraform_wrapper: false - # Terraform 1.4.x introduced an issue that prevents some resources from - # planning. Pin to 1.3.x until it is resolved. - terraform_version: 1.3.9 - - name: Set up Enos - uses: hashicorp/action-setup-enos@v1 - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: Download Docker Image - id: download - uses: actions/download-artifact@v3 - with: - name: ${{ inputs.artifact-name }} - path: ./enos/support/downloads - - name: Prepare for scenario execution - env: - IS_ENT: ${{ startsWith(env.ARTIFACT_NAME, 'vault-enterprise' ) }} - run: | - mkdir -p ./enos/support/terraform-plugin-cache - if ${IS_ENT} == true; then - echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - echo "edition=ent" >> $GITHUB_ENV - echo "edition set to 'ent'" - echo "image_repo=hashicorp/vault-enterprise" >> $GITHUB_ENV - echo "image repo set to 'hashicorp/vault-enterprise'" - else - echo "edition=oss" >> $GITHUB_ENV - echo "edition set to 'oss'" - echo "image_repo=hashicorp/vault" >> $GITHUB_ENV - echo "image repo set to 'hashicorp/vault'" - fi - - name: Run Enos scenario - id: run - # Continue once and retry to handle occasional blips when creating - # infrastructure. - continue-on-error: true - env: - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache - ENOS_VAR_vault_build_date: ${{ env.ARTIFACT_BUILD_DATE }} - ENOS_VAR_vault_product_version: ${{ env.ARTIFACT_VERSION }} - ENOS_VAR_vault_product_revision: ${{ env.ARTIFACT_REVISION }} - ENOS_VAR_vault_docker_image_archive: ${{steps.download.outputs.download-path}}/${{ env.ARTIFACT_NAME }} - ENOS_VAR_vault_image_repository: ${{ env.image_repo }} - run: | - enos scenario run --timeout 10m0s --chdir ./enos/k8s edition:${{ env.edition }} - - name: Retry Enos scenario - id: run_retry - if: steps.run.outcome == 'failure' - env: - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache - ENOS_VAR_vault_build_date: ${{ env.ARTIFACT_BUILD_DATE }} - ENOS_VAR_vault_product_version: ${{ env.ARTIFACT_VERSION }} - ENOS_VAR_vault_product_revision: ${{ env.ARTIFACT_REVISION }} - ENOS_VAR_vault_docker_image_archive: ${{steps.download.outputs.download-path}}/${{ env.ARTIFACT_NAME }} - ENOS_VAR_vault_image_repository: ${{ env.image_repo }} - run: | - enos scenario run --timeout 10m0s --chdir ./enos/k8s edition:${{ env.edition }} - - name: Destroy Enos scenario - if: ${{ always() }} - env: - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache - ENOS_VAR_vault_build_date: ${{ env.ARTIFACT_BUILD_DATE }} - ENOS_VAR_vault_product_version: ${{ env.ARTIFACT_VERSION }} - ENOS_VAR_vault_product_revision: ${{ env.ARTIFACT_REVISION }} - ENOS_VAR_vault_docker_image_archive: ${{steps.download.outputs.download-path}} - ENOS_VAR_vault_image_repository: ${{ env.image_repo }} - run: | - enos scenario destroy --timeout 10m0s --chdir ./enos/k8s edition:${{ env.edition }} - - name: Cleanup Enos runtime directories - if: ${{ always() }} - run: | - rm -rf /tmp/enos* - rm -rf ./enos/support - rm -rf ./enos/k8s/.enos diff 
--git a/.github/workflows/godoc-test-checker.yml b/.github/workflows/godoc-test-checker.yml deleted file mode 100644 index 048042cf752a..000000000000 --- a/.github/workflows/godoc-test-checker.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Check Go Docs for tests - -on: - pull_request: - types: [opened, synchronize] - # Runs on PRs to main - branches: - - main - -jobs: - godoc-test-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 - - name: get metadata - id: get-metadata - run: echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - - name: Set Up Go - uses: actions/setup-go@v3 - with: - cache: true - go-version: ${{ steps.get-metadata.outputs.go-version }} - - name: Verify new tests have go docs - run: make ci-vet-godoctests \ No newline at end of file diff --git a/.github/workflows/goversion-checker.yml b/.github/workflows/goversion-checker.yml deleted file mode 100644 index 71ed31b65e5f..000000000000 --- a/.github/workflows/goversion-checker.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Check Go version - -on: - pull_request: - types: [opened, synchronize] - -jobs: - go-version-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 - - name: Verify go versions in tree are consistent with one another - run: | - GOVER=$(cat .go-version) - EXPECTED="docker.mirror.hashicorp.services/cimg/go:$GOVER" - GOT=$(yq .references.environment.GO_IMAGE .circleci/config/executors/@executors.yml) - if [ "$EXPECTED" != "$GOT" ]; then - echo "version mismatch, .go-version has '$GOVER' and circleci config uses '$GOT'" - exit 1 - fi \ No newline at end of file diff --git a/.github/workflows/milestone-checker.yml b/.github/workflows/milestone-checker.yml index 77ff50b8cf1f..294b58576492 100644 --- a/.github/workflows/milestone-checker.yml +++ b/.github/workflows/milestone-checker.yml @@ -5,7 +5,8 @@ name: Check Milestone on: pull_request: - types: [opened, synchronize, labeled, unlabeled] + # milestoned and demilestoned work (https://github.com/github/docs/issues/23909) but they aren't listed in the github documentation + types: [opened, synchronize, labeled, unlabeled, milestoned, demilestoned] # Runs on PRs to main and release branches branches: - main @@ -14,20 +15,11 @@ on: jobs: # checks that a milestone entry is present for a PR milestone-check: - # If there a `pr/no-milestone` label we ignore this check - if: "!contains(github.event.pull_request.labels.*.name, 'pr/no-milestone')" + # If there is a `pr/no-milestone` label, or this comes from a fork (community contributor) we ignore this check + if: ${{ ((github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') + && (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name)) + && (!contains(github.event.pull_request.labels.*.name, 'pr/no-milestone')) }} runs-on: ubuntu-latest steps: - - name: Checkout Actions - uses: actions/checkout@v2 - with: - repository: "grafana/grafana-github-actions" - path: ./actions - ref: main - - name: Install Actions - run: npm install --production --prefix ./actions - - name: Run PR Checks - uses: ./actions/pr-checks - with: - token: ${{secrets.GITHUB_TOKEN}} - configPath: configs/milestone-check \ No newline at end of file + - name: Check milestone + run: ${{ github.event.pull_request.milestone != null }} diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml 
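The simplified milestone check above leans on expression expansion: GitHub Actions substitutes `${{ github.event.pull_request.milestone != null }}` into the step before any shell runs, so the script body becomes the literal word `true` or `false`, and the shell builtins of those names supply the step's exit status. A minimal local sketch of the same mechanism, with a hypothetical `milestone` variable standing in for the event payload:

```
#!/usr/bin/env bash
# Sketch only: emulates how `run: ${{ github.event.pull_request.milestone != null }}`
# passes or fails a job. The expression result ("true" or "false") is pasted into
# the script, and the shell then executes that word as a command.
milestone=""   # hypothetical: empty stands in for a PR with no milestone

if [ -n "$milestone" ]; then expanded=true; else expanded=false; fi

"$expanded"                           # runs the `true` or `false` builtin
echo "step exit status would be: $?"  # 0 => milestone present, 1 => missing
```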
index 4e03b9761ba4..ae7c5c750d87 100644 --- a/.github/workflows/oss.yml +++ b/.github/workflows/oss.yml @@ -19,9 +19,9 @@ jobs: runs-on: ubuntu-latest steps: - if: github.event.pull_request != null - uses: actions/checkout@v3 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - if: github.event.pull_request != null - uses: dorny/paths-filter@v2 + uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: changes with: # derived from CODEOWNERS @@ -58,17 +58,17 @@ jobs: - 'ui/**' - name: "Default to core board" - run: echo "PROJECT=170" >> $GITHUB_ENV + run: echo "PROJECT=170" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.cryptosec == 'true' - run: echo "PROJECT=172" >> $GITHUB_ENV + run: echo "PROJECT=172" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.ecosystem == 'true' - run: echo "PROJECT=169" >> $GITHUB_ENV + run: echo "PROJECT=169" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.devex == 'true' - run: echo "PROJECT=176" >> $GITHUB_ENV + run: echo "PROJECT=176" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.ui == 'true' - run: echo "PROJECT=171" >> $GITHUB_ENV + run: echo "PROJECT=171" >> "$GITHUB_ENV" - - uses: actions/add-to-project@v0.3.0 + - uses: actions/add-to-project@244f685bbc3b7adfa8466e08b698b5577571133e # v1.0.2 with: project-url: https://github.com/orgs/hashicorp/projects/${{ env.PROJECT }} github-token: ${{ secrets.TRIAGE_GITHUB_TOKEN }} @@ -125,4 +125,4 @@ jobs: # ) { # deletedItemId # } - # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true \ No newline at end of file + # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true diff --git a/.github/workflows/plugin-update-check.yml b/.github/workflows/plugin-update-check.yml new file mode 100644 index 000000000000..36bc0d27e7df --- /dev/null +++ b/.github/workflows/plugin-update-check.yml @@ -0,0 +1,115 @@ +name: Plugin update check +run-name: ${{ inputs.repo }} update check + +on: + workflow_dispatch: + inputs: + repo: + type: string + description: 'The owner and repository name as per the github.repository context property.' + required: true + plugin_branch: + type: string + description: 'The name of the plugin branch.' 
+ required: true + +jobs: + plugin-update-check: + runs-on: ubuntu-latest + env: + PLUGIN_REPO: "${{inputs.repo}}" + PLUGIN_BRANCH: "${{inputs.plugin_branch}}" + VAULT_BRANCH: "auto-plugin-update/${{inputs.repo}}/${{inputs.plugin_branch}}" + RUN_ID: "${{github.run_id}}" + steps: + - run: echo "Branch $PLUGIN_BRANCH of $PLUGIN_REPO" + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + # We don't use the default token so that checks are executed on the resulting PR + # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow + token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version + + - name: update plugin + run: | + go get "github.com/$PLUGIN_REPO@$PLUGIN_BRANCH" + go mod tidy + + - name: detect changes + id: changes + run: | + echo "count=$(git status --porcelain=v1 2>/dev/null | wc -l)" >> "$GITHUB_OUTPUT" + + - name: commit/push + if: steps.changes.outputs.count > 0 + run: | + git config user.name hc-github-team-secure-vault-ecosystem + git config user.email hc-github-team-secure-vault-ecosystem@users.noreply.github.com + git add . + git commit -m "Automated dependency upgrades" + git push -f origin ${{ github.ref_name }}:"$VAULT_BRANCH" + + - name: Open pull request if needed + id: pr + if: steps.changes.outputs.count > 0 + env: + GITHUB_TOKEN: ${{secrets.ELEVATED_GITHUB_TOKEN}} + # Only open a PR if the branch is not attached to an existing one + run: | + PR=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number') + + if [ -z "$PR" ]; then + gh pr create \ + --head "$VAULT_BRANCH" \ + --title "[DO NOT MERGE]: $PLUGIN_REPO Automated plugin update check" \ + --body "Updates $PLUGIN_REPO to verify vault CI. Full log: https://github.com/hashicorp/vault/actions/runs/$RUN_ID" + else + echo "Pull request already exists, won't create a new one." 
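The `Open pull request if needed` step above stays idempotent by looking for an existing pull request on the branch before creating one: `gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number'` prints nothing when no PR matches, so an empty result means it is safe to create. A standalone sketch of that pattern, assuming an authenticated `gh` CLI and an illustrative branch name:

```
#!/usr/bin/env bash
# Sketch of the "create a PR only if none exists" pattern used by these jobs.
# Assumes `gh` is authenticated; the branch and title are hypothetical values.
set -euo pipefail
BRANCH="auto-plugin-update/example-plugin/main"

# Prints the number of the first matching PR, or nothing when none exists.
PR="$(gh pr list --head "$BRANCH" --json number -q '.[0].number')"

if [ -z "$PR" ]; then
  gh pr create --head "$BRANCH" --title "Automated update" --body "CI check PR"
else
  echo "Pull request #$PR already exists, won't create a new one."
fi
```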
+ fi + + echo "vault_pr_num=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number')" >> "$GITHUB_OUTPUT" + echo "vault_pr_url=$(gh pr list --head "$VAULT_BRANCH" --json url -q '.[0].url')" >> "$GITHUB_OUTPUT" + + - name: Add labels to Vault CI check PR + if: steps.changes.outputs.count > 0 + env: + # this is a different token to the one we have been using that should + # allow us to add labels + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + continue-on-error: true + run: | + if [ -z "${{ steps.pr.outputs.vault_pr_url }}" ]; then + echo "error: no vault PR found" + exit 1 + fi + + gh pr edit "${{ steps.pr.outputs.vault_pr_num }}" \ + --add-label "dependencies,pr/no-changelog,pr/no-milestone" \ + --repo hashicorp/vault + + - name: Comment on plugin PR + if: steps.changes.outputs.count > 0 + env: + GITHUB_TOKEN: ${{secrets.ELEVATED_GITHUB_TOKEN}} + run: | + # get Plugin PR number + plugin_pr_num=$(gh pr list --head "$PLUGIN_BRANCH" --json number --repo "$PLUGIN_REPO" -q '.[0].number') + + if [ -z "$plugin_pr_num" ]; then + echo "error: no plugin PR found" + exit 1 + fi + + if [ -z "${{ steps.pr.outputs.vault_pr_url }}" ]; then + echo "error: no vault PR found" + exit 1 + fi + + # make a comment on the plugin repo's PR + gh pr comment "$plugin_pr_num" \ + --body "Vault CI check PR: ${{ steps.pr.outputs.vault_pr_url }}" \ + --repo "$PLUGIN_REPO" diff --git a/.github/workflows/plugin-update.yml b/.github/workflows/plugin-update.yml new file mode 100644 index 000000000000..0e3f2551a9f7 --- /dev/null +++ b/.github/workflows/plugin-update.yml @@ -0,0 +1,123 @@ +name: Plugin update +run-name: Update ${{ inputs.plugin }} to v${{ inputs.version }} + +on: + workflow_dispatch: + inputs: + plugin: + description: 'Full name of the plugin, e.g., vault-plugin-auth-kubernetes' + required: true + type: string + branch: + description: 'Git branch name to use' + required: true + type: string + version: + description: 'Version of the plugin with *NO* "v", e.g., 1.2.3' + required: true + type: string + reviewer: + description: 'Reviewer to tag on the PR' + required: false + type: string + +jobs: + plugin-update: + runs-on: ubuntu-latest + env: + VAULT_BRANCH: ${{ inputs.branch }} + REVIEWER: ${{ inputs.reviewer || github.actor }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + # We don't use the default token so that checks are executed on the resulting PR + # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow + token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version + + - name: Update plugin + if: github.repository == 'hashicorp/vault' + run: | + go get "github.com/hashicorp/${{ inputs.plugin }}@v${{ inputs.version }}" + go mod tidy + + - name: Update Enterprise-only plugin + if: github.repository == 'hashicorp/vault-enterprise' + run: | + (cd vault_ent && go get "github.com/hashicorp/${{ inputs.plugin }}@v${{ inputs.version }}" && go mod tidy) + go mod tidy + + - name: Detect changes + run: | + count=$(git status --porcelain=v1 2>/dev/null | wc -l) + if [ "$count" -eq 0 ]; then + echo "::error::no updates were made for ${{ inputs.plugin }} with tag v${{ inputs.version }}" + exit 1 + fi + + - name: Commit and push + run: | + git config user.name hc-github-team-secure-vault-ecosystem + git config 
user.email hc-github-team-secure-vault-ecosystem@users.noreply.github.com + git add go.mod go.sum + git commit -m "Update ${{ inputs.plugin }} to v${{ inputs.version }}" + git push -f origin ${{ github.ref_name }}:"$VAULT_BRANCH" + + - name: Open pull request if needed + id: pr + env: + GITHUB_TOKEN: ${{secrets.ELEVATED_GITHUB_TOKEN}} + # Only open a PR if the branch is not attached to an existing one + run: | + PR=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number') + + if [ -z "$PR" ]; then + gh pr create \ + --head "$VAULT_BRANCH" \ + --reviewer "$REVIEWER" \ + --assignee "$REVIEWER" \ + --title "Update ${{ inputs.plugin }} to v${{ inputs.version }}" \ + --body "This PR was generated by a GitHub Action. Full log: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" + + echo "vault_pr_num=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number')" >> "$GITHUB_OUTPUT" + echo "vault_pr_url=$(gh pr list --head "$VAULT_BRANCH" --json url -q '.[0].url')" >> "$GITHUB_OUTPUT" + else + echo "::notice::Pull request $PR already exists, won't create a new one." + fi + + - name: Add changelog + if: steps.pr.outputs.vault_pr_num != '' + run: | + PLUGIN="${{ inputs.plugin }}" + + # plugin type is one of auth/secrets/database + PLUGIN_TYPE=$(echo "$PLUGIN" | awk -F- '{print $3}') + echo "::debug::plugin type: $PLUGIN_TYPE" + + # plugin service is the rest of the repo name + PLUGIN_SERVICE=$(echo "$PLUGIN" | cut -d- -f 4-) + echo "::debug::plugin service: $PLUGIN_SERVICE" + + echo "\`\`\`release-note:change + ${PLUGIN_TYPE}/${PLUGIN_SERVICE}: Update plugin to v${{ inputs.version }} + \`\`\`" > "changelog/${{ steps.pr.outputs.vault_pr_num }}.txt" + + git add changelog/ + git commit -m "Add changelog" + git push origin ${{ github.ref_name }}:"$VAULT_BRANCH" + + - name: Add labels to Vault PR + if: steps.pr.outputs.vault_pr_num != '' + env: + # this is a different token to the one we have been using that should + # allow us to add labels + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + continue-on-error: true + run: | + gh pr edit "${{ steps.pr.outputs.vault_pr_num }}" \ + --add-label "dependencies" \ + --repo hashicorp/vault diff --git a/.github/workflows/remove-labels.yml b/.github/workflows/remove-labels.yml index 7531e9fdacb9..014b6752af7a 100644 --- a/.github/workflows/remove-labels.yml +++ b/.github/workflows/remove-labels.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Remove triaging labels from closed issues and PRs - uses: actions-ecosystem/action-remove-labels@v1 + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 with: labels: | waiting-for-response \ No newline at end of file diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 5d61d8af333a..a396ed9314d2 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -1,5 +1,10 @@ name: Security Scan +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + on: push: branches: [main] @@ -10,55 +15,60 @@ on: jobs: scan: - runs-on: - labels: ['linux', 'large'] - if: ${{ github.actor != 'dependabot[bot]' || github.actor != 'hc-github-team-secure-vault-core' }} + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","ondemand","os=linux","type=c6a.4xlarge"]') }} + # The first check ensures this doesn't run on 
community-contributed PRs, who won't have the + # permissions to run this job. + if: | + ! github.event.pull_request.head.repo.fork && + github.actor != 'dependabot[bot]' && + github.actor != 'hc-github-team-secure-vault-core' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.18 + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: 3.x - name: Clone Security Scanner repo - uses: actions/checkout@v3 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: repository: hashicorp/security-scanner token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }} path: security-scanner - ref: 2526c196a28bb367b1ac6c997ff48e9ebf06834f + ref: main - name: Install dependencies shell: bash env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - mkdir $HOME/.bin - cd $GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep + mkdir "$HOME/.bin" + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep" go build -o scan-plugin-semgrep . - mv scan-plugin-semgrep $HOME/.bin - - cd $GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql + mv scan-plugin-semgrep "$HOME/.bin" + + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql" go build -o scan-plugin-codeql . - mv scan-plugin-codeql $HOME/.bin - + mv scan-plugin-codeql "$HOME/.bin" + # Semgrep - python3 -m pip install semgrep - + python3 -m pip install semgrep==1.45.0 + # CodeQL LATEST=$(gh release list --repo https://github.com/github/codeql-action | cut -f 3 | sort --version-sort | tail -n1) gh release download --repo https://github.com/github/codeql-action --pattern codeql-bundle-linux64.tar.gz "$LATEST" - tar xf codeql-bundle-linux64.tar.gz -C $HOME/.bin - + tar xf codeql-bundle-linux64.tar.gz -C "$HOME/.bin" + # Add to PATH - echo "$HOME/.bin" >> $GITHUB_PATH - echo "$HOME/.bin/codeql" >> $GITHUB_PATH + echo "$HOME/.bin" >> "$GITHUB_PATH" + echo "$HOME/.bin/codeql" >> "$GITHUB_PATH" - name: Scan id: scan @@ -69,15 +79,15 @@ jobs: #SEMGREP_BASELINE_REF: ${{ github.base_ref }} with: repository: "$PWD" + cache-build: true + cache-go-modules: false - name: SARIF Output shell: bash - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | cat results.sarif - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@c4fb451437765abf5018c6fbf22cce1a7da1e5cc # codeql-bundle-v2.17.1 with: sarif_file: results.sarif diff --git a/.github/workflows/setup-go-cache.yml b/.github/workflows/setup-go-cache.yml deleted file mode 100644 index 3b8040a20545..000000000000 --- a/.github/workflows/setup-go-cache.yml +++ /dev/null @@ -1,33 +0,0 @@ -on: - workflow_call: - inputs: - runs-on: - required: true - type: string -jobs: - setup-go-cache: - runs-on: ${{ fromJSON(inputs.runs-on) }} - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c #v3.3.0 as of 2023-01-18 - - id: setup-go - name: Setup go - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 #v3.4.0 as of 2022-12-07 - with: - go-version-file: ./.go-version - cache: true - - id: setup-git - name: Setup Git configuration - run: 
| - git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}@github.com".insteadOf https://github.com - - id: download-modules - name: Download go modules - run: | - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go list -test ./... - - go mod download - ( cd sdk && go mod download ) - ( cd api && go mod download ) diff --git a/.github/workflows/stable-website.yaml b/.github/workflows/stable-website.yaml index fdd6da27f9d6..89a468bad3fe 100644 --- a/.github/workflows/stable-website.yaml +++ b/.github/workflows/stable-website.yaml @@ -3,6 +3,11 @@ on: types: - closed +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + jobs: stable_website_cherry_pick: if: github.event.pull_request.merged && contains(github.event.pull_request.labels.*.name, 'docs-cherrypick') @@ -10,7 +15,7 @@ jobs: name: Cherry pick to stable-website branch steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: ref: stable-website - run: | diff --git a/.github/workflows/test-acc-dockeronly-nightly.yml b/.github/workflows/test-acc-dockeronly-nightly.yml index 4a78bb62f49a..a3c6e484d072 100644 --- a/.github/workflows/test-acc-dockeronly-nightly.yml +++ b/.github/workflows/test-acc-dockeronly-nightly.yml @@ -4,6 +4,11 @@ on: # Change to nightly cadence once API-credential-requiring tests are added to the jobs workflow_dispatch: +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + # Currently the jobs here are only for acceptance tests that have no dependencies except for Docker jobs: plugins-database: diff --git a/.github/workflows/test-ci-bootstrap.yml b/.github/workflows/test-ci-bootstrap.yml index ec3f2c934565..9d030408cb2f 100644 --- a/.github/workflows/test-ci-bootstrap.yml +++ b/.github/workflows/test-ci-bootstrap.yml @@ -1,5 +1,10 @@ name: test-ci-bootstrap +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + on: workflow_dispatch: pull_request: @@ -24,11 +29,11 @@ jobs: TF_VAR_aws_ssh_public_key: ${{ secrets.SSH_KEY_PUBLIC_CI }} TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Terraform - uses: hashicorp/setup-terraform@v2 + uses: hashicorp/setup-terraform@v3 - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} diff --git a/.github/workflows/test-ci-cleanup.yml b/.github/workflows/test-ci-cleanup.yml index 5035b86760c0..697ad2cca6ed 100644 --- a/.github/workflows/test-ci-cleanup.yml +++ b/.github/workflows/test-ci-cleanup.yml @@ -11,7 +11,7 @@ jobs: regions: ${{steps.setup.outputs.regions}} steps: - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: 
aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} @@ -22,7 +22,7 @@ jobs: - name: Get all regions id: setup run: | - echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> $GITHUB_OUTPUT + echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> "$GITHUB_OUTPUT" aws-nuke: needs: setup @@ -40,7 +40,7 @@ jobs: steps: - name: Configure AWS credentials id: aws-configure - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} @@ -49,7 +49,7 @@ jobs: role-skip-session-tagging: true role-duration-seconds: 3600 mask-aws-account-id: false - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Configure run: | cp enos/ci/aws-nuke.yml . @@ -75,7 +75,7 @@ jobs: region: ${{ fromJSON(needs.setup.outputs.regions) }} steps: - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml index bdd6de45dc36..017d62640751 100644 --- a/.github/workflows/test-enos-scenario-ui.yml +++ b/.github/workflows/test-enos-scenario-ui.yml @@ -1,6 +1,11 @@ --- name: Vault UI Tests +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + on: workflow_call: inputs: @@ -32,26 +37,22 @@ jobs: name: Get metadata runs-on: ubuntu-latest outputs: - go-version: ${{ steps.get-metadata.outputs.go-version }} - node-version: ${{ steps.get-metadata.outputs.node-version }} runs-on: ${{ steps.get-metadata.outputs.runs-on }} vault_edition: ${{ steps.get-metadata.outputs.vault_edition }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: get-metadata env: IS_ENT: ${{ startsWith(github.event.repository.name, 'vault-enterprise' ) }} run: | - echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - echo "node-version=$(cat ./ui/.nvmrc)" >> $GITHUB_OUTPUT - if ${IS_ENT} == true; then + if [ "$IS_ENT" == true ]; then echo "detected vault_edition=ent" - echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> $GITHUB_OUTPUT - echo "vault_edition=ent" >> $GITHUB_OUTPUT + echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> "$GITHUB_OUTPUT" + echo "vault_edition=ent" >> "$GITHUB_OUTPUT" else echo "detected vault_edition=oss" - echo "runs-on=\"custom-linux-xl-vault-latest\"" >> $GITHUB_OUTPUT - echo "vault_edition=oss" >> $GITHUB_OUTPUT + echo "runs-on=\"custom-linux-xl-vault-latest\"" >> "$GITHUB_OUTPUT" + echo "vault_edition=oss" >> "$GITHUB_OUTPUT" fi run-ui-tests: @@ -71,39 +72,35 @@ jobs: GOPRIVATE: github.com/hashicorp steps: - name: Checkout - uses: actions/checkout@v3 - - name: Set Up Go - uses: 
actions/setup-go@v3 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/set-up-go with: - go-version: ${{ needs.get-metadata.outputs.go-version }} + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - uses: hashicorp/action-setup-enos@v1 with: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Set Up Git run: git config --global url."https://${{ secrets.elevated_github_token }}:@github.com".insteadOf "https://github.com" - name: Set Up Node - uses: actions/setup-node@v3 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: - node-version: ${{ needs.get-metadata.outputs.node-version }} + node-version-file: './ui/package.json' - name: Set Up Terraform - uses: hashicorp/setup-terraform@v2 + uses: hashicorp/setup-terraform@v3 with: cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} terraform_wrapper: false - # Terraform 1.4.x introduced an issue that prevents some resources from - # planning. Pin to 1.3.x until it is resolved. - terraform_version: 1.3.9 - name: Prepare scenario dependencies run: | mkdir -p ./enos/support/terraform-plugin-cache echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem chmod 600 ./enos/support/private_key.pem - name: Set Up Vault Enterprise License - if: contains(${{ github.event.repository.name }}, 'ent') + if: contains(github.event.repository.name, 'ent') run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - name: Check Chrome Installed id: chrome-check - run: echo "chrome-version=$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null || echo 'not-installed')" >> $GITHUB_OUTPUT + run: echo "chrome-version=$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null || echo 'not-installed')" >> "$GITHUB_OUTPUT" - name: Install Chrome Dependencies if: steps.chrome-check.outputs.chrome-version == 'not-installed' run: | @@ -111,12 +108,12 @@ jobs: sudo apt install -y libnss3-dev libgdk-pixbuf2.0-dev libgtk-3-dev libxss-dev libasound2 - name: Install Chrome if: steps.chrome-check.outputs.chrome-version == 'not-installed' - uses: browser-actions/setup-chrome@v1 + uses: browser-actions/setup-chrome@facf10a55b9caf92e0cc749b4f82bf8220989148 # v1.7.2 - name: Installed Chrome Version run: | echo "Installed Chrome Version = [$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null)]" - name: Configure AWS credentials from Test account - uses: aws-actions/configure-aws-credentials@v1-node16 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml index 4ffa55218a40..9bb61654037a 100644 --- a/.github/workflows/test-go.yml +++ b/.github/workflows/test-go.yml @@ -5,82 +5,286 @@ on: description: The execution architecture (arm, amd64, etc.) required: true type: string - extra-tags: - description: A comma-separated list of additional build tags. + total-runners: + description: Number of runners to use for executing non-binary tests. + required: true + type: string + binary-tests: + description: Whether to run the binary tests. 
+ required: false + default: false + type: boolean + env-vars: + description: A map of environment variables as JSON. + required: false + type: string + default: '{}' + extra-flags: + description: A space-separated list of additional build flags. required: false type: string + default: '' runs-on: - description: An expression indicating which kind of runners to use. + description: An expression indicating which kind of runners to use for Go testing jobs. required: false type: string - default: ubuntu-latest - enterprise: - description: A flag indicating if this workflow is executing for the enterprise repository. - required: true + default: '"ubuntu-latest"' + runs-on-small: + description: An expression indicating which kind of runners to use for small computing jobs. + required: false type: string + default: '"ubuntu-latest"' go-tags: - description: The go tags to include on the go test command. + description: A comma-separated list of additional build tags to consider satisfied during the build. required: false type: string + name: + description: | + A unique identifier to use for labeling artifacts and workflows. It is commonly used to + specify context, e.g., fips, race, testonly, standard. + required: true + type: string + go-test-parallelism: + description: The parallelism parameter for Go tests + required: false + default: 20 + type: number + go-test-timeout: + description: The timeout parameter for Go tests + required: false + default: 50m + type: string + timeout-minutes: + description: The maximum number of minutes that this workflow should run + required: false + default: 60 + type: number + testonly: + description: Whether to run the tests tagged with testonly. + required: false + default: false + type: boolean + test-timing-cache-enabled: + description: Cache the gotestsum test timing data. + required: false + default: true + type: boolean + test-timing-cache-key: + description: The cache key to use for gotestsum test timing data. + required: false + default: go-test-reports + type: string + checkout-ref: + description: The ref to use for checkout. + required: false + default: ${{ github.ref }} + type: string + outputs: + data-race-output: + description: A textual output of any data race detector failures + value: ${{ jobs.status.outputs.data-race-output }} + data-race-result: + description: Whether or not there were any data races detected + value: ${{ jobs.status.outputs.data-race-result }} -env: - total-runners: 16 +env: ${{ fromJSON(inputs.env-vars) }} jobs: - test-generate-test-package-list: - runs-on: ${{ fromJSON(inputs.runs-on) }} - name: Verify Test Package Distribution - steps: - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - id: test - working-directory: .github/scripts - run: | - ENTERPRISE=${{ inputs.enterprise }} ./test-generate-test-package-lists.sh - runner-indexes: - runs-on: ${{ fromJSON(inputs.runs-on) }} - name: Generate runner indexes - # - # This job generates a JSON Array of integers ranging from 1 to 16. - # That array is used in the matrix section of the test-go job below.
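For context on the `runner-indexes` job this hunk deletes (its generate-index-list step appears just below): it produced that JSON array by piping `seq` output into jq, where `--null-input` plus `[inputs]` slurps every input line into a single array. A sketch of the trick with an illustrative runner count:

```
#!/usr/bin/env bash
# Sketch: how the deleted generate-index-list step built a JSON array of
# runner indexes. Each line from seq parses as a JSON number, and
# `. |= [inputs]` collects them all into one array.
total_runners=16
INDEX_LIST=$(seq 1 "$total_runners")
INDEX_JSON=$(jq --null-input --compact-output '. |= [inputs]' <<< "${INDEX_LIST}")
echo "indexes=${INDEX_JSON}"   # indexes=[1,2,3,...,16]
```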
- # + test-matrix: + permissions: + id-token: write # Note: this permission is explicitly required for Vault auth + contents: read + runs-on: ${{ fromJSON(inputs.runs-on-small) }} outputs: - runner-indexes: ${{ steps.generate-index-list.outputs.indexes }} + go-test-dir: ${{ steps.metadata.outputs.go-test-dir }} + matrix: ${{ steps.build.outputs.matrix }} + matrix_ids: ${{ steps.build.outputs.matrix_ids }} steps: - - id: generate-index-list - run: | - INDEX_LIST=$(seq 1 ${{ env.total-runners }}) - INDEX_JSON=$(jq --null-input --compact-output '. |= [inputs]' <<< ${INDEX_LIST}) - echo "indexes=${INDEX_JSON}" >> ${GITHUB_OUTPUT} + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ inputs.checkout-ref }} + - name: Authenticate to Vault + id: vault-auth + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - name: Fetch Secrets + id: secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; + kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; + kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ github.repository == 'hashicorp/vault-enterprise' && steps.secrets.outputs.github-token || secrets.ELEVATED_GITHUB_TOKEN }} + - id: metadata + name: Set up metadata + run: echo "go-test-dir=test-results/go-test" | tee -a "$GITHUB_OUTPUT" + - uses: ./.github/actions/set-up-gotestsum + - run: mkdir -p ${{ steps.metadata.outputs.go-test-dir }} + - uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + if: inputs.test-timing-cache-enabled + with: + path: ${{ steps.metadata.outputs.go-test-dir }} + key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }} + restore-keys: | + ${{ inputs.test-timing-cache-key }}- + - name: Sanitize timing files + id: sanitize-timing-files + run: | + # Prune invalid timing files + find '${{ steps.metadata.outputs.go-test-dir }}' -mindepth 1 -type f -name "*.json" -exec sh -c ' + file="$1"; + jq . 
"$file" || rm "$file" + ' shell {} \; > /dev/null 2>&1 + - name: Build matrix excluding binary, integration, and testonly tests + id: build-non-binary + if: ${{ !inputs.testonly }} + env: + GOPRIVATE: github.com/hashicorp/* + run: | + # testonly tests need additional build tag though let's exclude them anyway for clarity + ( + make all-packages | grep -v "_binary" | grep -v "vault/integ" | grep -v "testonly" | gotestsum tool ci-matrix --debug \ + --partitions "${{ inputs.total-runners }}" \ + --timing-files '${{ steps.metadata.outputs.go-test-dir }}/*.json' > matrix.json + ) + - name: Build matrix for tests tagged with testonly + if: ${{ inputs.testonly }} + env: + GOPRIVATE: github.com/hashicorp/* + run: | + set -exo pipefail + # enable glob expansion + shopt -s nullglob + # testonly tagged tests need an additional tag to be included + # also running some extra tests for sanity checking with the testonly build tag + ( + go list -tags=testonly ./vault/external_tests/{kv,token,*replication-perf*,*testonly*} ./command/*testonly* ./vault/ | gotestsum tool ci-matrix --debug \ + --partitions "${{ inputs.total-runners }}" \ + --timing-files '${{ steps.metadata.outputs.go-test-dir }}/*.json' > matrix.json + ) + # disable glob expansion + shopt -u nullglob + - name: Capture list of binary tests + if: inputs.binary-tests + id: list-binary-tests + run: | + LIST="$(make all-packages | grep "_binary" | xargs)" + echo "list=$LIST" >> "$GITHUB_OUTPUT" + - name: Build complete matrix + id: build + run: | + set -exo pipefail + matrix_file="matrix.json" + if [ "${{ inputs.binary-tests}}" == "true" ] && [ -n "${{ steps.list-binary-tests.outputs.list }}" ]; then + export BINARY_TESTS="${{ steps.list-binary-tests.outputs.list }}" + jq --arg BINARY "${BINARY_TESTS}" --arg BINARY_INDEX "${{ inputs.total-runners }}" \ + '.include += [{ + "id": $BINARY_INDEX, + "estimatedRuntime": "N/A", + "packages": $BINARY, + "description": "partition $BINARY_INDEX - binary test packages" + }]' matrix.json > new-matrix.json + matrix_file="new-matrix.json" + fi + # convert the json to a map keyed by id + ( + echo -n "matrix=" + jq -c \ + '.include | map( { (.id|tostring): . } ) | add' "$matrix_file" + ) | tee -a "$GITHUB_OUTPUT" + # extract an array of ids from the json + ( + echo -n "matrix_ids=" + jq -c \ + '[ .include[].id | tostring ]' "$matrix_file" + ) | tee -a "$GITHUB_OUTPUT" + test-go: + needs: test-matrix permissions: - id-token: write # Note: this permission is explicitly required for Vault auth + actions: read contents: read - name: ${{ inputs.packages }} - needs: - - runner-indexes - # Use GitHub runners on the OSS Vault repo and self-hosted runners otherwise. + id-token: write # Note: this permission is explicitly required for Vault auth runs-on: ${{ fromJSON(inputs.runs-on) }} strategy: fail-fast: false matrix: - # - # Initialize the runner-index key with the JSON array of integers - # generated above. 
- # - runner-index: ${{ fromJSON(needs.runner-indexes.outputs.runner-indexes) }} + id: ${{ fromJSON(needs.test-matrix.outputs.matrix_ids) }} env: GOPRIVATE: github.com/hashicorp/* - TIMEOUT_IN_MINUTES: 60 + TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }} + outputs: + go-test-results-download-pattern: ${{ steps.metadata.outputs.go-test-results-download-pattern }} + data-race-log-download-pattern: ${{ steps.metadata.outputs.data-race-log-download-pattern }} steps: - - id: setup-git - name: Setup Git configuration - run: | - git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}@github.com".insteadOf https://github.com - - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: - go-version-file: ./.go-version - cache: true + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - id: metadata + name: Set up metadata + run: | + # Metadata variables that are used throughout the workflow + # Example comments assume: + # - needs.test-matrix.outputs.go-test-dir == test-results/go-test + # - inputs.name == testonly + # - inputs.checkout-ref == main + # - matrix.id == 1 + ref="$(tr / - <<< "${{ inputs.checkout-ref }}")" # main, but removes special characters from refs with / + name="${{ inputs.name }}-${ref}-${{ matrix.id }}" # testonly-main-1 + go_test_dir='${{ needs.test-matrix.outputs.go-test-dir }}' # test-results/go-test + test_results_dir="$(dirname "$go_test_dir")" # test-results + go_test_dir_absolute="$(pwd)/${go_test_dir}" # /home/runner/work/vault/vault/test-results/go-test + go_test_log_dir="${go_test_dir}/logs" # test-results/go-test/logs + go_test_log_dir_absolute="${go_test_dir_absolute}/logs" # /home/runner/work/vault/vault/test-results/go-test/logs + go_test_log_archive_name="test-logs-${name}.tar" # test-logs-testonly-main-1.tar + go_test_results_upload_key="${test_results_dir}-${name}" # test-results/go-test-testonly-main-1 + go_test_results_download_pattern="${test_results_dir}-${{ inputs.name }}-*" # test-results/go-test-testonly-main-* + gotestsum_results_prefix="results" # results + gotestsum_junitfile=${go_test_dir}/${gotestsum_results_prefix}-${name}.xml # test-results/go-test/results-testonly-main-1.xml + gotestsum_jsonfile=${go_test_dir}/${gotestsum_results_prefix}-${name}.json # test-results/go-test/results-testonly-main-1.json + gotestsum_timing_events=failure-summary-${name}.json # failure-summary-testonly-main-1.json + failure_summary_file_name="failure-summary-${name}.md" # failure-summary-testonly-main-1.md + data_race_log_file="data-race.log" # data-race.log + data_race_log_download_pattern="data-race-${{ inputs.name }}*.log" # data-race-testonly-main-*.log + data_race_log_upload_key="data-race-${name}.log" # data-race-testonly-main-1.log + { + echo "name=${name}" + echo "failure-summary-file-name=${failure_summary_file_name}" + echo "data-race-log-file=${data_race_log_file}" + echo "data-race-log-download-pattern=${data_race_log_download_pattern}" + echo "data-race-log-upload-key=${data_race_log_upload_key}" + echo "go-test-dir=${go_test_dir}" + echo "go-test-log-archive-name=${go_test_log_archive_name}" + echo "go-test-log-dir=${go_test_log_dir}" + echo "go-test-log-dir-absolute=${go_test_log_dir_absolute}" + echo "go-test-results-download-pattern=${go_test_results_download_pattern}" + echo 
"go-test-results-upload-key=${go_test_results_upload_key}" + echo "gotestsum-jsonfile=${gotestsum_jsonfile}" + echo "gotestsum-junitfile=${gotestsum_junitfile}" + echo "gotestsum-results-prefix=${gotestsum_results_prefix}" + echo "gotestsum-timing-events=${gotestsum_timing_events}" + } | tee -a "$GITHUB_OUTPUT" - name: Authenticate to Vault id: vault-auth if: github.repository == 'hashicorp/vault-enterprise' @@ -88,78 +292,408 @@ jobs: - name: Fetch Secrets id: secrets if: github.repository == 'hashicorp/vault-enterprise' - uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + uses: hashicorp/vault-action@v3 with: url: ${{ steps.vault-auth.outputs.addr }} caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} token: ${{ steps.vault-auth.outputs.token }} secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; - kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS; - kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL; - kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID; - kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET; - kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - uses: ./.github/actions/install-external-tools + - name: Build Vault HSM binary for tests + if: inputs.binary-tests && matrix.id == inputs.total-runners && github.repository == 'hashicorp/vault-enterprise' + env: + GOPRIVATE: github.com/hashicorp/* + run: | + set -exo pipefail + time make prep enthsmdev + # The subsequent build of vault will blow away the bin folder + mv bin/vault vault-hsm-binary + - if: inputs.binary-tests && matrix.id == inputs.total-runners + name: Build dev binary for binary tests + # The dev mode binary has to exist for binary tests that are dispatched on the last runner. + env: + GOPRIVATE: github.com/hashicorp/* + run: time make prep dev + - name: Install gVisor + # Enterprise repo runners do not allow sudo, so can't install gVisor there yet. + if: github.repository != 'hashicorp/vault-enterprise' + run: | + ( + set -e + ARCH="$(uname -m)" + URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}" + wget --quiet "${URL}/runsc" "${URL}/runsc.sha512" \ + "${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512" + sha512sum -c runsc.sha512 \ + -c containerd-shim-runsc-v1.sha512 + rm -f -- *.sha512 + chmod a+rx runsc containerd-shim-runsc-v1 + sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin + ) + sudo tee /etc/docker/daemon.json < /dev/null 2>&1; then + exit 0 + fi + # Curl does not always exit 1 if things go wrong. To determine if this is successful we'll + # we'll silence all non-error output and check the results to determine success. + if ! 
out="$(curl -sSL --fail https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64 --output /usr/local/bin/datadog-ci 2>&1)"; then + printf "failed to download datadog-ci: %s" "$out" + fi + if [[ -n "$out" ]]; then + printf "failed to download datadog-ci: %s" "$out" + fi + chmod +x /usr/local/bin/datadog-ci + - name: Upload test results to DataDog + continue-on-error: true + env: + DD_ENV: ci + run: | + if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then + export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }} + fi + datadog-ci junit upload --service "$GITHUB_REPOSITORY" '${{ steps.metadata.outputs.gotestsum-junitfile }}' + if: success() || failure() + - name: Archive test logs + if: always() + id: archive-test-logs + # actions/upload-artifact will compress the artifact for us. We create a tarball to preserve + # permissions and to support file names with special characters. + run: | + tar -cvf '${{ steps.metadata.outputs.go-test-log-archive-name }}' -C "${{ steps.metadata.outputs.go-test-log-dir }}" . + - name: Upload test logs archives + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: - name: test-results-${{ matrix.runner-index }} - path: test-results/ - - name: Create a summary of tests - uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f + name: ${{ steps.metadata.outputs.go-test-log-archive-name }} + path: ${{ steps.metadata.outputs.go-test-log-archive-name }} + retention-days: 7 + if: success() || failure() + - name: Upload test results + if: success() || failure() + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: - paths: "test-results/go-test/results.xml" - show: "fail" - if: always() + name: ${{ steps.metadata.outputs.go-test-results-upload-key }} + path: | + ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.gotestsum-results-prefix}}*.json + ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.gotestsum-results-prefix}}*.xml + # We cache relevant timing data with actions/cache later so we can let the file expire quickly + retention-days: 1 + - name: Check for data race failures + if: success() || failure() + id: data-race-check + working-directory: ${{ needs.test-matrix.outputs.go-test-dir }} + run: | + # Scan gotestsum output files for data race errors. 
+ data_race_tests=() + data_race_log='${{ steps.metadata.outputs.data-race-log-file }}' + for file in *.json; do + # Check if test results contains offending phrase + if grep -q "WARNING: DATA RACE" "$file"; then + data_race_tests+=("test-go (${{ matrix.id }})") + touch "$data_race_log" + + # Write output to our log file so we can aggregate it in the final workflow + { + echo "=============== test-go (${{ matrix.id }}) ===========================" + sed -n '/WARNING: DATA RACE/,/==================/p' "$file" | jq -r -j '.Output' + } | tee -a "$data_race_log" + fi + done + + result="success" + # Fail the action if there were any failed race tests + if (("${#data_race_tests[@]}" > 0)); then + result="failure" + fi + echo "data-race-result=${result}" | tee -a "$GITHUB_OUTPUT" + - name: Upload data race detector failure log + if: | + (success() || failure()) && + steps.data-race-check.outputs.data-race-result == 'failure' + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + name: ${{ steps.metadata.outputs.data-race-log-upload-key }} + path: ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.data-race-log-file }} + # Set the minimum retention possible. We only upload this because it's the only way to + # aggregate results from matrix workflows. + retention-days: 1 + if-no-files-found: error # Make sure we always upload the data race logs if it failed + # GitHub Actions doesn't expose the job ID or the URL to the job execution, + # so we have to fetch it from the API + - name: Fetch job logs URL + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + if: success() || failure() + continue-on-error: true + with: + retries: 3 + script: | + // We surround the whole script with a try-catch block, to avoid each of the matrix jobs + // displaying an error in the GHA workflow run annotations, which gets very noisy. + // If an error occurs, it will be logged so that we don't lose any information about the reason for failure. + try { + const fs = require("fs"); + const result = await github.rest.actions.listJobsForWorkflowRun({ + owner: context.repo.owner, + per_page: 100, + repo: context.repo.repo, + run_id: context.runId, + }); + + // Determine what job name to use for the query. These values are hardcoded, because GHA doesn't + // expose them in any of the contexts available within a workflow run. + let prefixToSearchFor; + switch ("${{ inputs.name }}") { + case "race": + prefixToSearchFor = 'Run Go tests with data race detection / test-go (${{ matrix.id }})' + break + case "fips": + prefixToSearchFor = 'Run Go tests with FIPS configuration / test-go (${{ matrix.id }})' + break + default: + prefixToSearchFor = 'Run Go tests / test-go (${{ matrix.id }})' + } + + const jobData = result.data.jobs.filter( + (job) => job.name.startsWith(prefixToSearchFor) + ); + const url = jobData[0].html_url; + const envVarName = "GH_JOB_URL"; + const envVar = envVarName + "=" + url; + const envFile = process.env.GITHUB_ENV; + + fs.appendFile(envFile, envVar, (err) => { + if (err) throw err; + console.log("Successfully set " + envVarName + " to: " + url); + }); + } catch (error) { + console.log("Error: " + error); + return + } + - name: Prepare failure summary + if: success() || failure() + continue-on-error: true + run: | + # This jq query filters out successful tests, leaving only the failures. 
+ # Then, it formats the results into rows of a Markdown table. + # An example row will resemble this: + # | github.com/hashicorp/vault/package | TestName | fips | 0 | 2 | [view results](github.com/link-to-logs) | + jq -r -n 'inputs + | select(.Action == "fail") + | "| ${{inputs.name}} | \(.Package) | \(.Test // "-") | \(.Elapsed) | ${{ matrix.id }} | [view test results :scroll:](${{ env.GH_JOB_URL }}) |"' \ + '${{ steps.metadata.outputs.gotestsum-timing-events }}' \ + >> '${{ steps.metadata.outputs.failure-summary-file-name }}' - name: Upload failure summary + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + if: success() || failure() + with: + name: ${{ steps.metadata.outputs.failure-summary-file-name }} + path: ${{ steps.metadata.outputs.failure-summary-file-name }} + + + status: + # Perform final data aggregation and determine overall status + if: always() + needs: + - test-matrix + - test-go + runs-on: ${{ fromJSON(inputs.runs-on-small) }} + outputs: + data-race-output: ${{ steps.status.outputs.data-race-output }} + data-race-result: ${{ steps.status.outputs.data-race-result }} + steps: + - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + pattern: ${{ needs.test-go.outputs.data-race-log-download-pattern }} + path: data-race-logs + merge-multiple: true + # Determine our success/failure status by checking the result status and data race status. + - id: status + name: Determine status result + run: | + # Determine status result + result="success" + + # Aggregate all of our test workflows and determine our Go test result from them. + test_go_results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | jq -Mrc) + if ! grep -q -v -E '(failure|cancelled)' <<< "$test_go_results"; then + test_go_result="failed" + result="failed" + else + test_go_result="success" + fi + + # If we have downloaded data race detector logs then at least one Go test job detected + # a data race during execution. We'll fail on that. + if [ -z "$(ls -A data-race-logs)" ]; then + data_race_output="" + data_race_result="success" + else + data_race_output="$(cat data-race-logs/*)" + data_race_result="failed" + result="failed" + fi + + # Write Go and data race results to outputs. + { + echo "data-race-output< /dev/null 2>&1 + + ls -lhR '${{ needs.test-matrix.outputs.go-test-dir }}' + # Determine our overall pass/fail with our Go test results + - if: always() && steps.status.outputs.result != 'success' + name: Check for failed status + run: | + printf "One or more required go-test workflows failed. Required workflow statuses: ${{ steps.status.outputs.test-go-results }}\n ${{ steps.status.outputs.data-race-output }}" + exit 1 diff --git a/.github/workflows/test-run-acc-tests-for-path.yml b/.github/workflows/test-run-acc-tests-for-path.yml index c53fb1aa9537..7385905a2670 100644 --- a/.github/workflows/test-run-acc-tests-for-path.yml +++ b/.github/workflows/test-run-acc-tests-for-path.yml @@ -20,15 +20,12 @@ jobs: go-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - id: get-metadata - run: echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - - name: Set Up Go - uses: actions/setup-go@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/set-up-go with: - go-version: ${{ steps.get-metadata.outputs.go-version }} + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - run: go test -v ./${{ inputs.path }}/...
2>&1 | tee ${{ inputs.name }}.txt - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: ${{ inputs.name }}-output path: ${{ inputs.name }}.txt diff --git a/.github/workflows/test-run-enos-scenario-containers.yml b/.github/workflows/test-run-enos-scenario-containers.yml new file mode 100644 index 000000000000..bb3e146888cb --- /dev/null +++ b/.github/workflows/test-run-enos-scenario-containers.yml @@ -0,0 +1,140 @@ +--- +name: enos-containers + +on: + # Only trigger this workflow using workflow_call. This workflow requires many + # secrets that must be inherited from the caller workflow. + workflow_call: + inputs: + # The name of the artifact that we're going to use for testing. This should + # exactly match the build artifacts uploaded to Github and Artifactory. + build-artifact-name: + required: true + type: string + # The maximum number of scenarios to include in the test sample. + sample-max: + default: 1 + type: number + # The name of the enos scenario sample that defines compatible scenarios we + # can test with. + sample-name: + required: true + type: string + vault-edition: + required: false + type: string + default: ce + # The Git commit SHA used as the revision when building vault + vault-revision: + required: true + type: string + vault-version: + required: true + type: string + +jobs: + metadata: + runs-on: ubuntu-latest + outputs: + build-date: ${{ steps.metadata.outputs.build-date }} + sample: ${{ steps.metadata.outputs.sample }} + vault-version: ${{ steps.metadata.outputs.vault-version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ inputs.vault-revision }} + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - id: metadata + run: | + build_date=$(make ci-get-date) + sample_seed=$(date +%s) + if !
sample=$(enos scenario sample observe "${{ inputs.sample-name }}" --chdir ./enos/k8s --min 1 --max "${{ inputs.sample-max }}" --seed "${sample_seed}" --format json | jq -c ".observation.elements"); then + echo "failed to do sample observation: $sample" 2>&1 + exit 1 + fi + if [[ "${{ inputs.vault-edition }}" == "ce" ]]; then + vault_version="${{ inputs.vault-version }}" + else + # shellcheck disable=2001 + vault_version="$(sed 's/+ent/+${{ inputs.vault-edition }}/g' <<< '${{ inputs.vault-version }}')" + fi + { + echo "build-date=${build_date}" + echo "vault-version=${vault_version}" + echo "sample=${sample}" + echo "sample-seed=${sample_seed}" # This isn't used outside of here but is nice to know for duplicating observations + } | tee -a "$GITHUB_OUTPUT" + + run: + needs: metadata + name: run ${{ matrix.scenario.id.filter }} + runs-on: ${{ fromJSON(contains(inputs.build-artifact-name, 'vault-enterprise') && (contains(inputs.build-artifact-name, 'arm64') && '["self-hosted","ondemand","os=ubuntu-arm","type=c6g.xlarge"]' || '["self-hosted","linux","small"]') || (contains(inputs.build-artifact-name, 'arm64') && '"ubuntu-22.04-arm"' || '"ubuntu-latest"')) }} + strategy: + fail-fast: false # don't fail as that can skip required cleanup steps for jobs + matrix: + include: ${{ fromJSON(needs.metadata.outputs.sample) }} + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: hashicorp/setup-terraform@v3 + with: + # the Terraform wrapper will break Terraform execution in Enos because + # it changes the output to text when we expect it to be JSON. + terraform_wrapper: false + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Download Docker Image + id: download + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: ${{ inputs.build-artifact-name }} + path: ./enos/support/downloads + - if: inputs.vault-edition != 'ce' + name: Configure license + run: | + echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true + - name: Run Enos scenario + id: run + # Continue once and retry to handle occasional blips when creating + # infrastructure. 
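The `continue-on-error: true` on the step below, the `steps.run.outcome == 'failure'` guard on the retry, and the `always()` on the destroy step together encode "try once, retry once, always tear down". Roughly this shell control flow, with an illustrative scenario filter (the enos invocations are the ones used in the workflow):

```
#!/usr/bin/env bash
# Sketch of the retry-once-then-cleanup flow; "edition:ce" is a hypothetical
# stand-in for the matrix.scenario.id.filter value.
filter="edition:ce"

run_scenario() { enos scenario run --timeout 10m0s --chdir ./enos/k8s "$filter"; }

if ! run_scenario; then
  echo "first attempt failed (occasional infra blip), retrying once"
  run_scenario
fi
status=$?

# Equivalent of `if: always()`: destroy runs regardless of the outcome above.
enos scenario destroy --timeout 10m0s --chdir ./enos/k8s "$filter" || true
exit "$status"
```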
+ continue-on-error: true + env: + ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache + ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} + ENOS_VAR_vault_version: ${{ needs.metadata.outputs.vault-version }} + ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} + ENOS_VAR_container_image_archive: ${{ steps.download.outputs.download-path }}/${{ inputs.build-artifact-name }} + run: | + mkdir -p ./enos/support/terraform-plugin-cache + enos scenario run --timeout 10m0s --chdir ./enos/k8s ${{ matrix.scenario.id.filter }} + - name: Retry Enos scenario + id: run_retry + if: steps.run.outcome == 'failure' + env: + ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache + ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} + ENOS_VAR_vault_version: ${{ needs.metadata.outputs.vault-version }} + ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} + ENOS_VAR_container_image_archive: ${{ steps.download.outputs.download-path }}/${{ inputs.build-artifact-name }} + run: | + enos scenario run --timeout 10m0s --chdir ./enos/k8s ${{ matrix.scenario.id.filter }} + - name: Destroy Enos scenario + if: ${{ always() }} + env: + ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache + ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} + ENOS_VAR_vault_version: ${{ needs.metadata.outputs.vault-version }} + ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} + ENOS_VAR_container_image_archive: ${{ steps.download.outputs.download-path }}/${{ inputs.build-artifact-name }} + run: | + enos scenario destroy --timeout 10m0s --grpc-listen http://localhost --chdir ./enos/k8s ${{ matrix.scenario.id.filter }} + - name: Cleanup Enos runtime directories + if: ${{ always() }} + run: | + rm -rf /tmp/enos* + rm -rf ./enos/support + rm -rf ./enos/k8s/.enos diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml index 2f15bc472830..826a5a51c7f8 100644 --- a/.github/workflows/test-run-enos-scenario-matrix.yml +++ b/.github/workflows/test-run-enos-scenario-matrix.yml @@ -11,33 +11,15 @@ on: build-artifact-name: required: true type: string - # The base name of the file in ./github/enos-run-matrices that we use to - # determine which scenarios to run for the build artifact. - # - # They are named in the format of: - # $caller_workflow_name-$artifact_source-$vault_edition-$platform-$arch-$packing_type - # - # Where each are: - # caller_workflow_name: the Github Actions workflow that is calling - # this one - # artifact_source: where we're getting the artifact from. Either - # "github" or "artifactory" - # vault_edition: which edition of vault that we're testing. e.g. "oss" - # or "ent" - # platform: the vault binary target platform, e.g. "linux" or "macos" - # arch: the vault binary target architecture, e.g. "arm64" or "amd64" - # packing_type: how vault binary is packaged, e.g. "zip", "deb", "rpm" - # - # Examples: - # build-github-oss-linux-amd64-zip - matrix-file-name: + # The maximum number of scenarios to include in the test sample. + sample-max: + default: 1 + type: number + # The name of the enos scenario sample that defines compatible scenarios we + # can test with. + sample-name: required: true type: string - # The test group we want to run. This corresponds to the test_group attribute - defined in the enos-run-matrices files. 
- matrix-test-group: - default: 0 - type: string runs-on: # NOTE: The value should be JSON encoded as that's the only way we can # pass arrays with workflow_call. @@ -47,78 +29,98 @@ on: ssh-key-name: type: string default: ${{ github.event.repository.name }}-ci-ssh-key - # Which edition of Vault we're using. e.g. "oss", "ent", "ent.hsm.fips1402" vault-edition: - required: true + required: false type: string + default: ce # The Git commit SHA used as the revision when building vault vault-revision: required: true type: string + vault-version: + required: true + type: string jobs: metadata: runs-on: ${{ fromJSON(inputs.runs-on) }} outputs: build-date: ${{ steps.metadata.outputs.build-date }} - matrix: ${{ steps.metadata.outputs.matrix }} - version: ${{ steps.metadata.outputs.version }} - version-minor: ${{ steps.metadata.outputs.matrix }} - env: - # Pass the vault edition as VAULT_METADATA so the CI make targets can create - # values that consider the edition. - VAULT_METADATA: ${{ inputs.vault-edition }} - # Pass in the matrix and matrix group for filtering - MATRIX_FILE: ./.github/enos-run-matrices/${{ inputs.matrix-file-name }}.json - MATRIX_TEST_GROUP: ${{ inputs.matrix-test-group }} + sample: ${{ steps.metadata.outputs.sample }} + vault-version: ${{ steps.metadata.outputs.vault-version }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: ref: ${{ inputs.vault-revision }} + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - id: metadata run: | - echo "build-date=$(make ci-get-date)" >> $GITHUB_OUTPUT - echo "version=$(make ci-get-version)" >> $GITHUB_OUTPUT - filtered=$(make ci-filter-matrix) - echo "matrix=$(echo $filtered)}" >> $GITHUB_OUTPUT + build_date=$(make ci-get-date) + sample_seed=$(date +%s) + if ! 
sample=$(enos scenario sample observe "${{ inputs.sample-name }}" --chdir ./enos --min 1 --max "${{ inputs.sample-max }}" --seed "${sample_seed}" --format json | jq -c ".observation.elements"); then + echo "failed to do sample observation: $sample" 1>&2 + exit 1 + fi + if [[ "${{ inputs.vault-edition }}" == "ce" ]]; then + vault_version="${{ inputs.vault-version }}" + else + # shellcheck disable=2001 + vault_version="$(sed 's/+ent/+${{ inputs.vault-edition }}/g' <<< '${{ inputs.vault-version }}')" + fi + { + echo "build-date=${build_date}" + echo "vault-version=${vault_version}" + echo "sample=${sample}" + echo "sample-seed=${sample_seed}" # This isn't used outside of here but is useful for reproducing observations + } | tee -a "$GITHUB_OUTPUT" - # Run the Enos test scenarios + # Run the Enos test scenario(s) run: needs: metadata + name: run ${{ matrix.scenario.id.filter }} strategy: fail-fast: false # don't fail as that can skip required cleanup steps for jobs - matrix: ${{ fromJson(needs.metadata.outputs.matrix) }} - runs-on: ubuntu-latest + matrix: + include: ${{ fromJSON(needs.metadata.outputs.sample) }} + runs-on: ${{ fromJSON(inputs.runs-on) }} env: GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} # Pass in enos variables - ENOS_VAR_aws_region: ${{ matrix.aws_region }} + ENOS_VAR_aws_region: ${{ matrix.attributes.aws_region }} ENOS_VAR_aws_ssh_keypair_name: ${{ inputs.ssh-key-name }} ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }} ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN }} ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_artifact_path: ./support/downloads/${{ inputs.build-artifact-name }} ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} - ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.vault-version }} ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} - ENOS_VAR_vault_bundle_path: ./support/downloads/${{ inputs.build-artifact-name }} + ENOS_VAR_consul_license_path: ./support/consul.hclic ENOS_VAR_vault_license_path: ./support/vault.hclic + ENOS_VAR_distro_version_amzn: ${{ matrix.attributes.distro_version_amzn }} + ENOS_VAR_distro_version_leap: ${{ matrix.attributes.distro_version_leap }} + ENOS_VAR_distro_version_rhel: ${{ matrix.attributes.distro_version_rhel }} + ENOS_VAR_distro_version_sles: ${{ matrix.attributes.distro_version_sles }} + ENOS_VAR_distro_version_ubuntu: ${{ matrix.attributes.distro_version_ubuntu }} + ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data steps: - - uses: actions/checkout@v3 - - uses: hashicorp/setup-terraform@v2 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ inputs.vault-revision }} + - uses: hashicorp/setup-terraform@v3 + with: + # the Terraform wrapper will break Terraform execution in Enos because + # it changes the output to text when we expect it to be JSON. - # Terraform 1.4.x introduced an issue that prevents some resources from - # planning. Pin to 1.3.x until it is resolved. 
- terraform_version: 1.3.9 - - uses: aws-actions/configure-aws-credentials@v1-node16 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} - aws-region: ${{ matrix.aws_region }} + aws-region: ${{ matrix.attributes.aws_region }} role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} role-skip-session-tagging: true role-duration-seconds: 3600 @@ -126,37 +128,89 @@ jobs: with: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Prepare scenario dependencies + id: prepare_scenario run: | - mkdir -p ./enos/support/terraform-plugin-cache - echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem - chmod 600 ./enos/support/private_key.pem - - if: contains(inputs.matrix-file-name, 'github') - uses: actions/download-artifact@v3 + mkdir -p "./enos/support/terraform-plugin-cache" + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > "./enos/support/private_key.pem" + chmod 600 "./enos/support/private_key.pem" + echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT" + - if: contains(inputs.sample-name, 'build') + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: name: ${{ inputs.build-artifact-name }} path: ./enos/support/downloads - - if: contains(inputs.matrix-file-name, 'ent') + - if: contains(inputs.sample-name, 'ent') name: Configure Vault license run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - - name: Run Enos scenario - id: run - # Continue once and retry to handle occasional blips when creating - # infrastructure. + - if: contains(matrix.scenario.id.filter, 'consul_edition:ent') + name: Configure Consul license + run: | + echo "matrix.scenario.id.filter: ${{ matrix.scenario.id.filter }}" + echo "${{ secrets.CONSUL_LICENSE }}" > ./enos/support/consul.hclic || true + - id: launch + name: enos scenario launch ${{ matrix.scenario.id.filter }} + # Continue once and retry to handle occasional blips when creating infrastructure. continue-on-error: true - run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Retry Enos scenario if necessary - id: run_retry - if: steps.run.outcome == 'failure' - run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Ensure scenario has been destroyed - if: ${{ always() }} - # With Enos version 0.0.11 the destroy step returns an error if the infrastructure - # is already destroyed by enos run. So temporarily setting it to continue on error in GHA + run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - if: steps.launch.outcome == 'failure' + id: launch_retry + name: Retry enos scenario launch ${{ matrix.scenario.id.filter }} + run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - name: Upload Debug Data + if: failure() + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + with: + # The name of the artifact is the same as the matrix scenario name with the spaces replaced with underscores and colons replaced by equals. 
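+ # For example, an illustrative scenario filter of "agent backend:raft" would
+ # produce the artifact name "enos-debug-data_agent_backend=raft".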
+ name: ${{ steps.prepare_scenario.outputs.debug_data_artifact_name }} + path: ${{ env.ENOS_DEBUG_DATA_ROOT_DIR }} + retention-days: 30 + continue-on-error: true + - if: ${{ always() }} + id: destroy + name: enos scenario destroy ${{ matrix.scenario.id.filter }} + continue-on-error: true + run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - if: steps.destroy.outcome == 'failure' + id: destroy_retry + name: Retry enos scenario destroy ${{ matrix.scenario.id.filter }} continue-on-error: true - run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} + run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} - name: Clean up Enos runtime directories + id: cleanup if: ${{ always() }} + continue-on-error: true run: | rm -rf /tmp/enos* rm -rf ./enos/support rm -rf ./enos/.enos + # Send slack notifications to #feed-vault-enos-failures if any of our enos scenario commands fail. + # There is an incoming webhook set up on the "Enos Vault Failure Bot" Slackbot: + # https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks + - if: ${{ always() && ! cancelled() }} + name: Notify launch failed + uses: hashicorp/actions-slack-status@v2.0.1 + with: + failure-message: "enos scenario launch ${{ matrix.scenario.id.filter }} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.launch.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + - if: ${{ always() && ! cancelled() }} + name: Notify retry launch failed + uses: hashicorp/actions-slack-status@v2.0.1 + with: + failure-message: "retry enos scenario launch ${{ matrix.scenario.id.filter }} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.launch_retry.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + - if: ${{ always() && ! cancelled() }} + name: Notify destroy failed + uses: hashicorp/actions-slack-status@v2.0.1 + with: + failure-message: "enos scenario destroy ${{ matrix.scenario.id.filter }} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.destroy.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + - if: ${{ always() && ! cancelled() }} + name: Notify retry destroy failed + uses: hashicorp/actions-slack-status@v2.0.1 + with: + failure-message: "retry enos scenario destroy ${{ matrix.scenario.id.filter }} failed. 
\nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.destroy_retry.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/test-run-enos-scenario.yml b/.github/workflows/test-run-enos-scenario.yml new file mode 100644 index 000000000000..b8f1a5c0ca65 --- /dev/null +++ b/.github/workflows/test-run-enos-scenario.yml @@ -0,0 +1,129 @@ +# Reusable workflow called by interactive scenario tests in GHA +name: Test run Vault Enos scenario + +on: + workflow_call: + inputs: + artifact-source: + type: string + description: "The artifact source to test: artifactory or local (use local for the current branch)" + required: true + artifact-type: + type: string + description: "The Vault artifact type to test" + required: true + distro: + type: string + description: "Linux distribution that Vault replication will be tested on" + required: true + product-version: + type: string + description: "Vault version to test (vault_product_version)" + required: false + scenario: + type: string + description: "Enos test scenario to run" + required: true + ssh-key-name: + type: string + default: ${{ github.event.repository.name }}-ci-ssh-key + vault-revision: + type: string + description: "The git SHA of the Vault release (vault_revision)" + required: false + +jobs: + enos-run-vault-interactive-test: + name: Enos run Vault interactive test + runs-on: ubuntu-latest + timeout-minutes: 120 + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Pass in enos variables + ENOS_VAR_aws_ssh_keypair_name: ${{ inputs.ssh-key-name }} + ENOS_VAR_vault_log_level: trace + ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }} + ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_license_path: ./support/vault.hclic + ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data + VAULT_METADATA: ent + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: Set product version and revision + # If the Vault version and revision are not provided as workflow inputs, as is + # the case when testing a local artifact, the environment variables + # ENOS_VAR_vault_product_version and ENOS_VAR_vault_revision are set using the + # current branch + id: set-version-sha + run: | + [[ -n "${{ inputs.product-version }}" ]] && echo "ENOS_VAR_vault_product_version=${{ inputs.product-version }}" >> "$GITHUB_ENV" || echo "ENOS_VAR_vault_product_version=$(make ci-get-version)" >> "$GITHUB_ENV" + [[ -n "${{ inputs.vault-revision }}" ]] && echo "ENOS_VAR_vault_revision=${{ inputs.vault-revision }}" >> "$GITHUB_ENV" || echo "ENOS_VAR_vault_revision=$(make ci-get-revision)" >> "$GITHUB_ENV" + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Configure Git + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" + - name: Set up node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version: 14 + cache-dependency-path: ui/yarn.lock + - uses: hashicorp/setup-terraform@v2 + with: + # the Terraform wrapper will break Terraform execution in Enos because + # it changes the output to text when we expect it to be JSON. 
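+ # (The wrapper script that setup-terraform installs adds its own text around
+ # each terraform invocation's output, so JSON that Enos needs to parse would
+ # no longer be machine-readable; disabling it keeps stdout untouched.)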
+ terraform_wrapper: false + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: 'us-west-1' + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Prepare scenario dependencies + id: scenario-deps + run: | + mkdir -p ./enos/support/terraform-plugin-cache + mkdir -p /tmp/enos-scenario-logs + echo logsdir="/tmp/enos-scenario-logs" >> "$GITHUB_OUTPUT" + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem + chmod 600 ./enos/support/private_key.pem + - name: Setup Vault Enterprise License + id: license + run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic + - name: Run Enos scenario + id: run + run: enos scenario run --timeout 60m0s --chdir ./enos ${{ inputs.scenario }} + - name: Collect logs when scenario fails + id: collect_logs + if: ${{ always() }} + run: | + bash -x ./scripts/gha_enos_logs.sh "${{ steps.scenario-deps.outputs.logsdir }}" "${{ inputs.scenario }}" "${{ inputs.distro }}" "${{ inputs.artifact-type }}" 2>/dev/null + find "${{ steps.scenario-deps.outputs.logsdir }}" -maxdepth 0 -empty -exec rmdir {} \; + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + if: ${{ always() }} + with: + name: enos-scenario-logs + path: ${{ steps.scenario-deps.outputs.logsdir }} + retention-days: 1 + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + if: ${{ always() }} + with: + name: enos-debug-data-logs + path: ${{ env.ENOS_DEBUG_DATA_ROOT_DIR }} + retention-days: 1 + - name: Ensure scenario has been destroyed + if: ${{ always() }} + run: enos scenario destroy --timeout 60m0s --grpc-listen http://localhost --chdir ./enos ${{ inputs.scenario }} + - name: Clean up Enos runtime directories + if: ${{ always() }} + run: | + rm -rf /tmp/enos* + rm -rf ./enos/support + rm -rf ./enos/.enos diff --git a/.gitignore b/.gitignore index cdd542d60689..15bcfca6d330 100644 --- a/.gitignore +++ b/.gitignore @@ -55,23 +55,26 @@ Vagrantfile !.release/ci.hcl !.release/security-scan.hcl !.release/linux/package/etc/vault.d/vault.hcl -!command/agent/config/test-fixtures/*.hcl -!command/server/test-fixtures/**/*.hcl !enos/**/*.hcl +!**/test-fixtures/**/*.hcl +!**/testdata/*.hcl # Enos -enos/.enos -enos/support -# Enos local Terraform files -enos/.terraform/* -enos/.terraform.lock.hcl -enos/*.tfstate -enos/*.tfstate.* +.enos +enos-local.vars.hcl +enos/**/support +enos/**/kubeconfig +.terraform +.terraform.lock.hcl +.tfstate.* .DS_Store .idea .vscode +# VSCode debugger executable +__debug_bin* + dist/* # ignore ctags @@ -127,4 +130,7 @@ website/components/node_modules .releaser/ *.log -tools/godoctests/.bin \ No newline at end of file +tools/godoctests/.bin +tools/gonilnilfunctions/.bin +tools/codechecker/.bin +.ci-bootstrap \ No newline at end of file diff --git a/.go-version b/.go-version index 0044d6cb9691..87b26e8b1aa0 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.1 +1.22.7 diff --git a/.hooks/pre-commit b/.hooks/pre-commit index 17309e55a9d7..40482966c985 100755 --- a/.hooks/pre-commit +++ b/.hooks/pre-commit @@ -35,9 +35,7 @@ block() { # Add all check functions to this space separated list. # They are executed in this order (see end of file). 
-CHECKS="ui_lint circleci_verify" - -MIN_CIRCLECI_VERSION=0.1.5575 +CHECKS="ui_lint backend_lint" # Run ui linter if changes in that dir detected. ui_lint() { @@ -62,80 +60,16 @@ ui_lint() { $LINTER || block "UI lint failed" } -# Check .circleci/config.yml is up to date and valid, and that all changes are -# included together in this commit. -circleci_verify() { - # Change to the root dir of the repo. - cd "$(git rev-parse --show-toplevel)" - - # Fail early if we accidentally used '.yaml' instead of '.yml' - if ! git diff --name-only --cached --exit-code -- '.circleci/***.yaml'; then - # This is just for consistency, as I keep making this mistake - Sam. - block "ERROR: File(s) with .yaml extension detected. Please rename them .yml instead." - fi - - # Succeed early if no changes to yml files in .circleci/ are currently staged. - # make ci-verify is slow so we really don't want to run it unnecessarily. - if git diff --name-only --cached --exit-code -- '.circleci/***.yml'; then - return 0 - fi - # Make sure to add no explicit output before this line, as it would just be noise - # for those making non-circleci changes. - echo "==> Verifying config changes in .circleci/" - echo "--> OK: All files are .yml not .yaml" - - # Ensure commit includes _all_ files in .circleci/ - # So not only are the files up to date, but we are also committing them in one go. - if ! git diff --name-only --exit-code -- '.circleci/***.yml'; then - echo "ERROR: Some .yml diffs in .circleci/ are staged, others not." - block "Please commit the entire .circleci/ directory together, or omit it altogether." - fi - - echo "--> OK: All .yml files in .circleci are staged." - - if ! REASON=$(check_circleci_cli_version); then - echo "*** WARNING: Unable to verify changes in .circleci/:" - echo "--> $REASON" - # We let this pass if there is no valid circleci version installed. +backend_lint() { + # Silently succeed if no changes staged for Go code files. + staged=$(git diff --name-only --cached --exit-code -- '*.go') + ret=$? + if [ $ret -eq 0 ]; then return 0 fi - if ! make -C .circleci ci-verify; then - block "ERROR: make ci-verify failed" - fi - - echo "--> OK: make ci-verify succeeded." -} - -check_circleci_cli_version() { - if ! command -v circleci > /dev/null 2>&1; then - echo "circleci cli not installed." - return 1 - fi - - CCI="circleci --skip-update-check" - - if ! THIS_VERSION=$($CCI version) > /dev/null 2>&1; then - # Guards against very old versions that do not have --skip-update-check. - echo "The installed circleci cli is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION." - return 1 - fi - - # SORTED_MIN is the lower of the THIS_VERSION and MIN_CIRCLECI_VERSION. - if ! SORTED_MIN="$(printf "%s\n%s" "$MIN_CIRCLECI_VERSION" "$THIS_VERSION" | sort -V | head -n1)"; then - echo "Failed to sort versions. Please open an issue to report this." - return 1 - fi - - if [ "$THIS_VERSION" != "${THIS_VERSION#$MIN_CIRCLECI_VERSION}" ]; then - return 0 # OK - Versions have the same prefix, so we consider them equal. - elif [ "$SORTED_MIN" = "$MIN_CIRCLECI_VERSION" ]; then - return 0 # OK - MIN_CIRCLECI_VERSION is lower than THIS_VERSION. - fi - - # Version too low. - echo "The installed circleci cli v$THIS_VERSION is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION" - return 1 + # Only run check-fmt on staged files + ./scripts/go-helper.sh check-fmt "${staged}" || block "Backend linting failed; run 'make fmt' to fix." 
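+ # Because "${staged}" is quoted, go-helper.sh receives the newline-separated
+ # list of staged files as a single argument, e.g. (illustrative path):
+ # ./scripts/go-helper.sh check-fmt "vault/core.go"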
} for CHECK in $CHECKS; do diff --git a/.release/ci.hcl b/.release/ci.hcl index 335a21200fbf..69ddf9e4a71a 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 schema = "1" @@ -32,145 +32,13 @@ event "build" { } } -event "upload-dev" { +event "prepare" { depends = ["build"] - action "upload-dev" { + action "prepare" { organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "upload-dev" - depends = ["build"] - } - - notification { - on = "fail" - } -} - -event "quality-tests" { - depends = ["upload-dev"] - action "quality-tests" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "quality-tests" - } - - notification { - on = "fail" - } -} - -event "security-scan-binaries" { - depends = ["quality-tests"] - action "security-scan-binaries" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "security-scan-binaries" - config = "security-scan.hcl" - } - - notification { - on = "fail" - } -} - -event "security-scan-containers" { - depends = ["security-scan-binaries"] - action "security-scan-containers" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "security-scan-containers" - config = "security-scan.hcl" - } - - notification { - on = "fail" - } -} - -event "notarize-darwin-amd64" { - depends = ["security-scan-containers"] - action "notarize-darwin-amd64" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-darwin-amd64" - } - - notification { - on = "fail" - } -} - -event "notarize-darwin-arm64" { - depends = ["notarize-darwin-amd64"] - action "notarize-darwin-arm64" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-darwin-arm64" - } - - notification { - on = "fail" - } -} - -event "notarize-windows-386" { - depends = ["notarize-darwin-arm64"] - action "notarize-windows-386" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-windows-386" - } - - notification { - on = "fail" - } -} - -event "notarize-windows-amd64" { - depends = ["notarize-windows-386"] - action "notarize-windows-amd64" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-windows-amd64" - } - - notification { - on = "fail" - } -} - -event "sign" { - depends = ["notarize-windows-amd64"] - action "sign" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "sign" - } - - notification { - on = "fail" - } -} - -event "sign-linux-rpms" { - depends = ["sign"] - action "sign-linux-rpms" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "sign-linux-rpms" - } - - notification { - on = "fail" - } -} - -event "verify" { - depends = ["sign-linux-rpms"] - action "verify" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "verify" + repository = "crt-workflows-common" + workflow = "prepare" + depends = ["build"] } notification { @@ -179,7 +47,7 @@ event "verify" { } event "enos-release-testing-oss" { - depends = ["verify"] + depends = ["prepare"] action "enos-release-testing-oss" { organization = "hashicorp" repository = "vault" @@ -285,8 +153,17 @@ event "post-publish-website" { } } -event "update-ironbank" { +event "bump-version" { depends = ["post-publish-website"] + action "bump-version" { + organization = "hashicorp" + repository = "crt-workflows-common" + 
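+ # Like the "prepare" event above, this runs a shared workflow from
+ # hashicorp/crt-workflows-common.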
workflow = "bump-version" + } +} + +event "update-ironbank" { + depends = ["bump-version"] action "update-ironbank" { organization = "hashicorp" repository = "crt-workflows-common" @@ -297,3 +174,15 @@ event "update-ironbank" { on = "fail" } } + +event "crt-generate-sbom" { + depends = ["promote-production"] + action "crt-generate-sbom" { + organization = "hashicorp" + repository = "security-generate-release-sbom" + workflow = "crt-generate-sbom" + } + notification { + on = "fail" + } +} diff --git a/.release/docker/docker-entrypoint.sh b/.release/docker/docker-entrypoint.sh index 2b9b8f35a160..a3b581697c35 100755 --- a/.release/docker/docker-entrypoint.sh +++ b/.release/docker/docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/usr/bin/dumb-init /bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/.release/docker/ubi-docker-entrypoint.sh b/.release/docker/ubi-docker-entrypoint.sh index 794e69c61486..dda1260bb8bc 100755 --- a/.release/docker/ubi-docker-entrypoint.sh +++ b/.release/docker/ubi-docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/.release/linux/package/etc/vault.d/vault.hcl b/.release/linux/package/etc/vault.d/vault.hcl index 4a59d3672507..18ff8b4bbce9 100644 --- a/.release/linux/package/etc/vault.d/vault.hcl +++ b/.release/linux/package/etc/vault.d/vault.hcl @@ -1,7 +1,7 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 -# Full configuration options can be found at https://www.vaultproject.io/docs/configuration +# Full configuration options can be found at https://developer.hashicorp.com/vault/docs/configuration ui = true diff --git a/.release/linux/package/usr/lib/systemd/system/vault.service b/.release/linux/package/usr/lib/systemd/system/vault.service index 45c896b2c9a5..6408b49b3d69 100644 --- a/.release/linux/package/usr/lib/systemd/system/vault.service +++ b/.release/linux/package/usr/lib/systemd/system/vault.service @@ -1,6 +1,6 @@ [Unit] Description="HashiCorp Vault - A tool for managing secrets" -Documentation=https://www.vaultproject.io/docs/ +Documentation=https://developer.hashicorp.com/vault/docs Requires=network-online.target After=network-online.target ConditionFileNotEmpty=/etc/vault.d/vault.hcl @@ -29,6 +29,7 @@ RestartSec=5 TimeoutStopSec=30 LimitNOFILE=65536 LimitMEMLOCK=infinity +LimitCORE=0 [Install] WantedBy=multi-user.target diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl index 3a49b69c59b1..8d480ad4a73a 100644 --- a/.release/release-metadata.hcl +++ b/.release/release-metadata.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/vault" url_docker_registry_ecr = "https://gallery.ecr.aws/hashicorp/vault" diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl index 62460e431db9..3917c269cd49 100644 --- a/.release/security-scan.hcl +++ b/.release/security-scan.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 container { dependencies = true diff --git a/.release/versions.hcl b/.release/versions.hcl new file mode 100644 index 000000000000..f1db6233a6c6 --- /dev/null +++ b/.release/versions.hcl @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +# This manifest file describes active releases and is consumed by the backport tooling. + +schema = 1 +active_versions { + version "1.17.x" { + ce_active = true + } + version "1.16.x" { + ce_active = false + lts = true + } + version "1.15.x" { + ce_active = false + } +} diff --git a/CHANGELOG-pre-v1.10.md b/CHANGELOG-pre-v1.10.md new file mode 100644 index 000000000000..49f1b912d7b0 --- /dev/null +++ b/CHANGELOG-pre-v1.10.md @@ -0,0 +1,3510 @@ +## 1.9.10 + +### September 30, 2022 + +SECURITY: + +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] + +BUG FIXES: + +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] + +## 1.9.9 + +### August 31, 2022 + +SECURITY: + +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +BUG FIXES: + +* core (enterprise): Fix some races in merkle index flushing code found in testing +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] + +SECURITY: + +* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +## 1.9.8 + +### July 21, 2022 + +SECURITY: + +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. 
This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] + +CHANGES: + +* core: Bump Go version to 1.17.12. + +IMPROVEMENTS: + +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] + +BUG FIXES: + +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] + +## 1.9.7 + +### June 10, 2022 + +CHANGES: + +* core: Bump Go version to 1.17.11. [[GH-go-ver-197](https://github.com/hashicorp/vault/pull/go-ver-197)] + +IMPROVEMENTS: + +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. 
+* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. +* ui: Fixes client count timezone bug [[GH-15743](https://github.com/hashicorp/vault/pull/15743)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-15666](https://github.com/hashicorp/vault/pull/15666)] + +## 1.9.6 + +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + +## 1.9.5 + +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.17.9. [[GH-15045](https://github.com/hashicorp/vault/pull/15045)] + +IMPROVEMENTS: + +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] + +BUG FIXES: + +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] +* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. 
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] +* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.9.4 + +### March 3, 2022 + +SECURITY: + +* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. +* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +CHANGES: + +* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft +Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] + +IMPROVEMENTS: + +* core: Bump Go version to 1.17.7. 
[[GH-14232](https://github.com/hashicorp/vault/pull/14232)] +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] + +BUG FIXES: + +* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] +* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] +* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] +* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] +* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] +* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] +* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.9.3 + +### January 27, 2022 + +IMPROVEMENTS: + +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13698](https://github.com/hashicorp/vault/pull/13698)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. [[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* http (enterprise): Serve /sys/license/status endpoint within namespaces + +BUG FIXES: + +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] +* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. 
[[GH-13615](https://github.com/hashicorp/vault/pull/13615)] +* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] +* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] +* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] +* kmip (enterprise): Fix locate by name operations failing to find key after a rekey operation. +* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] + +## 1.9.2 + +### December 21, 2021 + +CHANGES: + +* go: Update go version to 1.17.5 [[GH-13408](https://github.com/hashicorp/vault/pull/13408)] + +IMPROVEMENTS: + +* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] + +BUG FIXES: + +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] + +## 1.9.1 + +### December 9, 2021 + +SECURITY: + +* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. + +IMPROVEMENTS: + +* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] + +BUG FIXES: + +* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] +* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] +* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] +* sdk/queue: move lock before length check to prevent panics. 
[[GH-13146](https://github.com/hashicorp/vault/pull/13146)] +* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes node-forge error when parsing EC (elliptical curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] + +## 1.9.0 + +### November 17, 2021 + +CHANGES: + +* auth/kubernetes: `disable_iss_validation` defaults to true. [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] +* expiration: VAULT_16_REVOKE_PERMITPOOL environment variable has been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] +* expiration: VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY environment variable has +been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] +* go: Update go version to 1.17.2 +* secrets/ssh: Roles with empty allowed_extensions will now forbid end-users +specifying extensions when requesting ssh key signing. Update roles setting +allowed_extensions to `*` to permit any extension to be specified by an end-user. [[GH-12847](https://github.com/hashicorp/vault/pull/12847)] + +FEATURES: + +* **Customizable HTTP Headers**: Add support to define custom HTTP headers for root path (`/`) and also on API endpoints (`/v1/*`) [[GH-12485](https://github.com/hashicorp/vault/pull/12485)] +* **Deduplicate Token With Entities in Activity Log**: Vault tokens without entities are now tracked with client IDs and deduplicated in the Activity Log [[GH-12820](https://github.com/hashicorp/vault/pull/12820)] +* **Elasticsearch Database UI**: The UI now supports adding and editing Elasticsearch connections in the database secret engine. [[GH-12672](https://github.com/hashicorp/vault/pull/12672)] +* **KV Custom Metadata**: Add ability in kv-v2 to specify version-agnostic custom key metadata via the +metadata endpoint. The data will be present in responses made to the data endpoint independent of the +calling token's `read` access to the metadata endpoint. [[GH-12907](https://github.com/hashicorp/vault/pull/12907)] +* **KV patch (Tech Preview)**: Add partial update support for the `//data/:path` kv-v2 +endpoint through HTTP `PATCH`. 
A new `patch` ACL capability has been added and +is required to make such requests. [[GH-12687](https://github.com/hashicorp/vault/pull/12687)] +* **Key Management Secrets Engine (Enterprise)**: Adds support for distributing and managing keys in GCP Cloud KMS. +* **Local Auth Mount Entities (enterprise)**: Logins on `local` auth mounts will +generate identity entities for the tokens issued. The aliases of the entity +resulting from local auth mounts (local-aliases), will be scoped by the cluster. +This means that the local-aliases will never leave the geographical boundary of +the cluster where they were issued. This is something to be mindful about for +those who have implemented local auth mounts for complying with GDPR guidelines. +* **Namespaces (Enterprise)**: Adds support for locking Vault API for particular namespaces. +* **OIDC Identity Provider (Tech Preview)**: Adds support for Vault to be an OpenID Connect (OIDC) provider. [[GH-12932](https://github.com/hashicorp/vault/pull/12932)] +* **Oracle Database UI**: The UI now supports adding and editing Oracle connections in the database secret engine. [[GH-12752](https://github.com/hashicorp/vault/pull/12752)] +* **Postgres Database UI**: The UI now supports adding and editing Postgres connections in the database secret engine. [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] + +SECURITY: + +* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5, 1.8.4, and 1.9.0. +* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. + +IMPROVEMENTS: + +* agent/cache: Process persistent cache leases in dependency order during restore to ensure child leases are always correctly restored [[GH-12843](https://github.com/hashicorp/vault/pull/12843)] +* agent/cache: Use an in-process listener between consul-template and vault-agent when caching is enabled and either templates or a listener is defined [[GH-12762](https://github.com/hashicorp/vault/pull/12762)] +* agent/cache: tolerate partial restore failure from persistent cache [[GH-12718](https://github.com/hashicorp/vault/pull/12718)] +* agent/template: add support for new 'writeToFile' template function [[GH-12505](https://github.com/hashicorp/vault/pull/12505)] +* api: Add configuration option for ensuring isolated read-after-write semantics for all Client requests. 
+* api: adds native Login method to Go client module with different auth method interfaces to support easier authentication [[GH-12796](https://github.com/hashicorp/vault/pull/12796)] (see the sketch below)
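A minimal sketch of the new login flow using the AppRole helper package, assuming `github.com/hashicorp/vault/api/auth/approle` is available in the module version you use; the role ID and secret-id file path here are placeholders:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the secret ID; FromFile keeps it out of the process environment.
	secretID := &approle.SecretID{FromFile: "/tmp/secret-id"} // placeholder path

	auth, err := approle.NewAppRoleAuth("my-role-id", secretID) // placeholder role ID
	if err != nil {
		log.Fatal(err)
	}

	// Auth().Login performs the login request and sets the resulting
	// token on the client for subsequent calls.
	info, err := client.Auth().Login(context.Background(), auth)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("logged in, accessor: %s", info.Auth.Accessor)
}
```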
+* api: Move mergeStates and other required utils from agent to api module [[GH-12731](https://github.com/hashicorp/vault/pull/12731)] +* api: Support VAULT_HTTP_PROXY environment variable to allow overriding the Vault client's HTTP proxy [[GH-12582](https://github.com/hashicorp/vault/pull/12582)] +* auth/approle: The `role/:name/secret-id-accessor/lookup` endpoint now returns a 404 status code when the `secret_id_accessor` cannot be found [[GH-12788](https://github.com/hashicorp/vault/pull/12788)] +* auth/approle: expose secret_id_accessor as WrappedAccessor when creating wrapped secret-id. [[GH-12425](https://github.com/hashicorp/vault/pull/12425)] +* auth/aws: add profile support for AWS credentials when using the AWS auth method [[GH-12621](https://github.com/hashicorp/vault/pull/12621)] +* auth/kubernetes: validate JWT against the provided role on alias look-ahead operations [[GH-12688](https://github.com/hashicorp/vault/pull/12688)] +* auth/kubernetes: Add ability to configure entity alias names based on the serviceaccount's namespace and name. [#110](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/110) [#112](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/112) [[GH-12633](https://github.com/hashicorp/vault/pull/12633)] +* auth/ldap: include support for an optional user filter field when searching for users [[GH-11000](https://github.com/hashicorp/vault/pull/11000)] +* auth/oidc: Adds the `skip_browser` CLI option to allow users to skip opening the default browser during the authentication flow. [[GH-12876](https://github.com/hashicorp/vault/pull/12876)] +* auth/okta: Send x-forwarded-for in Okta Push Factor request [[GH-12320](https://github.com/hashicorp/vault/pull/12320)] +* auth/token: Add `allowed_policies_glob` and `disallowed_policies_glob` fields to token roles to allow glob matching of policies [[GH-7277](https://github.com/hashicorp/vault/pull/7277)] +* cli: Operator diagnose now tests for missing or partial telemetry configurations. [[GH-12802](https://github.com/hashicorp/vault/pull/12802)] +* cli: add new HTTP option -header, which enables sending arbitrary headers with the CLI [[GH-12508](https://github.com/hashicorp/vault/pull/12508)] +* command: operator generate-root -decode: allow passing encoded token via stdin [[GH-12881](https://github.com/hashicorp/vault/pull/12881)] +* core/token: Return the token_no_default_policy config on token role read if set [[GH-12565](https://github.com/hashicorp/vault/pull/12565)] +* core: Add support for go-sockaddr templated addresses in config. [[GH-9109](https://github.com/hashicorp/vault/pull/9109)] +* core: adds custom_metadata field for aliases [[GH-12502](https://github.com/hashicorp/vault/pull/12502)] +* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] +* core: Update github.com/ulikunitz/xz to fix security vulnerability GHSA-25xm-hr59-7c27. [[GH-12253](https://github.com/hashicorp/vault/pull/12253)] +* core: Upgrade github.com/gogo/protobuf [[GH-12255](https://github.com/hashicorp/vault/pull/12255)] +* core: build with Go 1.17, and mitigate a breaking change in Go 1.17 that could impact how approle and ssh interpret IPs/CIDRs [[GH-12868](https://github.com/hashicorp/vault/pull/12868)] +* core: observe the client counts broken down by namespace for partial month client count [[GH-12393](https://github.com/hashicorp/vault/pull/12393)] +* core: Artifact builds will now only run on merges to the release branches or to `main` +* core: The [dockerfile](https://github.com/hashicorp/vault/blob/main/Dockerfile) that is used to build the vault docker image available at [hashicorp/vault](https://hub.docker.com/repository/docker/hashicorp/vault) now lives in the root of this repo, and the entrypoint is available under [.release/docker/docker-entrypoint.sh](https://github.com/hashicorp/vault/blob/main/.release/docker/docker-entrypoint.sh) +* core: The vault linux packaging service configs and pre/post install scripts are now available under [.release/linux](https://github.com/hashicorp/vault/blob/main/.release/linux) +* core: Vault linux packages are now available for all supported linux architectures including arm, arm64, 386, and amd64 +* db/cassandra: make the connect_timeout config option actually apply to connection timeouts, in addition to non-connection operations [[GH-12903](https://github.com/hashicorp/vault/pull/12903)] +* identity/token: Only return keys from the `.well-known/keys` endpoint that are being used by roles to sign/verify tokens. [[GH-12780](https://github.com/hashicorp/vault/pull/12780)] +* identity: fix issue where Cache-Control header causes stampede of requests for JWKS keys [[GH-12414](https://github.com/hashicorp/vault/pull/12414)] +* physical/etcd: Upgrade etcd3 client to v3.5.0 and etcd2 to v2.305.0. [[GH-11980](https://github.com/hashicorp/vault/pull/11980)] +* pki: adds signature_bits field to customize signature algorithm on CAs and certs signed by Vault [[GH-11245](https://github.com/hashicorp/vault/pull/11245)] +* plugin: update the couchbase gocb version in the couchbase plugin [[GH-12483](https://github.com/hashicorp/vault/pull/12483)] +* replication (enterprise): Add merkle.flushDirty.num_pages_outstanding metric, which specifies the number of +outstanding dirty pages that were not flushed. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* sdk/framework: The '+' wildcard is now supported for parameterizing unauthenticated paths (see the sketch below). [[GH-12668](https://github.com/hashicorp/vault/pull/12668)]
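For plugin authors, a sketch of what this enables, using a hypothetical backend whose per-provider callback path must be reachable without a token; `framework.Backend` and `logical.Paths` come from the Vault SDK (`github.com/hashicorp/vault/sdk`), and the path itself is illustrative:

```go
package main

import (
	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// newBackend returns a skeletal backend for a hypothetical plugin whose
// "login/<provider>/callback" path must be reachable without a token.
func newBackend() *framework.Backend {
	return &framework.Backend{
		PathsSpecial: &logical.Paths{
			// The '+' wildcard matches exactly one path segment, so this
			// covers login/okta/callback, login/github/callback, and so on.
			Unauthenticated: []string{"login/+/callback"},
		},
	}
}

func main() { _ = newBackend() }
```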
+* secrets/aws: Add conditional template that allows custom usernames for both STS and IAM cases [[GH-12185](https://github.com/hashicorp/vault/pull/12185)] +* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] +* secrets/azure: Adds support for using Microsoft Graph API since Azure Active Directory API is being removed in 2022. [#67](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/67) [[GH-12629](https://github.com/hashicorp/vault/pull/12629)] +* secrets/database: Update MSSQL dependency github.com/denisenkom/go-mssqldb to v0.11.0 and include support for contained databases in MSSQL plugin [[GH-12839](https://github.com/hashicorp/vault/pull/12839)] +* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] +* secrets/pki: Use entropy augmentation when available when generating root and intermediate CA key material. [[GH-12559](https://github.com/hashicorp/vault/pull/12559)] +* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] +* secrets/pki: Support ed25519 as a key for the pki backend [[GH-11780](https://github.com/hashicorp/vault/pull/11780)] +* secrets/rabbitmq: Update dependency github.com/michaelklishin/rabbit-hole to v2 and resolve UserInfo.tags regression from RabbitMQ v3.9 [[GH-12877](https://github.com/hashicorp/vault/pull/12877)] +* secrets/ssh: Let allowed_users template mix templated and non-templated parts. [[GH-10886](https://github.com/hashicorp/vault/pull/10886)] +* secrets/ssh: Use entropy augmentation when available for generation of the signing key. [[GH-12560](https://github.com/hashicorp/vault/pull/12560)] +* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] +* storage/raft: Best-effort handling of cancelled contexts. [[GH-12162](https://github.com/hashicorp/vault/pull/12162)] +* transform (enterprise): Add advanced features for encoding and decoding for Transform FPE +* transform (enterprise): Add a `reference` field to batch items, and propagate it to the response +* ui: Add KV secret search box when no metadata list access. [[GH-12626](https://github.com/hashicorp/vault/pull/12626)] +* ui: Add custom metadata to KV secret engine and metadata to config [[GH-12169](https://github.com/hashicorp/vault/pull/12169)] +* ui: Creates new StatText component [[GH-12295](https://github.com/hashicorp/vault/pull/12295)] +* ui: client count monthly view [[GH-12554](https://github.com/hashicorp/vault/pull/12554)] +* ui: creates bar chart component for displaying client count data by namespace [[GH-12437](https://github.com/hashicorp/vault/pull/12437)] +* ui: Add creation time to KV 2 version history and version view [[GH-12663](https://github.com/hashicorp/vault/pull/12663)] +* ui: Added resize for JSON editor [[GH-12906](https://github.com/hashicorp/vault/pull/12906)] +* ui: Adds warning about white space in KV secret engine. 
[[GH-12921](https://github.com/hashicorp/vault/pull/12921)] +* ui: Click to copy database static role last rotation value in tooltip [[GH-12890](https://github.com/hashicorp/vault/pull/12890)] +* ui: Filter DB connection attributes so only relevant attrs POST to backend [[GH-12770](https://github.com/hashicorp/vault/pull/12770)] +* ui: Removes empty rows from DB config views [[GH-12819](https://github.com/hashicorp/vault/pull/12819)] +* ui: Standardizes toolbar presentation of destructive actions [[GH-12895](https://github.com/hashicorp/vault/pull/12895)] +* ui: Updates font for table row value fields [[GH-12908](https://github.com/hashicorp/vault/pull/12908)] +* ui: namespace search in client count views [[GH-12577](https://github.com/hashicorp/vault/pull/12577)] +* ui: parse and display pki cert metadata [[GH-12541](https://github.com/hashicorp/vault/pull/12541)] +* ui: replaces Vault's use of elazarl/go-bindata-assetfs in building the UI with Go's native Embed package [[GH-11208](https://github.com/hashicorp/vault/pull/11208)] +* ui: updated client tracking config view [[GH-12422](https://github.com/hashicorp/vault/pull/12422)] + +DEPRECATIONS: + +* auth/kubernetes: deprecate `disable_iss_validation` and `issuer` configuration fields [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] + +BUG FIXES: + +* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] +* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] +* api: Fixes storage APIs returning incorrect error when parsing responses [[GH-12338](https://github.com/hashicorp/vault/pull/12338)] +* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] +* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] +* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] +* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12265](https://github.com/hashicorp/vault/pull/12265)] +* cli/api: Providing consistency for the use of comma separated parameters in auth/secret enable/tune [[GH-12126](https://github.com/hashicorp/vault/pull/12126)] +* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12720](https://github.com/hashicorp/vault/pull/12720)] +* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12911](https://github.com/hashicorp/vault/pull/12911)] +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* core (enterprise): Allow deletion of stored licenses on DR secondary nodes +* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. 
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Fix data race during perf standby sealing +* core (enterprise): Fixes reading raft auto-snapshot configuration from performance standby node [[GH-12317](https://github.com/hashicorp/vault/pull/12317)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* core (enterprise): namespace header included in responses, Go client uses it when displaying error messages [[GH-12196](https://github.com/hashicorp/vault/pull/12196)] +* core/api: Fix an arm64 bug converting a negative int to an unsigned int [[GH-12372](https://github.com/hashicorp/vault/pull/12372)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] +* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] +* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] +* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] +* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] +* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] +* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12301](https://github.com/hashicorp/vault/pull/12301)] +* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* http: removed unpublished true from logical_system path, making openapi spec consistent with documentation [[GH-12713](https://github.com/hashicorp/vault/pull/12713)] +* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] +* identity: Fix regression preventing startup when aliases were created pre-1.9. 
[[GH-13169](https://github.com/hashicorp/vault/pull/13169)] +* identity: dedup from_entity_ids when merging two entities [[GH-10101](https://github.com/hashicorp/vault/pull/10101)] +* identity: disallow creation of role without a key parameter [[GH-12208](https://github.com/hashicorp/vault/pull/12208)] +* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] +* identity: merge associated entity groups when merging entities [[GH-10085](https://github.com/hashicorp/vault/pull/10085)] +* identity: suppress duplicate policies on entities [[GH-12812](https://github.com/hashicorp/vault/pull/12812)] +* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests +* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls +* kmip (enterprise): Forward KMIP register operations to the active node +* license: ignore stored terminated license while autoloading is enabled [[GH-2104](https://github.com/hashicorp/vault/pull/2104)] +* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] +* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] +* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* replication (enterprise): Fix issue where merkle.flushDirty.num_pages metric is not emitted if number +of dirty pages is 0. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* replication (enterprise): Fix merkle.saveCheckpoint.num_dirty metric to accurately specify the number +of dirty pages in the merkle tree at time of checkpoint creation. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] +* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12934](https://github.com/hashicorp/vault/pull/12934)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12600](https://github.com/hashicorp/vault/pull/12600)] +* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] +* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. 
[[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* storage/raft (enterprise): Ensure that raft autosnapshot backoff retry duration never hits 0s +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: Support `addr_type=public_v6` in auto-join [[GH-12366](https://github.com/hashicorp/vault/pull/12366)] +* transform (enterprise): Enforce minimum cache size for Transform backend and reset cache size without a restart +* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)] +* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] +* ui: Fixed text overflow in flash messages [[GH-12357](https://github.com/hashicorp/vault/pull/12357)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: Remove spinner after token renew [[GH-12887](https://github.com/hashicorp/vault/pull/12887)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] +* ui: fix issue where tabbing in MaskedInput on auth methods would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] +* ui: fix missing navbar items on login to namespace [[GH-12478](https://github.com/hashicorp/vault/pull/12478)] +* ui: update bar chart when model changes [[GH-12622](https://github.com/hashicorp/vault/pull/12622)] +* ui: updating database TTL picker help text. [[GH-12212](https://github.com/hashicorp/vault/pull/12212)] + +## 1.8.12 + +### June 10, 2022 + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. 
+* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. + +## 1.8.11 + +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + +## 1.8.10 + +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.16.15. [[GH-go-ver-1810](https://github.com/hashicorp/vault/pull/go-ver-1810)] + +IMPROVEMENTS: + +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer + +BUG FIXES: + +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: Fix memory leak caused by time.After() used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. 
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.8.9 + +### March 3, 2022 + +SECURITY: + +* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. +* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +IMPROVEMENTS: + +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] + +BUG FIXES: + +* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] +* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14170](https://github.com/hashicorp/vault/pull/14170)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.8.8 + +### January 27, 2022 + +IMPROVEMENTS: + +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] + +BUG FIXES: + +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13494](https://github.com/hashicorp/vault/pull/13494)] +* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation. +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13549](https://github.com/hashicorp/vault/pull/13549)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. 
[[GH-2456](https://github.com/hashicorp/vault/pull/2456)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] + +## 1.8.7 + +### December 21, 2021 + +CHANGES: + +* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)] + +## 1.8.6 + +### December 9, 2021 + +CHANGES: + +* go: Update go version to 1.16.9 [[GH-13029](https://github.com/hashicorp/vault/pull/13029)] + +SECURITY: + +* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. + +BUG FIXES: + +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] + +## 1.8.5 + +### November 4, 2021 + +SECURITY: + +* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. 
+ +BUG FIXES: + +* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] +* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] +* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] +* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests +* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls +* kmip (enterprise): Forward KMIP register operations to the active node +* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12952](https://github.com/hashicorp/vault/pull/12952)] +* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. + +## 1.8.4 + +### October 6, 2021 + +SECURITY: + +* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4. + +IMPROVEMENTS: + +* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] + +BUG FIXES: + +* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] +* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] +* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] + +## 1.8.3 + +### September 29, 2021 + +IMPROVEMENTS: + +* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] + +BUG FIXES: + +* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] +* core (enterprise): Allow deletion of stored licenses on DR secondary nodes +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Only delete quotas on primary cluster. 
[[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12599](https://github.com/hashicorp/vault/pull/12599)] +* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)] +* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] + +## 1.8.2 + +### August 26, 2021 + +CHANGES: + +* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 +* go: Update go version to 1.16.7 [[GH-12408](https://github.com/hashicorp/vault/pull/12408)] + +BUG FIXES: + +* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12300](https://github.com/hashicorp/vault/pull/12300)] +* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] +* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] +* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: fix issue where tabbing in MaskedInput on auth methods would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] + +## 1.8.1 + +### August 5, 2021 + +CHANGES: + +* go: Update go version to 1.16.6 [[GH-12245](https://github.com/hashicorp/vault/pull/12245)] + +IMPROVEMENTS: + +* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] + +BUG FIXES: + +* auth/aws: Remove warning stating AWS Token TTL will be capped by the Default Lease TTL. 
[[GH-12026](https://github.com/hashicorp/vault/pull/12026)] +* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12258](https://github.com/hashicorp/vault/pull/12258)] +* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified +* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] +* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] + +## 1.8.0 + +### July 28, 2021 + +CHANGES: + +* agent: Errors in the template engine will no longer cause agent to exit unless +explicitly defined to do so. A new configuration parameter, +`exit_on_retry_failure`, within the new top-level stanza, `template_config`, can +be set to `true` in order to cause agent to exit. Note that for agent to exit if +`template.error_on_missing_key` is set to `true`, `exit_on_retry_failure` must +also be set to `true`. Otherwise, the template engine will log an error but then +restart its internal runner. [[GH-11775](https://github.com/hashicorp/vault/pull/11775)] +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* core (enterprise): License/EULA changes that ensure the presence of a valid HashiCorp license to +start Vault. More information is available in the [Vault License FAQ](https://www.vaultproject.io/docs/enterprise/license/faqs) + +FEATURES: + +* **GCP Secrets Engine Static Accounts**: Adds ability to use existing service accounts for generation + of service account keys and access tokens. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)] +* **Key Management Secrets Engine (Enterprise)**: Adds general availability for distributing and managing keys in AWS KMS. [[GH-11958](https://github.com/hashicorp/vault/pull/11958)] +* **License Autoloading (Enterprise)**: Licenses may now be automatically loaded from the environment or disk. +* **MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine [[GH-11532](https://github.com/hashicorp/vault/pull/11532)] +* **Vault Diagnose**: A new `vault operator` command to detect common issues with vault server setups. + +SECURITY: + +* storage/raft: When initializing Vault’s Integrated Storage backend, excessively broad filesystem permissions may be set for the underlying Bolt database used by Vault’s Raft implementation. This vulnerability, CVE-2021-38553, was fixed in Vault 1.8.0. +* ui: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. + +IMPROVEMENTS: + +* agent/template: Added static_secret_render_interval to specify how often to fetch non-leased secrets [[GH-11934](https://github.com/hashicorp/vault/pull/11934)] +* agent: Allow Agent auto auth to read symlinked JWT files [[GH-11502](https://github.com/hashicorp/vault/pull/11502)] +* api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`. [[GH-11696](https://github.com/hashicorp/vault/pull/11696)] +* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)] +* core: Add `prefix_filter` to telemetry config [[GH-12025](https://github.com/hashicorp/vault/pull/12025)] +* core: Add a darwin/arm64 binary release supporting the Apple M1 CPU [[GH-12071](https://github.com/hashicorp/vault/pull/12071)] +* core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. [[GH-11588](https://github.com/hashicorp/vault/pull/11588)] +* core (enterprise): Add controlled capabilities to control group policy stanza +* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] +* core: Add metrics to report if a node is a perf standby, if a node is a dr secondary or primary, and if a node is a perf secondary or primary. [[GH-11472](https://github.com/hashicorp/vault/pull/11472)] +* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)] +* core: add irrevocable lease list and count apis [[GH-11607](https://github.com/hashicorp/vault/pull/11607)] +* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] +* core: Improve renew/revoke performance using per-lease locks [[GH-11122](https://github.com/hashicorp/vault/pull/11122)] +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] +* go: Update to Go 1.16.5 [[GH-11802](https://github.com/hashicorp/vault/pull/11802)] +* replication: Delay evaluation of X-Vault-Index headers until merkle sync completes. +* secrets/rabbitmq: Add ability to customize dynamic usernames [[GH-11899](https://github.com/hashicorp/vault/pull/11899)] +* secrets/ad: Add `rotate-role` endpoint to allow rotations of service accounts. [[GH-11942](https://github.com/hashicorp/vault/pull/11942)] +* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/elasticsearch: Add ability to customize dynamic usernames [[GH-11957](https://github.com/hashicorp/vault/pull/11957)] +* secrets/database/influxdb: Add ability to customize dynamic usernames [[GH-11796](https://github.com/hashicorp/vault/pull/11796)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodbatlas: Adds the ability to customize username generation for dynamic users in MongoDB Atlas. 
[[GH-11956](https://github.com/hashicorp/vault/pull/11956)] +* secrets/database/redshift: Add ability to customize dynamic usernames [[GH-12016](https://github.com/hashicorp/vault/pull/12016)] +* secrets/database/snowflake: Add ability to customize dynamic usernames [[GH-11997](https://github.com/hashicorp/vault/pull/11997)] +* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)] +* storage/raft: Improve raft batch size selection [[GH-11907](https://github.com/hashicorp/vault/pull/11907)] +* storage/raft: change freelist type to map and set nofreelistsync to true [[GH-11895](https://github.com/hashicorp/vault/pull/11895)] +* storage/raft: Switch to shared raft-boltdb library and add boltdb metrics [[GH-11269](https://github.com/hashicorp/vault/pull/11269)] +* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] +* storage/raft (enterprise): Enable Autopilot on DR secondary clusters +* ui: Add Validation to KV secret engine [[GH-11785](https://github.com/hashicorp/vault/pull/11785)] +* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)] +* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)] +* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)] +* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] +* ui: Add validation support for open api form fields [[GH-11963](https://github.com/hashicorp/vault/pull/11963)] +* ui: Added auth method descriptions to UI login page [[GH-11795](https://github.com/hashicorp/vault/pull/11795)] +* ui: JSON fields on database can be cleared on edit [[GH-11708](https://github.com/hashicorp/vault/pull/11708)] +* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)] +* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)] +* ui: Replace tool partials with components. 
[[GH-11672](https://github.com/hashicorp/vault/pull/11672)] +* ui: Show description on secret engine list [[GH-11995](https://github.com/hashicorp/vault/pull/11995)] +* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)] +* ui: Update partials to components [[GH-11680](https://github.com/hashicorp/vault/pull/11680)] +* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)] +* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] +* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)] +* ui: add transform secrets engine to features list [[GH-12003](https://github.com/hashicorp/vault/pull/12003)] +* ui: add validations for duplicate path kv engine [[GH-11878](https://github.com/hashicorp/vault/pull/11878)] +* ui: show site-wide banners for license warnings if applicable [[GH-11759](https://github.com/hashicorp/vault/pull/11759)] +* ui: update license page with relevant autoload info [[GH-11778](https://github.com/hashicorp/vault/pull/11778)] + +DEPRECATIONS: + +* secrets/gcp: Deprecated the `/gcp/token/:roleset` and `/gcp/key/:roleset` paths for generating + secrets for rolesets. Use `/gcp/roleset/:roleset/token` and `/gcp/roleset/:roleset/key` instead. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)] + +BUG FIXES: + +* activity: Omit wrapping tokens and control groups from client counts [[GH-11826](https://github.com/hashicorp/vault/pull/11826)] +* agent/cert: Fix issue where the API client on agent was not honoring certificate + information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent/template: fix command shell quoting issue [[GH-11838](https://github.com/hashicorp/vault/pull/11838)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* agent: fix timestamp format in log messages from the templating engine [[GH-11838](https://github.com/hashicorp/vault/pull/11838)] +* auth/approle: fixing dereference of nil pointer [[GH-11864](https://github.com/hashicorp/vault/pull/11864)] +* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to + bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] +* auth/kubernetes: Fix AliasLookahead to correctly extract ServiceAccount UID when using ephemeral JWTs [[GH-12073](https://github.com/hashicorp/vault/pull/12073)] +* auth/ldap: Fix a bug where the LDAP auth method does not return the request_timeout configuration parameter on config read. [[GH-11975](https://github.com/hashicorp/vault/pull/11975)] +* cli: Add support for response wrapping in `vault list` and `vault kv list` with output format other than `table`. [[GH-12031](https://github.com/hashicorp/vault/pull/12031)] +* cli: vault delete and vault kv delete should support the same output options (e.g. -format) as vault write. [[GH-11992](https://github.com/hashicorp/vault/pull/11992)] +* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes. 
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. +* core/metrics: Add generic KV mount support for vault.kv.secret.count telemetry metric [[GH-12020](https://github.com/hashicorp/vault/pull/12020)] +* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] +* core: Fix edge cases in the configuration endpoint for barrier key autorotation. [[GH-11541](https://github.com/hashicorp/vault/pull/11541)] +* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] +* core (enterprise): Fix panic on DR secondary when there are lease count quotas [[GH-11742](https://github.com/hashicorp/vault/pull/11742)] +* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)] +* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] +* core: Fixed double counting of http requests after operator stepdown [[GH-11970](https://github.com/hashicorp/vault/pull/11970)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* mongo-db: default username template now strips invalid '.' characters [[GH-11872](https://github.com/hashicorp/vault/pull/11872)] +* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] +* replication: Fix panic trying to update walState during identity group invalidation. +* replication: Fix bug where mounts created within a namespace that was part of an Allow + filtering rule would not appear on a performance secondary if created after the rule + was defined. +* secret/pki: use case-insensitive domain name comparison as per RFC1035 section 2.3.3 +* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] +* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] +* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] +* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] +* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/openldap: Fix bug where schema was not compatible with rotate-root [#24](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/24) [[GH-12019](https://github.com/hashicorp/vault/pull/12019)] +* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* storage/raft: Tweak creation of vault.db file [[GH-12034](https://github.com/hashicorp/vault/pull/12034)] +* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] +* transform (enterprise): Fix an issue with malformed transform configuration + storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. +* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] +* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fix Version History queryParams on LinkedBlock [[GH-12079](https://github.com/hashicorp/vault/pull/12079)] +* ui: Fix bug where database secret engines with custom names cannot delete connections [[GH-11127](https://github.com/hashicorp/vault/pull/11127)] +* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] +* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] +* ui: Fix date display on expired token notice [[GH-11142](https://github.com/hashicorp/vault/pull/11142)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)] +* ui: Fix footer URL linking to the correct version changelog. 
[[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
+* ui: Fix issue where logging in without namespace input causes error [[GH-11094](https://github.com/hashicorp/vault/pull/11094)]
+* ui: Fix namespace bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)]
+* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)]
+* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)]
+* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)]
+* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)]
+* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)]
+
+## 1.7.10
+
+### March 3, 2022
+
+SECURITY:
+
+* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4.
+
+BUG FIXES:
+
+* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)]
+* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)]
+* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)]
+
+## 1.7.9
+
+### January 27, 2022
+
+IMPROVEMENTS:
+
+* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)]
+
+BUG FIXES:
+
+* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13493](https://github.com/hashicorp/vault/pull/13493)]
+* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13735](https://github.com/hashicorp/vault/pull/13735)]
+* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)]
+* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)]
+
+## 1.7.8
+
+### December 21, 2021
+
+CHANGES:
+
+* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)]
+
+BUG FIXES:
+
+* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)]
+* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)]
+
+## 1.7.7
+
+### December 9, 2021
+
+SECURITY:
+
+* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1.
+
+BUG FIXES:
+
+* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
+* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. 
[[GH-13286](https://github.com/hashicorp/vault/pull/13286)]
+* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)]
+* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)]
+* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)]
+* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)]
+
+## 1.7.6
+
+### November 4, 2021
+
+SECURITY:
+
+* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0.
+
+BUG FIXES:
+
+* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)]
+* core/identity: Clean up alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)]
+* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)]
+* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)]
+* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
+* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests
+* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls
+* kmip (enterprise): Forward KMIP register operations to the active node
+* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12957](https://github.com/hashicorp/vault/pull/12957)]
+* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
+* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
+* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error.
+
+## 1.7.5
+
+### 29 September 2021
+
+SECURITY:
+
+* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4.
+
+IMPROVEMENTS:
+
+* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)]
+
+BUG FIXES:
+
+* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)]
+* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. 
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12598](https://github.com/hashicorp/vault/pull/12598)] +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] + +## 1.7.4 + +### 26 August 2021 + +SECURITY: + +* _UI Secret Caching_: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. + +CHANGES: + +* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 +* go: Update go version to 1.15.15 [[GH-12411](https://github.com/hashicorp/vault/pull/12411)] + +IMPROVEMENTS: + +* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] + +BUG FIXES: + +* replication (enterprise): Fix a panic that could occur when checking the last wal and the log shipper buffer is empty. +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12299](https://github.com/hashicorp/vault/pull/12299)] +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] +* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] +* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] + +## 1.7.3 + +### June 16th, 2021 + +CHANGES: + +* go: Update go version to 1.15.13 [[GH-11857](https://github.com/hashicorp/vault/pull/11857)] + +IMPROVEMENTS: + +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] +* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] + +BUG FIXES: + +* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to +bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] +* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. +* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] +* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] + +## 1.7.2 + +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for +signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)] + +IMPROVEMENTS: + +* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] +* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] + +BUG FIXES: + +* agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/keymgmt (enterprise): Fixes audit logging for the read key response. +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] + +## 1.7.1 + +### 21 April 2021 + +SECURITY: + +* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the + Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions + 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) +* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all + versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) + +CHANGES: + +* go: Update to Go 1.15.11 [[GH-11395](https://github.com/hashicorp/vault/pull/11395)] + +IMPROVEMENTS: + +* auth/jwt: Adds ability to directly provide service account JSON in G Suite provider config. [[GH-11388](https://github.com/hashicorp/vault/pull/11388)] +* core: Add tls_max_version listener config option. 
[[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
+* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)]
+* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)]
+* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)]
+
+BUG FIXES:
+
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
+* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
+* core: requests forwarded by standby weren't always timed out. [[GH-11322](https://github.com/hashicorp/vault/pull/11322)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* replication: Fix: mounts created within a namespace that was part of an Allow
+ filtering rule would not appear on performance secondary if created after rule
+ was defined.
+* replication: Perf standby nodes on newly enabled DR secondary sometimes couldn't connect to active node with TLS errors. [[GH-1823](https://github.com/hashicorp/vault/pull/1823)]
+* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
+* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)]
+* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)]
+* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)]
+* storage/raft: using raft for ha_storage with a different storage backend was broken in 1.7.0, now fixed. [[GH-11340](https://github.com/hashicorp/vault/pull/11340)]
+* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)]
+* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)]
+* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
+* ui: Fix namespace bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)]
+* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)]
+
+## 1.7.0
+
+### 24 March 2021
+
+CHANGES:
+
+* agent: Failed auto-auth attempts are now throttled by an exponential backoff instead of the
+~2 second retry delay. The maximum backoff may be configured with the new `max_backoff` parameter,
+which defaults to 5 minutes. 
[[GH-10964](https://github.com/hashicorp/vault/pull/10964)]
+* aws/auth: AWS Auth concepts and endpoints that use the "whitelist" and "blacklist" terms
+have been updated to more inclusive language (e.g. `/auth/aws/identity-whitelist` has been
+updated to `/auth/aws/identity-accesslist`). The old and new endpoints are aliases,
+sharing the same underlying data. The legacy endpoint names are considered **deprecated**
+and will be removed in a future release (not before Vault 1.9). The complete list of
+endpoint changes is available in the [AWS Auth API docs](/api-docs/auth/aws#deprecations-effective-in-vault-1-7).
+* go: Update Go version to 1.15.10 [[GH-11114](https://github.com/hashicorp/vault/pull/11114)] [[GH-11173](https://github.com/hashicorp/vault/pull/11173)]
+
+FEATURES:
+
+* **Aerospike Storage Backend**: Add support for using Aerospike as a storage backend [[GH-10131](https://github.com/hashicorp/vault/pull/10131)]
+* **Autopilot for Integrated Storage**: A set of features has been added to allow for automatic operator-friendly management of Vault servers. This is only applicable when integrated storage is in use.
+ * **Dead Server Cleanup**: Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections.
+ * **Server Health Checking**: An API has been added to track the state of servers, including their health.
+ * **New Server Stabilization**: When a new server is added to the cluster, there will be a waiting period where it must be healthy and stable for a certain amount of time before being promoted to a full, voting member.
+* **Tokenization Secrets Engine (Enterprise)**: The Tokenization Secrets Engine is now generally available. We have added support for MySQL, key rotation, and snapshot/restore.
+* replication (enterprise): The log shipper is now memory as well as length bound, and length and size can be separately configured.
+* agent: Support for persisting the agent cache to disk [[GH-10938](https://github.com/hashicorp/vault/pull/10938)]
+* auth/jwt: Adds `max_age` role parameter and `auth_time` claim validation. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)]
+* core (enterprise): X-Vault-Index and related headers can be used by clients to manage eventual consistency.
+* kmip (enterprise): Use entropy augmentation to generate kmip certificates
+* sdk: Private key generation in the certutil package now allows custom io.Readers to be used. 
[[GH-10653](https://github.com/hashicorp/vault/pull/10653)]
+* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)]
+* secrets/database/cassandra: Add ability to customize dynamic usernames [[GH-10906](https://github.com/hashicorp/vault/pull/10906)]
+* secrets/database/couchbase: Add ability to customize dynamic usernames [[GH-10995](https://github.com/hashicorp/vault/pull/10995)]
+* secrets/database/mongodb: Add ability to customize dynamic usernames [[GH-10858](https://github.com/hashicorp/vault/pull/10858)]
+* secrets/database/mssql: Add ability to customize dynamic usernames [[GH-10767](https://github.com/hashicorp/vault/pull/10767)]
+* secrets/database/mysql: Add ability to customize dynamic usernames [[GH-10834](https://github.com/hashicorp/vault/pull/10834)]
+* secrets/database/postgresql: Add ability to customize dynamic usernames [[GH-10766](https://github.com/hashicorp/vault/pull/10766)]
+* secrets/db/snowflake: Added support for Snowflake to the Database Secret Engine [[GH-10603](https://github.com/hashicorp/vault/pull/10603)]
+* secrets/keymgmt (enterprise): Adds beta support for distributing and managing keys in AWS KMS.
+* secrets/keymgmt (enterprise): Adds general availability for distributing and managing keys in Azure Key Vault.
+* secrets/openldap: Added dynamic roles to OpenLDAP similar to the combined database engine [[GH-10996](https://github.com/hashicorp/vault/pull/10996)]
+* secrets/terraform: New secret engine for managing Terraform Cloud API tokens [[GH-10931](https://github.com/hashicorp/vault/pull/10931)]
+* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)]
+* ui: Adds the wizard to the Database Secret Engine [[GH-10982](https://github.com/hashicorp/vault/pull/10982)]
+* ui: Database secrets engine, supporting MongoDB only [[GH-10655](https://github.com/hashicorp/vault/pull/10655)]
+
+IMPROVEMENTS:
+
+* agent: Add a `vault.retry` stanza that allows specifying number of retries on failure; this applies both to templating and proxied requests. [[GH-11113](https://github.com/hashicorp/vault/pull/11113)]
+* agent: Agent can now run as a Windows service. [[GH-10231](https://github.com/hashicorp/vault/pull/10231)]
+* agent: Better concurrent request handling on identical requests proxied through Agent. [[GH-10705](https://github.com/hashicorp/vault/pull/10705)]
+* agent: Route templating server through cache when persistent cache is enabled. [[GH-10927](https://github.com/hashicorp/vault/pull/10927)]
+* agent: change auto-auth to preload an existing token on start [[GH-10850](https://github.com/hashicorp/vault/pull/10850)]
+* auth/approle: Secret ID generation endpoint now returns `secret_id_ttl` as part of its response. [[GH-10826](https://github.com/hashicorp/vault/pull/10826)]
+* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]
+* auth/okta: Adds support for Okta Verify TOTP MFA. [[GH-10942](https://github.com/hashicorp/vault/pull/10942)]
+* changelog: Add dependencies listed in dependencies/2-25-21 [[GH-11015](https://github.com/hashicorp/vault/pull/11015)]
+* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)]
+* core (enterprise): "vault status" command works when a namespace is set. 
[[GH-10725](https://github.com/hashicorp/vault/pull/10725)] +* core (enterprise): Update Trial Enterprise license from 30 minutes to 6 hours +* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] +* core/metrics: New telemetry metrics reporting lease expirations by time interval and namespace [[GH-10375](https://github.com/hashicorp/vault/pull/10375)] +* core: Added active since timestamp to the status output of active nodes. [[GH-10489](https://github.com/hashicorp/vault/pull/10489)] +* core: Check audit device with a test message before adding it. [[GH-10520](https://github.com/hashicorp/vault/pull/10520)] +* core: Track barrier encryption count and automatically rotate after a large number of operations or on a schedule [[GH-10774](https://github.com/hashicorp/vault/pull/10774)] +* core: add metrics for active entity count [[GH-10514](https://github.com/hashicorp/vault/pull/10514)] +* core: add partial month client count api [[GH-11022](https://github.com/hashicorp/vault/pull/11022)] +* core: dev mode listener allows unauthenticated sys/metrics requests [[GH-10992](https://github.com/hashicorp/vault/pull/10992)] +* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] +* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] +* storage/raft (enterprise): Listing of peers is now allowed on DR secondary +cluster nodes, as an update operation that takes in DR operation token for +authenticating the request. +* transform (enterprise): Improve FPE transformation performance +* transform (enterprise): Use transactions with batch tokenization operations for improved performance +* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] +* ui: Customize MongoDB input fields on Database Secrets Engine [[GH-10949](https://github.com/hashicorp/vault/pull/10949)] +* ui: Upgrade Ember-cli from 3.8 to 3.22. [[GH-9972](https://github.com/hashicorp/vault/pull/9972)] +* ui: Upgrade Storybook from 5.3.19 to 6.1.17. [[GH-10904](https://github.com/hashicorp/vault/pull/10904)] +* ui: Upgrade date-fns from 1.3.0 to 2.16.1. [[GH-10848](https://github.com/hashicorp/vault/pull/10848)] +* ui: Upgrade dependencies to resolve potential JS vulnerabilities [[GH-10677](https://github.com/hashicorp/vault/pull/10677)] +* ui: better errors on Database secrets engine role create [[GH-10980](https://github.com/hashicorp/vault/pull/10980)] + +BUG FIXES: + +* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] +* agent: Set TokenParent correctly in the Index to be cached. [[GH-10833](https://github.com/hashicorp/vault/pull/10833)] +* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] +* api/sys/config/ui: Fixes issue where multiple UI custom header values are ignored and only the first given value is used [[GH-10490](https://github.com/hashicorp/vault/pull/10490)] +* api: Fixes CORS API methods that were outdated and invalid [[GH-10444](https://github.com/hashicorp/vault/pull/10444)] +* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] +* auth/jwt: Fixes an issue where JWT verification keys weren't updated after a `jwks_url` change. 
[[GH-10919](https://github.com/hashicorp/vault/pull/10919)] +* auth/jwt: Fixes an issue where `jwt_supported_algs` were not being validated for JWT auth using +`jwks_url` and `jwt_validation_pubkeys`. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] +* auth/oci: Fixes alias name to use the role name, and not the literal string `name` [[GH-10](https://github.com/hashicorp/vault-plugin-auth-oci/pull/10)] [[GH-10952](https://github.com/hashicorp/vault/pull/10952)] +* consul-template: Update consul-template vendor version and associated dependencies to master, +pulling in [[GH-10756](https://github.com/hashicorp/vault/pull/10756)] +* core (enterprise): Limit entropy augmentation during token generation to root tokens. [[GH-10487](https://github.com/hashicorp/vault/pull/10487)] +* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. +* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] +* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] +* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] +* core: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] +* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and +`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] +* core: Make all APIs that report init status consistent, and make them report +initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)] +* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] +* core: Turn off case sensitivity for allowed entity alias check during token create operation. 
[[GH-10743](https://github.com/hashicorp/vault/pull/10743)]
+* http: change max_request_size to be unlimited when the config value is less than 0 [[GH-10072](https://github.com/hashicorp/vault/pull/10072)]
+* license: Fix license caching issue that prevented new licenses from being picked up by the license manager [[GH-10424](https://github.com/hashicorp/vault/pull/10424)]
+* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)]
+* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)]
+* replication (enterprise): Fix bug with not starting merkle sync while requests are in progress
+* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
+* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
+* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
+* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)]
+* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+* serviceregistration: Fix race during shutdown of Consul service registration. [[GH-10901](https://github.com/hashicorp/vault/pull/10901)]
+* storage/raft (enterprise): Automated snapshots with Azure required specifying
+`azure_blob_environment`, which should have had as a default `AZUREPUBLICCLOUD`.
+* storage/raft (enterprise): Reading a non-existent auto snapshot config now returns 404.
+* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and
+didn't work. It has been renamed to aws_s3_kms_key; when provided, the given key
+is now used to encrypt the snapshot using AWS KMS.
+* transform (enterprise): Fix bug in tokenization handling metadata on exportable stores
+* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
+* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
+* transform (enterprise): Make expiration timestamps human readable
+* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
+* ui: Adding a role from a database connection now automatically populates the database for the new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)]
+* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
+* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)]
+* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)]
+* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
+
+DEPRECATIONS:
+
+* aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated. 
+Refer to the CHANGES section for additional details.
+
+## 1.6.7
+
+### 29 September 2021
+
+BUG FIXES:
+
+* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)]
+* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)]
+* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)]
+* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12597](https://github.com/hashicorp/vault/pull/12597)]
+
+## 1.6.6
+
+### 26 August 2021
+
+SECURITY:
+
+* _UI Secret Caching_: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases.
+
+CHANGES:
+
+* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159
+* go: Update go version to 1.15.15 [[GH-12423](https://github.com/hashicorp/vault/pull/12423)]
+
+IMPROVEMENTS:
+
+* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)]
+
+BUG FIXES:
+
+* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)]
+* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)]
+* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. [[GH-12087](https://github.com/hashicorp/vault/pull/12087)]
+* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)]
+* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)]
+* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)]
+
+## 1.6.5
+
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)]
+
+BUG FIXES:
+
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
+* core: correct logic for renewal of leases nearing their expiration time. 
[[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)]
+* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)]
+* ui: Fix namespace bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+
+## 1.6.4
+
+### 21 April 2021
+
+SECURITY:
+
+* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the
+ Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions
+ 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668)
+* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all
+ versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400)
+
+CHANGES:
+
+* go: Update to Go 1.15.11 [[GH-11396](https://github.com/hashicorp/vault/pull/11396)]
+
+IMPROVEMENTS:
+
+* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)]
+* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
+* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)]
+
+BUG FIXES:
+
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
+* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* pki: Preserve ordering of all DN attribute values when issuing certificates [[GH-11259](https://github.com/hashicorp/vault/pull/11259)]
+* replication: Fix: mounts created within a namespace that was part of an Allow
+ filtering rule would not appear on performance secondary if created after rule
+ was defined.
+* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
+* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)]
+
+## 1.6.3
+
+### February 25, 2021
+
+SECURITY:
+
+* Limited Unauthenticated License Metadata Read: We addressed a security vulnerability that allowed for the unauthenticated
+reading of Vault license metadata from DR Secondaries. This vulnerability affects Vault Enterprise and is
+fixed in 1.6.3 (CVE-2021-27668). 
+ +CHANGES: + +* secrets/mongodbatlas: Move from whitelist to access list API [[GH-10966](https://github.com/hashicorp/vault/pull/10966)] + +IMPROVEMENTS: + +* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] + +BUG FIXES: + +* auth/kubernetes: Cancel API calls to TokenReview endpoint when request context +is closed [[GH-10930](https://github.com/hashicorp/vault/pull/10930)] +* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] +* quotas: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] +* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)] +* replication (enterprise): Don't write request count data on DR Secondaries. +Fixes DR Secondaries becoming out of sync approximately every 30s. [[GH-10970](https://github.com/hashicorp/vault/pull/10970)] +* secrets/azure (enterprise): Forward service principal credential creation to the +primary cluster if called on a performance standby or performance secondary. [[GH-10902](https://github.com/hashicorp/vault/pull/10902)] + +## 1.6.2 + +### January 29, 2021 + +SECURITY: + +* IP Address Disclosure: We fixed a vulnerability where, under some error +conditions, Vault would return an error message disclosing internal IP +addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in +1.6.2 (CVE-2021-3024). +* Limited Unauthenticated Remove Peer: As of Vault 1.6, the remove-peer command +on DR secondaries did not require authentication. This issue impacts the +stability of HA architecture, as a bad actor could remove all standby +nodes from a DR +secondary. This issue affects Vault Enterprise 1.6.0 and 1.6.1, and is fixed in +1.6.2 (CVE-2021-3282). +* Mount Path Disclosure: Vault previously returned different HTTP status codes for +existent and non-existent mount paths. This behavior would allow unauthenticated +brute force attacks to reveal which paths had valid mounts. This issue affects +Vault and Vault Enterprise and is fixed in 1.6.2 (CVE-2020-25594). + +CHANGES: + +* go: Update go version to 1.15.7 [[GH-10730](https://github.com/hashicorp/vault/pull/10730)] + +FEATURES: + +* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)] + +IMPROVEMENTS: + +* core (enterprise): "vault status" command works when a namespace is set. [[GH-10725](https://github.com/hashicorp/vault/pull/10725)] +* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] +* storage/raft (enterprise): Listing of peers is now allowed on DR secondary +cluster nodes, as an update operation that takes in DR operation token for +authenticating the request. +* core: allow setting tls_servername for raft retry/auto-join [[GH-10698](https://github.com/hashicorp/vault/pull/10698)] + +BUG FIXES: + +* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] +* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. 
[[GH-10650](https://github.com/hashicorp/vault/pull/10650)] +* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)] +* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)] +* storage/raft (enterprise): Automated snapshots with Azure required specifying +`azure_blob_environment`, which should have had as a default `AZUREPUBLICCLOUD`. +* storage/raft (enterprise): Autosnapshots config and storage weren't excluded from +performance replication, causing conflicts and errors. +* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)] +* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)] + +## 1.6.1 + +### December 16, 2020 + +SECURITY: + +* LDAP Auth Method: We addressed an issue where error messages returned by the + LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault + Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177). +* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent + users within namespaces from applying Sentinel EGP policies to paths above + their namespace. This vulnerability affects Vault Enterprise and is fixed in + 1.5.6 and 1.6.1 (CVE-2020-35453). + +IMPROVEMENTS: + +* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] +* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] +* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] + +BUG FIXES: + +* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] +* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] +* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. +* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)] +* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] +* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and +`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] +* core: Make all APIs that report init status consistent, and make them report +initialized=true when a Raft join is in progress. 
[[GH-10498](https://github.com/hashicorp/vault/pull/10498)]
+* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
+* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
+* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
+* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and didn't work. It has been renamed to aws_s3_kms_key; when provided, the given key is now used to encrypt the snapshot using AWS KMS.
+* transform (enterprise): Fix bug in tokenization handling metadata on exportable stores
+* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
+* transform (enterprise): Make expiration timestamps human readable
+* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
+* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
+* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
+* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
+* ui: Fix radio click on secrets and auth list pages. [[GH-10586](https://github.com/hashicorp/vault/pull/10586)]
+
+## 1.6.0
+
+### November 11th, 2020
+
+NOTE:
+
+Binaries for 32-bit macOS (i.e. the `darwin_386` build) will no longer be published. This target was dropped in the latest version of the Go compiler.
+
+CHANGES:
+
+* agent: Agent now properly returns a non-zero exit code on error, such as one due to template rendering failure. Using `error_on_missing_key` in the template config will cause agent to immediately exit on failure. In order to make agent properly exit due to continuous failure from template rendering errors, the old behavior of indefinitely restarting the template server is now changed to exit once the default retry attempt of 12 times (with exponential backoff) gets exhausted. [[GH-9670](https://github.com/hashicorp/vault/pull/9670)]
+* token: Periodic tokens generated by auth methods will have the period value stored in their token entries. [[GH-7885](https://github.com/hashicorp/vault/pull/7885)]
+* core: New telemetry metrics reporting mount table size and number of entries [[GH-10201](https://github.com/hashicorp/vault/pull/10201)]
+* go: Updated Go version to 1.15.4 [[GH-10366](https://github.com/hashicorp/vault/pull/10366)]
+
+FEATURES:
+
+* **Couchbase Secrets**: Vault can now manage static and dynamic credentials for Couchbase. [[GH-9664](https://github.com/hashicorp/vault/pull/9664)]
+* **Expanded Password Policy Support**: Custom password policies are now supported for all database engines.
+* **Integrated Storage Auto Snapshots (Enterprise)**: This feature enables an operator to schedule snapshots of the integrated storage backend and ensure those snapshots are persisted elsewhere. 
+* **Integrated Storage Cloud Auto Join**: This feature for integrated storage enables Vault nodes running in the cloud to automatically discover and join a Vault cluster via operator-supplied metadata.
+* **Key Management Secrets Engine (Enterprise; Tech Preview)**: This new secret engine allows securely distributing keys to and managing keys in Azure cloud KMS services.
+* **Seal Migration**: With Vault 1.6, we will support migrating from an auto unseal mechanism to a different mechanism of the same type. For example, if you were using an AWS KMS key to automatically unseal, you can now migrate to a different AWS KMS key.
+* **Tokenization (Enterprise; Tech Preview)**: Tokenization supports creating irreversible “tokens” from sensitive data. Tokens can be used in less secure environments, protecting the original data.
+* **Vault Client Count**: Vault now counts the number of active entities (and non-entity tokens) per month and makes this information available via the "Metrics" section of the UI.
+
+IMPROVEMENTS:
+
+* auth/approle: Role names can now be referenced in templated policies through the `approle.metadata.role_name` property [[GH-9529](https://github.com/hashicorp/vault/pull/9529)]
+* auth/aws: Improve logic check on wildcard `BoundIamPrincipalARNs` and include role name on error messages on check failure [[GH-10036](https://github.com/hashicorp/vault/pull/10036)]
+* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-123](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/123)]
+* auth/jwt: Adding EdDSA (ed25519) to supported algorithms [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
+* auth/jwt: Improve cli authorization error [[GH-137](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/137)]
+* auth/jwt: Add OIDC namespace_in_state option [[GH-140](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/140)]
+* secrets/transit: fix missing plaintext in bulk decrypt response [[GH-9991](https://github.com/hashicorp/vault/pull/9991)]
+* command/server: Delay informational messages in -dev mode until logs have settled. [[GH-9702](https://github.com/hashicorp/vault/pull/9702)]
+* command/server: Add environment variable support for `disable_mlock`. [[GH-9931](https://github.com/hashicorp/vault/pull/9931)]
+* core/metrics: Add metrics for storage cache [[GH-10079](https://github.com/hashicorp/vault/pull/10079)]
+* core/metrics: Add metrics for leader status [[GH-10147](https://github.com/hashicorp/vault/pull/10147)]
+* physical/azure: Add the ability to use Azure Instance Metadata Service to set the credentials for Azure Blob storage on the backend. [[GH-10189](https://github.com/hashicorp/vault/pull/10189)]
+* sdk/framework: Add a time type for API fields. 
[[GH-9911](https://github.com/hashicorp/vault/pull/9911)]
+* secrets/database: Added support for password policies to all databases [[GH-9641](https://github.com/hashicorp/vault/pull/9641),
+ [and more](https://github.com/hashicorp/vault/pulls?q=is%3Apr+is%3Amerged+dbpw)]
+* secrets/database/cassandra: Added support for static credential rotation [[GH-10051](https://github.com/hashicorp/vault/pull/10051)]
+* secrets/database/elasticsearch: Added support for static credential rotation [[GH-19](https://github.com/hashicorp/vault-plugin-database-elasticsearch/pull/19)]
+* secrets/database/hanadb: Added support for root credential & static credential rotation [[GH-10142](https://github.com/hashicorp/vault/pull/10142)]
+* secrets/database/hanadb: Default password generation now includes dashes. Custom statements may need to be updated
+ to include quotes around the password field [[GH-10142](https://github.com/hashicorp/vault/pull/10142)]
+* secrets/database/influxdb: Added support for static credential rotation [[GH-10118](https://github.com/hashicorp/vault/pull/10118)]
+* secrets/database/mongodbatlas: Added support for root credential rotation [[GH-14](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/14)]
+* secrets/database/mongodbatlas: Support scopes field in creation statements for MongoDB Atlas database plugin [[GH-15](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/15)]
+* seal/awskms: Add logging during awskms auto-unseal [[GH-9794](https://github.com/hashicorp/vault/pull/9794)]
+* storage/azure: Update SDK library to use [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) since the previous library has been deprecated. [[GH-9577](https://github.com/hashicorp/vault/pull/9577/)]
+* secrets/ad: `rotate-root` now supports POST requests like other secret engines [[GH-70](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/70)]
+* ui: Add ui functionality for the Transform Secret Engine [[GH-9665](https://github.com/hashicorp/vault/pull/9665)]
+* ui: Pricing metrics dashboard [[GH-10049](https://github.com/hashicorp/vault/pull/10049)]
+
+BUG FIXES:
+
+* auth/jwt: Fix bug preventing config edit UI from rendering [[GH-141](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/141)]
+* cli: Don't open or overwrite a raft snapshot file on an unsuccessful `vault operator raft snapshot` [[GH-9894](https://github.com/hashicorp/vault/pull/9894)]
+* core: Implement constant-time version of Shamir GF(2^8) math [[GH-9932](https://github.com/hashicorp/vault/pull/9932)]
+* core: Fix resource leak in plugin API (plugin-dependent, not all plugins impacted) [[GH-9557](https://github.com/hashicorp/vault/pull/9557)]
+* core: Fix race involved in enabling certain features via a license change
+* core: Fix error handling in HCL parsing of objects with invalid syntax [[GH-410](https://github.com/hashicorp/hcl/pull/410)]
+* identity: Check for timeouts in entity API [[GH-9925](https://github.com/hashicorp/vault/pull/9925)]
+* secrets/database: Fix handling of TLS options in mongodb connection strings [[GH-9519](https://github.com/hashicorp/vault/pull/9519)]
+* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. 
[[GH-93](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/93)] +* ui: Mask LDAP bindpass while typing [[GH-10087](https://github.com/hashicorp/vault/pull/10087)] +* ui: Update language in promote dr modal flow [[GH-10155](https://github.com/hashicorp/vault/pull/10155)] +* ui: Update language on replication primary dashboard for clarity [[GH-10205](https://github.com/hashicorp/vault/pull/10217)] +* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)] + +## 1.5.9 + +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for +signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)] + +BUG FIXES: + +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] + +## 1.5.8 + +### 21 April 2021 + +SECURITY: + +* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the + Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions + 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) + +CHANGES: + +* go: Update to Go 1.14.15 [[GH-11397](https://github.com/hashicorp/vault/pull/11397)] + +IMPROVEMENTS: + +* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)] + +BUG FIXES: + +* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] +* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] +* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] +* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)] + +## 1.5.7 + +### January 29, 2021 + +SECURITY: + +* IP Address Disclosure: We fixed a vulnerability where, under some error +conditions, Vault would return an error message disclosing internal IP +addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in +1.6.2 and 1.5.7 (CVE-2021-3024). +* Mount Path Disclosure: Vault previously returned different HTTP status codes for +existent and non-existent mount paths. This behavior would allow unauthenticated +brute force attacks to reveal which paths had valid mounts. This issue affects +Vault and Vault Enterprise and is fixed in 1.6.2 and 1.5.7 (CVE-2020-25594). + +IMPROVEMENTS: + +* storage/raft (enterprise): Listing of peers is now allowed on DR secondary +cluster nodes, as an update operation that takes in DR operation token for +authenticating the request. 
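+
+As a rough sketch of that update-style call on a DR secondary (the
+`dr_operation_token` field name here is an assumption; check the API docs for
+the exact request shape):
+
+```
+# On a DR secondary node, list the raft peers by supplying a DR operation
+# token in the request body rather than a regular Vault token.
+curl \
+    --request PUT \
+    --data '{"dr_operation_token": "<your-dr-operation-token>"}' \
+    "$VAULT_ADDR/v1/sys/storage/raft/configuration"
+```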
+ +BUG FIXES: + +* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] +* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] + +## 1.5.6 + +### December 16, 2020 + +SECURITY: + +* LDAP Auth Method: We addressed an issue where error messages returned by the + LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault + Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177). +* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent + users within namespaces from applying Sentinel EGP policies to paths above + their namespace. This vulnerability affects Vault Enterprise and is fixed in + 1.5.6 and 1.6.1. + +IMPROVEMENTS: + +* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] + +BUG FIXES: + +* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. +* core: Fix bug where updating an existing path quota could introduce a conflict [[GH-10285](https://github.com/hashicorp/vault/pull/10285)] +* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] +* quotas (enterprise): Reset cache before loading quotas in the db during startup +* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)] + +## 1.5.5 + +### October 21, 2020 + +IMPROVEMENTS: + +* auth/aws, core/seal, secret/aws: Set default IMDS timeouts to match AWS SDK [[GH-10133](https://github.com/hashicorp/vault/pull/10133)] + +BUG FIXES: + +* auth/aws: Restrict region selection when in the aws-us-gov partition to avoid IAM errors [[GH-9947](https://github.com/hashicorp/vault/pull/9947)] +* core (enterprise): Allow operators to add and remove (Raft) peers in a DR secondary cluster using Integrated Storage. +* core (enterprise): Add DR operation token to the remove peer API and CLI command (when DR secondary). +* core (enterprise): Fix deadlock in handling EGP policies +* core (enterprise): Fix extraneous error messages in DR Cluster +* secrets/mysql: Conditionally overwrite TLS parameters for MySQL secrets engine [[GH-9729](https://github.com/hashicorp/vault/pull/9729)] +* secrets/ad: Fix bug where `password_policy` setting was not using correct key when `ad/config` was read [[GH-71](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/71)] +* ui: Fix issue with listing roles and methods on the same auth methods with different names [[GH-10122](https://github.com/hashicorp/vault/pull/10122)] + +## 1.5.4 + +### September 24th, 2020 + +SECURITY: + +* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816). 
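+
+After upgrading, a quick hedged way to verify the fix is to create a
+short-lived batch token and confirm its TTL actually counts down to zero:
+
+```
+# Create a 60s batch token, then look it up; the reported ttl should
+# decrease on each lookup and the token should stop working once it expires.
+TOKEN=$(vault token create -type=batch -ttl=60s -field=token)
+vault token lookup "$TOKEN"
+```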
+ +IMPROVEMENTS: + +* secrets/pki: Handle expiration of a cert not in storage as a success [[GH-9880](https://github.com/hashicorp/vault/pull/9880)] +* auth/kubernetes: Add an option to disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod [[GH-97]](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/97) +* secrets/gcp: Add check for 403 during rollback to prevent repeated deletion calls [[GH-97](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/97)] +* core: Disable usage metrics collection on performance standby nodes. [[GH-9966](https://github.com/hashicorp/vault/pull/9966)] +* credential/aws: Added X-Amz-Content-Sha256 as a default STS request header [[GH-10009](https://github.com/hashicorp/vault/pull/10009)] + +BUG FIXES: + +* agent: Fix `disable_fast_negotiation` not being set on the auth method when configured by user. [[GH-9892](https://github.com/hashicorp/vault/pull/9892)] +* core (enterprise): Fix hang when cluster-wide plugin reload cleanup is slow on unseal +* core (enterprise): Fix an error in cluster-wide plugin reload cleanup following such a reload +* core: Fix crash when metrics collection encounters zero-length keys in KV store [[GH-9811](https://github.com/hashicorp/vault/pull/9881)] +* mfa (enterprise): Fix incorrect handling of PingID responses that could result in auth requests failing +* replication (enterprise): Improve race condition when using a newly created token on a performance standby node +* replication (enterprise): Only write failover cluster addresses if they've changed +* ui: fix bug where dropdown for identity/entity management is not reflective of actual policy [[GH-9958](https://github.com/hashicorp/vault/pull/9958)] + +## 1.5.3 + +### August 27th, 2020 + +NOTE: + +All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users. + +BUG FIXES: + +* auth/aws: Made header handling for IAM authentication more robust +* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing + +## 1.5.2.1 + +### August 21st, 2020 + +### Enterprise Only + +NOTE: + +Includes correct license in the HSM binary. + +## 1.5.2 + +### August 20th, 2020 + +NOTE: + +OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. + +KNOWN ISSUES: + +* AWS IAM logins may return an error depending on the headers sent with the request. + For more details and a workaround, see the [1.5.2 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.2) +* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise + customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. + +## 1.5.1 + +### August 20th, 2020 + +SECURITY: + +* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) +* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. 
This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using Vault Agent with cert auto-auth and caching enabled, under certain circumstances, clients without permission to access the agent's token may retrieve the token without login credentials. This vulnerability affects Vault Agent 1.1.0 and newer and is fixed in 1.5.1 (CVE-2020-17455)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.5.1 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.1)
+
+CHANGES:
+
+* pki: The tidy operation will now remove revoked certificates if the parameter `tidy_revoked_certs` is set to `true`. This will result in certificate entries being removed immediately, instead of waiting until their NotAfter time. Note that this only affects certificates that have already been revoked. [[GH-9609](https://github.com/hashicorp/vault/pull/9609)]
+* go: Updated Go version to 1.14.7
+
+IMPROVEMENTS:
+
+* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-9574](https://github.com/hashicorp/vault/pull/9574)]
+* auth/jwt: Add EdDSA to supported algorithms. [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
+* secrets/openldap: Add "ad" schema that allows the engine to correctly rotate AD passwords. [[GH-9740](https://github.com/hashicorp/vault/pull/9740)]
+* pki: Add an `allowed_domains_template` parameter that enables the use of identity templating within the `allowed_domains` parameter. [[GH-8509](https://github.com/hashicorp/vault/pull/8509)]
+* secret/azure: Use write-ahead-logs to clean up any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
+* ui: Update the wrap TTL option on the transit engine export action to use a new component. [[GH-9632](https://github.com/hashicorp/vault/pull/9632)]
+* ui: Update the Wrap Tool to use the newest version of the TTL Picker component. [[GH-9691](https://github.com/hashicorp/vault/pull/9691)]
+
+BUG FIXES:
+
+* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-9603](https://github.com/hashicorp/vault/pull/9603)]
+* replication (enterprise): Fix status API output incorrectly stating replication is in `idle` state.
+* replication (enterprise): Use PrimaryClusterAddr if it's been set
+* core: Fix panic when printing over-long info fields at startup [[GH-9681](https://github.com/hashicorp/vault/pull/9681)]
+* core: Seal migration using the new minimal-downtime strategy didn't work properly with performance standbys. [[GH-9690](https://github.com/hashicorp/vault/pull/9690)]
+* core: Vault failed to start when there were non-string values in seal configuration [[GH-9555](https://github.com/hashicorp/vault/pull/9555)]
+* core: Handle a trailing slash in the API address used for enabling replication
+
+## 1.5.0
+
+### July 21st, 2020
+
+CHANGES:
+
+* audit: Token TTL and issue time are now provided in the auth portion of audit logs. [[GH-9091](https://github.com/hashicorp/vault/pull/9091)]
+* auth/gcp: Changes the default name of the entity alias that gets created to be the role ID for both IAM and GCE authentication. 
[[GH-99](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/99)]
+* core: Remove the addition of newlines to parsed configuration when using integer/boolean values [[GH-8928](https://github.com/hashicorp/vault/pull/8928)]
+* cubbyhole: Reject reads and writes to an empty ("") path. [[GH-8971](https://github.com/hashicorp/vault/pull/8971)]
+* secrets/azure: Default password generation changed from uuid to cryptographically secure randomized string [[GH-40](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/40)]
+* storage/gcs: The `credentials_file` config option has been removed. The `GOOGLE_APPLICATION_CREDENTIALS` environment variable
+  or default credentials may be used instead [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
+* storage/raft: The storage configuration now accepts a new `max_entry_size` config that will limit
+  the total size in bytes of any entry committed via raft. It defaults to `"1048576"` (1MiB). [[GH-9027](https://github.com/hashicorp/vault/pull/9027)]
+* token: Token creation with custom token ID via `id` will no longer allow periods (`.`) as part of the input string.
+  The final generated token value may contain periods, such as the `s.` prefix for service token
+  indication. [[GH-8646](https://github.com/hashicorp/vault/pull/8646/files)]
+* token: Token renewals will now return token policies within `token_policies`, identity policies within `identity_policies`, and the full policy set within `policies`. [[GH-8535](https://github.com/hashicorp/vault/pull/8535)]
+* go: Updated Go version to 1.14.4
+
+FEATURES:
+
+* **Monitoring**: We have released a Splunk App for Enterprise customers. The app is accompanied by an updated monitoring guide and a few new metrics to enable OSS users to effectively monitor Vault.
+* **Password Policies**: Allows operators to customize how passwords are generated for select secrets engines (OpenLDAP, Active Directory, Azure, and RabbitMQ).
+* **Replication UI Improvements**: We have redesigned the replication UI to highlight the state and relationship between primaries and secondaries and improved management workflows, enabling a more holistic understanding of multiple Vault clusters.
+* **Resource Quotas**: As of 1.5, Vault supports specifying a quota to rate limit requests on OSS and Enterprise. Enterprise customers can also set quotas on the number of leases that can be generated on a path.
+* **OpenShift Support**: We have updated the Helm charts to allow users to install Vault onto their OpenShift clusters.
+* **Seal Migration**: We have made updates to allow migrations from auto unseal to Shamir unseal on Enterprise.
+* **AWS Auth Web Identity Support**: We've added support for AWS Web Identities, which will be used in the credentials chain if present.
+* **Vault Monitor**: Similar to the monitor command for Consul and Nomad, we have added the ability for Vault to stream logs from other Vault servers at varying log levels.
+* **AWS Secrets Groups Support**: IAM users generated by Vault may now be added to IAM Groups.
+* **Integrated Storage as HA Storage**: In Vault 1.5, it is possible to use Integrated Storage as HA Storage with a different storage backend used as regular storage.
+* **OIDC Auth Provider Extensions**: We've added support to OIDC Auth to incorporate IdP-specific extensions. Currently this includes expanded Azure AD groups support.
+* **GCP Secrets**: Support BigQuery dataset ACLs in the absence of IAM endpoints. 
+* **KMIP**: Add support for signing client certificates requests (CSRs) rather than having them be generated entirely within Vault. + +IMPROVEMENTS: + +* audit: Replication status requests are no longer audited. [[GH-8877](https://github.com/hashicorp/vault/pull/8877)] +* audit: Added mount_type field to requests and responses. [[GH-9167](https://github.com/hashicorp/vault/pull/9167)] +* auth/aws: Add support for Web Identity credentials [[GH-7738](https://github.com/hashicorp/vault/pull/7738)] +* auth/jwt: Support users that are members of more than 200 groups on Azure [[GH-120](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/120)] +* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)] +* auth/kubernetes: Allow disabling `iss` validation [[GH-91](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/91)] +* auth/kubernetes: Try reading the ca.crt and TokenReviewer JWT from the default service account [[GH-83](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/83)] +* cli: Support reading TLS parameters from file for the `vault operator raft join` command. [[GH-9060](https://github.com/hashicorp/vault/pull/9060)] +* cli: Add a new subcommand, `vault monitor`, for tailing server logs in the console. [[GH-8477](https://github.com/hashicorp/vault/pull/8477)] +* core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)] +* core: Added Password Policies for user-configurable password generation [[GH-8637](https://github.com/hashicorp/vault/pull/8637)] +* core: New telemetry metrics covering token counts, token creation, KV secret counts, lease creation. [[GH-9239](https://github.com/hashicorp/vault/pull/9239)] [[GH-9250](https://github.com/hashicorp/vault/pull/9250)] [[GH-9244](https://github.com/hashicorp/vault/pull/9244)] [[GH-9052](https://github.com/hashicorp/vault/pull/9052)] +* physical/gcs: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9424](https://github.com/hashicorp/vault/pull/9424)] +* physical/spanner: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9423](https://github.com/hashicorp/vault/pull/9423)] +* plugin: Add SDK method, `Sys.ReloadPlugin`, and CLI command, `vault plugin reload`, for reloading plugins. [[GH-8777](https://github.com/hashicorp/vault/pull/8777)] +* plugin (enterprise): Add a scope field to plugin reload, which when global, reloads the plugin anywhere in a cluster. 
[[GH-9347](https://github.com/hashicorp/vault/pull/9347)]
+* sdk/framework: Support accepting TypeFloat parameters over the API [[GH-8923](https://github.com/hashicorp/vault/pull/8923)]
+* secrets/aws: Add iam_groups parameter to role create/update [[GH-8811](https://github.com/hashicorp/vault/pull/8811)]
+* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-11](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/11)]
+* secrets/database: Add static role rotation for MSSQL database plugin [[GH-9062](https://github.com/hashicorp/vault/pull/9062)]
+* secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
+* secrets/gcp: Support BigQuery dataset ACLs in the absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
+* secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)]
+* secrets/ssh: Add a CA-mode role option to specify signing algorithm [[GH-9096](https://github.com/hashicorp/vault/pull/9096)]
+* secrets/ssh: The [Vault SSH Helper](https://github.com/hashicorp/vault-ssh-helper) can now be configured to reference a mount in a namespace [[GH-44](https://github.com/hashicorp/vault-ssh-helper/pull/44)]
+* secrets/transit: Transit requests that make use of keys now include a new field `key_version` in their responses [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
+* secrets/transit: Improve transit batch encrypt and decrypt latencies [[GH-8775](https://github.com/hashicorp/vault/pull/8775)]
+* sentinel: Add a sentinel config section and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults.
+* ui: Update TTL picker styling on SSH secret engine [[GH-8891](https://github.com/hashicorp/vault/pull/8891)]
+* ui: Only render the JWT input field of the Vault login form on mounts configured for JWT auth [[GH-8952](https://github.com/hashicorp/vault/pull/8952)]
+* ui: Add replication dashboards. Improve replication management workflows. [[GH-8705](https://github.com/hashicorp/vault/pull/8705)]
+* ui: Update alert banners to match the design system's black text. [[GH-9463](https://github.com/hashicorp/vault/pull/9463)]
+
+BUG FIXES:
+
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. 
[[GH-7](https://github.com/hashicorp/vault-plugin-auth-oci/pull/7)]
+* core: Extend replicated cubbyhole fix in 1.4.0 to cover case where a performance primary is also a DR primary [[GH-9148](https://github.com/hashicorp/vault/pull/9148)]
+* replication (enterprise): Use the PrimaryClusterAddr if it's been set
+* seal/awskms: Fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
+* sentinel: Fix panic due to concurrent map access when rules iterate over metadata maps
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
+* secrets/database: Fix issue where rotating root database credentials while Vault's storage backend is unavailable causes Vault to lose access to the database [[GH-8782](https://github.com/hashicorp/vault/pull/8782)]
+* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
+* secrets/database: Fix parsing of multi-line PostgreSQL statements [[GH-8512](https://github.com/hashicorp/vault/pull/8512)]
+* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-90](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/90)]
+* secrets/kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
+* ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines. [[GH-8913](https://github.com/hashicorp/vault/pull/8913)]
+* ui: Disallow max versions values larger than 9999999999999999 on the kv2 secrets engine. [[GH-9242](https://github.com/hashicorp/vault/pull/9242)]
+* ui: Add and upgrade missing dependencies to resolve a failure with `make static-dist`. [[GH-9371](https://github.com/hashicorp/vault/pull/9371)]
+
+## 1.4.7.1
+
+### October 15th, 2020
+
+### Enterprise Only
+
+BUG FIXES:
+
+* replication (enterprise): Fix panic when old filter path evaluation fails
+
+## 1.4.7
+
+### September 24th, 2020
+
+SECURITY:
+
+* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
+
+IMPROVEMENTS:
+
+* secret/azure: Use write-ahead-logs to clean up any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
+
+BUG FIXES:
+
+* replication (enterprise): Don't stop replication if old filter path evaluation fails
+
+## 1.4.6
+
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing [[GH-9824](https://github.com/hashicorp/vault/pull/9824)]
+
+## 1.4.5.1
+
+### August 21st, 2020
+
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary. 
+ +## 1.4.5 + +### August 20th, 2020 + +NOTE: + +OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. + +KNOWN ISSUES: + +* AWS IAM logins may return an error depending on the headers sent with the request. + For more details and a workaround, see the [1.4.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.5) +* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise + customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. + +## 1.4.4 + +### August 20th, 2020 + +SECURITY: + +* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) +* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) + +KNOWN ISSUES: + +* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. +* AWS IAM logins may return an error depending on the headers sent with the request. + For more details and a workaround, see the [1.4.4 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.4) + +BUG FIXES: + +* auth/okta: fix bug introduced in 1.4.0: only 200 external groups were fetched even if user belonged to more [[GH-9580](https://github.com/hashicorp/vault/pull/9580)] +* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)] +* secrets/aws: Fix possible issue creating access keys when using Performance Standbys [[GH-9606](https://github.com/hashicorp/vault/pull/9606)] + +IMPROVEMENTS: + +* auth/aws: Retry on transient failures during AWS IAM auth login attempts [[GH-8727](https://github.com/hashicorp/vault/pull/8727)] +* ui: Add transit key algorithms aes128-gcm96, ecdsa-p384, ecdsa-p521 to the UI. [[GH-9070](https://github.com/hashicorp/vault/pull/9070)] & [[GH-9520](https://github.com/hashicorp/vault/pull/9520)] + +## 1.4.3 + +### July 2nd, 2020 + +IMPROVEMENTS: + +* auth/aws: Add support for Web Identity credentials [[GH-9251](https://github.com/hashicorp/vault/pull/9251)] +* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)] +* core: Add the Go version used to build a Vault binary to the server message output. 
[[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
+* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-9311](https://github.com/hashicorp/vault/pull/9311)]
+* physical/mysql: Require TLS or plaintext flagging in MySQL configuration [[GH-9012](https://github.com/hashicorp/vault/pull/9012)]
+* ui: Link to the Vault Changelog in the UI footer [[GH-9216](https://github.com/hashicorp/vault/pull/9216)]
+
+BUG FIXES:
+
+* agent: Restart template server when it shuts down [[GH-9200](https://github.com/hashicorp/vault/pull/9200)]
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-9278](https://github.com/hashicorp/vault/pull/9278)]
+* replication: The issue causing cubbyholes in namespaces on performance secondaries to not work, which was fixed in 1.4.0, was still an issue when the primary was both a performance primary and a DR primary.
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9207](https://github.com/hashicorp/vault/pull/9207)]
+* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9208](https://github.com/hashicorp/vault/pull/9208)]
+* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-9277](https://github.com/hashicorp/vault/pull/9277)]
+
+## 1.4.2 (May 21st, 2020)
+
+SECURITY:
+
+* core: Proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4.0 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)]
+* secrets/gcp: Fix a regression in 1.4.0 where the system TTLs were being used instead of the configured backend TTLs for dynamic service accounts. This vulnerability is CVE-2020-12757. [[GH-85](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/85)]
+
+IMPROVEMENTS:
+
+* storage/raft: The storage stanza now accepts `leader_ca_cert_file`, `leader_client_cert_file`, and
+  `leader_client_key_file` parameters to read and parse TLS certificate information from paths on disk.
+  Existing non-path based parameters will continue to work, but their values will need to be provided as a
+  single-line string with newlines delimited by `\n`. [[GH-8894](https://github.com/hashicorp/vault/pull/8894)]
+* storage/raft: The `vault status` CLI command and the `sys/leader` API now contain the committed and applied
+  raft indexes. [[GH-9011](https://github.com/hashicorp/vault/pull/9011)]
+
+BUG FIXES:
+
+* auth/aws: Fix token renewal issues caused by the metadata changes in 1.4.1 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)]
+* auth/ldap: Fix 1.4.0 regression that could result in auth failures when LDAP auth config includes upndomain. 
[[GH-9041](https://github.com/hashicorp/vault/pull/9041)]
+* secrets/ad: Forward rotation requests from standbys to active clusters [[GH-66](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/66)]
+* secrets/database: Prevent generation of usernames that are not allowed by the MongoDB Atlas API [[GH-9](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/9)]
+* secrets/database: Return an error if a manual rotation of static account credentials fails [[GH-9035](https://github.com/hashicorp/vault/pull/9035)]
+* secrets/openldap: Forward all rotation requests from standbys to active clusters [[GH-9028](https://github.com/hashicorp/vault/pull/9028)]
+* secrets/transform (enterprise): Fix panic that could occur when accessing cached template entries, such as requests
+  that accessed templates directly or indirectly from a performance standby node.
+* serviceregistration: Fix a regression for Consul service registration that ignored using the listener address as
+  the redirect address unless api_addr was provided. It now properly uses the same redirect address as the one
+  used by Vault's Core object. [[GH-8976](https://github.com/hashicorp/vault/pull/8976)]
+* storage/raft: Advertise the configured cluster address to the rest of the nodes in the raft cluster. This fixes
+  an issue where a node advertising 0.0.0.0 is not using a unique hostname. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
+* storage/raft: Fix panic when multiple nodes attempt to join the cluster at once. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
+* sys: The path provided in `sys/internal/ui/mounts/:path` is now namespace-aware. This fixes an issue
+  with `vault kv` subcommands that had namespaces provided in the path always returning permission denied.
+  [[GH-8962](https://github.com/hashicorp/vault/pull/8962)]
+* ui: Fix snowman that appears when namespaces have more than one period [[GH-8910](https://github.com/hashicorp/vault/pull/8910)]
+
+## 1.4.1 (April 30th, 2020)
+
+CHANGES:
+
+* auth/aws: The default set of metadata fields added in 1.4.1 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+* storage/raft: Disallow specifying `ha_storage` if `raft` is set as the `storage` type. 
[[GH-8707](https://github.com/hashicorp/vault/pull/8707)]
+
+IMPROVEMENTS:
+
+* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+* auth/aws: Improve region selection to avoid errors seen if the account hasn't enabled some newer AWS regions [[GH-8679](https://github.com/hashicorp/vault/pull/8679)]
+* auth/azure: Enable login from Azure VMs with user-assigned identities [[GH-33](https://github.com/hashicorp/vault-plugin-auth-azure/pull/33)]
+* auth/gcp: The set of metadata stored during login is now configurable [[GH-92](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/92)]
+* auth/gcp: The type of alias name used during login is now configurable [[GH-95](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/95)]
+* auth/ldap: Improve error messages during LDAP operation failures [[GH-8740](https://github.com/hashicorp/vault/pull/8740)]
+* identity: Add a batch delete API for identity entities [[GH-8785](https://github.com/hashicorp/vault/pull/8785)]
+* identity: Improve performance of logins when no group updates are needed [[GH-8795](https://github.com/hashicorp/vault/pull/8795)]
+* metrics: Add `vault.identity.num_entities` metric [[GH-8816](https://github.com/hashicorp/vault/pull/8816)]
+* secrets/kv: Allow `delete-version-after` to be reset to 0 via the CLI [[GH-8635](https://github.com/hashicorp/vault/pull/8635)]
+* secrets/rabbitmq: Improve error handling and reporting [[GH-8619](https://github.com/hashicorp/vault/pull/8619)]
+* ui: Provide One Time Password during Operation Token generation process [[GH-8630](https://github.com/hashicorp/vault/pull/8630)]
+
+BUG FIXES:
+
+* auth/okta: Fix MFA regression (introduced in [GH-8143](https://github.com/hashicorp/vault/pull/8143)) from 1.4.0 [[GH-8807](https://github.com/hashicorp/vault/pull/8807)]
+* auth/userpass: Fix upgrade value for `token_bound_cidrs` being ignored due to incorrect key provided [[GH-8826](https://github.com/hashicorp/vault/pull/8826/files)]
+* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
+* core: Fix an issue where users attempting to build Vault could receive Go module checksum errors [[GH-8770](https://github.com/hashicorp/vault/pull/8770)]
+* core: Fix blocked requests when a SIGHUP is issued while a long-running request holds the state lock.
+  Also fixes a deadlock that can happen if `vault debug` with the config target is run during this time.
+  [[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
+* core: Always rewrite the .vault-token file as part of a `vault login` to ensure permissions and ownership are set correctly [[GH-8867](https://github.com/hashicorp/vault/pull/8867)]
+* database/mongodb: Fix context deadline error that may result from retry attempts on failed commands
+  [[GH-8863](https://github.com/hashicorp/vault/pull/8863)]
+* http: Fix superfluous call messages from the http package on logs caused by missing returns after
+  `respondError` calls [[GH-8796](https://github.com/hashicorp/vault/pull/8796)]
+* namespace (enterprise): Fix namespace listing to return `key_info` when a scoping namespace is also provided. 
+* seal/gcpkms: Fix panic that could occur if all seal parameters were provided via environment
+  variables [[GH-8840](https://github.com/hashicorp/vault/pull/8840)]
+* storage/raft: Fix memory allocation and incorrect metadata tracking issues with snapshots [[GH-8793](https://github.com/hashicorp/vault/pull/8793)]
+* storage/raft: Fix panic that could occur if `disable_clustering` was set to true on Raft storage cluster [[GH-8784](https://github.com/hashicorp/vault/pull/8784)]
+* storage/raft: Handle errors returned from the API during snapshot operations [[GH-8861](https://github.com/hashicorp/vault/pull/8861)]
+* sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)]
+
+## 1.4.0 (April 7th, 2020)
+
+CHANGES:
+
+* cli: The raft `configuration` command has been renamed to `list-peers` to avoid
+  confusion.
+
+FEATURES:
+
+* **Kerberos Authentication**: Vault now supports Kerberos authentication using a SPNEGO token.
+  Login can be performed using the Vault CLI, API, or agent.
+* **Kubernetes Service Discovery**: A new Kubernetes service discovery feature where, if
+  configured, Vault will tag Vault pods with their current health status. For more, see [#8249](https://github.com/hashicorp/vault/pull/8249).
+* **MongoDB Atlas Secrets**: Vault can now generate dynamic credentials for both MongoDB Atlas databases
+  and the [Atlas programmatic interface](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/).
+* **OpenLDAP Secrets Engine**: We now support password management of existing OpenLDAP user entries. For more, see [#8360](https://github.com/hashicorp/vault/pull/8360/).
+* **Redshift Database Secrets Engine**: The database secrets engine now supports static and dynamic secrets for the Amazon Web Services (AWS) Redshift service.
+* **Service Registration Config**: A newly introduced `service_registration` configuration stanza allows service registration to be configured separately from the storage backend. For more, see [#7887](https://github.com/hashicorp/vault/pull/7887/).
+* **Transform Secrets Engine (Enterprise)**: A new secrets engine that handles secure data transformations against provided input values.
+* **Integrated Storage**: Promoted out of beta and into general availability for both open-source and enterprise workloads.
+
+IMPROVEMENTS:
+
+* agent: Add option to force the use of the auto-auth token, and ignore the Vault token in the request [[GH-8101](https://github.com/hashicorp/vault/pull/8101)]
+* api: Restore and fix DNS SRV Lookup [[GH-8520](https://github.com/hashicorp/vault/pull/8520)]
+* audit: HMAC http_raw_body in audit log; this ensures that large authenticated Prometheus metrics responses get
+  replaced with short HMAC values [[GH-8130](https://github.com/hashicorp/vault/pull/8130)]
+* audit: Generate-root, generate-recovery-token, and generate-dr-operation-token requests and responses are now audited. 
[[GH-8301](https://github.com/hashicorp/vault/pull/8301)] +* auth/aws: Reduce the number of simultaneous STS client credentials needed [[GH-8161](https://github.com/hashicorp/vault/pull/8161)] +* auth/azure: subscription ID, resource group, vm and vmss names are now stored in alias metadata [[GH-30](https://github.com/hashicorp/vault-plugin-auth-azure/pull/30)] +* auth/jwt: Additional OIDC callback parameters available for CLI logins [[GH-80](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/80) & [GH-86](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/86)] +* auth/jwt: Bound claims may be optionally configured using globs [[GH-89](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/89)] +* auth/jwt: Timeout during OIDC CLI login if process doesn't complete within 2 minutes [[GH-97](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/97)] +* auth/jwt: Add support for the `form_post` response mode [[GH-98](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/98)] +* auth/jwt: add optional client_nonce to authorization flow [[GH-104](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/104)] +* auth/okta: Upgrade okta sdk lib, which should improve handling of groups [[GH-8143](https://github.com/hashicorp/vault/pull/8143)] +* aws: Add support for v2 of the instance metadata service (see [issue 7924](https://github.com/hashicorp/vault/issues/7924) for all linked PRs) +* core: Separate out service discovery interface from storage interface to allow + new types of service discovery not coupled to storage [[GH-7887](https://github.com/hashicorp/vault/pull/7887)] +* core: Add support for telemetry option `metrics_prefix` [[GH-8340](https://github.com/hashicorp/vault/pull/8340)] +* core: Entropy Augmentation can now be used with AWS KMS and Vault Transit seals +* core: Allow tls_min_version to be set to TLS 1.3 [[GH-8305](https://github.com/hashicorp/vault/pull/8305)] +* cli: Incorrect TLS configuration will now correctly fail [[GH-8025](https://github.com/hashicorp/vault/pull/8025)] +* identity: Allow specifying a custom `client_id` for identity tokens [[GH-8165](https://github.com/hashicorp/vault/pull/8165)] +* metrics/prometheus: improve performance with high volume of metrics updates [[GH-8507](https://github.com/hashicorp/vault/pull/8507)] +* replication (enterprise): Fix race condition causing clusters with high throughput writes to sometimes + fail to enter streaming-wal mode +* replication (enterprise): Secondary clusters can now perform an extra gRPC call to all nodes in a primary + cluster in an attempt to resolve the active node's address +* replication (enterprise): The replication status API now outputs `last_performance_wal`, `last_dr_wal`, + and `connection_state` values +* replication (enterprise): DR secondary clusters can now be recovered by the `replication/dr/secondary/recover` + API +* replication (enterprise): We now allow for an alternate means to create a Disaster Recovery token, by using a batch + token that is created with an ACL that allows for access to one or more of the DR endpoints. 
+* secrets/database/mongodb: Switched internal MongoDB driver to mongo-driver [[GH-8140](https://github.com/hashicorp/vault/pull/8140)]
+* secrets/database/mongodb: Add support for x509 client authorization to MongoDB [[GH-8329](https://github.com/hashicorp/vault/pull/8329)]
+* secrets/database/oracle: Add support for static credential rotation [[GH-26](https://github.com/hashicorp/vault-plugin-database-oracle/pull/26)]
+* secrets/consul: Add support to specify TLS options per Consul backend [[GH-4800](https://github.com/hashicorp/vault/pull/4800)]
+* secrets/gcp: Allow specifying the TTL for a service key [[GH-54](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/54)]
+* secrets/gcp: Add support for rotating root keys [[GH-53](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/53)]
+* secrets/gcp: Handle version 3 policies for Resource Manager IAM requests [[GH-77](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/77)]
+* secrets/nomad: Add support to specify TLS options per Nomad backend [[GH-8083](https://github.com/hashicorp/vault/pull/8083)]
+* secrets/ssh: Allowed users can now be templated with identity information [[GH-7548](https://github.com/hashicorp/vault/pull/7548)]
+* secrets/transit: Add RSA3072 key support [[GH-8151](https://github.com/hashicorp/vault/pull/8151)]
+* storage/consul: Vault now returns a more descriptive error message when only a client cert or
+  a client key has been provided [[GH-4930](https://github.com/hashicorp/vault/pull/8084)]
+* storage/raft: Nodes in the raft cluster can now all be given a set of possible leader
+  addresses that they will continuously try to join, further automating
+  the join process [[GH-7856](https://github.com/hashicorp/vault/pull/7856)]
+* storage/raft: Fix a potential deadlock that could occur on leadership transition [[GH-8547](https://github.com/hashicorp/vault/pull/8547)]
+* storage/raft: Refresh TLS keyring on snapshot restore [[GH-8546](https://github.com/hashicorp/vault/pull/8546)]
+* storage/etcd: Bumped etcd client API SDK [[GH-7931](https://github.com/hashicorp/vault/pull/7931) & [GH-4961](https://github.com/hashicorp/vault/pull/4961) & [GH-4349](https://github.com/hashicorp/vault/pull/4349) & [GH-7582](https://github.com/hashicorp/vault/pull/7582)]
+* ui: Make Transit Key actions more prominent [[GH-8304](https://github.com/hashicorp/vault/pull/8304)]
+* ui: Add Core Usage Metrics [[GH-8347](https://github.com/hashicorp/vault/pull/8347)]
+* ui: Refresh the Namespace list on the Namespace dropdown, and redesign the Namespace dropdown menu [[GH-8442](https://github.com/hashicorp/vault/pull/8442)]
+* ui: Update transit actions to codeblocks & automatically encode plaintext unless indicated [[GH-8462](https://github.com/hashicorp/vault/pull/8462)]
+* ui: Display the results of transit key actions in a modal window [[GH-8575](https://github.com/hashicorp/vault/pull/8575)]
+* ui: Transit key version styling updates & ability to copy key from dropdown [[GH-8480](https://github.com/hashicorp/vault/pull/8480)]
+
+BUG FIXES:
+
+* agent: Fix issue where TLS options are ignored for agent template feature [[GH-7889](https://github.com/hashicorp/vault/pull/7889)]
+* auth/jwt: Use lower case role names for `default_role` to match the `role` case convention [[GH-100](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/100)]
+* auth/ldap: Fix a bug where the UPNDOMAIN parameter was wrongly used to look up the group
+  membership of the given user 
[[GH-6325](https://github.com/hashicorp/vault/pull/8333)]
+* cli: Support autocompletion for nested mounts [[GH-8303](https://github.com/hashicorp/vault/pull/8303)]
+* cli: Fix CLI namespace autocompletion [[GH-8315](https://github.com/hashicorp/vault/pull/8315)]
+* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
+* metrics/stackdriver: Prevent the stackdriver metrics library from creating unnecessary stackdriver descriptors [[GH-8073](https://github.com/hashicorp/vault/pull/8073)]
+* replication (enterprise): Fix issue causing cubbyholes in namespaces on performance secondaries to not work.
+* replication (enterprise): Unmounting a dynamic secrets backend could sometimes lead to replication errors. Change the order of operations to prevent that.
+* seal (enterprise): Fix seal migration when transactional seal wrap backend is in use.
+* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
+* secrets/database/mysql: Ensure default static credential rotation statements are used [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
+* secrets/database/mysql: Fix inconsistent query parameter names: {{name}} or {{username}} for
+  different queries. Either is now allowed for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
+* secrets/database/postgres: Fix inconsistent query parameter names: {{name}} or {{username}} for
+  different queries. Either is now allowed for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
+* secrets/pki: Support FQDNs in DNS Name [[GH-8288](https://github.com/hashicorp/vault/pull/8288)]
+* storage/raft: Allow seal migration to be performed on Vault clusters using raft storage [[GH-8103](https://github.com/hashicorp/vault/pull/8103)]
+* telemetry: Prometheus requests on standby nodes will now return an error instead of forwarding
+  the request to the active node [[GH-8280](https://github.com/hashicorp/vault/pull/8280)]
+* ui: Fix broken popup menu on the transit secrets list page [[GH-8348](https://github.com/hashicorp/vault/pull/8348)]
+* ui: Update headless Chrome flag to fix `yarn run test:oss` [[GH-8035](https://github.com/hashicorp/vault/pull/8035)]
+* ui: Update CLI to accept empty strings as param value to reset previously-set values
+* ui: Fix bug where error states don't clear when moving between action tabs on Transit [[GH-8354](https://github.com/hashicorp/vault/pull/8354)]
+
+## 1.3.10
+
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+
+## 1.3.9.1
+
+### August 21st, 2020
+
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary.
+
+## 1.3.9
+
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request. 
+ For more details and a workaround, see the [1.3.9 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.9) +* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise + customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. + +## 1.3.8 + +### August 20th, 2020 + +SECURITY: + +* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) +* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) + +KNOWN ISSUES: + +* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. +* AWS IAM logins may return an error depending on the headers sent with the request. + For more details and a workaround, see the [1.3.8 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.8) + +## 1.3.7 + +### July 2nd, 2020 + +BUG FIXES: + +* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values +* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9363](https://github.com/hashicorp/vault/pull/9363)] + +## 1.3.6 (May 21st, 2020) + +SECURITY: + +* core: proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)] + +BUG FIXES: + +* auth/aws: Fix token renewal issues caused by the metadata changes in 1.3.5 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)] +* replication: Fix mount filter bug that allowed replication filters to hide local mounts on a performance secondary + +## 1.3.5 (April 28th, 2020) + +CHANGES: + +* auth/aws: The default set of metadata fields added in 1.3.2 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] + +IMPROVEMENTS: + +* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] + +## 1.3.4 (March 19th, 2020) + +SECURITY: + +* A vulnerability was identified in Vault and Vault Enterprise such that, under certain circumstances, an Entity's Group membership may inadvertently include Groups the Entity no longer has permissions to. This vulnerability, CVE-2020-10660, affects Vault and Vault Enterprise versions 0.9.0 and newer, and is fixed in 1.3.4. [[GH-8606](https://github.com/hashicorp/vault/pull/8606)] +* A vulnerability was identified in Vault Enterprise such that, under certain circumstances, existing nested-path policies may give access to Namespaces created after-the-fact. This vulnerability, CVE-2020-10661, affects Vault Enterprise versions 0.11 and newer, and is fixed in 1.3.4. 
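+
+A hedged verification sketch after upgrading (`$ENTITY_ID` is a placeholder;
+`group_ids` is the field the identity API reports membership under):
+
+```
+# Inspect an entity and confirm its group membership matches expectations.
+vault read -format=json identity/entity/id/$ENTITY_ID | jq '.data.group_ids'
+```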
+
+## 1.3.3 (March 5th, 2020)
+
+BUG FIXES:
+
+* approle: Fix excessive locking during tidy, which could potentially block new approle logins for long enough to cause an outage [[GH-8418](https://github.com/hashicorp/vault/pull/8418)]
+* cli: Fix issue where Raft snapshots from standby nodes created an empty backup file [[GH-8097](https://github.com/hashicorp/vault/pull/8097)]
+* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
+* kmip: Role read now returns `tls_client_ttl`
+* kmip: Fix panic when `templateattr` not provided in rekey request
+* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
+* storage/mysql: Fix potential crash when using MySQL for high availability coordination [[GH-8300](https://github.com/hashicorp/vault/pull/8300)]
+* storage/raft: Fix potential crash when using Raft for high availability coordination [[GH-8356](https://github.com/hashicorp/vault/pull/8356)]
+* ui: Fix missing License menu item [[GH-8230](https://github.com/hashicorp/vault/pull/8230)]
+* ui: Fix bug where the default auth method on login defaulted to an auth method with listing-visibility=unauth instead of “other” [[GH-8218](https://github.com/hashicorp/vault/pull/8218)]
+* ui: Fix bug where KMIP details were not shown in the UI Wizard [[GH-8255](https://github.com/hashicorp/vault/pull/8255)]
+* ui: Show Error messages on Auth Configuration page when you hit permission errors [[GH-8500](https://github.com/hashicorp/vault/pull/8500)]
+* ui: Remove duplicate form inputs for the GitHub config [[GH-8519](https://github.com/hashicorp/vault/pull/8519)]
+* ui: Correct HMAC capitalization [[GH-8528](https://github.com/hashicorp/vault/pull/8528)]
+* ui: Fix danger message in DR [[GH-8555](https://github.com/hashicorp/vault/pull/8555)]
+* ui: Fix certificate field for LDAP config [[GH-8573](https://github.com/hashicorp/vault/pull/8573)]
+
+## 1.3.2 (January 22nd, 2020)
+
+SECURITY:
+
+* When deleting a namespace on Vault Enterprise, in certain circumstances, the deletion
+  process will fail to revoke dynamic secrets for a mount in that namespace. This will
+  leave any dynamic secrets in remote systems alive and will fail to clean them up. This
+  vulnerability, CVE-2020-7220, affects Vault Enterprise 0.11.0 and newer. 
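+
+Until an upgrade is possible, one hedged mitigation is to force-revoke leases
+under a namespace's mounts before deleting the namespace (`ns1` and
+`database/` below are placeholders):
+
+```
+# Revoke all leases under a mount inside the namespace, then delete it.
+VAULT_NAMESPACE=ns1 vault lease revoke -prefix database/
+vault namespace delete ns1
+```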
+ +IMPROVEMENTS: + +* auth/aws: Add aws metadata to identity alias [[GH-7985](https://github.com/hashicorp/vault/pull/7985)] +* auth/kubernetes: Allow both names and namespaces to be set to "*" [[GH-78](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/78)] + +BUG FIXES: + +* auth/azure: Fix Azure compute client to use correct base URL [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* auth/ldap: Fix renewal of tokens without configured policies that are + generated by an LDAP login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* auth/okta: Fix renewal of tokens without configured policies that are + generated by an Okta login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* core: Fix seal migration error when attempting to migrate from auto unseal to shamir [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] +* core: Fix seal migration config issue when migrating from auto unseal to auto unseal [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] +* plugin: Fix issue where a plugin unwrap request potentially used an expired token [[GH-8058](https://github.com/hashicorp/vault/pull/8058)] +* replication: Fix issue where a forwarded request from a performance/standby node could run into + a timeout +* secrets/database: Fix issue where a manual static role rotation could potentially panic [[GH-8098](https://github.com/hashicorp/vault/pull/8098)] +* secrets/database: Fix issue where a manual root credential rotation request is not forwarded + to the primary node [[GH-8125](https://github.com/hashicorp/vault/pull/8125)] +* secrets/database: Fix issue where a manual static role rotation request is not forwarded + to the primary node [[GH-8126](https://github.com/hashicorp/vault/pull/8126)] +* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [[GH-8040](https://github.com/hashicorp/vault/pull/8040)] +* ui: Fix deleting namespaces [[GH-8132](https://github.com/hashicorp/vault/pull/8132)] +* ui: Fix Error handler on kv-secret edit and kv-secret view pages [[GH-8133](https://github.com/hashicorp/vault/pull/8133)] +* ui: Fix OIDC callback to check storage [[GH-7929](https://github.com/hashicorp/vault/pull/7929)]. +* ui: Change `.box-radio` height to min-height to prevent overflow issues [[GH-8065](https://github.com/hashicorp/vault/pull/8065)] + +## 1.3.1 (December 18th, 2019) + +IMPROVEMENTS: + +* agent: Add ability to set `exit-after-auth` via the CLI [[GH-7920](https://github.com/hashicorp/vault/pull/7920)] +* auth/ldap: Add a `request_timeout` configuration option to prevent connection + requests from hanging [[GH-7909](https://github.com/hashicorp/vault/pull/7909)] +* auth/kubernetes: Add audience to tokenreview API request for Kube deployments where issuer + is not Kube. 
[[GH-74](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/74)]
+* secrets/ad: Add a `request_timeout` configuration option to prevent connection
+  requests from hanging [[GH-59](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/59)]
+* storage/postgresql: Add support for setting `connection_url` from environment
+  variable `VAULT_PG_CONNECTION_URL` [[GH-7937](https://github.com/hashicorp/vault/pull/7937)]
+* telemetry: Add `enable_hostname_label` option to telemetry stanza [[GH-7902](https://github.com/hashicorp/vault/pull/7902)]
+* telemetry: Add accept header check for the Prometheus MIME type [[GH-7958](https://github.com/hashicorp/vault/pull/7958)]
+
+BUG FIXES:
+
+* agent: Fix issue where Agent exits before all templates are rendered when
+  using `exit_after_auth` [[GH-7899](https://github.com/hashicorp/vault/pull/7899)]
+* auth/aws: Fix region-related issues when using a custom `sts_endpoint` by adding
+  a `sts_region` parameter [[GH-7922](https://github.com/hashicorp/vault/pull/7922)]
+* auth/token: Fix panic when getting batch tokens on a performance standby from a role
+  that does not exist [[GH-8027](https://github.com/hashicorp/vault/pull/8027)]
+* core: Improve warning message for lease TTLs [[GH-7901](https://github.com/hashicorp/vault/pull/7901)]
+* identity: Fix identity token panic during invalidation [[GH-8043](https://github.com/hashicorp/vault/pull/8043)]
+* plugin: Fix a panic that could occur if a mount/auth entry was unable to
+  mount the plugin backend and a request that required the system view to be
+  retrieved was made [[GH-7991](https://github.com/hashicorp/vault/pull/7991)]
+* replication: Add `generate-public-key` endpoint to list of allowed endpoints
+  for existing DR secondaries
+* secrets/gcp: Fix panic if bindings aren't provided in roleset create/update [[GH-56](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/56)]
+* secrets/pki: Prevent generating certificate on performance standby when storing
+  [[GH-7904](https://github.com/hashicorp/vault/pull/7904)]
+* secrets/transit: Prevent restoring keys to new names that are subpaths [[GH-7998](https://github.com/hashicorp/vault/pull/7998)]
+* storage/s3: Fix a bug in configurable S3 paths that was preventing use of S3 as
+  a source during `operator migrate` operations [[GH-7966](https://github.com/hashicorp/vault/pull/7966)]
+* ui: Ensure secrets with a period in their key can be viewed and copied [[GH-7926](https://github.com/hashicorp/vault/pull/7926)]
+* ui: Fix status menu after demotion [[GH-7997](https://github.com/hashicorp/vault/pull/7997)]
+* ui: Fix select dropdowns in Safari when running Mojave [[GH-8023](https://github.com/hashicorp/vault/pull/8023)]
+
+## 1.3 (November 14th, 2019)
+
+CHANGES:
+
+* Secondary cluster activation: There has been a change to the way that activating
+  performance and DR secondary clusters works when using public keys for
+  encryption of the parameters rather than a wrapping token. This flow was
+  experimental and never documented. It is now officially supported and
+  documented but is not backwards compatible with older Vault releases.
+* Cluster cipher suites: On its cluster port, Vault will no longer advertise
+  the full TLS 1.2 cipher suite list by default. Although this port is only
+  used for Vault-to-Vault communication and would always pick a strong cipher,
+  it could cause false flags on port scanners and other security utilities
+  that assumed insecure ciphers were being used.
The previous behavior can be
+  achieved by setting the value of the (undocumented) `cluster_cipher_suites`
+  config flag to `tls12`.
+* API/Agent Renewal behavior: The API now allows multiple options for how it
+  deals with renewals. The legacy behavior in the Agent/API is for the renewer
+  (now called the lifetime watcher) to exit on a renew error, leading to a
+  reauthentication. The new default behavior is for the lifetime watcher to
+  ignore 5XX errors and simply retry as scheduled, using the existing lease
+  duration. It is also possible, within custom code, to disable renewals
+  entirely, which allows the lifetime watcher to simply return when it
+  believes it is time for your code to renew or reauthenticate.
+
+FEATURES:
+
+* **Vault Debug**: A new top-level subcommand, `debug`, is added that allows
+  operators to retrieve debugging information related to a particular Vault
+  node. Operators can use this simple workflow to capture triaging information,
+  which can then be consumed programmatically or by support and engineering teams.
+  It has the ability to probe for config, host, metrics, pprof, server status,
+  and replication status.
+* **Recovery Mode**: The Vault server can be brought up in recovery mode to resolve
+  outages caused by the data store being in a bad state. This is a privileged mode
+  that allows `sys/raw` API calls to perform surgical corrections to the data
+  store. Bad storage state can be caused by bugs. However, this is usually
+  observed when known (and fixed) bugs are hit by older versions of Vault.
+* **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from
+  an external source for critical security parameters. Currently an HSM that
+  supports PKCS#11 is the only supported source.
+* **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets
+  engine, users or applications can check out a service account for use, and its
+  password will be rotated when it's checked back in.
+* **Vault Agent Template**: Vault Agent now supports rendering templates containing
+  Vault secrets to disk, similar to Consul Template [[GH-7652](https://github.com/hashicorp/vault/pull/7652)]
+* **Transit Key Type Support**: Signing and verification are now supported with the P-384
+  (secp384r1) and P-521 (secp521r1) ECDSA curves [[GH-7551](https://github.com/hashicorp/vault/pull/7551)], and encryption and
+  decryption are now supported via AES128-GCM96 [[GH-7555](https://github.com/hashicorp/vault/pull/7555)]
+* **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to
+  require a specific header before allowing requests [[GH-7627](https://github.com/hashicorp/vault/pull/7627)]
+* **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can
+  now be rotated, to ensure that only Vault knows the credentials it is using [[GH-7131](https://github.com/hashicorp/vault/pull/7131)]
+* **New UI Features**: The UI now supports managing users and groups for the
+  Userpass, Cert, Okta, and Radius auth methods.
+* **Shamir with Stored Master Key**: The on-disk format for Shamir seals has changed,
+  allowing for a secondary cluster using Shamir downstream from a primary cluster
+  using Auto Unseal. [[GH-7694](https://github.com/hashicorp/vault/pull/7694)]
+* **Stackdriver Metrics Sink**: Vault can now send metrics to
+  [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration
+  documentation](https://www.vaultproject.io/docs/config/index.html) for
+  details.
[[GH-6957](https://github.com/hashicorp/vault/pull/6957)]
+* **Filtered Paths Replication (Enterprise)**: Based on its predecessor, Filtered Mount Replication,
+  Filtered Paths Replication now allows filtering of namespaces in addition to mounts.
+  With this feature, Filtered Mount Replication should be considered deprecated.
+* **Token Renewal via Accessor**: Tokens can now be renewed via the accessor value through
+  the new `auth/token/renew-accessor` endpoint if the caller's token has
+  permission to access that endpoint.
+* **Improved Integrated Storage (Beta)**: Improved raft write performance, added support for
+  non-voter nodes, along with UI support for: using raft storage, joining a raft cluster,
+  and downloading and restoring a snapshot.
+
+IMPROVEMENTS:
+
+* agent: Add ability to set the TLS SNI name used by Agent [[GH-7519](https://github.com/hashicorp/vault/pull/7519)]
+* agent & api: Change default renewer behavior to ignore 5XX errors [[GH-7733](https://github.com/hashicorp/vault/pull/7733)]
+* auth/jwt: The redirect callback host may now be specified for CLI logins
+  [[GH-71](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/71)]
+* auth/jwt: Bound claims may now contain boolean values [[GH-73](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/73)]
+* auth/jwt: CLI logins can now open the browser when running in WSL [[GH-77](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/77)]
+* core: Exit ScanView if the context has been canceled [[GH-7419](https://github.com/hashicorp/vault/pull/7419)]
+* core: Re-encrypt barrier and recovery keys if the unseal key is updated
+  [[GH-7493](https://github.com/hashicorp/vault/pull/7493)]
+* core: Don't advertise the full set of TLS 1.2 cipher suites on the cluster
+  port, even though only strong ciphers were used [[GH-7487](https://github.com/hashicorp/vault/pull/7487)]
+* core (enterprise): Add background seal re-wrap
+* core/metrics: Add config parameter to allow unauthenticated sys/metrics
+  access [[GH-7550](https://github.com/hashicorp/vault/pull/7550)]
+* metrics: Upgrade DataDog library to improve performance [[GH-7794](https://github.com/hashicorp/vault/pull/7794)]
+* replication (enterprise): Write-Ahead-Log entries will not duplicate the
+  data belonging to the encompassing physical entries of the transaction,
+  thereby improving performance and storage capacity.
+* replication (enterprise): Added more replication metrics
+* replication (enterprise): Reindex process now compares subpages for a more
+  accurate indexing process.
+* replication (enterprise): Reindex API now accepts a new `skip_flush`
+  parameter indicating that changes should not be flushed while the tree is
+  locked.
+* secrets/aws: The root config can now be read [[GH-7245](https://github.com/hashicorp/vault/pull/7245)]
+* secrets/aws: Role paths may now contain the '@' character [[GH-7553](https://github.com/hashicorp/vault/pull/7553)]
+* secrets/database/cassandra: Add ability to skip verification of connection
+  [[GH-7614](https://github.com/hashicorp/vault/pull/7614)]
+* secrets/gcp: Fix panic during rollback if the roleset has been deleted
+  [[GH-52](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/52)]
+* storage/azure: Add config parameter to Azure storage backend to allow
+  specifying the ARM endpoint [[GH-7567](https://github.com/hashicorp/vault/pull/7567)]
+* storage/cassandra: Improve storage efficiency by eliminating unnecessary
+  copies of value data [[GH-7199](https://github.com/hashicorp/vault/pull/7199)]
+* storage/raft: Improve raft write performance by utilizing FSM Batching
+  [[GH-7527](https://github.com/hashicorp/vault/pull/7527)]
+* storage/raft: Add support for non-voter nodes [[GH-7634](https://github.com/hashicorp/vault/pull/7634)]
+* sys: Add a new `sys/host-info` endpoint for querying information about
+  the host [[GH-7330](https://github.com/hashicorp/vault/pull/7330)]
+* sys: Add a new set of endpoints under `sys/pprof/` that allows profiling
+  information to be extracted [[GH-7473](https://github.com/hashicorp/vault/pull/7473)]
+* sys: Add endpoint that counts the total number of active identity entities
+  [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
+* sys: `sys/seal-status` now has a `storage_type` field denoting what type of
+  storage the cluster is configured to use
+* sys: Add a new `sys/internal/counters/tokens` endpoint that counts the
+  total number of active service token accessors in the shared token storage.
+  [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
+* sys/config: Add a new endpoint under `sys/config/state/sanitized` that
+  returns the configuration state of the server. It excludes config values
+  from `storage`, `ha_storage`, and `seal` stanzas and some values
+  from `telemetry` due to potential sensitive entries in those fields.
+* ui: when using raft storage, you can now join a raft cluster, download a
+  snapshot, and restore a snapshot from the UI [[GH-7410](https://github.com/hashicorp/vault/pull/7410)]
+* ui: clarify when a secret version is deleted in the secret version history
+  dropdown [[GH-7714](https://github.com/hashicorp/vault/pull/7714)]
+
+BUG FIXES:
+
+* agent: Fix a data race on the token value for inmemsink [[GH-7707](https://github.com/hashicorp/vault/pull/7707)]
+* api: Fix Go API using lease revocation via URL instead of body [[GH-7777](https://github.com/hashicorp/vault/pull/7777)]
+* api: Allow setting a function to control retry behavior [[GH-7331](https://github.com/hashicorp/vault/pull/7331)]
+* auth/gcp: Fix a bug where region information in instance group names could
+  cause an authorization attempt to fail [[GH-74](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/74)]
+* cli: Fix a bug where a token of an unknown format (e.g. in ~/.vault-token)
+  could cause confusing error messages during `vault login` [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
+* cli: Fix a bug where the `namespace list` command with JSON formatting
+  always returned an empty object [[GH-7705](https://github.com/hashicorp/vault/pull/7705)]
+* cli: Command timeouts are now always specified solely by the
+  `VAULT_CLIENT_TIMEOUT` value.
[[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
+* core: Don't allow registering a non-root zero TTL token lease. This is purely
+  defense in depth as the lease would be revoked immediately anyway, but
+  there's no real reason to allow registration. [[GH-7524](https://github.com/hashicorp/vault/pull/7524)]
+* core: Correctly revoke the token that's present in the response auth from an
+  auth/token/ request if there's a partial failure during the process. [[GH-7835](https://github.com/hashicorp/vault/pull/7835)]
+* identity (enterprise): Fixed case-sensitive identity loading in secondary
+  clusters [[GH-7327](https://github.com/hashicorp/vault/pull/7327)]
+* identity: Ensure only the replication primary stores the identity case sensitivity state [[GH-7820](https://github.com/hashicorp/vault/pull/7820)]
+* raft: Fixed the `VAULT_CLUSTER_ADDR` env var being ignored at startup [[GH-7619](https://github.com/hashicorp/vault/pull/7619)]
+* secrets/pki: Don't allow duplicate SAN names in issued certs [[GH-7605](https://github.com/hashicorp/vault/pull/7605)]
+* sys/health: Pay attention to the values provided for `standbyok` and
+  `perfstandbyok` rather than simply using their presence as a key to flip on
+  that behavior [[GH-7323](https://github.com/hashicorp/vault/pull/7323)]
+* ui: using the `wrapped_token` query param will work with `redirect_to` and
+  will automatically log in as intended [[GH-7398](https://github.com/hashicorp/vault/pull/7398)]
+* ui: fix an error when initializing from the UI using PGP keys [[GH-7542](https://github.com/hashicorp/vault/pull/7542)]
+* ui: show all active kv v2 secret versions even when `delete_version_after` is configured [[GH-7685](https://github.com/hashicorp/vault/pull/7685)]
+* ui: Ensure that items in the top navigation link to pages that users have access to [[GH-7590](https://github.com/hashicorp/vault/pull/7590)]
+
+## 1.2.7
+
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+
+## 1.2.6.1
+
+### August 21st, 2020
+
+### Enterprise Only
+
+NOTE:
+
+Includes the correct license in the HSM binary.
+
+## 1.2.6
+
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.2.6 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.6)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly; enterprise
+  customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+## 1.2.5
+
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed.
This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.2.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.5)
+
+BUG FIXES:
+
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken for seal-wrapped values
+
+## 1.2.4 (November 7th, 2019)
+
+SECURITY:
+
+* In a non-root namespace, revocation of a token scoped to a non-root
+  namespace did not trigger the expected revocation of dynamic secret leases
+  associated with that token. As a result, dynamic secret leases in non-root
+  namespaces may outlive the token that created them. This vulnerability,
+  CVE-2019-18616, affects Vault Enterprise 0.11.0 and newer.
+* Disaster Recovery secondary clusters did not delete already-replicated data
+  after a mount filter had been created on an upstream Performance secondary
+  cluster. As a result, encrypted secrets may remain replicated on a Disaster
+  Recovery secondary cluster after application of a mount filter excluding
+  those secrets from replication. This vulnerability, CVE-2019-18617, affects
+  Vault Enterprise 0.8 and newer.
+* Update version of Go to 1.12.12 to fix Go bug golang.org/issue/34960 which
+  corresponds to CVE-2019-17596.
+
+CHANGES:
+
+* auth/aws: If a custom `sts_endpoint` is configured, Vault Agent and the CLI
+  should provide the corresponding region via the `region` parameter (which
+  already existed as a CLI parameter, and has now been added to Agent). The
+  automatic region detection added to the CLI and Agent in 1.2 has been removed.
+
+IMPROVEMENTS:
+
+* cli: Ignore existing token during CLI login [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
+* core: Log proxy settings from environment on startup [[GH-7528](https://github.com/hashicorp/vault/pull/7528)]
+* core: Cache whether we've been initialized to reduce load on storage [[GH-7549](https://github.com/hashicorp/vault/pull/7549)]
+
+BUG FIXES:
+
+* agent: Fix handling of gzipped responses [[GH-7470](https://github.com/hashicorp/vault/pull/7470)]
+* cli: Fix panic when the PGP keys list is empty [[GH-7546](https://github.com/hashicorp/vault/pull/7546)]
+* cli: Command timeouts are now always specified solely by the
+  `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
+* core: Add hook for initializing seals for migration [[GH-7666](https://github.com/hashicorp/vault/pull/7666)]
+* core (enterprise): Migrating from one auto unseal method to another never
+  worked on Enterprise; now it does.
+* identity: Add required field `response_types_supported` to identity token
+  `.well-known/openid-configuration` response [[GH-7533](https://github.com/hashicorp/vault/pull/7533)]
+* identity: Fixed nil pointer panic when merging entities [[GH-7712](https://github.com/hashicorp/vault/pull/7712)]
+* replication (Enterprise): Fix issue causing performance standby nodes to
+  disconnect when under high load.
+* secrets/azure: Fix panic that could occur if client retries time out [[GH-7793](https://github.com/hashicorp/vault/pull/7793)]
+* secrets/database: Fix bug in combined DB secrets engine that can result in
+  writes to static-roles endpoints timing out [[GH-7518](https://github.com/hashicorp/vault/pull/7518)]
+* secrets/pki: Improve tidy to continue when value is nil [[GH-7589](https://github.com/hashicorp/vault/pull/7589)]
+* ui (Enterprise): Allow kv v2 secrets that are gated by Control Groups to be
+  viewed in the UI [[GH-7504](https://github.com/hashicorp/vault/pull/7504)]
+
+## 1.2.3 (September 12, 2019)
+
+FEATURES:
+
+* **Oracle Cloud (OCI) Integration**: Vault now supports using Oracle Cloud for
+  storage, auto unseal, and authentication.
+
+IMPROVEMENTS:
+
+* auth/jwt: Groups claim matching now treats a string response as a single
+  element list [[GH-63](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/63)]
+* auth/kubernetes: Enable better support for the projected tokens API by allowing
+  the user to specify the issuer [[GH-65](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/65)]
+* auth/pcf: The PCF auth plugin was renamed to the CF auth plugin, maintaining
+  full backwards compatibility [[GH-7346](https://github.com/hashicorp/vault/pull/7346)]
+* replication: Premium packages now come with unlimited performance standby
+  nodes
+
+BUG FIXES:
+
+* agent: Allow batch tokens and other non-renewable tokens to be used for
+  agent operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)]
+* auth/jwt: Fix an error where newer (v1.2) `token_*` configuration parameters
+  were not being applied to tokens generated using the OIDC login flow
+  [[GH-67](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/67)]
+* raft: Fix an incorrect JSON tag on `leader_ca_cert` in the join request [[GH-7393](https://github.com/hashicorp/vault/pull/7393)]
+* seal/transit: Allow using Vault Agent for transit seal operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)]
+* storage/couchdb: Fix a file descriptor leak [[GH-7345](https://github.com/hashicorp/vault/pull/7345)]
+* ui: Fix a bug where the status menu would disappear when trying to revoke a
+  token [[GH-7337](https://github.com/hashicorp/vault/pull/7337)]
+* ui: Fix a regression that prevented input of custom items in search-select
+  [[GH-7338](https://github.com/hashicorp/vault/pull/7338)]
+* ui: Fix an issue with the namespace picker being unable to render nested
+  namespaces named with numbers and sorting of namespaces in the picker
+  [[GH-7333](https://github.com/hashicorp/vault/pull/7333)]
+
+## 1.2.2 (August 15, 2019)
+
+CHANGES:
+
+* auth/pcf: The signature format has been updated to use the standard Base64
+  encoding instead of the URL-safe variant.
Signatures created using the
+  previous format will continue to be accepted [PCF-27]
+* core: The HTTP response code returned when an identity token key is not found
+  has been changed from 400 to 404
+
+IMPROVEMENTS:
+
+* identity: Remove 512 entity limit for groups [[GH-7317](https://github.com/hashicorp/vault/pull/7317)]
+
+BUG FIXES:
+
+* auth/approle: Fix an error where an empty `token_type` string was not being
+  correctly handled as `TokenTypeDefault` [[GH-7273](https://github.com/hashicorp/vault/pull/7273)]
+* auth/radius: Fix panic when logging in [[GH-7286](https://github.com/hashicorp/vault/pull/7286)]
+* ui: the string-list widget will now honor multiline input [[GH-7254](https://github.com/hashicorp/vault/pull/7254)]
+* ui: various visual bugs in the KV interface were addressed [[GH-7307](https://github.com/hashicorp/vault/pull/7307)]
+* ui: fixed incorrect URL to access help in LDAP auth [[GH-7299](https://github.com/hashicorp/vault/pull/7299)]
+
+## 1.2.1 (August 6th, 2019)
+
+BUG FIXES:
+
+* agent: Fix a panic on creds pulling in some error conditions in `aws` and
+  `alicloud` auth methods [[GH-7238](https://github.com/hashicorp/vault/pull/7238)]
+* auth/approle: Fix error reading role-id on a role created pre-1.2 [[GH-7231](https://github.com/hashicorp/vault/pull/7231)]
+* auth/token: Fix sudo check in non-root namespaces on create [[GH-7224](https://github.com/hashicorp/vault/pull/7224)]
+* core: Fix health checks with `perfstandbyok=true` returning the wrong status
+  code [[GH-7240](https://github.com/hashicorp/vault/pull/7240)]
+* ui: The web CLI will now parse input as a shell string, with special
+  characters escaped [[GH-7206](https://github.com/hashicorp/vault/pull/7206)]
+* ui: The UI will now redirect to a page after authentication [[GH-7088](https://github.com/hashicorp/vault/pull/7088)]
+* ui (Enterprise): The list of namespaces is now cleared when logging
+  out [[GH-7186](https://github.com/hashicorp/vault/pull/7186)]
+
+## 1.2.0 (July 30th, 2019)
+
+CHANGES:
+
+* Token store roles use new, common token fields for the values
+  that overlap with other auth backends. `period`, `explicit_max_ttl`, and
+  `bound_cidrs` will continue to work, with priority being given to the
+  `token_` prefixed versions of those parameters. They will also be returned
+  when doing a read on the role if they were used to provide values initially;
+  however, in Vault 1.4 if `period` or `explicit_max_ttl` is zero they will no
+  longer be returned. (`explicit_max_ttl` was already not returned if empty.)
+* Due to underlying changes in Go version 1.12 and Go > 1.11.5, Vault is now
+  stricter about what characters it will accept in path names. Whereas before
+  it would filter out unprintable characters (and this could be turned off),
+  control characters and other invalid characters are now rejected within Go's
+  HTTP library before the request is passed to Vault, and this cannot be
+  disabled. To continue using these (e.g. for already-written paths), they
+  must be properly percent-encoded (e.g. `\r` becomes `%0D`, `\x00` becomes
+  `%00`, and so on).
+* The user-configured regions on the AWSKMS seal stanza will now be preferred
+  over regions set in the enclosing environment. This is a _breaking_ change.
+* All values in audit logs are now omitted if they are empty. This helps
+  reduce the size of audit log entries by not reproducing keys in each entry
+  that commonly don't contain any value, which can help in cases where audit
+  log entries are above the maximum UDP packet size, among others.
+* Both PeriodicFunc and WALRollback functions will be called if both are
+  provided. Previously WALRollback would only be called if PeriodicFunc was
+  not set. See [[GH-6717](https://github.com/hashicorp/vault/pull/6717)] for
+  details.
+* Vault now uses Go's official dependency management system, Go Modules, to
+  manage dependencies. In order both to reduce transitive dependencies for
+  API library users and plugin authors, and to work around various conflicts,
+  we have moved various helpers around, mostly under an `sdk/` submodule. A
+  couple of functions have also moved from plugin helper code to the `api/`
+  submodule. If you are a plugin author, take a look at some of our official
+  plugins and the paths they are importing for guidance.
+* AppRole uses new, common token fields for values that overlap
+  with other auth backends. `period` and `policies` will continue to work,
+  with priority being given to the `token_` prefixed versions of those
+  parameters. They will also be returned when doing a read on the role if they
+  were used to provide values initially.
+* In AppRole, `"default"` is no longer automatically added to the `policies`
+  parameter. This was a no-op since it would always be added anyway by
+  Vault's core; however, this can now be explicitly disabled with the new
+  `token_no_default_policy` field.
+* In AppRole, `bound_cidr_list` is no longer returned when reading a role
+* rollback: Rollback will no longer display log messages when it runs; it will
+  only display messages on error.
+* Database plugins will now default to 4 `max_open_connections`
+  rather than 2.
+
+FEATURES:
+
+* **Integrated Storage**: Vault 1.2 includes a _tech preview_ of a new way to
+  manage storage directly within a Vault cluster. This new integrated storage
+  solution is based on the Raft protocol which is also used to back HashiCorp
+  Consul and HashiCorp Nomad.
+* **Combined DB credential rotation**: Alternative mode for the Combined DB
+  Secret Engine to automatically rotate existing database account credentials
+  and set Vault as the source of truth for credentials.
+* **Identity Tokens**: Vault's Identity system can now generate OIDC-compliant
+  ID tokens. These customizable tokens allow encapsulating a signed, verifiable
+  snapshot of identity information and metadata. They can be used by other
+  applications—even those without Vault authorization—as a way of establishing
+  identity based on a Vault entity (see the sketch after this list).
+* **Pivotal Cloud Foundry plugin**: New auth method using Pivotal Cloud
+  Foundry certificates for Vault authentication.
+* **ElasticSearch database plugin**: New ElasticSearch database plugin issues
+  unique, short-lived ElasticSearch credentials.
+* **New UI Features**: An HTTP Request Volume Page and new UI for editing LDAP
+  Users and Groups have been added.
+* **HA support for Postgres**: PostgreSQL versions >= 9.5 may now be used as
+  an HA storage backend.
+* **KMIP secrets engine (Enterprise)**: Allows Vault to operate as a KMIP
+  Server, seamlessly brokering cryptographic operations for traditional
+  infrastructure.
+* Common Token Fields: Auth methods now use common fields for controlling
+  token behavior, making it easier to understand configuration across methods.
+* **Vault API explorer**: The Vault UI now includes an embedded API explorer
+  where you can browse the endpoints available to you and make requests. To try
+  it out, open the Web CLI and type `api`.
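+
+As a usage sketch only (not part of the original changelog), an application
+might fetch one of the new identity tokens through the Go `api` client; the
+role name `my-app` and prior configuration of an OIDC key under
+`identity/oidc/` are assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Reads VAULT_ADDR and VAULT_TOKEN from the environment.
+	client, err := vault.NewClient(vault.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Assumes a named key and a role called "my-app" have already been
+	// configured under identity/oidc/.
+	secret, err := client.Logical().Read("identity/oidc/token/my-app")
+	if err != nil || secret == nil {
+		log.Fatal("could not read identity token: ", err)
+	}
+	fmt.Println("signed ID token:", secret.Data["token"])
+}
+```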
+
+IMPROVEMENTS:
+
+* agent: Allow EC2 nonce to be passed in [[GH-6953](https://github.com/hashicorp/vault/pull/6953)]
+* agent: Add optional `namespace` parameter, which sets the default namespace
+  for the auto-auth functionality [[GH-6988](https://github.com/hashicorp/vault/pull/6988)]
+* agent: Add cert auto-auth method [[GH-6652](https://github.com/hashicorp/vault/pull/6652)]
+* api: Add support for passing data to delete operations via `DeleteWithData`
+  [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
+* audit/file: Dramatically speed up file operations by changing
+  locking/marshaling order [[GH-7024](https://github.com/hashicorp/vault/pull/7024)]
+* auth/jwt: A JWKS endpoint may now be configured for signature verification [[GH-43](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/43)]
+* auth/jwt: A new `verbose_oidc_logging` role parameter has been added to help
+  troubleshoot OIDC configuration [[GH-57](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/57)]
+* auth/jwt: `bound_claims` will now match received claims that are lists if any element
+  of the list is one of the expected values [[GH-50](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/50)]
+* auth/jwt: Leeways for `nbf` and `exp` are now configurable, as is clock skew
+  leeway [[GH-53](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/53)]
+* auth/kubernetes: Allow service names/namespaces to be configured as globs
+  [[GH-58](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/58)]
+* auth/token: Add identity system support to the token backend
+  via token roles [[GH-6267](https://github.com/hashicorp/vault/pull/6267)]
+* auth/token: Add a large set of token configuration options to token store
+  roles [[GH-6662](https://github.com/hashicorp/vault/pull/6662)]
+* cli: `path-help` now allows `-format=json` to be specified, which will
+  output OpenAPI [[GH-7006](https://github.com/hashicorp/vault/pull/7006)]
+* cli: Add support for passing parameters to `vault delete` operations
+  [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
+* cli: Add a log-format CLI flag that can specify either "standard" or "json"
+  for the log format of the `vault server` command.
[[GH-6840](https://github.com/hashicorp/vault/pull/6840)]
+* cli: Add `-dev-no-store-token` to allow dev servers to not store the
+  generated token at the tokenhelper location [[GH-7104](https://github.com/hashicorp/vault/pull/7104)]
+* identity: Allow a group alias' canonical ID to be modified
+* namespaces: Namespaces can now be created and deleted from performance
+  replication secondaries
+* plugins: Change the default for `max_open_connections` for DB plugins to 4
+  [[GH-7093](https://github.com/hashicorp/vault/pull/7093)]
+* replication: Client TLS authentication is now supported when enabling or
+  updating a replication secondary
+* secrets/database: Cassandra operations will now cancel on client timeout
+  [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
+* secrets/kv: Add optional `delete_version_after` parameter, which takes a
+  duration and can be set on the mount and/or the metadata for a specific key
+  [[GH-7005](https://github.com/hashicorp/vault/pull/7005)]
+* storage/postgres: LIST now performs better on large datasets [[GH-6546](https://github.com/hashicorp/vault/pull/6546)]
+* storage/s3: A new `path` parameter allows selecting the path within a bucket
+  for Vault data [[GH-7157](https://github.com/hashicorp/vault/pull/7157)]
+* ui: KV v1 and v2 will now gracefully degrade, allowing a write-without-read
+  workflow in the UI [[GH-6570](https://github.com/hashicorp/vault/pull/6570)]
+* ui: Many visual improvements with the addition of Toolbars [[GH-6626](https://github.com/hashicorp/vault/pull/6626)], the restyling
+  of the Confirm Action component [[GH-6741](https://github.com/hashicorp/vault/pull/6741)], and using a new set of glyphs for our
+  Icon component [[GH-6736](https://github.com/hashicorp/vault/pull/6736)]
+* ui: Lazy loading parts of the application so that the total initial payload is
+  smaller [[GH-6718](https://github.com/hashicorp/vault/pull/6718)]
+* ui: Tabbing to auto-complete in filters will first complete a common prefix if there
+  is one [[GH-6759](https://github.com/hashicorp/vault/pull/6759)]
+* ui: Removing jQuery from the application makes the initial JS payload smaller [[GH-6768](https://github.com/hashicorp/vault/pull/6768)]
+
+BUG FIXES:
+
+* audit: Log requests and responses when an invalid wrapping token is provided
+  [[GH-6541](https://github.com/hashicorp/vault/pull/6541)]
+* audit: Fix bug preventing request counter queries from working with auditing
+  enabled [[GH-6767](https://github.com/hashicorp/vault/pull/6767)]
+* auth/aws: AWS Roles are now upgraded and saved to the latest version just
+  after the AWS credential plugin is mounted.
[[GH-7025](https://github.com/hashicorp/vault/pull/7025)] +* auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN + when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)] +* auth/aws: Fix an error complaining about a read-only view that could occur + during updating of a role when on a performance replication secondary + [[GH-6926](https://github.com/hashicorp/vault/pull/6926)] +* auth/jwt: Fix a regression introduced in 1.1.1 that disabled checking of client_id + for OIDC logins [[GH-54](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/54)] +* auth/jwt: Fix a panic during OIDC CLI logins that could occur if the Vault server + response is empty [[GH-55](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/55)] +* auth/jwt: Fix issue where OIDC logins might intermittently fail when using + performance standbys [[GH-61](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/61)] +* identity: Fix a case where modifying aliases of an entity could end up + moving the entity into the wrong namespace +* namespaces: Fix a behavior (currently only known to be benign) where we + wouldn't delete policies through the official functions before wiping the + namespaces on deletion +* secrets/database: Escape username/password before using in connection URL + [[GH-7089](https://github.com/hashicorp/vault/pull/7089)] +* secrets/pki: Forward revocation requests to active node when on a + performance standby [[GH-7173](https://github.com/hashicorp/vault/pull/7173)] +* ui: Fix timestamp on some transit keys [[GH-6827](https://github.com/hashicorp/vault/pull/6827)] +* ui: Show Entities and Groups in Side Navigation [[GH-7138](https://github.com/hashicorp/vault/pull/7138)] +* ui: Ensure dropdown updates selected item on HTTP Request Metrics page + +## 1.1.4/1.1.5 (July 25th/30th, 2019) + +NOTE: + +Although 1.1.4 was tagged, we realized very soon after the tag was publicly +pushed that an intended fix was accidentally left out. As a result, 1.1.4 was +not officially announced and 1.1.5 should be used as the release after 1.1.3. 
+
+IMPROVEMENTS:
+
+* identity: Allow a group alias' canonical ID to be modified
+* namespaces: Improve namespace deletion performance [[GH-6939](https://github.com/hashicorp/vault/pull/6939)]
+* namespaces: Namespaces can now be created and deleted from performance
+  replication secondaries
+
+BUG FIXES:
+
+* api: Add backwards compat support for API env vars [[GH-7135](https://github.com/hashicorp/vault/pull/7135)]
+* auth/aws: Fix a case where a panic could stem from a malformed assumed-role
+  ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)]
+* auth/ldap: Add `use_pre111_group_cn_behavior` flag to allow recovering from
+  a regression caused by a bug fix starting in 1.1.1 [[GH-7208](https://github.com/hashicorp/vault/pull/7208)]
+* auth/aws: Use a role cache to avoid separate locking paths [[GH-6926](https://github.com/hashicorp/vault/pull/6926)]
+* core: Fix a deadlock if a panic happens during request handling [[GH-6920](https://github.com/hashicorp/vault/pull/6920)]
+* core: Fix an issue that may cause key upgrades to not be cleaned up properly
+  [[GH-6949](https://github.com/hashicorp/vault/pull/6949)]
+* core: Don't shut down if key upgrades fail due to canceled context [[GH-7070](https://github.com/hashicorp/vault/pull/7070)]
+* core: Fix panic caused by handling requests while Vault is inactive
+* identity: Fix reading entities and groups that have spaces in their names
+  [[GH-7055](https://github.com/hashicorp/vault/pull/7055)]
+* identity: Ensure entity alias operations properly verify namespace [[GH-6886](https://github.com/hashicorp/vault/pull/6886)]
+* mfa: Fix a nil pointer panic that could occur if invalid Duo credentials
+  were supplied
+* replication: Forward step-down on perf standbys to match HA behavior
+* replication: Fix various read-only storage errors on performance standbys
+* replication: Stop forwarding before stopping replication to eliminate some
+  possible bad states
+* secrets/database: Allow Cassandra queries to be canceled [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
+* storage/consul: Fix a regression causing Vault to not connect to Consul over
+  unix sockets [[GH-6859](https://github.com/hashicorp/vault/pull/6859)]
+* ui: Fix saving of TTL and string array fields generated by Open API [[GH-7094](https://github.com/hashicorp/vault/pull/7094)]
would return no data [[GH-6675](https://github.com/hashicorp/vault/pull/6675)]
+* cli: Fix issue causing CLI list operations to not return the proper format when
+  there is an empty response [[GH-6776](https://github.com/hashicorp/vault/pull/6776)]
+* core: Correctly honor non-HMAC request keys when auditing requests [[GH-6653](https://github.com/hashicorp/vault/pull/6653)]
+* core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of
+  endpoints [[GH-6654](https://github.com/hashicorp/vault/pull/6654)]
+* core: Fix issue where some OpenAPI parameters were incorrectly listed as
+  being sent as a header [[GH-6679](https://github.com/hashicorp/vault/pull/6679)]
+* core: Fix issue that would allow duplicate mount names to be used [[GH-6771](https://github.com/hashicorp/vault/pull/6771)]
+* namespaces: Fix behavior when using `root` instead of `root/` as the
+  namespace header value
+* pki: fix a panic when a client submits a null value [[GH-5679](https://github.com/hashicorp/vault/pull/5679)]
+* replication: Properly update mount entry cache on a secondary to apply all
+  new values after a tune
+* replication: Properly close connection on bootstrap error
+* replication: Fix an issue causing startup problems if a namespace policy
+  wasn't replicated properly
+* replication: Fix longer-than-necessary WAL replay during an initial reindex
+* replication: Fix error during mount filter invalidation on DR secondary clusters
+* secrets/ad: Make time buffer configurable [AD-35]
+* secrets/gcp: Check for nil config when getting credentials [[GH-35](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/35)]
+* secrets/gcp: Fix error checking in some cases where the returned value could
+  be 403 instead of 404 [[GH-37](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/37)]
+* secrets/gcpkms: Disable key rotation when deleting a key [[GH-10](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/10)]
+* storage/consul: recognize `https://` address even if scheme not specified
+  [[GH-6602](https://github.com/hashicorp/vault/pull/6602)]
+* storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA)
+  could cause constant switching of the active node [[GH-6637](https://github.com/hashicorp/vault/pull/6637)]
+* storage/dynamodb: Eliminate a high-CPU condition that could occur if an
+  error was received from the DynamoDB API [[GH-6640](https://github.com/hashicorp/vault/pull/6640)]
+* storage/gcs: Correctly use configured chunk size values [[GH-6655](https://github.com/hashicorp/vault/pull/6655)]
+* storage/mssql: Use the correct database when pre-created schemas exist
+  [[GH-6356](https://github.com/hashicorp/vault/pull/6356)]
+* ui: Fix issue with select arrows on drop-down menus [[GH-6627](https://github.com/hashicorp/vault/pull/6627)]
+* ui: Fix an issue where sensitive input values weren't being saved to the
+  server [[GH-6586](https://github.com/hashicorp/vault/pull/6586)]
+* ui: Fix web CLI parsing when using quoted values [[GH-6755](https://github.com/hashicorp/vault/pull/6755)]
+* ui: Fix a namespace workflow mapping identities from external namespaces by
+  allowing arbitrary input in the search-select component [[GH-6728](https://github.com/hashicorp/vault/pull/6728)]
+
+## 1.1.2 (April 18th, 2019)
+
+This is a bug fix release containing the two items below. It is otherwise
+unchanged from 1.1.1.
+
+BUG FIXES:
+
+* auth/okta: Fix a potential dropped error [[GH-6592](https://github.com/hashicorp/vault/pull/6592)]
+* secrets/kv: Fix a regression on upgrade where a KVv2 mount could fail to be
+  mounted on unseal if it had previously been mounted but not written to
+  [[GH-31](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/31)]
+
+## 1.1.1 (April 11th, 2019)
+
+SECURITY:
+
+* Given: (a) performance replication is enabled; (b) performance standbys are
+  in use on the performance replication secondary cluster; and (c) mount
+  filters are in use, if a mount that was previously available to a secondary
+  is updated to be filtered out, although the data would be removed from the
+  secondary cluster, the in-memory cache of the data would not be purged on
+  the performance standby nodes. As a result, the previously-available data
+  could still be read from memory if it was ever read from disk, and if this
+  included mount configuration data this could result in token or lease
+  issuance. The issue is fixed in this release; in prior releases either an
+  active node changeover (such as a step-down) or a restart of the standby
+  nodes is sufficient to cause the performance standby nodes to clear their
+  cache. A CVE is in the process of being issued; the number is
+  CVE-2019-11075.
+* Roles in the JWT Auth backend using the OIDC login flow (i.e. role_type of
+  “oidc”) were not enforcing bound_cidrs restrictions, if any were configured
+  for the role. This issue did not affect roles of type “jwt”.
+
+CHANGES:
+
+* auth/jwt: Disallow logins of role_type "oidc" via the `/login` path [[GH-38](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/38)]
+* core/acl: New ordering defines which policy wins when there are multiple
+  inexact matches and at least one path contains `+`. `+*` is now illegal in
+  policy paths. The previous behavior simply selected any segment-wildcard
+  path that matched (see the sketch below). [[GH-6532](https://github.com/hashicorp/vault/pull/6532)]
+* replication: Due to technical limitations, mounting and unmounting was not
+  previously possible from a performance secondary. These limitations have been
+  resolved, and these operations may now be run from a performance secondary.
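+
+To illustrate the `+` segment wildcard these rules apply to (a sketch, not
+from the changelog; the policy name and path are hypothetical), a policy can
+be written through the Go client's `Sys().PutPolicy` helper:
+
+```go
+package main
+
+import (
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := vault.NewClient(vault.DefaultConfig()) // reads VAULT_ADDR/VAULT_TOKEN
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// `+` matches exactly one path segment; per the change above, `+*`
+	// is now rejected in policy paths.
+	rules := `
+path "secret/data/+/creds" {
+  capabilities = ["read"]
+}
+`
+	if err := client.Sys().PutPolicy("per-team-creds", rules); err != nil {
+		log.Fatal(err)
+	}
+}
+```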
+
+IMPROVEMENTS:
+
+* agent: Allow AppRole auto-auth without a secret-id [[GH-6324](https://github.com/hashicorp/vault/pull/6324)]
+* auth/gcp: Cache clients to improve performance and reduce open file usage
+* auth/jwt: Bound claims validation will now allow matching the received
+  claims against a list of expected values [[GH-41](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/41)]
+* secret/gcp: Cache clients to improve performance and reduce open file usage
+* replication: Mounting/unmounting/remounting/mount-tuning is now supported
+  from a performance secondary cluster
+* ui: Support for authentication via the RADIUS auth method [[GH-6488](https://github.com/hashicorp/vault/pull/6488)]
+* ui: Navigating away from secret list view will clear any page-specific
+  filter that was applied [[GH-6511](https://github.com/hashicorp/vault/pull/6511)]
+* ui: Improved the display of OIDC auth errors [[GH-6553](https://github.com/hashicorp/vault/pull/6553)]
+
+BUG FIXES:
+
+* agent: Allow auto-auth to be used with caching without having to define any
+  sinks [[GH-6468](https://github.com/hashicorp/vault/pull/6468)]
+* agent: Disallow some nonsensical config file combinations [[GH-6471](https://github.com/hashicorp/vault/pull/6471)]
+* auth/ldap: Fix CN check not working if CN was not all in uppercase [[GH-6518](https://github.com/hashicorp/vault/pull/6518)]
+* auth/jwt: The CLI helper for OIDC logins will now open the browser to the correct
+  URL when running on Windows [[GH-37](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/37)]
+* auth/jwt: Fix OIDC login issue where configured TLS certs weren't being used [[GH-40](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/40)]
+* auth/jwt: Fix an issue where the `oidc_scopes` parameter was not being included in
+  the response to a role read request [[GH-35](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/35)]
+* core: Fix seal migration case when migrating to Shamir and a seal block
+  wasn't explicitly specified [[GH-6455](https://github.com/hashicorp/vault/pull/6455)]
+* core: Fix unwrapping when using namespaced wrapping tokens [[GH-6536](https://github.com/hashicorp/vault/pull/6536)]
+* core: Fix incorrect representation of required properties in OpenAPI output
+  [[GH-6490](https://github.com/hashicorp/vault/pull/6490)]
+* core: Fix deadlock that could happen when using the UI [[GH-6560](https://github.com/hashicorp/vault/pull/6560)]
+* identity: Fix updating groups removing existing members [[GH-6527](https://github.com/hashicorp/vault/pull/6527)]
+* identity: Properly invalidate group alias in performance secondary [[GH-6564](https://github.com/hashicorp/vault/pull/6564)]
+* identity: Use namespace context when loading entities and groups to ensure
+  merging of duplicate entries works properly [[GH-6563](https://github.com/hashicorp/vault/pull/6563)]
+* replication: Fix performance standby election failure [[GH-6561](https://github.com/hashicorp/vault/pull/6561)]
+* replication: Fix mount filter invalidation on performance standby nodes
+* replication: Fix license reloading on performance standby nodes
+* replication: Fix handling of control groups on performance standby nodes
+* replication: Fix some forwarding scenarios with request bodies using
+  performance standby nodes [[GH-6538](https://github.com/hashicorp/vault/pull/6538)]
+* secret/gcp: Fix roleset binding when using JSON [[GH-27](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/27)]
+* secret/pki: Use `uri_sans` param when not using CSR
parameters [[GH-6505](https://github.com/hashicorp/vault/pull/6505)]
+* storage/dynamodb: Fix a race condition possible in HA configurations that could
+  leave the cluster without a leader [[GH-6512](https://github.com/hashicorp/vault/pull/6512)]
+* ui: Fix an issue where in production builds OpenAPI model generation was
+  failing, causing any form using it to render labels with missing fields [[GH-6474](https://github.com/hashicorp/vault/pull/6474)]
+* ui: Fix nav-hiding issue when moving between namespaces [[GH-6473](https://github.com/hashicorp/vault/pull/6473)]
+* ui: Secrets will always show in the nav regardless of access to cubbyhole [[GH-6477](https://github.com/hashicorp/vault/pull/6477)]
+* ui: fix SSH OTP generation [[GH-6540](https://github.com/hashicorp/vault/pull/6540)]
+* ui: add polyfill to load UI in IE11 [[GH-6567](https://github.com/hashicorp/vault/pull/6567)]
+* ui: Fix issue where some elements would fail to work properly if using ACLs
+  with segment-wildcard paths (`/+/` segments) [[GH-6525](https://github.com/hashicorp/vault/pull/6525)]
+
+## 1.1.0 (March 18th, 2019)
+
+CHANGES:
+
+* auth/jwt: The `groups_claim_delimiter_pattern` field has been removed. If the
+  groups claim is not at the top level, it can now be specified as a
+  [JSONPointer](https://tools.ietf.org/html/rfc6901).
+* auth/jwt: Roles now have a "role type" parameter with a default type of
+  "oidc". To configure new JWT roles, a role type of "jwt" must be explicitly
+  specified.
+* cli: CLI commands deprecated in 0.9.2 are now removed. Please see the CLI
+  help/warning output in previous versions of Vault for updated commands.
+* core: Vault no longer automatically mounts a K/V backend at the "secret/"
+  path when initializing Vault
+* core: Vault's cluster port will now be open at all times on HA standby nodes
+* plugins: Vault no longer supports running netRPC plugins. These were
+  deprecated in favor of gRPC based plugins and any plugin built since 0.9.4
+  defaults to gRPC. Older plugins may need to be recompiled against the latest
+  Vault dependencies.
+
+FEATURES:
+
+* **Vault Agent Caching**: Vault Agent can now be configured to act as a
+  caching proxy to Vault. Clients can send requests to Vault Agent and the
+  request will be proxied to the Vault server and cached locally in Agent.
+  Currently Agent will cache generated leases and tokens and keep them
+  renewed. The proxy can also use the Auto Auth feature so clients do not need
+  to authenticate to Vault, but rather can make requests to Agent and have
+  Agent fully manage token lifecycle.
+* **OIDC Redirect Flow Support**: The JWT auth backend now supports OIDC
+  roles. These allow authentication via an OIDC-compliant provider via the
+  user's browser. The login may be initiated from the Vault UI or through
+  the `vault login` command.
+* **ACL Path Wildcard**: ACL paths can now use the `+` character to enable
+  wildcard matching for a single directory in the path definition.
+* **Transit Auto Unseal**: Vault can now be configured to use the Transit
+  Secret Engine in another Vault cluster as an auto unseal provider.
+
+IMPROVEMENTS:
+
+* auth/jwt: A default role can be set. It will be used during JWT/OIDC logins if
+  a role is not specified.
+* auth/jwt: Arbitrary claims data can now be copied into token & alias metadata.
+* auth/jwt: An arbitrary set of bound claims can now be configured for a role.
+* auth/jwt: The name "oidc" has been added as an alias for the jwt backend. Either
+  name may be specified in the `auth enable` command.
+
+* command/server: A warning will be printed when 'tls_cipher_suites' includes a
+  blacklisted cipher suite or all cipher suites are blacklisted by the HTTP/2
+  specification [[GH-6300](https://github.com/hashicorp/vault/pull/6300)]
+* core/metrics: Prometheus pull support using a new sys/metrics endpoint. [[GH-5308](https://github.com/hashicorp/vault/pull/5308)]
+* core: On non-Windows platforms a SIGUSR2 will make the server log a dump of
+  all running goroutines' stack traces for debugging purposes [[GH-6240](https://github.com/hashicorp/vault/pull/6240)]
+* replication: The initial replication indexing process on newly initialized or upgraded
+  clusters now runs asynchronously
+* sentinel: Add token namespace id and path, available in rules as
+  `token.namespace.id` and `token.namespace.path`
+* ui: The UI is now leveraging OpenAPI definitions to pull in fields for various forms.
+  This means it will not be necessary to add fields on the Go and JS sides in the future.
+  [[GH-6209](https://github.com/hashicorp/vault/pull/6209)]
+
+BUG FIXES:
+
+* auth/jwt: Apply `bound_claims` validation across all login paths
+* auth/jwt: Update `bound_audiences` validation during non-OIDC logins to accept
+  any matched audience, as documented and handled in OIDC logins [[GH-30](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/30)]
+* auth/token: Fix issue where empty values for token role update call were
+  ignored [[GH-6314](https://github.com/hashicorp/vault/pull/6314)]
+* core: The `operator migrate` command will no longer hang on empty key names
+  [[GH-6371](https://github.com/hashicorp/vault/pull/6371)]
+* identity: Fix a panic at login when an external group has a nil alias [[GH-6230](https://github.com/hashicorp/vault/pull/6230)]
+* namespaces: Clear out identity store items upon namespace deletion
+* replication/perfstandby: Fixed a bug causing performance standbys to wait
+  longer than necessary after forwarding a write to the active node
+* replication/mountfilter: Fix a deadlock that could occur when mount filters
+  were updated [[GH-6426](https://github.com/hashicorp/vault/pull/6426)]
+* secret/kv: Fix issue where a v1→v2 upgrade could run on a performance
+  standby when using a local mount
+* secret/ssh: Fix for a bug where attempting to delete the last ssh role
+  in the zeroaddress configuration could fail [[GH-6390](https://github.com/hashicorp/vault/pull/6390)]
+* secret/totp: Uppercase provided keys so they don't fail base32 validation
+  [[GH-6400](https://github.com/hashicorp/vault/pull/6400)]
+* secret/transit: Multiple HMAC, Sign or Verify operations can now be
+  performed with one API call using the new `batch_input` parameter [[GH-5875](https://github.com/hashicorp/vault/pull/5875)]
+* sys: `sys/internal/ui/mounts` will no longer return secret or auth mounts
+  that have been filtered. Similarly, `sys/internal/ui/mount/:path` will
+  return an error response if a filtered mount path is requested.
[[GH-6412](https://github.com/hashicorp/vault/pull/6412)]
+* ui: Fix for a bug where you couldn't access the data tab after clicking on
+  wrap details on the unwrap page [[GH-6404](https://github.com/hashicorp/vault/pull/6404)]
+* ui: Fix an issue where the policies tab was erroneously hidden [[GH-6301](https://github.com/hashicorp/vault/pull/6301)]
+* ui: Fix encoding issues with kv interfaces [[GH-6294](https://github.com/hashicorp/vault/pull/6294)]
+
+## 1.0.3.1 (March 14th, 2019) (Enterprise Only)
+
+SECURITY:
+
+* A regression was fixed in replication mount filter code introduced in Vault
+  1.0 that caused the underlying filtered data to be replicated to
+  secondaries. This data was not accessible to users via Vault's API, but it
+  could be read via a combination of privileged configuration file changes and
+  Vault commands. Upgrading to this version or 1.1 will fix this issue and cause the
+  replicated data to be deleted from filtered secondaries. More information
+  was sent to customer contacts on file.
+
+## 1.0.3 (February 12th, 2019)
+
+CHANGES:
+
+* New AWS authentication plugin mounts will default to using the generated
+  role ID as the Identity alias name. This applies to both EC2 and IAM auth.
+  Existing mounts that explicitly set this value will not be affected but
+  mounts that specified no preference will switch over on upgrade.
+* The default policy now allows a token to look up its associated identity
+  entity either by name or by id [[GH-6105](https://github.com/hashicorp/vault/pull/6105)]
+* The Vault UI's navigation and onboarding wizard now only displays items that
+  are permitted in a user's policy [[GH-5980](https://github.com/hashicorp/vault/pull/5980), [GH-6094](https://github.com/hashicorp/vault/pull/6094)]
+* An issue was fixed that caused recovery keys to not work on secondary
+  clusters when using a different unseal mechanism/key than the primary. This
+  would be hit if the cluster was rekeyed or initialized after 1.0. We recommend
+  rekeying the recovery keys on the primary cluster if you meet the above
+  requirements.
+
+FEATURES:
+
+* **cURL Command Output**: CLI commands can now use the `-output-curl-string`
+  flag to print out an equivalent cURL command.
+* **Response Headers From Plugins**: Plugins can now send back headers that
+  will be included in the response to a client. The set of allowed headers can
+  be managed by the operator.
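+
+A minimal sketch of the operator side of that last feature, assuming
+`allowed_response_headers` is the mount tune parameter that manages the list
+(and assuming a KV mount at `secret/` and a hypothetical `X-Custom-Header`):
+
+```go
+package main
+
+import (
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := vault.NewClient(vault.DefaultConfig()) // reads VAULT_ADDR/VAULT_TOKEN
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Tune the mount so a plugin-supplied header is allowed through to
+	// clients; both the header name and mount path are hypothetical.
+	_, err = client.Logical().Write("sys/mounts/secret/tune", map[string]interface{}{
+		"allowed_response_headers": []string{"X-Custom-Header"},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```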
+
+IMPROVEMENTS:
+
+* auth/aws: AWS EC2 authentication can optionally create entity aliases by role ID [[GH-6133](https://github.com/hashicorp/vault/pull/6133)]
+* auth/jwt: The supported set of signing algorithms is now configurable [JWT plugin [GH-16](https://github.com/hashicorp/vault/pull/16)]
+* core: When starting from an uninitialized state, HA nodes will now attempt to auto-unseal using a configured auto-unseal mechanism after the active node initializes Vault [[GH-6039](https://github.com/hashicorp/vault/pull/6039)]
+* secret/database: Add socket keepalive option for Cassandra [[GH-6201](https://github.com/hashicorp/vault/pull/6201)]
+* secret/ssh: Add signed key constraints, allowing enforcement of key types and minimum key sizes [[GH-6030](https://github.com/hashicorp/vault/pull/6030)]
+* secret/transit: ECDSA signatures can now be marshaled in JWS-compatible fashion [[GH-6077](https://github.com/hashicorp/vault/pull/6077)]
+* storage/etcd: Support SRV service names [[GH-6087](https://github.com/hashicorp/vault/pull/6087)]
+* storage/aws: Support specifying a KMS key ID for server-side encryption [[GH-5996](https://github.com/hashicorp/vault/pull/5996)]
+
+BUG FIXES:
+
+* core: Fix a rare case where a standby whose connection to the active node is entirely torn down, and which then reconnects to the same active node, may not successfully resume operation [[GH-6167](https://github.com/hashicorp/vault/pull/6167)]
+* cors: Don't duplicate headers when they're written [[GH-6207](https://github.com/hashicorp/vault/pull/6207)]
+* identity: Persist merged entities only on the primary [[GH-6075](https://github.com/hashicorp/vault/pull/6075)]
+* replication: Fix a potential race when a token is created and then used with a performance standby very quickly, before an associated entity has been replicated. If the entity is not found in this scenario, the request will forward to the active node.
+* replication: Fix issue where recovery keys would not work on secondary clusters if using a different unseal mechanism than the primary.
+* replication: Fix a "failed to register lease" error when using performance standbys
+* storage/postgresql: The `Get` method will now return an Entry object with the `Key` member correctly populated with the full path that was requested instead of just the last path element [[GH-6044](https://github.com/hashicorp/vault/pull/6044)]
+
+## 1.0.2 (January 15th, 2019)
+
+SECURITY:
+
+* When creating a child token from a parent with `bound_cidrs`, the list of CIDRs would not be propagated to the child token, allowing the child token to be used from any address.
+
+CHANGES:
+
+* secret/aws: Role now returns `credential_type` instead of `credential_types` to match role input. If a legacy role can supply more than one credential type, the types will be concatenated with a `,`.
+* physical/dynamodb, autoseal/aws: Instead of Vault performing its own environment variable handling and overriding static (config file) values if found, we use the default AWS SDK env handling behavior, which also looks for deprecated values. If you were previously providing both config values and environment values, please ensure the config values are unset if you want to use environment values.
+* Namespaces (Enterprise): Providing "root" as the header value for `X-Vault-Namespace` will perform the request on the root namespace. This is equivalent to providing an empty value. Creating a namespace called "root" in the root namespace is disallowed.
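+
+To illustrate the "root" namespace header behavior above (Enterprise; the token and address are placeholders), these two requests are equivalent and both target the root namespace:
+
+```
+$ curl -H "X-Vault-Token: $VAULT_TOKEN" -H "X-Vault-Namespace: root" "$VAULT_ADDR/v1/sys/mounts"
+$ curl -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/sys/mounts"
+```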
+
+FEATURES:
+
+* **InfluxDB Database Plugin**: Use Vault to dynamically create and manage InfluxDB users
+
+IMPROVEMENTS:
+
+* auth/aws: AWS EC2 authentication can optionally create entity aliases by image ID [[GH-5846](https://github.com/hashicorp/vault/pull/5846)]
+* autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal [[GH-5999](https://github.com/hashicorp/vault/pull/5999)]
+* physical/foundationdb: TLS support added. [[GH-5800](https://github.com/hashicorp/vault/pull/5800)]
+
+BUG FIXES:
+
+* api: Fix a couple of places where we were using the `LIST` HTTP verb (necessary to get the right method into the wrapping lookup function) and not then modifying it to a `GET`. Although `LIST` is officially the verb Vault uses for listing and it's fully legal to use custom verbs, many WAFs and API gateways choke on anything outside of RFC-standardized verbs, so we fall back to `GET` (see the `?list=true` example below) [[GH-6026](https://github.com/hashicorp/vault/pull/6026)]
+* autoseal/aws: Fix reading session tokens when AWS access key/secret key are also provided [[GH-5965](https://github.com/hashicorp/vault/pull/5965)]
+* command/operator/rekey: Fix help output showing `-delete-backup` when it should show `-backup-delete` [[GH-5981](https://github.com/hashicorp/vault/pull/5981)]
+* core: Fix bound_cidrs not being propagated to child tokens
+* replication: Correctly forward identity entity creation that originates from performance standby nodes (Enterprise)
+* secret/aws: Make input `credential_type` match the output type (string, not array) [[GH-5972](https://github.com/hashicorp/vault/pull/5972)]
+* secret/cubbyhole: Properly clean up cubbyhole after token revocation [[GH-6006](https://github.com/hashicorp/vault/pull/6006)]
+* secret/pki: Fix reading certificates on Windows with the file storage backend [[GH-6013](https://github.com/hashicorp/vault/pull/6013)]
+* ui (enterprise): properly display perf-standby count on the license page [[GH-5971](https://github.com/hashicorp/vault/pull/5971)]
+* ui: fix disappearing nested secrets and go to the nearest parent when deleting a secret [[GH-5976](https://github.com/hashicorp/vault/pull/5976)]
+* ui: fix error where deleting an item via the context menu would fail if the item name contained dots [[GH-6018](https://github.com/hashicorp/vault/pull/6018)]
+* ui: allow saving of kv secret after an errored save attempt [[GH-6022](https://github.com/hashicorp/vault/pull/6022)]
+* ui: fix display of kv-v1 secret containing a key named "keys" [[GH-6023](https://github.com/hashicorp/vault/pull/6023)]
+
+## 1.0.1 (December 14th, 2018)
+
+SECURITY:
+
+* Update version of Go to 1.11.3 to fix a Go bug corresponding to CVE-2018-16875
+* Database user revocation: If a client has configured custom revocation statements for a role with a value of `""`, that statement would be executed verbatim, resulting in a lack of actual revocation but success for the operation. Vault will now strip empty statements from any provided set; as a result, if an empty statement is provided, it will behave as if no statement was provided, falling back to the default revocation statement.
+
+CHANGES:
+
+* secret/database: On role read, empty statements will be returned as empty slices instead of potentially being returned as JSON null values. This makes it more in line with other parts of Vault and makes it easier for statically typed languages to interpret the values.
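+
+Relating to the `LIST`-to-`GET` fallback in the 1.0.2 bug fixes above: a list request can be expressed as a plain `GET` with the `list=true` query parameter, which is what the clients fall back to. A sketch, assuming a KV v1 mount at `secret/` (token and address are placeholders):
+
+```
+# These are equivalent; the second form avoids the non-standard LIST verb.
+$ curl -H "X-Vault-Token: $VAULT_TOKEN" -X LIST "$VAULT_ADDR/v1/secret/"
+$ curl -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/secret/?list=true"
+```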
+
+IMPROVEMENTS:
+
+* cli: Strip iTerm extra characters from password manager input [[GH-5837](https://github.com/hashicorp/vault/pull/5837)]
+* command/server: The default kv engine can now be set to v1 in -dev mode via -dev-kv-v1 [[GH-5919](https://github.com/hashicorp/vault/pull/5919)]
+* core: Add operationId field to OpenAPI output [[GH-5876](https://github.com/hashicorp/vault/pull/5876)]
+* ui: Added ability to search for Group and Policy IDs when creating Groups and Entities instead of typing them in manually
+
+BUG FIXES:
+
+* auth/azure: Cache azure authorizer [15]
+* auth/gcp: Remove explicit project for service account in GCE authorizer [[GH-58](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/58)]
+* cli: Show correct stored keys/threshold for autoseals [[GH-5910](https://github.com/hashicorp/vault/pull/5910)]
+* cli: Fix backwards compatibility fallback when listing plugins [[GH-5913](https://github.com/hashicorp/vault/pull/5913)]
+* core: Fix upgrades when the seal config had been created on early versions of Vault [[GH-5956](https://github.com/hashicorp/vault/pull/5956)]
+* namespaces: Correctly reload the proper mount when tuning or reloading the mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)]
+* secret/azure: Cache azure authorizer [19]
+* secret/database: Strip empty statements on user input [[GH-5955](https://github.com/hashicorp/vault/pull/5955)]
+* secret/gcpkms: Add path for retrieving the public key [[GH-5](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/5)]
+* secret/pki: Fix panic that could occur during tidy operation when malformed data was found [[GH-5931](https://github.com/hashicorp/vault/pull/5931)]
+* secret/pki: Strip empty line in ca_chain output [[GH-5779](https://github.com/hashicorp/vault/pull/5779)]
+* ui: Fixed a bug where the web CLI was not usable via the `fullscreen` command [[GH-5909](https://github.com/hashicorp/vault/pull/5909)]
+* ui: Fix a bug where you couldn't write a jwt auth method config [[GH-5936](https://github.com/hashicorp/vault/pull/5936)]
+
+## 1.0.0 (December 3rd, 2018)
+
+SECURITY:
+
+* When debugging a customer incident we discovered that in the case of malformed data from an autoseal mechanism, Vault's master key could be logged in Vault's server log. For this to happen, the data would need to be modified by the autoseal mechanism after being submitted to it by Vault but prior to encryption, or after decryption, prior to it being returned to Vault. To put it another way, it requires the data that Vault submits for encryption to not match the data returned after decryption. It is not sufficient for the autoseal mechanism to return an error, and it cannot be triggered by an outside attacker changing the on-disk ciphertext, as all autoseal mechanisms use authenticated encryption. We do not believe that this is generally a cause for concern, since it involves the autoseal mechanism returning bad data to Vault with no error; in a working Vault configuration this code path should never be hit, and if this issue is hit, Vault will not be unsealing properly anyway, so it will be obvious what is happening and an immediate rekey of the master key can be performed after service is restored. We have filed for a CVE (CVE-2018-19786), and a CVSS V3 score of 5.2 has been assigned.
+
+CHANGES:
+
+* Tokens are now prefixed by a designation to indicate what type of token they are. Service tokens start with `s.` and batch tokens start with `b.`.
+  Existing tokens will still work (they are all of service type and will be considered as such). Prefixing allows us to be more efficient when consuming a token, which keeps the critical path of requests faster.
+* Paths within `auth/token` that allow specifying a token or accessor in the URL have been removed. These have been deprecated since March 2016 and undocumented, but were retained for backwards compatibility. They shouldn't be used due to the possibility of those paths being logged, so at this point they are simply being removed.
+* Vault will no longer accept updates when the storage key has invalid UTF-8 character encoding [[GH-5819](https://github.com/hashicorp/vault/pull/5819)]
+* Mount/auth tuning of the `options` map on backends will now upsert any provided values and keep any of the existing values in place if not provided. The options map itself cannot be unset once it's set, but the keypairs within the map can be unset if an empty value is provided, with the exception of the `version` keypair, which is handled differently for KVv2 purposes.
+* Agent no longer automatically reauthenticates when new credentials are detected. It's not strictly necessary and in some cases was causing reauthentication much more often than intended.
+* HSM Regenerate Key Support Removed: Vault no longer supports destroying and regenerating encryption keys on an HSM; it only supports creating them. Although this has never been a source of a customer incident, it is simply a code path that is too easy to activate accidentally, especially by mistyping `regenerate_key` instead of `generate_key`.
+* Barrier Config Upgrade (Enterprise): When upgrading from Vault 0.8.x, the seal type in the barrier config storage entry will be upgraded from "hsm-auto" to "awskms" or "pkcs11" upon unseal if using AWSKMS or HSM seals. If performing seal migration, the barrier config should first be upgraded prior to starting migration.
+* Go API client uses pooled HTTP client: The Go API client now uses a connection-pooling HTTP client by default. For CLI operations this makes no difference but it should provide significant performance benefits for those writing custom clients using the Go API library. As before, this can be changed to any custom HTTP client by the caller.
+* Builtin Secret Engines and Auth Methods are integrated deeper into the plugin system. The plugin catalog can now override builtin plugins with custom versions of the same name. Additionally, the plugin system now requires a plugin `type` field when configuring plugins; this can be "auth", "database", or "secret".
+
+FEATURES:
+
+* **Auto-Unseal in Open Source**: Cloud-based auto-unseal has been migrated from Enterprise to Open Source. We've created a migrator to allow migrating between Shamir seals and auto unseal methods.
+* **Batch Tokens**: Batch tokens trade off some features of service tokens for no storage overhead, and in most cases can be used across performance replication clusters.
+* **Replication Speed Improvements**: We've worked hard to speed up a lot of operations when using Vault Enterprise Replication.
+* **GCP KMS Secrets Engine**: This new secrets engine provides a Transit-like pattern to keys stored within GCP Cloud KMS.
+* **AppRole support in Vault Agent Auto-Auth**: You can now use AppRole credentials when having the Agent automatically authenticate to Vault
+* **OpenAPI Support**: Descriptions of mounted backends can be served directly from Vault
+* **Kubernetes Projected Service Account Tokens**: Projected Service Account Tokens are now supported in Kubernetes auth
+* **Response Wrapping in UI**: Added ability to wrap secrets and easily copy the wrap token or secret JSON in the UI
+
+IMPROVEMENTS:
+
+* agent: Support for configuring the location of the kubernetes service account [[GH-5725](https://github.com/hashicorp/vault/pull/5725)]
+* auth/token: New tokens are indexed in storage via HMAC-SHA256 instead of SHA1
+* secret/totp: Allow @ character to be part of key name [[GH-5652](https://github.com/hashicorp/vault/pull/5652)]
+* secret/consul: Add support for new policy based tokens added in Consul 1.4 [[GH-5586](https://github.com/hashicorp/vault/pull/5586)]
+* ui: Improve the token auto-renew warning, and automatically begin renewal when a user becomes active again [[GH-5662](https://github.com/hashicorp/vault/pull/5662)]
+* ui: The unbundled UI page now has some styling [[GH-5665](https://github.com/hashicorp/vault/pull/5665)]
+* ui: Improved banner and popup design [[GH-5672](https://github.com/hashicorp/vault/pull/5672)]
+* ui: Added token type to auth method mount config [[GH-5723](https://github.com/hashicorp/vault/pull/5723)]
+* ui: Display additional wrap info when unwrapping. [[GH-5664](https://github.com/hashicorp/vault/pull/5664)]
+* ui: Empty states have updated styling and link to relevant actions and documentation [[GH-5758](https://github.com/hashicorp/vault/pull/5758)]
+* ui: Allow editing of KV V2 data when a token doesn't have capabilities to read secret metadata [[GH-5879](https://github.com/hashicorp/vault/pull/5879)]
+
+BUG FIXES:
+
+* agent: Fix auth when multiple redirects occur [[GH-5814](https://github.com/hashicorp/vault/pull/5814)]
+* cli: Restore the `-policy-override` flag [[GH-5826](https://github.com/hashicorp/vault/pull/5826)]
+* core: Fix rekey progress reset which did not happen under certain circumstances.
[[GH-5743](https://github.com/hashicorp/vault/pull/5743)] +* core: Migration from autounseal to shamir will clean up old keys [[GH-5671](https://github.com/hashicorp/vault/pull/5671)] +* identity: Update group memberships when entity is deleted [[GH-5786](https://github.com/hashicorp/vault/pull/5786)] +* replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] +* replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] +* secrets/azure: Fix valid roles being rejected for duplicate ids despite + having distinct scopes + [[GH-16](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/16)] +* storage/gcs: Send md5 of values to GCS to avoid potential corruption + [[GH-5804](https://github.com/hashicorp/vault/pull/5804)] +* secrets/kv: Fix issue where storage version would get incorrectly downgraded + [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] +* secrets/kv: Disallow empty paths on a `kv put` while accepting empty paths + for all other operations for backwards compatibility + [[GH-19](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/19)] +* ui: Allow for secret creation in kv v2 when cas_required=true [[GH-5823](https://github.com/hashicorp/vault/pull/5823)] +* ui: Fix dr secondary operation token generation via the ui [[GH-5818](https://github.com/hashicorp/vault/pull/5818)] +* ui: Fix the PKI context menu so that items load [[GH-5824](https://github.com/hashicorp/vault/pull/5824)] +* ui: Update DR Secondary Token generation command [[GH-5857](https://github.com/hashicorp/vault/pull/5857)] +* ui: Fix pagination bug where controls would be rendered once for each + item when viewing policies [[GH-5866](https://github.com/hashicorp/vault/pull/5866)] +* ui: Fix bug where `sys/leases/revoke` required 'sudo' capability to show + the revoke button in the UI [[GH-5647](https://github.com/hashicorp/vault/pull/5647)] +* ui: Fix issue where certain pages wouldn't render in a namespace [[GH-5692](https://github.com/hashicorp/vault/pull/5692)] diff --git a/CHANGELOG-v0.md b/CHANGELOG-v0.md new file mode 100644 index 000000000000..4a3f1931395b --- /dev/null +++ b/CHANGELOG-v0.md @@ -0,0 +1,3494 @@ +## 0.11.6 (December 14th, 2018) + +This release contains the three security fixes from 1.0.0 and 1.0.1 and the +following bug fixes from 1.0.0/1.0.1: + + * namespaces: Correctly reload the proper mount when tuning or reloading the + mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] + * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] + * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] + * secrets/kv: Fix issue where storage version would get incorrectly downgraded + [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] + +It is otherwise identical to 0.11.5. 
+ +## 0.11.5 (November 13th, 2018) + +BUG FIXES: + + * agent: Fix issue when specifying two file sinks [[GH-5610](https://github.com/hashicorp/vault/pull/5610)] + * auth/userpass: Fix minor timing issue that could leak the presence of a + username [[GH-5614](https://github.com/hashicorp/vault/pull/5614)] + * autounseal/alicloud: Fix issue interacting with the API (Enterprise) + * autounseal/azure: Fix key version tracking (Enterprise) + * cli: Fix panic that could occur if parameters were not provided [[GH-5603](https://github.com/hashicorp/vault/pull/5603)] + * core: Fix buggy behavior if trying to remount into a namespace + * identity: Fix duplication of entity alias entity during alias transfer + between entities [[GH-5733](https://github.com/hashicorp/vault/pull/5733)] + * namespaces: Fix tuning of auth mounts in a namespace + * ui: Fix bug where editing secrets as JSON doesn't save properly [[GH-5660](https://github.com/hashicorp/vault/pull/5660)] + * ui: Fix issue where IE 11 didn't render the UI and also had a broken form + when trying to use tool/hash [[GH-5714](https://github.com/hashicorp/vault/pull/5714)] + +## 0.11.4 (October 23rd, 2018) + +CHANGES: + + * core: HA lock file is no longer copied during `operator migrate` [[GH-5503](https://github.com/hashicorp/vault/pull/5503)]. + We've categorized this as a change, but generally this can be considered + just a bug fix, and no action is needed. + +FEATURES: + + * **Transit Key Trimming**: Keys in transit secret engine can now be trimmed to + remove older unused key versions + * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy + individual secret versions in the UI + * **Azure Existing Service Principal Support**: Credentials can now be generated + against an existing service principal + +IMPROVEMENTS: + + * core: Add last WAL in leader/health output for easier debugging [[GH-5523](https://github.com/hashicorp/vault/pull/5523)] + * identity: Identity names will now be handled case insensitively by default. 
+ This includes names of entities, aliases and groups [[GH-5404](https://github.com/hashicorp/vault/pull/5404)] + * secrets/aws: Added role-option max_sts_ttl to cap TTL for AWS STS + credentials [[GH-5500](https://github.com/hashicorp/vault/pull/5500)] + * secret/database: Allow Cassandra user to be non-superuser so long as it has + role creation permissions [[GH-5402](https://github.com/hashicorp/vault/pull/5402)] + * secret/radius: Allow setting the NAS Identifier value in the generated + packet [[GH-5465](https://github.com/hashicorp/vault/pull/5465)] + * secret/ssh: Allow usage of JSON arrays when setting zero addresses [[GH-5528](https://github.com/hashicorp/vault/pull/5528)] + * secret/transit: Allow trimming unused keys [[GH-5388](https://github.com/hashicorp/vault/pull/5388)] + * ui: Support KVv2 [[GH-5547](https://github.com/hashicorp/vault/pull/5547)], [[GH-5563](https://github.com/hashicorp/vault/pull/5563)] + * ui: Allow viewing and updating Vault license via the UI + * ui: Onboarding will now display your progress through the chosen tutorials + * ui: Dynamic secret backends obfuscate sensitive data by default and + visibility is toggleable + +BUG FIXES: + + * agent: Fix potential hang during agent shutdown [[GH-5026](https://github.com/hashicorp/vault/pull/5026)] + * auth/ldap: Fix listing of users/groups that contain slashes [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] + * core: Fix memory leak during some expiration calls [[GH-5505](https://github.com/hashicorp/vault/pull/5505)] + * core: Fix generate-root operations requiring empty `otp` to be provided + instead of an empty body [[GH-5495](https://github.com/hashicorp/vault/pull/5495)] + * identity: Remove lookup check during alias removal from entity [[GH-5524](https://github.com/hashicorp/vault/pull/5524)] + * secret/pki: Fix TTL/MaxTTL check when using `sign-verbatim` [[GH-5549](https://github.com/hashicorp/vault/pull/5549)] + * secret/pki: Fix regression in 0.11.2+ causing the NotBefore value of + generated certificates to be set to the Unix epoch if the role value was not + set, instead of using the default of 30 seconds [[GH-5481](https://github.com/hashicorp/vault/pull/5481)] + * storage/mysql: Use `varbinary` instead of `varchar` when creating HA tables + [[GH-5529](https://github.com/hashicorp/vault/pull/5529)] + +## 0.11.3 (October 8th, 2018) + +SECURITY: + + * Revocation: A regression in 0.11.2 (OSS) and 0.11.0 (Enterprise) caused + lease IDs containing periods (`.`) to not be revoked properly. Upon startup + when revocation is tried again these should now revoke successfully. + +IMPROVEMENTS: + + * auth/ldap: Listing of users and groups return absolute paths [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] + * secret/pki: OID SANs can now specify `*` to allow any value [[GH-5459](https://github.com/hashicorp/vault/pull/5459)] + +BUG FIXES: + + * auth/ldap: Fix panic if specific values were given to be escaped [[GH-5471](https://github.com/hashicorp/vault/pull/5471)] + * cli/auth: Fix panic if `vault auth` was given no parameters [[GH-5473](https://github.com/hashicorp/vault/pull/5473)] + * secret/database/mongodb: Fix panic that could occur at high load [[GH-5463](https://github.com/hashicorp/vault/pull/5463)] + * secret/pki: Fix CA generation not allowing OID SANs [[GH-5459](https://github.com/hashicorp/vault/pull/5459)] + +## 0.11.2 (October 2nd, 2018) + +CHANGES: + + * `sys/seal-status` now includes an `initialized` boolean in the output. 
+   If Vault is not initialized, it will return a `200` with this value set to `false` instead of a `400`.
+ * `passthrough_request_headers` will now deny certain headers from being provided to backends based on a global denylist.
+ * Token Format: Tokens are now represented as a base62 value; tokens in namespaces will have the namespace identifier appended. (This appeared in Enterprise in 0.11.0, but is only in OSS in 0.11.2.)
+
+FEATURES:
+
+ * **AWS Secret Engine Root Credential Rotation**: The credential used by the AWS secret engine can now be rotated, to ensure that only Vault knows the credentials it is using [[GH-5140](https://github.com/hashicorp/vault/pull/5140)]
+ * **Storage Backend Migrator**: A new `operator migrate` command allows offline migration of data between two storage backends
+ * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS can now be used as a seal for Auto Unseal and Seal Wrapping
+
+BUG FIXES:
+
+ * auth/okta: Fix reading deprecated `token` parameter if a token was previously set in the configuration [[GH-5409](https://github.com/hashicorp/vault/pull/5409)]
+ * core: Re-add deprecated capabilities information for now [[GH-5360](https://github.com/hashicorp/vault/pull/5360)]
+ * core: Fix handling of cyclic token relationships [[GH-4803](https://github.com/hashicorp/vault/pull/4803)]
+ * storage/mysql: Fix locking on MariaDB [[GH-5343](https://github.com/hashicorp/vault/pull/5343)]
+ * replication: Fix DR API when using a token [[GH-5398](https://github.com/hashicorp/vault/pull/5398)]
+ * identity: Ensure old group alias is removed when a new one is written [[GH-5350](https://github.com/hashicorp/vault/pull/5350)]
+ * storage/alicloud: Don't call uname on package init [[GH-5358](https://github.com/hashicorp/vault/pull/5358)]
+ * secrets/jwt: Fix issue where request context would be canceled too early
+ * ui: fix need to have update capability for aws iam creds generation [GH-5294]
+ * ui: fix calculation of token expiry [[GH-5435](https://github.com/hashicorp/vault/pull/5435)]
+
+IMPROVEMENTS:
+
+ * auth/aws: The identity alias name can now be configured to be either the IAM unique ID of the IAM principal or the ARN of the caller identity [[GH-5247](https://github.com/hashicorp/vault/pull/5247)]
+ * auth/cert: Add allowed_organizational_units support [[GH-5252](https://github.com/hashicorp/vault/pull/5252)]
+ * cli: Format TTLs for non-secret responses [[GH-5367](https://github.com/hashicorp/vault/pull/5367)]
+ * identity: Support operating on entities and groups by their names [[GH-5355](https://github.com/hashicorp/vault/pull/5355)]
+ * plugins: Add `env` parameter when registering plugins to the catalog to allow operators to include environment variables during plugin execution.
+   [[GH-5359](https://github.com/hashicorp/vault/pull/5359)]
+ * secrets/aws: WAL Rollback improvements [[GH-5202](https://github.com/hashicorp/vault/pull/5202)]
+ * secrets/aws: Allow specifying STS role-default TTLs [[GH-5138](https://github.com/hashicorp/vault/pull/5138)]
+ * secrets/pki: Add configuration support for setting NotBefore [[GH-5325](https://github.com/hashicorp/vault/pull/5325)]
+ * core: Support for passing the Vault token via an Authorization Bearer header [[GH-5397](https://github.com/hashicorp/vault/pull/5397)]
+ * replication: Reindex process now runs in the background and does not block other Vault operations
+ * storage/zookeeper: Enable TLS based communication with Zookeeper [[GH-4856](https://github.com/hashicorp/vault/pull/4856)]
+ * ui: you can now init a cluster with a seal config [[GH-5428](https://github.com/hashicorp/vault/pull/5428)]
+ * ui: added the option to force promote replication clusters [[GH-5438](https://github.com/hashicorp/vault/pull/5438)]
+ * replication: Allow promotion of a secondary when data is syncing with a "force" flag
+
+## 0.11.1.1 (September 17th, 2018) (Enterprise Only)
+
+BUG FIXES:
+
+ * agent: Fix auth handler-based wrapping of output tokens [[GH-5316](https://github.com/hashicorp/vault/pull/5316)]
+ * core: Properly store the replication checkpoint file if it's larger than the storage engine's per-item limit
+ * core: Improve WAL deletion rate
+ * core: Fix token creation on performance standby nodes
+ * core: Fix unwrapping inside a namespace
+ * core: Always forward tidy operations from performance standby nodes
+
+IMPROVEMENTS:
+
+ * auth/aws: add support for key/value pairs or JSON values for `iam_request_headers` with IAM auth method [[GH-5320](https://github.com/hashicorp/vault/pull/5320)]
+ * auth/aws, secret/aws: Throttling errors from the AWS API will now be reported as 502 errors by Vault, along with the original error [[GH-5270](https://github.com/hashicorp/vault/pull/5270)]
+ * replication: Start fetching during a sync from where it previously errored
+
+## 0.11.1 (September 6th, 2018)
+
+SECURITY:
+
+ * Random Byte Reading in Barrier: Prior to this release, Vault was not properly checking the error code when reading random bytes for the IV for AES operations in its cryptographic barrier. Specifically, this means that such an IV could potentially be zero multiple times, causing nonce re-use and weakening the security of the key. On most platforms this should never happen because reading from kernel random sources is non-blocking and always successful, but there may be platform-specific behavior that has not been accounted for. (Vault has tests to check exactly this, and the tests have never seen nonce re-use.)
+
+FEATURES:
+
+ * AliCloud Agent Support: Vault Agent can now authenticate against the AliCloud auth method (see the sketch below).
+ * UI: Enable AliCloud auth method and Azure secrets engine via the UI.
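+
+A minimal Vault Agent configuration sketch for the new AliCloud auto-auth support (the role name, region, and sink path are illustrative assumptions, not part of this release):
+
+```
+$ cat > agent.hcl <<'EOF'
+auto_auth {
+  method "alicloud" {
+    mount_path = "auth/alicloud"
+    config = {
+      role   = "dev-role"   # hypothetical role on the AliCloud auth mount
+      region = "us-east-1"
+    }
+  }
+
+  sink "file" {
+    config = {
+      path = "/tmp/vault-token"   # Agent writes the token it obtains here
+    }
+  }
+}
+EOF
+$ vault agent -config=agent.hcl
+```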
+
+IMPROVEMENTS:
+
+ * core: Logging level for most logs (not including secrets/auth plugins) can now be changed on-the-fly via `SIGHUP`, reading the desired value from Vault's config file [[GH-5280](https://github.com/hashicorp/vault/pull/5280)]
+
+BUG FIXES:
+
+ * core: Ensure we use a background context when stepping down [[GH-5290](https://github.com/hashicorp/vault/pull/5290)]
+ * core: Properly check error return from random byte reading [[GH-5277](https://github.com/hashicorp/vault/pull/5277)]
+ * core: Re-add `sys/` top-route injection for now [[GH-5241](https://github.com/hashicorp/vault/pull/5241)]
+ * core: Policies stored in minified JSON would return an error [[GH-5229](https://github.com/hashicorp/vault/pull/5229)]
+ * core: Evaluate templated policies in capabilities check [[GH-5250](https://github.com/hashicorp/vault/pull/5250)]
+ * identity: Update MemDB with identity group alias while loading groups [[GH-5289](https://github.com/hashicorp/vault/pull/5289)]
+ * secrets/database: Fix nil pointer when revoking some leases [[GH-5262](https://github.com/hashicorp/vault/pull/5262)]
+ * secrets/pki: Fix sign-verbatim losing extra Subject attributes [[GH-5245](https://github.com/hashicorp/vault/pull/5245)]
+ * secrets/pki: Remove certificates from store when tidying revoked certificates and simplify API [[GH-5231](https://github.com/hashicorp/vault/pull/5231)]
+ * ui: JSON editor will not coerce input to an object, and will now show an error about Vault expecting an object [[GH-5271](https://github.com/hashicorp/vault/pull/5271)]
+ * ui: authentication form will now default to any methods that have been tuned to show up for unauthenticated users [[GH-5281](https://github.com/hashicorp/vault/pull/5281)]
+
+
+## 0.11.0 (August 28th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * Request Timeouts: A default request timeout of 90s is now enforced. This setting can be overwritten in the config file. If you anticipate requests taking longer than 90s this setting should be updated before upgrading.
+ * `sys/` Top Level Injection: For the last two years, for backwards compatibility, data for various `sys/` routes has been injected into both the Secret's Data map and into the top level of the JSON response object. However, this has some subtle issues that pop up from time to time and is becoming increasingly complicated to maintain, so it's finally being removed. (NOTE: this will be re-added into 0.11.1 as it broke more than anticipated. There will be some further guidelines around when this will be removed again.)
+ * Path Fallback for List Operations: For a very long time Vault has automatically adjusted `list` operations to always end in a `/`, as list operations operate on prefixes, so all list operations by definition end with `/`. This was done server-side, so it affects all clients. However, this has also led to a lot of confusion for users writing policies that assume that the path that they use in the CLI is the path used internally. Starting in 0.11, ACL policies gain a new fallback rule for listing: they will use a matching path ending in `/` if available, but if not found, they will look for the same path without a trailing `/`. This allows putting `list` capabilities in the same path block as most other capabilities for that path, while not providing any extra access if `list` wasn't actually provided there.
+ * Performance Standbys On By Default: If your flavor/license of Vault Enterprise supports Performance Standbys, they are on by default.
+   You can disable this behavior per-node with the `disable_performance_standby` configuration flag.
+ * AWS Secret Engine Roles: The AWS Secret Engine roles are now explicit about the type of AWS credential they are generating; this reduces ambiguity that existed previously as well as enables new features for specific credential types. Writing role data and generating credentials remain backwards compatible; however, the data returned when reading a role's configuration has changed in backwards-incompatible ways. Anything that depended on reading role data from the AWS secret engine will break until it is updated to work with the new format.
+ * Token Format (Enterprise): Tokens are now represented as a base62 value; tokens in namespaces will have the namespace identifier appended.
+
+FEATURES:
+
+ * **Namespaces (Enterprise)**: A set of features within Vault Enterprise that allows Vault environments to support *Secure Multi-tenancy* within a single Vault Enterprise infrastructure. Through namespaces, Vault administrators can support tenant isolation for teams and individuals as well as empower those individuals to self-manage their own tenant environment.
+ * **Performance Standbys (Enterprise)**: Standby nodes can now service requests that do not modify storage. This provides near-horizontal scaling of a cluster in some workloads, and is the intra-cluster analogue of the existing Performance Replication feature, which replicates to distinct clusters in other datacenters, geos, etc.
+ * **AliCloud OSS Storage**: AliCloud OSS can now be used for Vault storage.
+ * **AliCloud Auth Plugin**: AliCloud's identity services can now be used to grant access to Vault. See the [plugin repository](https://github.com/hashicorp/vault-plugin-auth-alicloud) for more information.
+ * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that allows generating credentials to allow access to Azure. See the [plugin repository](https://github.com/hashicorp/vault-plugin-secrets-azure) for more information.
+ * **HA Support for MySQL Storage**: MySQL storage now supports HA.
+ * **ACL Templating**: ACL policies can now be templated using identity Entity, Groups, and Metadata (see the policy example below).
+ * **UI Onboarding wizards**: The Vault UI can provide contextual help and guidance, linking out to relevant links or guides on vaultproject.io for various workflows in Vault.
+
+IMPROVEMENTS:
+
+ * agent: Add `exit_after_auth` to be able to use the Agent for a single authentication [[GH-5013](https://github.com/hashicorp/vault/pull/5013)]
+ * auth/approle: Add ability to set token bound CIDRs on individual Secret IDs [[GH-5034](https://github.com/hashicorp/vault/pull/5034)]
+ * cli: Add support for passing parameters to `vault read` operations [[GH-5093](https://github.com/hashicorp/vault/pull/5093)]
+ * secrets/aws: Make credential types more explicit [[GH-4360](https://github.com/hashicorp/vault/pull/4360)]
+ * secrets/nomad: Support for longer token names [[GH-5117](https://github.com/hashicorp/vault/pull/5117)]
+ * secrets/pki: Allow disabling CRL generation [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
+ * storage/azure: Add support for different Azure environments [[GH-4997](https://github.com/hashicorp/vault/pull/4997)]
+ * storage/file: Sort keys in list responses [[GH-5141](https://github.com/hashicorp/vault/pull/5141)]
+ * storage/mysql: Support special characters in database and table names.
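+
+A short sketch of the ACL templating feature noted above (the mount and path layout are illustrative):
+
+```
+$ vault policy write per-entity-kv - <<'EOF'
+# Each entity gets its own KV prefix, keyed off the entity's name.
+path "secret/{{identity.entity.name}}/*" {
+  capabilities = ["create", "read", "update", "delete", "list"]
+}
+EOF
+```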
+
+BUG FIXES:
+
+ * auth/jwt: Always validate `aud` claim even if `bound_audiences` isn't set (in other words, error in this case)
+ * core: Prevent Go's HTTP library from interspersing logs in a different format and/or interleaved [[GH-5135](https://github.com/hashicorp/vault/pull/5135)]
+ * identity: Properly populate `mount_path` and `mount_type` on group lookup [[GH-5074](https://github.com/hashicorp/vault/pull/5074)]
+ * identity: Fix persisting alias metadata [[GH-5188](https://github.com/hashicorp/vault/pull/5188)]
+ * identity: Fix carryover issue from previously fixed race condition that could cause Vault not to start up due to two entities referencing the same alias. These entities are now merged. [[GH-5000](https://github.com/hashicorp/vault/pull/5000)]
+ * replication: Fix issue causing some pages not to flush to storage
+ * secrets/database: Fix inability to update custom SQL statements on database roles. [[GH-5080](https://github.com/hashicorp/vault/pull/5080)]
+ * secrets/pki: Disallow putting the CA's serial on its CRL. While technically legal, doing so inherently means the CRL can't be trusted anyway, so it's not useful and easy to footgun. [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
+ * storage/gcp,spanner: Fix data races [[GH-5081](https://github.com/hashicorp/vault/pull/5081)]
+
+## 0.10.4 (July 25th, 2018)
+
+SECURITY:
+
+ * Control Groups: The Identity entity associated with a request was not being properly persisted. As a result, the same authorizer could provide more than one authorization.
+
+DEPRECATIONS/CHANGES:
+
+ * Revocations of dynamic secrets leases are now queued/asynchronous rather than synchronous. This allows Vault to take responsibility for revocation even if the initial attempt fails. The previous synchronous behavior can be attained via the `-sync` CLI flag or `sync` API parameter. When in synchronous mode, if the operation results in failure it is up to the user to retry.
+ * CLI Retries: The CLI will no longer retry commands on 5xx errors. This was a source of confusion to users as to why Vault would "hang" before returning a 5xx error. The Go API client still defaults to two retries.
+ * Identity Entity Alias metadata: You can no longer manually set metadata on entity aliases. All alias data (except the canonical entity ID it refers to) is intended to be managed by the plugin providing the alias information, so allowing it to be set manually didn't make sense.
+
+FEATURES:
+
+ * **JWT/OIDC Auth Method**: The new `jwt` auth method accepts JWTs and either validates signatures locally or uses OIDC Discovery to fetch the current set of keys for signature validation. Various claims can be specified for validation (in addition to the cryptographic signature) and a user and optional groups claim can be used to provide Identity information (see the configuration sketch below).
+ * **FoundationDB Storage**: You can now use FoundationDB for storing Vault data.
+ * **UI Control Group Workflow (enterprise)**: The UI will now detect control group responses and provide a workflow to view the status of the request and to authorize requests.
+ * **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically authenticate for you across a variety of authentication methods, provide tokens to clients, and keep the tokens renewed, reauthenticating as necessary.
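+
+A minimal configuration sketch for the new `jwt` auth method (the discovery URL, audience, and role name are illustrative assumptions):
+
+```
+$ vault auth enable jwt
+$ vault write auth/jwt/config \
+    oidc_discovery_url="https://issuer.example.com/" \
+    bound_issuer="https://issuer.example.com/"
+$ vault write auth/jwt/role/demo \
+    bound_audiences="vault" \
+    user_claim="sub" \
+    policies="default"
+```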
+
+IMPROVEMENTS:
+
+ * auth/azure: Add support for virtual machine scale sets
+ * auth/gcp: Support multiple bindings for region, zone, and instance group
+ * cli: Add subcommands for interacting with the plugin catalog [[GH-4911](https://github.com/hashicorp/vault/pull/4911)]
+ * cli: Add a `-description` flag to secrets and auth tune subcommands to allow updating an existing secret engine's or auth method's description. This change also allows the description to be unset by providing an empty string.
+ * core: Add config flag to disable non-printable character check [[GH-4917](https://github.com/hashicorp/vault/pull/4917)]
+ * core: A `max_request_size` parameter can now be set per-listener to adjust the maximum allowed size per request [[GH-4824](https://github.com/hashicorp/vault/pull/4824)]
+ * core: Add control group request endpoint to default policy [[GH-4904](https://github.com/hashicorp/vault/pull/4904)]
+ * identity: Identity metadata is now passed through to plugins [[GH-4967](https://github.com/hashicorp/vault/pull/4967)]
+ * replication: Add additional safety checks and logging when replication is in a bad state
+ * secrets/kv: Add support for using `-field=data` to KVv2 when using `vault kv` [[GH-4895](https://github.com/hashicorp/vault/pull/4895)]
+ * secrets/pki: Add the ability to tidy revoked but unexpired certificates [[GH-4916](https://github.com/hashicorp/vault/pull/4916)]
+ * secrets/ssh: Allow Vault to work with single-argument SSH flags [[GH-4825](https://github.com/hashicorp/vault/pull/4825)]
+ * secrets/ssh: SSH executable path can now be configured in the CLI [[GH-4937](https://github.com/hashicorp/vault/pull/4937)]
+ * storage/swift: Add additional configuration options [[GH-4901](https://github.com/hashicorp/vault/pull/4901)]
+ * ui: Choose which auth methods to show to unauthenticated users via `listing_visibility` in the auth method edit forms [[GH-4854](https://github.com/hashicorp/vault/pull/4854)]
+ * ui: Authenticate users automatically by passing a wrapped token to the UI via the new `wrapped_token` query parameter [[GH-4854](https://github.com/hashicorp/vault/pull/4854)]
+
+BUG FIXES:
+
+ * api: Fix response body being cleared too early [[GH-4987](https://github.com/hashicorp/vault/pull/4987)]
+ * auth/approle: Fix issue with tidy endpoint that would unnecessarily remove secret accessors [[GH-4981](https://github.com/hashicorp/vault/pull/4981)]
+ * auth/aws: Fix updating `max_retries` [[GH-4980](https://github.com/hashicorp/vault/pull/4980)]
+ * auth/kubernetes: Trim trailing whitespace when sending JWT
+ * cli: Fix parsing of environment variables for integer flags [[GH-4925](https://github.com/hashicorp/vault/pull/4925)]
+ * core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is sealed [[GH-4874](https://github.com/hashicorp/vault/pull/4874)]
+ * core: Fix issue releasing the leader lock in some circumstances [[GH-4915](https://github.com/hashicorp/vault/pull/4915)]
+ * core: Fix a panic that could happen if the server was shut down while still starting up
+ * core: Fix deadlock that would occur if a leadership loss occurs at the same time as a seal operation [[GH-4932](https://github.com/hashicorp/vault/pull/4932)]
+ * core: Fix issue with auth mounts failing to renew tokens due to policies changing [[GH-4960](https://github.com/hashicorp/vault/pull/4960)]
+ * auth/radius: Fix issue where some radius logins were being canceled too early [[GH-4941](https://github.com/hashicorp/vault/pull/4941)]
+ * core: Fix accidental seal of Vault if we lose leadership during startup [[GH-4924](https://github.com/hashicorp/vault/pull/4924)]
+ * core: Fix standby not being able to forward requests larger than 4MB [[GH-4844](https://github.com/hashicorp/vault/pull/4844)]
+ * core: Avoid panic while processing group memberships [[GH-4841](https://github.com/hashicorp/vault/pull/4841)]
+ * identity: Fix a race condition creating aliases [[GH-4965](https://github.com/hashicorp/vault/pull/4965)]
+ * plugins: Fix being unable to send very large payloads to or from plugins [[GH-4958](https://github.com/hashicorp/vault/pull/4958)]
+ * physical/azure: Long list responses would sometimes be truncated [[GH-4983](https://github.com/hashicorp/vault/pull/4983)]
+ * replication: Allow replication status requests to be processed while in merkle sync
+ * replication: Ensure merkle reindex flushes all changes to storage immediately
+ * replication: Fix a case where a network interruption could cause a secondary to be unable to reconnect to a primary
+ * secrets/pki: Fix permitted DNS domains performing improper validation [[GH-4863](https://github.com/hashicorp/vault/pull/4863)]
+ * secrets/database: Fix panic during DB creds revocation [[GH-4846](https://github.com/hashicorp/vault/pull/4846)]
+ * ui: Fix usage of cubbyhole backend in the UI [[GH-4851](https://github.com/hashicorp/vault/pull/4851)]
+ * ui: Fix toggle state when a secret is JSON-formatted [[GH-4913](https://github.com/hashicorp/vault/pull/4913)]
+ * ui: Fix coercion of falsey values to empty string when editing secrets as JSON [[GH-4977](https://github.com/hashicorp/vault/pull/4977)]
+
+## 0.10.3 (June 20th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * In the audit log and in client responses, policies are now split into three parameters: policies that came only from tokens, policies that came only from Identity, and the combined set. Any previous location of policies via the API now contains the full, combined set.
+ * When a token is tied to an Identity entity and the entity is deleted, the token will no longer be usable, regardless of the validity of the token itself.
+ * When authentication succeeds but no policies were defined for that specific user, most auth methods would allow a token to be generated but a few would reject the authentication, namely `ldap`, `okta`, and `radius`. Since the `default` policy is added by Vault's core, this would incorrectly reject valid authentications before they would in fact be granted policies. This inconsistency has been addressed; valid authentications for these methods now succeed even if no policy was specifically defined in that method for that user.
+
+FEATURES:
+
+ * Root Rotation for Active Directory: You can now command Vault to rotate the configured root credentials used in the AD secrets engine, to ensure that only Vault knows the credentials it's using.
+ * URI SANs in PKI: You can now configure URI Subject Alternate Names in the `pki` backend. Roles can limit which SANs are allowed via globbing.
+ * `kv rollback` Command: You can now use `vault kv rollback` to roll a KVv2 path back to a previous non-deleted/non-destroyed version. The previous version becomes the next/newest version for the path (see the example below).
+ * Token Bound CIDRs in AppRole: You can now add CIDRs to which a token generated from AppRole will be bound.
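+
+A quick example of the new `kv rollback` command (the mount, path, and version are illustrative):
+
+```
+# Make version 1 the newest version of secret/my-app again.
+$ vault kv rollback -version=1 secret/my-app
+```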
+ +IMPROVEMENTS: + + * approle: Return 404 instead of 202 on invalid role names during POST + operations [[GH-4778](https://github.com/hashicorp/vault/pull/4778)] + * core: Add idle and initial header read/TLS handshake timeouts to connections + to ensure server resources are cleaned up [[GH-4760](https://github.com/hashicorp/vault/pull/4760)] + * core: Report policies in token, identity, and full sets [[GH-4747](https://github.com/hashicorp/vault/pull/4747)] + * secrets/databases: Add `create`/`update` distinction for connection + configurations [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] + * secrets/databases: Add `create`/`update` distinction for role configurations + [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] + * secrets/databases: Add best-effort revocation logic for use when a role has + been deleted [[GH-4782](https://github.com/hashicorp/vault/pull/4782)] + * secrets/kv: Add `kv rollback` [[GH-4774](https://github.com/hashicorp/vault/pull/4774)] + * secrets/pki: Add URI SANs support [[GH-4675](https://github.com/hashicorp/vault/pull/4675)] + * secrets/ssh: Allow standard SSH command arguments to be used, without + requiring username@hostname syntax [[GH-4710](https://github.com/hashicorp/vault/pull/4710)] + * storage/consul: Add context support so that requests are cancelable + [[GH-4739](https://github.com/hashicorp/vault/pull/4739)] + * sys: Added `hidden` option to `listing_visibility` field on `sys/mounts` + API [[GH-4827](https://github.com/hashicorp/vault/pull/4827)] + * ui: Secret values are obfuscated by default and visibility is toggleable [[GH-4422](https://github.com/hashicorp/vault/pull/4422)] + +BUG FIXES: + + * auth/approle: Fix panic due to metadata being nil [[GH-4719](https://github.com/hashicorp/vault/pull/4719)] + * auth/aws: Fix delete path for tidy operations [[GH-4799](https://github.com/hashicorp/vault/pull/4799)] + * core: Optimizations to remove some speed regressions due to the + security-related changes in 0.10.2 + * storage/dynamodb: Fix errors seen when reading existing DynamoDB data [[GH-4721](https://github.com/hashicorp/vault/pull/4721)] + * secrets/database: Fix default MySQL root rotation statement [[GH-4748](https://github.com/hashicorp/vault/pull/4748)] + * secrets/gcp: Fix renewal for GCP account keys + * secrets/kv: Fix writing to the root of a KVv2 mount from `vault kv` commands + incorrectly operating on a root+mount path instead of being an error + [[GH-4726](https://github.com/hashicorp/vault/pull/4726)] + * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC + keys, fixing lookup on some Thales devices + * replication: Fix issue enabling replication when a non-auth mount and auth + mount have the same name + * auth/kubernetes: Fix issue verifying ECDSA signed JWTs + * ui: add missing edit mode for auth method configs [[GH-4770](https://github.com/hashicorp/vault/pull/4770)] + +## 0.10.2 (June 6th, 2018) + +SECURITY: + + * Tokens: A race condition was identified that could occur if a token's + lease expired while Vault was not running. In this case, when Vault came + back online, sometimes it would properly revoke the lease but other times it + would not, leading to a Vault token that no longer had an expiration and had + essentially unlimited lifetime. This race was per-token, not all-or-nothing + for all tokens that may have expired during Vault's downtime. We have fixed + the behavior and put extra checks in place to help prevent any similar + future issues. 
+   In addition, the logic we have put in place ensures that such lease-less tokens can no longer be used (unless they are root tokens that never had an expiration to begin with).
+ * Convergent Encryption: The version 2 algorithm used in `transit`'s convergent encryption feature is susceptible to offline plaintext-confirmation attacks. As a result, we are introducing a version 3 algorithm that mitigates this. If you are currently using convergent encryption, we recommend upgrading, rotating your encryption key (the new key version will use the new algorithm), and rewrapping your data (the `rewrap` endpoint can be used to allow a relatively non-privileged user to perform the rewrapping while never divulging the plaintext); see the rotate/rewrap example below.
+ * AppRole case-sensitive role name secret-id leaking: When using a mixed-case role name via AppRole, deleting a secret-id via accessor or other operations could end up leaving the secret-id behind, still valid but without an accessor. This has now been fixed, and we have put checks in place to prevent these secret-ids from being used.
+
+DEPRECATIONS/CHANGES:
+
+ * PKI duration return types: The PKI backend now returns durations (e.g. when reading a role) as an integer number of seconds instead of a Go-style string, in line with how the rest of Vault's API returns durations.
+
+FEATURES:
+
+ * Active Directory Secrets Engine: A new `ad` secrets engine has been created which allows Vault to rotate and provide credentials for configured AD accounts.
+ * Rekey Verification: Rekey operations can now require verification. This turns on a two-phase process where the existing key shares authorize generating a new master key, and a threshold of the new, returned key shares must be provided to verify that they have been successfully received in order for the actual master key to be rotated.
+ * CIDR restrictions for `cert`, `userpass`, and `kubernetes` auth methods: You can now limit authentication to specific CIDRs; these will also be encoded in resultant tokens to limit their use.
+ * Vault UI Browser CLI: The UI now supports usage of read/write/list/delete commands in a CLI that can be accessed from the nav bar. Complex inputs such as JSON files are not currently supported. This surfaces features otherwise unsupported in Vault's UI.
+ * Azure Key Vault Auto Unseal/Seal Wrap Support (Enterprise): Azure Key Vault can now be used as a seal for Auto Unseal and Seal Wrapping.
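+
+For the convergent encryption note above, a sketch of the recommended rotate-and-rewrap flow in `transit` (the key name is illustrative and the ciphertext is a placeholder):
+
+```
+# Rotate the key so new operations use the latest key version...
+$ vault write -f transit/keys/my-key/rotate
+
+# ...then rewrap existing ciphertext to the new version without the
+# caller ever seeing the plaintext.
+$ vault write transit/rewrap/my-key ciphertext="vault:v1:..."
+```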
+
+IMPROVEMENTS:
+
+ * api: Close renewer's doneCh when the renewer is stopped, so that programs expecting a final value through doneCh behave correctly [[GH-4472](https://github.com/hashicorp/vault/pull/4472)]
+ * auth/cert: Break out `allowed_names` into component parts and add `allowed_uri_sans` [[GH-4231](https://github.com/hashicorp/vault/pull/4231)]
+ * auth/ldap: Obfuscate error messages pre-bind for greater security [[GH-4700](https://github.com/hashicorp/vault/pull/4700)]
+ * cli: `vault login` now supports a `-no-print` flag to suppress printing token information but still allow storing into the token helper [[GH-4454](https://github.com/hashicorp/vault/pull/4454)]
+ * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and CKM_RSA_PKCS_OAEP mechanisms
+ * core/pkcs11 (enterprise): HSM slots can now be selected by token label instead of just slot number
+ * core/token: Optimize token revocation by removing unnecessary list call against the storage backend when calling revoke-orphan on tokens [[GH-4465](https://github.com/hashicorp/vault/pull/4465)]
+ * core/token: Refactor token revocation logic to not block on the call when underlying leases are pending revocation by moving the expiration logic to the expiration manager [[GH-4512](https://github.com/hashicorp/vault/pull/4512)]
+ * expiration: Allow revoke-prefix and revoke-force to work on single leases as well as prefixes [[GH-4450](https://github.com/hashicorp/vault/pull/4450)]
+ * identity: Return parent group info when reading a group [[GH-4648](https://github.com/hashicorp/vault/pull/4648)]
+ * identity: Provide more contextual key information when listing entities, groups, and aliases
+ * identity: Passthrough EntityID to backends [[GH-4663](https://github.com/hashicorp/vault/pull/4663)]
+ * identity: Adds ability to request entity information through system view [GH-4681]
+ * secret/pki: Add custom extended key usages [[GH-4667](https://github.com/hashicorp/vault/pull/4667)]
+ * secret/pki: Add custom PKIX serial numbers [[GH-4694](https://github.com/hashicorp/vault/pull/4694)]
+ * secret/ssh: Use hostname instead of IP in OTP mode, similar to CA mode [[GH-4673](https://github.com/hashicorp/vault/pull/4673)]
+ * storage/file: Attempt in some error conditions to do more cleanup [[GH-4684](https://github.com/hashicorp/vault/pull/4684)]
+ * ui: wrapping lookup now displays the path [[GH-4644](https://github.com/hashicorp/vault/pull/4644)]
+ * ui: Identity interface now has more inline actions to make editing and adding aliases to an entity or group easier [[GH-4502](https://github.com/hashicorp/vault/pull/4502)]
+ * ui: Identity interface now lists groups by name [[GH-4655](https://github.com/hashicorp/vault/pull/4655)]
+ * ui: Permission denied errors still render the sidebar in the Access section [[GH-4658](https://github.com/hashicorp/vault/pull/4658)]
+ * replication: Improve performance of index page flushes and WAL garbage collecting
+
+BUG FIXES:
+
+ * auth/approle: Make invalid role_id a 400 error instead of 500 [[GH-4470](https://github.com/hashicorp/vault/pull/4470)]
+ * auth/cert: Fix Identity alias using serial number instead of common name [[GH-4475](https://github.com/hashicorp/vault/pull/4475)]
+ * cli: Fix panic running `vault token capabilities` with multiple paths [[GH-4552](https://github.com/hashicorp/vault/pull/4552)]
+ * core: When using the `use_always` option with PROXY protocol support, do not require `authorized_addrs` to be set
[[GH-4065](https://github.com/hashicorp/vault/pull/4065)] + * core: Fix panic when certain combinations of policy paths and allowed/denied + parameters were used [[GH-4582](https://github.com/hashicorp/vault/pull/4582)] + * secret/gcp: Make `bound_region` able to use short names + * secret/kv: Fix response wrapping for KV v2 [[GH-4511](https://github.com/hashicorp/vault/pull/4511)] + * secret/kv: Fix address flag not being honored correctly [[GH-4617](https://github.com/hashicorp/vault/pull/4617)] + * secret/pki: Fix `safety_buffer` for tidy being allowed to be negative, + clearing all certs [[GH-4641](https://github.com/hashicorp/vault/pull/4641)] + * secret/pki: Fix `key_type` not being allowed to be set to `any` [[GH-4595](https://github.com/hashicorp/vault/pull/4595)] + * secret/pki: Fix path length parameter being ignored when using + `use_csr_values` and signing an intermediate CA cert [[GH-4459](https://github.com/hashicorp/vault/pull/4459)] + * secret/ssh: Only append UserKnownHostsFile to args when configured with a + value [[GH-4674](https://github.com/hashicorp/vault/pull/4674)] + * storage/dynamodb: Fix listing when one child is left within a nested path + [[GH-4570](https://github.com/hashicorp/vault/pull/4570)] + * storage/gcs: Fix swallowing an error on connection close [[GH-4691](https://github.com/hashicorp/vault/pull/4691)] + * ui: Fix HMAC algorithm in transit [[GH-4604](https://github.com/hashicorp/vault/pull/4604)] + * ui: Fix unwrap of auth responses via the UI's unwrap tool [[GH-4611](https://github.com/hashicorp/vault/pull/4611)] + * ui (enterprise): Fix parsing of version string that blocked some users from seeing + enterprise-specific pages in the UI [[GH-4547](https://github.com/hashicorp/vault/pull/4547)] + * ui: Fix incorrect capabilities path check when viewing policies [[GH-4566](https://github.com/hashicorp/vault/pull/4566)] + * replication: Fix error while running plugins on a newly created replication + secondary + * replication: Fix issue with token store lookups after a secondary's mount table + is invalidated. + * replication: Improve startup time when a large Merkle index is in use. + * replication: Fix panic when storage becomes unreachable during unseal. + +## 0.10.1/0.9.7 (April 25th, 2018) + +The following two items are in both 0.9.7 and 0.10.1. They only affect +Enterprise, and as such 0.9.7 is an Enterprise-only release: + +SECURITY: + + * EGPs: A regression affecting 0.9.6 and 0.10.0 caused EGPs to not be applied + correctly if an EGP was updated in a running Vault after initial write or + after it was loaded on unseal. This has been fixed. + +BUG FIXES: + + * Fixed an upgrade issue affecting performance secondaries when migrating from + a version that did not include Identity to one that did. + +All other content in this release is for 0.10.1 only. + +DEPRECATIONS/CHANGES: + + * `vault kv` and Vault versions: In 0.10.1 some issues with `vault kv` against + v1 K/V engine mounts are fixed. However, using 0.10.1 for both the server + and CLI versions is required. + * Mount information visibility: Users that have access to any path within a + mount can now see information about that mount, such as its type and + options, via some API calls; a brief sketch follows this list. + * Identity and Local Mounts: Local mounts would allow creating Identity + entities but these would not be able to be used successfully (even locally) + in replicated scenarios. We have now disallowed entities and groups from + being created for local mounts in the first place.
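+
+As an illustration of the mount information visibility change above, a
+minimal Go sketch using the `github.com/hashicorp/vault/api` client. The
+`sys/internal/ui/mounts` path is our assumption about the endpoint backing
+this behavior (the notes do not name it), and `secret` is a placeholder
+mount; verify against the API docs for your version:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// A token with access to any path inside the "secret/" mount can now
+	// see that mount's type and options.
+	info, err := client.Logical().Read("sys/internal/ui/mounts/secret")
+	if err != nil {
+		log.Fatal(err)
+	}
+	if info == nil {
+		log.Fatal("no mount information returned")
+	}
+	fmt.Printf("type=%v options=%v\n", info.Data["type"], info.Data["options"])
+}
+```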
+ +FEATURES: + + * X-Forwarded-For support: `X-Forwarded-For` headers can now be used to set the + client IP seen by Vault. See the [TCP listener configuration + page](https://www.vaultproject.io/docs/configuration/listener/tcp.html) for + details. + * CIDR IP Binding for Tokens: Tokens now support being bound to specific + CIDR(s) for usage. Currently this is implemented in Token Roles; usage can be + expanded to other authentication backends over time. + * `vault kv patch` command: A new `kv patch` helper command allows modifying + only some values in existing data at a K/V path, while using check-and-set to + ensure that this modification happens safely. + * AppRole Local Secret IDs: Roles can now be configured to generate secret IDs + local to the cluster. This enables performance secondaries to generate and + consume secret IDs without contacting the primary. + * AES-GCM Support for PKCS#11 [BETA] (Enterprise): On supporting HSMs, + AES-GCM can now be used in lieu of AES-CBC/HMAC-SHA256. This has currently + only been fully tested on AWS CloudHSM. + * Auto Unseal/Seal Wrap Key Rotation Support (Enterprise): Auto Unseal + mechanisms, including PKCS#11 HSMs, now support rotation of encryption keys, + and migration between key and encryption types, such as from AES-CBC to + AES-GCM, can be performed at the same time (where supported). + +IMPROVEMENTS: + + * auth/approle: Support for cluster local secret IDs. This enables secondaries + to generate secret IDs without contacting the primary [[GH-4427](https://github.com/hashicorp/vault/pull/4427)] + * auth/token: Add the policies inherited due to identity associations to the + token lookup response [[GH-4366](https://github.com/hashicorp/vault/pull/4366)] + * auth/token: Add CIDR binding to token roles [[GH-815](https://github.com/hashicorp/vault/pull/815)] + * cli: Add `vault kv patch` [[GH-4432](https://github.com/hashicorp/vault/pull/4432)] + * core: Add X-Forwarded-For support [[GH-4380](https://github.com/hashicorp/vault/pull/4380)] + * core: Add token CIDR-binding support [[GH-815](https://github.com/hashicorp/vault/pull/815)] + * identity: Add the ability to disable an entity. Disabling an entity does not + revoke associated tokens, but while the entity is disabled they cannot be + used.
[[GH-4353](https://github.com/hashicorp/vault/pull/4353)] + * physical/consul: Allow tuning of session TTL and lock wait time [[GH-4352](https://github.com/hashicorp/vault/pull/4352)] + * replication: Dynamically adjust WAL cleanup over a period of time based on + the rate of writes committed + * secret/ssh: Update dynamic key install script to use shell locking to avoid + concurrent modifications [[GH-4358](https://github.com/hashicorp/vault/pull/4358)] + * ui: Access to `sys/mounts` is no longer needed to use the UI; the list of + engines will show you the ones you implicitly have access to (because you have + access to secrets in those engines) [[GH-4439](https://github.com/hashicorp/vault/pull/4439)] + +BUG FIXES: + + * cli: Fix `vault kv` backwards compatibility with KV v1 engine mounts + [[GH-4430](https://github.com/hashicorp/vault/pull/4430)] + * identity: Persist entity memberships in external identity groups across + mounts [[GH-4365](https://github.com/hashicorp/vault/pull/4365)] + * identity: Fix error preventing authentication using local mounts on + performance secondary replication clusters [[GH-4407](https://github.com/hashicorp/vault/pull/4407)] + * replication: Fix issue causing secondaries to not connect properly to a + pre-0.10 primary until the primary was upgraded + * secret/gcp: Fix panic on rollback when a roleset wasn't created properly + [[GH-4344](https://github.com/hashicorp/vault/pull/4344)] + * secret/gcp: Fix panic on renewal + * ui: Fix IE11 form submissions in a few parts of the application [[GH-4378](https://github.com/hashicorp/vault/pull/4378)] + * ui: Fix IE file saving on policy pages and init screens [[GH-4376](https://github.com/hashicorp/vault/pull/4376)] + * ui: Fixed an issue where the AWS secret backend would show the wrong menu + [[GH-4371](https://github.com/hashicorp/vault/pull/4371)] + * ui: Fixed an issue where policies with commas would not render in the + interface properly [[GH-4398](https://github.com/hashicorp/vault/pull/4398)] + * ui: Corrected the saving of mount tune TTLs for auth methods [[GH-4431](https://github.com/hashicorp/vault/pull/4431)] + * ui: Credentials generation no longer checks capabilities before making + API calls. This should fix needing "update" capabilities to read IAM + credentials in the AWS secrets engine [[GH-4446](https://github.com/hashicorp/vault/pull/4446)] + +## 0.10.0 (April 10th, 2018) + +SECURITY: + + * Log sanitization for Combined Database Secret Engine: In certain failure + scenarios with incorrectly formatted connection URLs, the raw connection + errors were being returned to the user with the configured database + credentials. Errors are now sanitized before being returned to the user. + +DEPRECATIONS/CHANGES: + + * Database plugin compatibility: The database plugin interface was enhanced to + support some additional functionality related to root credential rotation + and supporting templated URL strings. The changes were made in a + backwards-compatible way and all builtin plugins were updated with the new + features. Custom plugins not built into Vault will need to be upgraded to + support templated URL strings and root rotation. Additionally, the + Initialize method was deprecated in favor of a new Init method that supports + persisting configuration modifications made in the plugin back to the primary + data store.
+ * Removal of returned secret information: For a long time Vault has returned + configuration given to various secret engines and auth methods with secret + values (such as secret API keys or passwords) still intact, and with a + warning to the user on write that anyone with read access could see the + secret. This was mostly done to make it easy for tools like Terraform to + judge whether state had drifted. However, it also feels quite un-Vault-y to + do this and we've never felt very comfortable doing so. In 0.10 we have gone + through and removed this behavior from the various backends; fields which + contained secret values are simply no longer returned on read. We are + working with the Terraform team to make changes to their provider to + accommodate this as best as possible, and users of other tools may have to + make adjustments, but in the end we felt that the ends did not justify the + means and we needed to prioritize security over operational convenience. + * LDAP auth method case sensitivity: We now treat usernames and groups + configured locally for policy assignment in a case insensitive fashion by + default. Existing configurations will continue to work as they do now; + however, the next time a configuration is written `case_sensitive_names` + will need to be explicitly set to `true`. + * TTL handling within core: All lease TTL handling has been centralized within + the core of Vault to ensure consistency across all backends. Since this was + previously delegated to individual backends, there may be some slight + differences in TTLs generated from some backends. + * Removal of default `secret/` mount: In 0.12 we will stop mounting `secret/` + by default at initialization time (it will still be available in `dev` + mode). + +FEATURES: + + * OSS UI: The Vault UI is now fully open-source. Similarly to the CLI, some + features are only available with a supporting version of Vault, but the code + base is entirely open. + * Versioned K/V: The `kv` backend has been completely revamped, featuring + flexible versioning of values, check-and-set protections, and more. A new + `vault kv` subcommand allows friendly interactions with it. Existing mounts + of the `kv` backend can be upgraded to the new versioned mode (downgrades + are not currently supported). The old "passthrough" mode is still the + default for new mounts; versioning can be turned on by setting the + `-version=2` flag for the `vault secrets enable` command. + * Database Root Credential Rotation: Database configurations can now rotate + their own configured admin/root credentials, allowing configured credentials + for a database connection to be rotated immediately after sending them into + Vault, invalidating the old credentials and ensuring only Vault knows the + actual valid values. + * Azure Authentication Plugin: There is now a plugin (pulled in to Vault) that + allows authenticating Azure machines to Vault using Azure's Managed Service + Identity credentials. See the [plugin + repository](https://github.com/hashicorp/vault-plugin-auth-azure) for more + information. + * GCP Secrets Plugin: There is now a plugin (pulled in to Vault) that allows + generating secrets to allow access to GCP. See the [plugin + repository](https://github.com/hashicorp/vault-plugin-secrets-gcp) for more + information. + * Selective Audit HMACing of Request and Response Data Keys: HMACing in audit + logs can be turned off for specific keys in the request input map and + response `data` map on a per-mount basis. 
+ * Passthrough Request Headers: Request headers can now be selectively passed + through to backends on a per-mount basis. This is useful in various cases + when plugins are interacting with external services. + * HA for Google Cloud Storage: The GCS storage type now supports HA. + * UI support for identity: Add and edit entities, groups, and their associated + aliases. + * UI auth method support: Enable, disable, and configure all of the built-in + authentication methods. + * UI (Enterprise): View and edit Sentinel policies. + +IMPROVEMENTS: + + * core: Centralize TTL generation for leases in core [[GH-4230](https://github.com/hashicorp/vault/pull/4230)] + * identity: API to update group-alias by ID [[GH-4237](https://github.com/hashicorp/vault/pull/4237)] + * secret/cassandra: Update Cassandra storage delete function to not use batch + operations [[GH-4054](https://github.com/hashicorp/vault/pull/4054)] + * storage/mysql: Allow setting max idle connections and connection lifetime + [[GH-4211](https://github.com/hashicorp/vault/pull/4211)] + * storage/gcs: Add HA support [[GH-4226](https://github.com/hashicorp/vault/pull/4226)] + * ui: Add Nomad to the list of available secret engines + * ui: Adds ability to set static headers to be returned by the UI + +BUG FIXES: + + * api: Fix retries not working [[GH-4322](https://github.com/hashicorp/vault/pull/4322)] + * auth/gcp: Invalidate clients on config change + * auth/token: Revoke-orphan and tidy operations now correctly clean up the + parent prefix entry in the underlying storage backend. These operations also + mark corresponding child tokens as orphans by removing the parent/secondary + index from the entries. [[GH-4193](https://github.com/hashicorp/vault/pull/4193)] + * command: Re-add `-mfa` flag and migrate to OSS binary [[GH-4223](https://github.com/hashicorp/vault/pull/4223)] + * core: Fix issue occurring from mounting two auth backends with the same path + with one mount having `auth/` in front [[GH-4206](https://github.com/hashicorp/vault/pull/4206)] + * mfa: Invalidation of MFA configurations (Enterprise) + * replication: Fix a panic on some non-64-bit platforms + * replication: Fix invalidation of policies on performance secondaries + * secret/pki: When tidying, if a value is unexpectedly nil, delete it and move + on [[GH-4214](https://github.com/hashicorp/vault/pull/4214)] + * storage/s3: Fix panic if S3 returns no Content-Length header [[GH-4222](https://github.com/hashicorp/vault/pull/4222)] + * ui: Fixed an issue where the UI was checking incorrect paths when operating + on transit keys. Capabilities are now checked when attempting to encrypt / + decrypt, etc. + * ui: Fixed IE 11 layout issues and JS errors that would stop the application + from running. + * ui: Fixed the link that gets rendered when a user doesn't have permissions + to view the root of a secret engine. The link now sends them back to the list + of secret engines. + * replication: Fix issue with DR secondaries when using mount-specified local + paths. + * cli: Fix an issue where generating a DR operation token would not output the + token [[GH-4328](https://github.com/hashicorp/vault/pull/4328)]
+ * In order to prefix-match IAM role and instance profile ARNs in the AWS auth + backend, you now must explicitly opt in by adding a `*` to the end of the + ARN. Existing configurations will be upgraded automatically, but when + writing a new role configuration the updated behavior will be used. + +FEATURES: + + * Replication Activation Enhancements: When activating a replication + secondary, a public key can now be fetched first from the target cluster. + This public key can be provided to the primary when requesting the + activation token. If provided, the public key will be used to perform a + Diffie-Hellman key exchange resulting in a shared key that encrypts the + contents of the activation token. The purpose is to protect against + accidental disclosure of the contents of the token if unwrapped by the wrong + party, given that the contents of the token are highly sensitive. If + accidentally unwrapped, the contents of the token are not usable by the + unwrapping party. It is important to note that just as a malicious operator + could unwrap the contents of the token, a malicious operator can pretend to + be a secondary and complete the Diffie-Hellman exchange on their own; this + feature provides defense in depth but still requires due diligence around + replication activation, including multiple eyes on the commands/tokens and + proper auditing. + +IMPROVEMENTS: + + * api: Update renewer grace period logic. It is no longer static, but rather + dynamically calculated based on the current lease duration after each + renew. [[GH-4090](https://github.com/hashicorp/vault/pull/4090)] + * auth/approle: Allow array input for `bound_cidr_list` [[GH-4078](https://github.com/hashicorp/vault/pull/4078)] + * auth/aws: Allow using lists in role bind parameters [[GH-3907](https://github.com/hashicorp/vault/pull/3907)] + * auth/aws: Allow binding by EC2 instance IDs [[GH-3816](https://github.com/hashicorp/vault/pull/3816)] + * auth/aws: Allow non-prefix-matched IAM role and instance profile ARNs + [[GH-4071](https://github.com/hashicorp/vault/pull/4071)] + * auth/ldap: Set a very large size limit on queries [[GH-4169](https://github.com/hashicorp/vault/pull/4169)] + * core: Log info notifications of revoked leases for all leases/reasons, not + just expirations [[GH-4164](https://github.com/hashicorp/vault/pull/4164)] + * physical/couchdb: Removed limit on the listing of items [[GH-4149](https://github.com/hashicorp/vault/pull/4149)] + * secret/pki: Support certificate policies [[GH-4125](https://github.com/hashicorp/vault/pull/4125)] + * secret/pki: Add ability to have CA:true encoded into intermediate CSRs, to + improve compatibility with some ADFS scenarios [[GH-3883](https://github.com/hashicorp/vault/pull/3883)] + * secret/transit: Allow selecting signature algorithm as well as hash + algorithm when signing/verifying [[GH-4018](https://github.com/hashicorp/vault/pull/4018)] + * server: Make sure `tls_disable_client_cert` is actually a true value rather + than just set [[GH-4049](https://github.com/hashicorp/vault/pull/4049)] + * storage/dynamodb: Allow specifying max retries for dynamo client [[GH-4115](https://github.com/hashicorp/vault/pull/4115)] + * storage/gcs: Allow specifying chunk size for transfers, which can reduce + memory utilization [[GH-4060](https://github.com/hashicorp/vault/pull/4060)] + * sys/capabilities: Add the ability to use multiple paths for capability + checking [[GH-3663](https://github.com/hashicorp/vault/pull/3663)] + +BUG FIXES: + + * auth/aws: Fix honoring `max_ttl` when a corresponding role `ttl` is not also + set
[[GH-4107](https://github.com/hashicorp/vault/pull/4107)] + * auth/okta: Fix honoring configured `max_ttl` value [[GH-4110](https://github.com/hashicorp/vault/pull/4110)] + * auth/token: If a periodic token being issued has a period greater than the + `max_lease_ttl` configured on the token store mount, the period is now + truncated. This matches renewal behavior; before, it was inconsistent + between issuance and renewal. + [[GH-4112](https://github.com/hashicorp/vault/pull/4112)] + * cli: Improve error messages around `vault auth help` when there is no CLI + helper for a particular method [[GH-4056](https://github.com/hashicorp/vault/pull/4056)] + * cli: Fix autocomplete installation when using Fish as the shell [[GH-4094](https://github.com/hashicorp/vault/pull/4094)] + * secret/database: Properly honor mount-tuned max TTL [[GH-4051](https://github.com/hashicorp/vault/pull/4051)] + * secret/ssh: Return `key_bits` value when reading a role [[GH-4098](https://github.com/hashicorp/vault/pull/4098)] + * sys: When writing policies on a performance replication secondary, properly + forward requests to the primary [[GH-4129](https://github.com/hashicorp/vault/pull/4129)] + +## 0.9.5 (February 26th, 2018) + +IMPROVEMENTS: + + * auth: Allow sending `default_lease_ttl` and `max_lease_ttl` values when enabling + auth methods. [[GH-4019](https://github.com/hashicorp/vault/pull/4019)] + * secret/database: Add list functionality to `database/config` endpoint + [[GH-4026](https://github.com/hashicorp/vault/pull/4026)] + * physical/consul: Allow setting a specific service address [[GH-3971](https://github.com/hashicorp/vault/pull/3971)] + * replication: When bootstrapping a new secondary, if the initial cluster + connection fails, Vault will attempt to roll back state so that + bootstrapping can be tried again, rather than having to recreate the + downstream cluster. This will still require fetching a new secondary + activation token. + +BUG FIXES: + + * auth/aws: Update libraries to fix regression verifying PKCS#7 identity + documents [[GH-4014](https://github.com/hashicorp/vault/pull/4014)] + * listener: Revert to Go 1.9 for now to allow certificates with non-DNS names + in their DNS SANs to be used for Vault's TLS connections [[GH-4028](https://github.com/hashicorp/vault/pull/4028)] + * replication: Fix issue with a performance secondary/DR primary node losing + its DR primary status when performing an update-primary operation + * replication: Fix issue where performance secondaries could be unable to + automatically connect to a performance primary after that performance + primary has been promoted to a DR primary from a DR secondary + * ui: Fix behavior when a value contains a `.` + +## 0.9.4 (February 20th, 2018) + +SECURITY: + + * Role Tags used with the EC2 style of AWS auth were being improperly parsed; + as a result they were not being used to properly restrict values. + Implementations following our suggestion of using these as defense-in-depth + rather than the only source of restriction should not be significantly + impacted. + +FEATURES: + + * **ChaCha20-Poly1305 support in `transit`**: You can now encrypt and decrypt + with ChaCha20-Poly1305 in `transit`. Key derivation and convergent + encryption are also supported. + * **Okta Push support in Okta Auth Backend**: If a user account has MFA + required within Okta, an Okta Push MFA flow can be used to successfully + finish authentication.
+ * **PKI Improvements**: Custom OID subject alternate names can now be set, + subject to allow restrictions that support globbing. Additionally, Country, + Locality, Province, Street Address, and Postal Code can now be set in + certificate subjects. + * **Manta Storage**: Joyent Triton Manta can now be used for Vault storage + * **Google Cloud Spanner Storage**: Google Cloud Spanner can now be used for + Vault storage + +IMPROVEMENTS: + + * auth/centrify: Add CLI helper + * audit: Always log failure metrics, even if zero, to ensure the values appear + on dashboards [[GH-3937](https://github.com/hashicorp/vault/pull/3937)] + * cli: Disable color when output is not a TTY [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] + * cli: Add `-format` flag to all subcommands [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] + * cli: Do not display deprecation warnings when the format is not table + [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] + * core: If over a predefined lease count (256k), log a warning not more than + once a minute. Too many leases can be problematic for many of the storage + backends and often this number of leases is indicative of a need for + workflow improvements. [[GH-3957](https://github.com/hashicorp/vault/pull/3957)] + * secret/nomad: Have generated ACL tokens cap out at 64 characters [[GH-4009](https://github.com/hashicorp/vault/pull/4009)] + * secret/pki: Country, Locality, Province, Street Address, and Postal Code can + now be set on certificates [[GH-3992](https://github.com/hashicorp/vault/pull/3992)] + * secret/pki: UTF-8 Other Names can now be set in Subject Alternate Names in + issued certs; allowed values can be set per role and support globbing + [[GH-3889](https://github.com/hashicorp/vault/pull/3889)] + * secret/pki: Add a flag to make the common name optional on certs [[GH-3940](https://github.com/hashicorp/vault/pull/3940)] + * secret/pki: Ensure only DNS-compatible names go into DNS SANs; additionally, + properly handle IDNA transformations for these DNS names [[GH-3953](https://github.com/hashicorp/vault/pull/3953)] + * secret/ssh: Add `valid-principals` flag to CLI for CA mode [[GH-3922](https://github.com/hashicorp/vault/pull/3922)] + * storage/manta: Add Manta storage [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] + * ui (Enterprise): Support for ChaCha20-Poly1305 keys in the transit engine. + +BUG FIXES: + + * api/renewer: Honor increment value in renew auth calls [[GH-3904](https://github.com/hashicorp/vault/pull/3904)] + * auth/approle: Fix inability to use limited-use-count secret IDs on + replication performance secondaries + * auth/approle: Cleanup of secret ID accessors during tidy and removal of + dangling accessor entries [[GH-3924](https://github.com/hashicorp/vault/pull/3924)] + * auth/aws-ec2: Avoid masking of role tag response [[GH-3941](https://github.com/hashicorp/vault/pull/3941)] + * auth/cert: Verify DNS SANs in the authenticating certificate [[GH-3982](https://github.com/hashicorp/vault/pull/3982)] + * auth/okta: Return configured durations as seconds, not nanoseconds [[GH-3871](https://github.com/hashicorp/vault/pull/3871)] + * auth/okta: Get all Okta groups for a user vs. default 200 limit [[GH-4034](https://github.com/hashicorp/vault/pull/4034)] + * auth/token: Token creation via the CLI no longer forces periodic token + creation. Passing an explicit zero value for the period no longer creates + periodic tokens.
[[GH-3880](https://github.com/hashicorp/vault/pull/3880)] + * command: Fix interpreted formatting directives when printing raw fields + [[GH-4005](https://github.com/hashicorp/vault/pull/4005)] + * command: Correctly format output when using `-field` and `-format` flags at the + same time [[GH-3987](https://github.com/hashicorp/vault/pull/3987)] + * command/rekey: Re-add lost `stored-shares` parameter [[GH-3974](https://github.com/hashicorp/vault/pull/3974)] + * command/ssh: Create and reuse the API client [[GH-3909](https://github.com/hashicorp/vault/pull/3909)] + * command/status: Fix panic when status returns 500 from leadership lookup + [[GH-3998](https://github.com/hashicorp/vault/pull/3998)] + * identity: Fix race when creating entities [[GH-3932](https://github.com/hashicorp/vault/pull/3932)] + * plugin/gRPC: Fixed an issue with list requests and raw responses coming from + plugins using gRPC transport [[GH-3881](https://github.com/hashicorp/vault/pull/3881)] + * plugin/gRPC: Fix panic when special paths are not set [[GH-3946](https://github.com/hashicorp/vault/pull/3946)] + * secret/pki: Verify a name is a valid hostname before adding to DNS SANs + [[GH-3918](https://github.com/hashicorp/vault/pull/3918)] + * secret/transit: Fix auditing when reading a key after it has been backed up + or restored [[GH-3919](https://github.com/hashicorp/vault/pull/3919)] + * secret/transit: Fix storage/memory consistency when persistence fails + [[GH-3959](https://github.com/hashicorp/vault/pull/3959)] + * storage/consul: Validate that service names are RFC 1123 compliant [[GH-3960](https://github.com/hashicorp/vault/pull/3960)] + * storage/etcd3: Fix memory ballooning with standby instances [[GH-3798](https://github.com/hashicorp/vault/pull/3798)] + * storage/etcd3: Fix large lists (like token loading at startup) not being + handled [[GH-3772](https://github.com/hashicorp/vault/pull/3772)] + * storage/postgresql: Fix compatibility with versions using custom string + version tags [[GH-3949](https://github.com/hashicorp/vault/pull/3949)] + * storage/zookeeper: Update vendoring to fix freezing issues [[GH-3896](https://github.com/hashicorp/vault/pull/3896)] + * ui (Enterprise): Decoding the replication token should no longer error and + prevent enabling of a secondary replication cluster via the UI. + * plugin/gRPC: Add connection info to the request object [[GH-3997](https://github.com/hashicorp/vault/pull/3997)] + +## 0.9.3 (January 28th, 2018) + +A regression from a feature merge disabled the Nomad secrets backend in 0.9.2. +This release re-enables the Nomad secrets backend; it is otherwise identical to +0.9.2. + +## 0.9.2 (January 26th, 2018) + +SECURITY: + + * Okta Auth Backend: While the Okta auth backend was successfully verifying + usernames and passwords, it was not checking the returned state of the + account, so accounts that had been marked as locked out could still be used to + log in. Only accounts in SUCCESS or PASSWORD_WARN states are now allowed. + * Periodic Tokens: A regression in 0.9.1 meant that periodic tokens created by + the AppRole, AWS, and Cert auth backends would expire when the max TTL for + the backend/mount/system was hit instead of their stated behavior of living + as long as they are renewed. This is now fixed; existing tokens do not have + to be reissued as this was purely a regression in the renewal logic. + * Seal Wrapping: During certain replication states, values marked for seal + wrapping may not have actually been wrapped when written on the secondaries.
This has been fixed, + and existing values will be wrapped on next read or write. This does not + affect the barrier keys. + +DEPRECATIONS/CHANGES: + + * `sys/health` DR Secondary Reporting: The `replication_dr_secondary` bool + returned by `sys/health` could be misleading since it would be `false` both + when a cluster was not a DR secondary and when the node is a standby in + the cluster and has not yet fully received state from the active node. This + could cause health checks on LBs to decide that the node was acceptable for + traffic even though DR secondaries cannot handle normal Vault traffic. (In + other words, the bool could only convey "yes" or "no" but not "not sure + yet".) This has been replaced by `replication_dr_mode` and + `replication_perf_mode` which are string values that convey the current + state of the node; a value of `disabled` indicates that replication is + disabled or the state is still being discovered. As a result, an LB check + can positively verify that the node is both not `disabled` and not a DR + secondary, and avoid sending traffic to it if either is true. + * PKI Secret Backend Roles parameter types: For `ou` and `organization` + in role definitions in the PKI secret backend, input can now be a + comma-separated string or an array of strings. Reading a role will + now return arrays for these parameters. + * Plugin API Changes: The plugin API has been updated to utilize Go's + `context.Context` package. Many function signatures now accept a context + object as the first parameter. Existing plugins will need to pull in the + latest Vault code and update their function signatures to begin using + context and the new gRPC transport. + +FEATURES: + + * **gRPC Backend Plugins**: Backend plugins now use gRPC for transport, + allowing them to be written in other languages. + * **Brand New CLI**: Vault has a brand new CLI interface that is significantly + streamlined, supports autocomplete, and is almost entirely backwards + compatible. + * **UI: PKI Secret Backend (Enterprise)**: Configure PKI secret backends, + create and browse roles and certificates, and issue and sign certificates via + the listed roles. + +IMPROVEMENTS: + + * auth/aws: Handle IAM headers produced by clients that formulate numbers as + ints rather than strings [[GH-3763](https://github.com/hashicorp/vault/pull/3763)] + * auth/okta: Support JSON lists when specifying groups and policies [[GH-3801](https://github.com/hashicorp/vault/pull/3801)] + * autoseal/hsm: Attempt reconnecting to the HSM on certain kinds of issues, + including HA scenarios for some Gemalto HSMs.
+ (Enterprise) + * cli: Output password prompts to stderr to make it easier to pipe an output + token to another command [[GH-3782](https://github.com/hashicorp/vault/pull/3782)] + * core: Report replication status in `sys/health` [[GH-3810](https://github.com/hashicorp/vault/pull/3810)] + * physical/s3: Allow using paths with S3 for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)] + * physical/s3: Add ability to disable SSL for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)] + * plugins: Args for plugins can now be specified separately from the command, + allowing the same output format and input format for plugin information + [[GH-3778](https://github.com/hashicorp/vault/pull/3778)] + * secret/pki: `ou` and `organization` can now be specified as a + comma-separated string or an array of strings [[GH-3804](https://github.com/hashicorp/vault/pull/3804)] + * plugins: Plugins will fall back to using netrpc as the communication protocol + on older versions of Vault [[GH-3833](https://github.com/hashicorp/vault/pull/3833)] + +BUG FIXES: + + * auth/(approle,aws,cert): Fix behavior where periodic tokens generated by + these backends could not have their TTL renewed beyond the system/mount max + TTL value [[GH-3803](https://github.com/hashicorp/vault/pull/3803)] + * auth/aws: Fix error returned if `bound_iam_principal_arn` was given to an + existing role update [[GH-3843](https://github.com/hashicorp/vault/pull/3843)] + * core/sealwrap: Speed improvements and bug fixes (Enterprise) + * identity: Delete group alias when an external group is deleted [[GH-3773](https://github.com/hashicorp/vault/pull/3773)] + * legacymfa/duo: Fix intermittent panic when Duo could not be reached + [[GH-2030](https://github.com/hashicorp/vault/pull/2030)] + * secret/database: Fix a location where a lock could potentially not be + released, leading to deadlock [[GH-3774](https://github.com/hashicorp/vault/pull/3774)] + * secret/(all databases): Fix behavior where, if a max TTL was specified but no + default TTL was specified, the system/mount default TTL would be used but not + be capped by the local max TTL [[GH-3814](https://github.com/hashicorp/vault/pull/3814)] + * secret/database: Fix an issue where plugins were not closed properly if they + failed to initialize [[GH-3768](https://github.com/hashicorp/vault/pull/3768)] + * ui: Mounting a secret backend will now properly set `max_lease_ttl` and + `default_lease_ttl` when specified; previously both fields set + `default_lease_ttl`. + +## 0.9.1 (December 21st, 2017) + +DEPRECATIONS/CHANGES: + + * AppRole Case Sensitivity: In prior versions of Vault, `list` operations + against AppRole roles would require preserving case in the role name, even + though most other operations within AppRole are case-insensitive with + respect to the role name. This has been fixed; existing roles will behave as + they have in the past, but new roles will act case-insensitively in these + cases. + * Token Auth Backend Roles parameter types: For `allowed_policies` and + `disallowed_policies` in role definitions in the token auth backend, input + can now be a comma-separated string or an array of strings. Reading a role + will now return arrays for these parameters. + * Transit key exporting: You can now mark a key in the `transit` backend as + `exportable` at any time, rather than just at creation time; however, once + this value is set, it still cannot be unset.
+ * PKI Secret Backend Roles parameter types: For `allowed_domains` and + `key_usage` in role definitions in the PKI secret backend, input + can now be a comma-separated string or an array of strings. Reading a role + will now return arrays for these parameters. + * SSH Dynamic Keys Method Defaults to 2048-bit Keys: When using the dynamic + key method in the SSH backend, the default is now to use 2048-bit keys if no + specific key bit size is specified. + * Consul Secret Backend lease handling: The `consul` secret backend can now + accept both strings and integer numbers of seconds for its lease value. The + value returned on a role read will be an integer number of seconds instead + of a human-friendly string. + * Unprintable characters not allowed in API paths: Unprintable characters are + no longer allowed in names in the API (paths and path parameters), with an + extra restriction on whitespace characters. Allowed characters are those + that are considered printable by Unicode plus spaces. + +FEATURES: + + * **Transit Backup/Restore**: The `transit` backend now supports a backup + operation that can export a given key, including all key versions and + configuration, as well as a restore operation allowing import into another + Vault. + * **gRPC Database Plugins**: Database plugins now use gRPC for transport, + allowing them to be written in other languages. + * **Nomad Secret Backend**: Nomad ACL tokens can now be generated and revoked + using Vault. + * **TLS Cert Auth Backend Improvements**: The `cert` auth backend can now + match against custom certificate extensions via exact or glob matching, and + additionally supports `max_ttl` and periodic token toggles. + +IMPROVEMENTS: + + * auth/cert: Support custom certificate constraints [[GH-3634](https://github.com/hashicorp/vault/pull/3634)] + * auth/cert: Support setting `max_ttl` and `period` [[GH-3642](https://github.com/hashicorp/vault/pull/3642)] + * audit/file: Setting a file mode of `0000` will now prevent Vault from + automatically `chmod`ing the log file [[GH-3649](https://github.com/hashicorp/vault/pull/3649)] + * auth/github: The legacy MFA system can now be used with the GitHub auth + backend [[GH-3696](https://github.com/hashicorp/vault/pull/3696)] + * auth/okta: The legacy MFA system can now be used with the Okta auth backend + [[GH-3653](https://github.com/hashicorp/vault/pull/3653)] + * auth/token: `allowed_policies` and `disallowed_policies` can now be specified + as a comma-separated string or an array of strings [[GH-3641](https://github.com/hashicorp/vault/pull/3641)] + * command/server: The log level can now be specified with `VAULT_LOG_LEVEL` + [[GH-3721](https://github.com/hashicorp/vault/pull/3721)] + * core: Period values from auth backends will now be checked and applied to the + TTL value directly by core on login and renewal requests [[GH-3677](https://github.com/hashicorp/vault/pull/3677)] + * database/mongodb: Add optional `write_concern` parameter, which can be set + during database configuration.
This establishes a session-wide [write + concern](https://docs.mongodb.com/manual/reference/write-concern/) for the + lifecycle of the mount [[GH-3646](https://github.com/hashicorp/vault/pull/3646)] + * http: Request paths containing non-printable characters now return 400 - Bad + Request [[GH-3697](https://github.com/hashicorp/vault/pull/3697)] + * mfa/okta: Allow specifying an email address to use as a login filter, + allowing operation when the login email and account email are different + * plugins: Make Vault more resilient when unsealing when plugins are + unavailable [[GH-3686](https://github.com/hashicorp/vault/pull/3686)] + * secret/pki: `allowed_domains` and `key_usage` can now be specified + as a comma-separated string or an array of strings [[GH-3642](https://github.com/hashicorp/vault/pull/3642)] + * secret/ssh: Allow 4096-bit keys to be used in dynamic key method [[GH-3593](https://github.com/hashicorp/vault/pull/3593)] + * secret/consul: The Consul secret backend now uses the value of `lease` set + on the role, if set, when renewing a secret. [[GH-3796](https://github.com/hashicorp/vault/pull/3796)] + * storage/mysql: Don't attempt database creation if it already exists, which can help + under certain permissions constraints [[GH-3716](https://github.com/hashicorp/vault/pull/3716)] + +BUG FIXES: + + * api/status (enterprise): Fix status reporting when using an auto seal + * auth/approle: Fix case-sensitive/insensitive comparison issue [[GH-3665](https://github.com/hashicorp/vault/pull/3665)] + * auth/cert: Return `allowed_names` on role read [[GH-3654](https://github.com/hashicorp/vault/pull/3654)] + * auth/ldap: Fix incorrect control information being sent [[GH-3402](https://github.com/hashicorp/vault/pull/3402)] [[GH-3496](https://github.com/hashicorp/vault/pull/3496)] + [[GH-3625](https://github.com/hashicorp/vault/pull/3625)] [[GH-3656](https://github.com/hashicorp/vault/pull/3656)] + * core: Fix seal status reporting when using an autoseal + * core: Add creation path to wrap info for a control group token + * core: Fix potential panic that could occur using plugins when a node + transitioned from active to standby [[GH-3638](https://github.com/hashicorp/vault/pull/3638)] + * core: Fix memory ballooning when a connection would connect to the cluster + port and then go away -- redux!
[[GH-3680](https://github.com/hashicorp/vault/pull/3680)] + * core: Replace recursive token revocation logic with depth-first logic, which + can avoid hitting stack depth limits in extreme cases [[GH-2348](https://github.com/hashicorp/vault/pull/2348)] + * core: When doing a read on configured audited-headers, properly handle case + insensitivity [[GH-3701](https://github.com/hashicorp/vault/pull/3701)] + * core/pkcs11 (enterprise): Fix panic when PKCS#11 library is not readable + * database/mysql: Allow the creation statement to use commands that are not yet + supported by the prepare statement protocol [[GH-3619](https://github.com/hashicorp/vault/pull/3619)] + * plugin/auth-gcp: Fix IAM roles when using `allow_gce_inference` [VPAG-19] + +## 0.9.0.1 (November 21st, 2017) (Enterprise Only) + +IMPROVEMENTS: + + * auth/gcp: Support seal wrapping of configuration parameters + * auth/kubernetes: Support seal wrapping of configuration parameters + +BUG FIXES: + + * Fix an upgrade issue with some physical backends when migrating from legacy + HSM stored key support to the new Seal Wrap mechanism (Enterprise) + * mfa: Add the `-mfa` flag that was removed by mistake [[GH-4223](https://github.com/hashicorp/vault/pull/4223)] + +## 0.9.0 (November 14th, 2017) + +DEPRECATIONS/CHANGES: + + * HSM config parameter requirements: When using Vault with an HSM, a new + parameter is required: `hmac_key_label`. This performs a similar function to + `key_label` but for the HMAC key Vault will use. Vault will generate a + suitable key if this value is specified and `generate_key` is set to true. + * API HTTP client behavior: When calling `NewClient` the API no longer + modifies the provided client/transport. In particular this means it will no + longer enable redirection limiting and HTTP/2 support on custom clients. It + is suggested that if you want to make changes to an HTTP client that you use + one created by `DefaultConfig` as a starting point. + * AWS EC2 client nonce behavior: The client nonce generated by the backend + that gets returned along with the authentication response will be audited in + plaintext. If this is undesired, the clients can choose to supply a custom + nonce to the login endpoint. The custom nonce set by the client will, from + now on, not be returned with the authentication response, and hence will not + be audit logged. + * AWS Auth role options: The API will now error when trying to create or + update a role with the mutually-exclusive options + `disallow_reauthentication` and `allow_instance_migration`. + * SSH CA role read changes: When reading back a role from the `ssh` backend, + the TTL/max TTL values will now be an integer number of seconds rather than + a string. This better matches the API elsewhere in Vault. + * SSH role list changes: When listing roles from the `ssh` backend via the API, + the response data will additionally return a `key_info` map that will contain + a map of each key with a corresponding object containing the `key_type`. + * More granularity in audit logs: Audit request and response entries are still + in RFC3339 format but now have a granularity of nanoseconds. + * High availability related values have been moved out of the `storage` and + `ha_storage` stanzas, and into the top-level configuration. `redirect_addr` + has been renamed to `api_addr`. The stanzas still support accepting + HA-related values to maintain backward compatibility, but top-level values + will take precedence.
+ * A new `seal` stanza has been added to the configuration file, which is + optional and enables configuration of the seal type to use for additional + data protection, such as using HSM or Cloud KMS solutions to encrypt and + decrypt data. + +FEATURES: + + * **RSA Support for Transit Backend**: Transit backend can now generate RSA + keys which can be used for encryption and signing. [[GH-3489](https://github.com/hashicorp/vault/pull/3489)] + * **Identity System**: Now in open source and with significant enhancements, + Identity is an integrated system for understanding users across tokens and + enabling easier management of users directly and via groups. + * **External Groups in Identity**: Vault can now automatically assign users + and systems to groups in Identity based on their membership in external + groups. + * **Seal Wrap / FIPS 140-2 Compatibility (Enterprise)**: Vault can now take + advantage of FIPS 140-2-certified HSMs to ensure that Critical Security + Parameters are protected in a compliant fashion. Vault's implementation has + received a statement of compliance from Leidos. + * **Control Groups (Enterprise)**: Require multiple members of an Identity + group to authorize a requested action before it is allowed to run. + * **Cloud Auto-Unseal (Enterprise)**: Automatically unseal Vault using AWS KMS + and GCP CKMS. + * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel + to create extremely flexible access control policies -- even on + unauthenticated endpoints. + * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using auto-unsealing + functionality, the `rekey` operation is now supported; it uses recovery keys + to authorize the master key rekey. + * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using + Disaster Recovery replication, a token can be created that can be used to + authorize actions such as promotion and updating primary information, rather + than using recovery keys. + * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using + auto-unsealing, a request to unseal Vault can be triggered by a threshold of + recovery keys, rather than requiring the Vault process to be restarted. + * **UI Redesign (Enterprise)**: All new experience for the Vault Enterprise + UI. The look and feel has been completely redesigned to give users a better + experience and make managing secrets fast and easy. + * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend, + create and browse roles, and use them to sign keys or generate one-time + passwords. + * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS + backend via the Vault Enterprise UI. In addition, you can create roles, + browse them, and generate IAM credentials from them in the UI. + +IMPROVEMENTS: + + * api: Add ability to set custom headers on each call [[GH-3394](https://github.com/hashicorp/vault/pull/3394)] + * command/server: Add config option to disable requesting client certificates + [[GH-3373](https://github.com/hashicorp/vault/pull/3373)] + * auth/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] + * core: Disallow mounting underneath an existing path, not just over [[GH-2919](https://github.com/hashicorp/vault/pull/2919)] + * physical/file: Use `700` as permissions when creating directories. The files + themselves were `600` and are all encrypted, but this doesn't hurt.
+ * secret/aws: Add ability to use custom IAM/STS endpoints [[GH-3416](https://github.com/hashicorp/vault/pull/3416)] + * secret/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] + * secret/cassandra: Work around Cassandra ignoring consistency levels for a + user listing query [[GH-3469](https://github.com/hashicorp/vault/pull/3469)] + * secret/pki: Private keys can now be marshalled as PKCS#8 [[GH-3518](https://github.com/hashicorp/vault/pull/3518)] + * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON + arrays [[GH-3409](https://github.com/hashicorp/vault/pull/3409)] + * secret/ssh: Role TTL/max TTL can now be specified as either a string or an + integer [[GH-3507](https://github.com/hashicorp/vault/pull/3507)] + * secret/transit: Sign and verify operations now support a `none` hash + algorithm to allow signing/verifying pre-hashed data [[GH-3448](https://github.com/hashicorp/vault/pull/3448)] + * secret/database: Add the ability to glob allowed roles in the Database Backend [[GH-3387](https://github.com/hashicorp/vault/pull/3387)] + * ui (enterprise): Support for RSA keys in the transit backend + * ui (enterprise): Support for DR Operation Token generation, promoting, and + updating primary on DR Secondary clusters + +BUG FIXES: + + * api: Fix panic when setting a custom HTTP client but with a nil transport + [[GH-3435](https://github.com/hashicorp/vault/pull/3435)] [[GH-3437](https://github.com/hashicorp/vault/pull/3437)] + * api: Fix authenticating to the `cert` backend when the CA for the client cert is + not known to the server's listener [[GH-2946](https://github.com/hashicorp/vault/pull/2946)] + * auth/approle: Create role ID index during read if a role is missing one [[GH-3561](https://github.com/hashicorp/vault/pull/3561)] + * auth/aws: Don't allow mutually exclusive options [[GH-3291](https://github.com/hashicorp/vault/pull/3291)] + * auth/radius: Fix logging in under some circumstances [[GH-3461](https://github.com/hashicorp/vault/pull/3461)] + * core: Fix a memory leak when a connection would connect to the cluster port and + then go away [[GH-3513](https://github.com/hashicorp/vault/pull/3513)] + * core: Fix panic if a single-use token is used to step-down or seal [[GH-3497](https://github.com/hashicorp/vault/pull/3497)] + * core: Set rather than add headers to prevent some duplicated headers in + responses when requests were forwarded to the active node [[GH-3485](https://github.com/hashicorp/vault/pull/3485)] + * physical/etcd3: Fix some listing issues due to how etcd3 does prefix + matching [[GH-3406](https://github.com/hashicorp/vault/pull/3406)] + * physical/etcd3: Fix case where standbys can lose their etcd client lease + [[GH-3031](https://github.com/hashicorp/vault/pull/3031)] + * physical/file: Fix listing when underscores are the first component of a + path [[GH-3476](https://github.com/hashicorp/vault/pull/3476)] + * plugins: Allow response errors to be returned from backend plugins [[GH-3412](https://github.com/hashicorp/vault/pull/3412)] + * secret/transit: Fix panic if the length of the input ciphertext was less + than the expected nonce length [[GH-3521](https://github.com/hashicorp/vault/pull/3521)] + * ui (enterprise): Reinstate support for generic secret backends; this was + erroneously removed in a previous release
as a comma-delimited string or an + array if using JSON as API input; on read, policies will be returned as an + array; and the `default` policy will not be forcefully added to policies + saved in configurations. Please note that the `default` policy will continue + to be added to generated tokens; however, rather than backends adding + `default` to the given set of input policies (in some cases, and not in + others), the stored set will reflect the user-specified set. + * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the + endpoint would not modify the Issuer in the generated certificate, leaving + the output self-issued. Although theoretically valid, in practice crypto + stacks were unhappy validating paths containing such certs. As a result, + `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer + DN of the generated certificate. + * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely + useful in break-glass or support scenarios, it is also extremely dangerous. + As of now, a configuration file option `raw_storage_endpoint` must be set in + order to enable this API endpoint. Once set, the available functionality has + been enhanced slightly; it now supports listing and decrypting most of + Vault's core data structures, except for the encryption keyring itself. + * `generic` is now `kv`: To better reflect its actual use, the `generic` + backend is now `kv`. Using `generic` will still work for backwards + compatibility. + +FEATURES: + + * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault + using machine credentials. + * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts + can now authenticate to Vault using JWT tokens. + +IMPROVEMENTS: + + * configuration: Provide a config option to store Vault server's process ID + (PID) in a file [[GH-3321](https://github.com/hashicorp/vault/pull/3321)] + * mfa (Enterprise): Add the ability to use identity metadata in username format + * mfa/okta (Enterprise): Add support for configuring `base_url` for API calls + * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value + longer than the signing CA certificate's NotAfter value. [[GH-3325](https://github.com/hashicorp/vault/pull/3325)] + * sys/raw: Raw storage access is now disabled by default [[GH-3329](https://github.com/hashicorp/vault/pull/3329)] + +BUG FIXES: + + * auth/okta: Fix regression that removed the ability to set `base_url` [[GH-3313](https://github.com/hashicorp/vault/pull/3313)] + * core: Fix panic while loading leases at startup on ARM processors + [[GH-3314](https://github.com/hashicorp/vault/pull/3314)] + * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key + [[GH-3325](https://github.com/hashicorp/vault/pull/3325)] + +## 0.8.2.1 (September 11th, 2017) (Enterprise Only) + +BUG FIXES: + + * Fix an issue upgrading to 0.8.2 for Enterprise customers. + +## 0.8.2 (September 5th, 2017) + +SECURITY: + +* In prior versions of Vault, if authenticating via AWS IAM and requesting a + periodic token, the period was not properly respected. This could lead to + tokens expiring unexpectedly, or a token lifetime being longer than expected. + Upon token renewal with Vault 0.8.2 the period will be properly enforced. + +DEPRECATIONS/CHANGES: + +* `vault ssh` users should supply `-mode` and `-role` to reduce the number of + API calls. A future version of Vault will make these currently optional values + required.
Failure to supply `-mode` or `-role` will result in a warning. +* Vault plugins will first briefly run a restricted version of the plugin to + fetch metadata, and then lazy-load the plugin on first request to prevent + crash/deadlock of Vault during the unseal process. Plugins will need to be + built with the latest changes in order for them to run properly. + +FEATURES: + +* **Lazy Lease Loading**: On startup, Vault will now load leases from storage + in a lazy fashion (token checks and revocation/renewal requests still force + an immediate load). For larger installations this can significantly reduce + downtime when switching active nodes or bringing Vault up from cold start. +* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA + backend for authenticating to machines. It also supports remote host key + verification through the SSH CA backend, if enabled. +* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports + signing self-issued CA certs. This is useful when switching root CAs. + +IMPROVEMENTS: + + * audit/file: Allow specifying `stdout` as the `file_path` to log to standard + output [[GH-3235](https://github.com/hashicorp/vault/pull/3235)] + * auth/aws: Allow wildcards in `bound_iam_principal_arn` [[GH-3213](https://github.com/hashicorp/vault/pull/3213)] + * auth/okta: Compare groups case-insensitively since Okta is only + case-preserving [[GH-3240](https://github.com/hashicorp/vault/pull/3240)] + * auth/okta: Standardize Okta configuration APIs across backends [[GH-3245](https://github.com/hashicorp/vault/pull/3245)] + * cli: Add subcommand autocompletion that can be enabled with + `vault -autocomplete-install` [[GH-3223](https://github.com/hashicorp/vault/pull/3223)] + * cli: Add ability to handle wrapped responses when using `vault auth`. What + is output depends on the other given flags; see the help output for that + command for more information. 
[[GH-3263](https://github.com/hashicorp/vault/pull/3263)] + * core: TLS cipher suites used for cluster behavior can now be set via + `cluster_cipher_suites` in configuration [[GH-3228](https://github.com/hashicorp/vault/pull/3228)] + * core: The `plugin_name` can now either be specified directly as part of the + parameter or within the `config` object when mounting a secret or auth backend + via `sys/mounts/:path` or `sys/auth/:path` respectively [[GH-3202](https://github.com/hashicorp/vault/pull/3202)] + * core: It is now possible to update the `description` of a mount when + mount-tuning, although this must be done through the HTTP layer [[GH-3285](https://github.com/hashicorp/vault/pull/3285)] + * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and + retrying the operation [[GH-3269](https://github.com/hashicorp/vault/pull/3269)] + * secret/pki: TTLs can now be specified as a string or an integer number of + seconds [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] + * secret/pki: Self-issued certs can now be signed via + `pki/root/sign-self-issued` [[GH-3274](https://github.com/hashicorp/vault/pull/3274)] + * storage/gcp: Use application default credentials if they exist [[GH-3248](https://github.com/hashicorp/vault/pull/3248)] + +BUG FIXES: + + * auth/aws: Properly use role-set period values for IAM-derived token renewals + [[GH-3220](https://github.com/hashicorp/vault/pull/3220)] + * auth/okta: Fix updating organization/ttl/max_ttl after initial setting + [[GH-3236](https://github.com/hashicorp/vault/pull/3236)] + * core: Fix PROXY when underlying connection is TLS [[GH-3195](https://github.com/hashicorp/vault/pull/3195)] + * core: Policy-related commands would sometimes fail to act case-insensitively + [[GH-3210](https://github.com/hashicorp/vault/pull/3210)] + * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address + [[GH-3268](https://github.com/hashicorp/vault/pull/3268)] + * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. + [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] + * plugins: Skip mounting plugin-based secret and credential mounts when setting + up mounts if the plugin is no longer present in the catalog. [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] + +## 0.8.1 (August 16th, 2017) + +DEPRECATIONS/CHANGES: + + * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already + exists will now return a `204` instead of overwriting an existing root. If + you want to recreate the root, first run a delete operation on `pki/root` + (requires `sudo` capability), then generate it again. + +FEATURES: + + * **Oracle Secret Backend**: There is now an external plugin to support leased + credentials for Oracle databases (distributed separately). + * **GCP IAM Auth Backend**: There is now an authentication backend that allows + using GCP IAM credentials to retrieve Vault tokens. This is available as + both a plugin and built-in to Vault. + * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can + now be used for MFA with the new path-based MFA introduced in Vault + Enterprise 0.8. + * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports + specifying permitted DNS domains for CA certificates, allowing you to + narrowly scope the set of domains for which a CA can issue or sign child + certificates. 
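+
+   As a minimal sketch of how this might look (the `permitted_dns_domains`
+   parameter name and the values here are assumptions for illustration, not
+   taken from this changelog), a root CA constrained to one domain could be
+   generated with:
+
+   ```
+   vault write pki/root/generate/internal \
+       common_name="example.com Root CA" \
+       permitted_dns_domains="example.com,.example.com" \
+       ttl=87600h
+   ```
+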
+ * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to
+   reload using the `sys/plugins/reload/backend` endpoint and providing either
+   the plugin name or the mounts to reload.
+ * **Self-Reloading Plugins**: The plugin system will now attempt to reload a
+   crashed or stopped plugin, once per request.
+
+IMPROVEMENTS:
+
+ * auth/approle: Allow array input for policies in addition to comma-delimited
+   strings [[GH-3163](https://github.com/hashicorp/vault/pull/3163)]
+ * plugins: Send logs through Vault's logger rather than stdout [[GH-3142](https://github.com/hashicorp/vault/pull/3142)]
+ * secret/pki: Add `pki/root` delete operation [[GH-3165](https://github.com/hashicorp/vault/pull/3165)]
+ * secret/pki: Don't overwrite an existing root cert/key when calling generate
+   [[GH-3165](https://github.com/hashicorp/vault/pull/3165)]
+
+BUG FIXES:
+
+ * aws: Don't prefer a nil HTTP client over an existing one [[GH-3159](https://github.com/hashicorp/vault/pull/3159)]
+ * core: If there is an error when checking for create/update existence, return
+   500 instead of 400 [[GH-3162](https://github.com/hashicorp/vault/pull/3162)]
+ * secret/database: Avoid creating usernames that are too long for legacy MySQL
+   [[GH-3138](https://github.com/hashicorp/vault/pull/3138)]
+
+## 0.8.0 (August 9th, 2017)
+
+SECURITY:
+
+ * We've added a note to the docs about the way the GitHub auth backend works,
+   as it may not be readily apparent that GitHub personal access tokens, which
+   are used by the backend, can be used for unauthorized access if they are
+   stolen from third party services and access to Vault is public.
+
+DEPRECATIONS/CHANGES:
+
+ * Database Plugin Backends: Passwords generated for these backends now
+   enforce stricter password requirements, as opposed to the previous behavior
+   of returning a randomized UUID. Passwords are of length 20, and have the
+   characters `A1a-` prepended to ensure they meet the stricter requirements.
+   No regressions are expected from this change. (For database backends that
+   were previously substituting underscores for hyphens in passwords, this
+   will remain the case.)
+ * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`,
+   `sys/revoke-force` have been deprecated and relocated under `sys/leases`.
+   Additionally, the deprecated path `sys/revoke-force` now requires the `sudo`
+   capability.
+ * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint
+   is now unauthenticated. This allows introspection of the wrapping info by
+   clients that only have the wrapping token, without invalidating the token.
+   Validation functions/checks are still performed on the token.
+
+FEATURES:
+
+ * **Cassandra Storage**: Cassandra can now be used for Vault storage
+ * **CockroachDB Storage**: CockroachDB can now be used for Vault storage
+ * **CouchDB Storage**: CouchDB can now be used for Vault storage
+ * **SAP HANA Database Plugin**: The `databases` backend can now manage users
+   for SAP HANA databases
+ * **Plugin Backends**: Vault now supports running secret and auth backends as
+   plugins. Plugins can be mounted like normal backends and can be developed
+   independently from Vault (see the sketch after this feature list).
+ * **PROXY Protocol Support**: Vault listeners can now be configured to honor
+   PROXY protocol v1 information to allow passing real client IPs into Vault. A
+   list of authorized addresses (IPs or subnets) can be defined and
+   accept/reject behavior controlled.
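+
+   A hedged sketch of such a listener configuration (the
+   `proxy_protocol_behavior` and `proxy_protocol_authorized_addrs` parameter
+   names are assumptions for illustration; check the listener docs for the
+   authoritative names):
+
+   ```
+   # Append a PROXY-aware listener stanza to the server config.
+   cat >> vault.hcl <<'EOF'
+   listener "tcp" {
+     address                         = "0.0.0.0:8200"
+     proxy_protocol_behavior         = "allow_authorized"
+     proxy_protocol_authorized_addrs = "10.0.1.0/24"
+   }
+   EOF
+   ```
+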
+ * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI + now supports lookup and listing of leases and the associated actions from the + `sys/leases` endpoints in the API. These are located in the new top level + navigation item "Leases". + * **Filtered Mounts for Performance Mode Replication**: Whitelists or + blacklists of mounts can be defined per-secondary to control which mounts + are actually replicated to that secondary. This can allow targeted + replication of specific sets of data to specific geolocations/datacenters. + * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new + replication mode, Disaster Recovery (DR), that performs full real-time + replication (including tokens and leases) to DR secondaries. DR secondaries + cannot handle client requests, but can be promoted to primary as needed for + failover. + * **Manage New Replication Features in the Vault Enterprise UI**: Support for + Replication features in Vault Enterprise UI has expanded to include new DR + Replication mode and management of Filtered Mounts in Performance Replication + mode. + * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows + correlation of users across tokens. At present this is only used for MFA, + but will be the foundation of many other features going forward. + * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise + Only)**: A brand new MFA system built on top of Identity allows MFA + (currently Duo Push, Okta Push, and TOTP) for any authenticated path within + Vault. MFA methods can be configured centrally, and TOTP keys live within + the user's Identity information to allow using the same key across tokens. + Specific MFA method(s) required for any given path within Vault can be + specified in normal ACL path statements. 
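+
+As a rough sketch of the plugin workflow mentioned in the feature list above
+(the plugin name and SHA-256 value are placeholders, and the exact CLI shape
+is an assumption based on the 0.8-era commands):
+
+```
+# Register the plugin binary in the catalog by name and SHA-256 hash.
+vault write sys/plugins/catalog/my-secrets-plugin \
+    sha_256=<sha256-of-plugin-binary> \
+    command="my-secrets-plugin"
+
+# Mount the plugin-backed secret backend like any built-in backend.
+vault mount -path=mysecrets -plugin-name=my-secrets-plugin plugin
+```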
+ +IMPROVEMENTS: + + * api: Add client method for a secret renewer background process [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] + * api: Add `RenewTokenAsSelf` [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] + * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var or with a new API function [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] + * api/cli: Client will now attempt to look up SRV records for the given Vault + hostname [[GH-3035](https://github.com/hashicorp/vault/pull/3035)] + * audit/socket: Enhance reconnection logic and don't require the connection to + be established at unseal time [[GH-2934](https://github.com/hashicorp/vault/pull/2934)] + * audit/file: Opportunistically try re-opening the file on error [[GH-2999](https://github.com/hashicorp/vault/pull/2999)] + * auth/approle: Add role name to token metadata [[GH-2985](https://github.com/hashicorp/vault/pull/2985)] + * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [[GH-2915](https://github.com/hashicorp/vault/pull/2915)] + * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] + * command/auth: Add `-token-only` flag to `vault auth` that returns only the + token on stdout and does not store it via the token helper [[GH-2855](https://github.com/hashicorp/vault/pull/2855)] + * core: CORS allowed origins can now be configured [[GH-2021](https://github.com/hashicorp/vault/pull/2021)] + * core: Add metrics counters for audit log failures [[GH-2863](https://github.com/hashicorp/vault/pull/2863)] + * cors: Allow setting allowed headers via the API instead of always using + wildcard [[GH-3023](https://github.com/hashicorp/vault/pull/3023)] + * secret/ssh: Allow specifying the key ID format using template values for CA + type [[GH-2888](https://github.com/hashicorp/vault/pull/2888)] + * server: Add `tls_client_ca_file` option for specifying a CA file to use for + client certificate verification when `tls_require_and_verify_client_cert` is + enabled [[GH-3034](https://github.com/hashicorp/vault/pull/3034)] + * storage/cockroachdb: Add CockroachDB storage backend [[GH-2713](https://github.com/hashicorp/vault/pull/2713)] + * storage/couchdb: Add CouchDB storage backend [[GH-2880](https://github.com/hashicorp/vault/pull/2880)] + * storage/mssql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] + * storage/postgresql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] + * storage/postgresql: Improve listing speed [[GH-2945](https://github.com/hashicorp/vault/pull/2945)] + * storage/s3: More efficient paging when an object has a lot of subobjects + [[GH-2780](https://github.com/hashicorp/vault/pull/2780)] + * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [[GH-3084](https://github.com/hashicorp/vault/pull/3084)] + * sys/wrapping: Wrapped tokens now store the original request path of the data + [[GH-3100](https://github.com/hashicorp/vault/pull/3100)] + * telemetry: Add support for DogStatsD [[GH-2490](https://github.com/hashicorp/vault/pull/2490)] + +BUG FIXES: + + * api/health: Don't treat standby `429` codes as an error [[GH-2850](https://github.com/hashicorp/vault/pull/2850)] + * api/leases: Fix lease lookup returning lease properties at the top level + * audit: Fix panic when audit logging a read operation on an asymmetric + `transit` key [[GH-2958](https://github.com/hashicorp/vault/pull/2958)] + * 
auth/approle: Fix panic when secret and cidr list not provided in role
+   [[GH-3075](https://github.com/hashicorp/vault/pull/3075)]
+ * auth/aws: Look up proper account ID on token renew [[GH-3012](https://github.com/hashicorp/vault/pull/3012)]
+ * auth/aws: Store IAM header in all cases when it changes [[GH-3004](https://github.com/hashicorp/vault/pull/3004)]
+ * auth/ldap: Verify given certificate is PEM encoded instead of failing
+   silently [[GH-3016](https://github.com/hashicorp/vault/pull/3016)]
+ * auth/token: Don't allow using the same token ID twice when manually
+   specifying [[GH-2916](https://github.com/hashicorp/vault/pull/2916)]
+ * cli: Fix issue with parsing keys that start with special characters [[GH-2998](https://github.com/hashicorp/vault/pull/2998)]
+ * core: Relocated `sys/leases/renew` returns same payload as original
+   `sys/leases` endpoint [[GH-2891](https://github.com/hashicorp/vault/pull/2891)]
+ * secret/ssh: Fix panic when signing with incorrect key type [[GH-3072](https://github.com/hashicorp/vault/pull/3072)]
+ * secret/totp: Ensure codes can only be used once. This makes some automated
+   workflows harder but complies with the RFC. [[GH-2908](https://github.com/hashicorp/vault/pull/2908)]
+ * secret/transit: Fix locking when creating a key with unsupported options
+   [[GH-2974](https://github.com/hashicorp/vault/pull/2974)]
+
+## 0.7.3 (June 7th, 2017)
+
+SECURITY:
+
+ * Cert auth backend now checks validity of individual certificates: In
+   previous versions of Vault, validity (e.g. expiration) of individual leaf
+   certificates added for authentication was not checked. This was done to make
+   it easier for administrators to control lifecycles of individual
+   certificates added to the backend, i.e. the authentication material being
+   checked was access to that specific certificate's private key rather than
+   all private keys signed by a CA. However, this behavior is often unexpected
+   and as a result can lead to insecure deployments, so we are now validating
+   these certificates as well.
+ * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2
+   caused the HMACing of any App-ID information stored in paths (including
+   actual app-IDs and user-IDs) to be unsalted and written as-is from the API.
+   In 0.7.3 any such paths will be automatically changed to salted versions on
+   access (e.g. login or read); however, if you created new app-IDs or user-IDs
+   in 0.7.1/0.7.2, you may want to consider whether any users with access to
+   Vault's underlying data store may have intercepted these values, and
+   revoke/roll them.
+
+DEPRECATIONS/CHANGES:
+
+ * Step-Down is Forwarded: When a step-down is issued against a non-active node
+   in an HA cluster, it will now forward the request to the active node.
+
+FEATURES:
+
+ * **ed25519 Signing/Verification in Transit with Key Derivation**: The
+   `transit` backend now supports generating
+   [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification
+   functionality. These keys support derivation, allowing you to modify the
+   actual encryption key used by supplying a `context` value.
+ * **Key Version Specification for Encryption in Transit**: You can now specify
+   the version of a key you wish to use to generate a signature, ciphertext, or
+   HMAC. This can be controlled by the `min_encryption_version` key
+   configuration property.
+ * **Replication Primary Discovery (Enterprise)**: Replication primaries will
+   now advertise the addresses of their local HA cluster members to replication
+   secondaries.
This helps recovery if the primary active node goes down and + neither service discovery nor load balancers are in use to steer clients. + +IMPROVEMENTS: + + * api/health: Add Sys().Health() [[GH-2805](https://github.com/hashicorp/vault/pull/2805)] + * audit: Add auth information to requests that error out [[GH-2754](https://github.com/hashicorp/vault/pull/2754)] + * command/auth: Add `-no-store` option that prevents the auth command from + storing the returned token into the configured token helper [[GH-2809](https://github.com/hashicorp/vault/pull/2809)] + * core/forwarding: Request forwarding now heartbeats to prevent unused + connections from being terminated by firewalls or proxies + * plugins/databases: Add MongoDB as an internal database plugin [[GH-2698](https://github.com/hashicorp/vault/pull/2698)] + * storage/dynamodb: Add a method for checking the existence of children, + speeding up deletion operations in the DynamoDB storage backend [[GH-2722](https://github.com/hashicorp/vault/pull/2722)] + * storage/mysql: Add max_parallel parameter to MySQL backend [[GH-2760](https://github.com/hashicorp/vault/pull/2760)] + * secret/databases: Support listing connections [[GH-2823](https://github.com/hashicorp/vault/pull/2823)] + * secret/databases: Support custom renewal statements in Postgres database + plugin [[GH-2788](https://github.com/hashicorp/vault/pull/2788)] + * secret/databases: Use the role name as part of generated credentials + [[GH-2812](https://github.com/hashicorp/vault/pull/2812)] + * ui (Enterprise): Transit key and secret browsing UI handle large lists better + * ui (Enterprise): root tokens are no longer persisted + * ui (Enterprise): support for mounting Database and TOTP secret backends + +BUG FIXES: + + * auth/app-id: Fix regression causing loading of salts to be skipped + * auth/aws: Improve EC2 describe instances performance [[GH-2766](https://github.com/hashicorp/vault/pull/2766)] + * auth/aws: Fix lookup of some instance profile ARNs [[GH-2802](https://github.com/hashicorp/vault/pull/2802)] + * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various + points (e.g. 
renewal time) more robust [[GH-2814](https://github.com/hashicorp/vault/pull/2814)]
+ * auth/aws: Properly honor configured period when using IAM authentication
+   [[GH-2825](https://github.com/hashicorp/vault/pull/2825)]
+ * auth/aws: Check that a bound IAM principal is not empty (in the current
+   state of the role) before requiring it match the previously authenticated
+   client [[GH-2781](https://github.com/hashicorp/vault/pull/2781)]
+ * auth/cert: Fix panic on renewal [[GH-2749](https://github.com/hashicorp/vault/pull/2749)]
+ * auth/cert: Certificate verification for non-CA certs [[GH-2761](https://github.com/hashicorp/vault/pull/2761)]
+ * core/acl: Prevent race condition when compiling ACLs in some scenarios
+   [[GH-2826](https://github.com/hashicorp/vault/pull/2826)]
+ * secret/database: Increase wrapping token TTL; in a loaded scenario it could
+   be too short
+ * secret/generic: Allow integers to be set as the value of the `ttl` field, as
+   the documentation claims is supported [[GH-2699](https://github.com/hashicorp/vault/pull/2699)]
+ * secret/ssh: Added host key callback to ssh client config [[GH-2752](https://github.com/hashicorp/vault/pull/2752)]
+ * storage/s3: Avoid a panic when some bad data is returned [[GH-2785](https://github.com/hashicorp/vault/pull/2785)]
+ * storage/dynamodb: Fix list functions working improperly on Windows [[GH-2789](https://github.com/hashicorp/vault/pull/2789)]
+ * storage/file: Don't leak file descriptors in some error cases
+ * storage/swift: Fix pre-v3 project/tenant name reading [[GH-2803](https://github.com/hashicorp/vault/pull/2803)]
+
+## 0.7.2 (May 8th, 2017)
+
+BUG FIXES:
+
+ * audit: Fix auditing entries containing certain kinds of time values
+   [[GH-2689](https://github.com/hashicorp/vault/pull/2689)]
+
+## 0.7.1 (May 5th, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * LDAP Auth Backend: Group membership queries will now run as the `binddn`
+   user when `binddn`/`bindpass` are configured, rather than as the
+   authenticating user as was the case previously.
+
+FEATURES:
+
+ * **AWS IAM Authentication**: IAM principals can get Vault tokens
+   automatically, opening AWS-based authentication to users, ECS containers,
+   Lambda instances, and more. Signed client identity information retrieved
+   using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS
+   service before issuing a Vault token. This backend is unified with the
+   `aws-ec2` authentication backend under the name `aws`, and allows additional
+   EC2-related restrictions to be applied during the IAM authentication; the
+   previous EC2 behavior is also still available. [[GH-2441](https://github.com/hashicorp/vault/pull/2441)]
+ * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your
+   Vault physical data store [[GH-2546](https://github.com/hashicorp/vault/pull/2546)]
+ * **Lease Listing and Lookup**: You can now introspect a lease to get its
+   creation and expiration properties via `sys/leases/lookup`; with `sudo`
+   capability you can also list leases for lookup, renewal, or revocation via
+   that endpoint. Various lease functions (renew, revoke, revoke-prefix,
+   revoke-force) have also been relocated to `sys/leases/`, but they also work
+   at the old paths for compatibility. Reading (but not listing) leases via
+   `sys/leases/lookup` is now a part of the current `default` policy.
[[GH-2650](https://github.com/hashicorp/vault/pull/2650)] + * **TOTP Secret Backend**: You can now store multi-factor authentication keys + in Vault and use the API to retrieve time-based one-time use passwords on + demand. The backend can also be used to generate a new key and validate + passwords generated by that key. [[GH-2492](https://github.com/hashicorp/vault/pull/2492)] + * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend + combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra + backends. It also provides a plugin interface for extendability through + custom databases. [[GH-2200](https://github.com/hashicorp/vault/pull/2200)] + +IMPROVEMENTS: + + * auth/cert: Support for constraints on subject Common Name and DNS/email + Subject Alternate Names in certificates [[GH-2595](https://github.com/hashicorp/vault/pull/2595)] + * auth/ldap: Use the binding credentials to search group membership rather + than the user credentials [[GH-2534](https://github.com/hashicorp/vault/pull/2534)] + * cli/revoke: Add `-self` option to allow revoking the currently active token + [[GH-2596](https://github.com/hashicorp/vault/pull/2596)] + * core: Randomize x coordinate in Shamir shares [[GH-2621](https://github.com/hashicorp/vault/pull/2621)] + * replication: Fix a bug when enabling `approle` on a primary before + secondaries were connected + * replication: Add heartbeating to ensure firewalls don't kill connections to + primaries + * secret/pki: Add `no_store` option that allows certificates to be issued + without being stored. This removes the ability to look up and/or add to a + CRL but helps with scaling to very large numbers of certificates. [[GH-2565](https://github.com/hashicorp/vault/pull/2565)] + * secret/pki: If used with a role parameter, the `sign-verbatim/` + endpoint honors the values of `generate_lease`, `no_store`, `ttl` and + `max_ttl` from the given role [[GH-2593](https://github.com/hashicorp/vault/pull/2593)] + * secret/pki: Add role parameter `allow_glob_domains` that enables defining + names in `allowed_domains` containing `*` glob patterns [[GH-2517](https://github.com/hashicorp/vault/pull/2517)] + * secret/pki: Update certificate storage to not use characters that are not + supported on some filesystems [[GH-2575](https://github.com/hashicorp/vault/pull/2575)] + * storage/etcd3: Add `discovery_srv` option to query for SRV records to find + servers [[GH-2521](https://github.com/hashicorp/vault/pull/2521)] + * storage/s3: Support `max_parallel` option to limit concurrent outstanding + requests [[GH-2466](https://github.com/hashicorp/vault/pull/2466)] + * storage/s3: Use pooled transport for http client [[GH-2481](https://github.com/hashicorp/vault/pull/2481)] + * storage/swift: Allow domain values for V3 authentication [[GH-2554](https://github.com/hashicorp/vault/pull/2554)] + * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more + cleanup cases [[GH-2452](https://github.com/hashicorp/vault/pull/2452)] + +BUG FIXES: + + * api: Respect a configured path in Vault's address [[GH-2588](https://github.com/hashicorp/vault/pull/2588)] + * auth/aws-ec2: New bounds added as criteria to allow role creation [[GH-2600](https://github.com/hashicorp/vault/pull/2600)] + * auth/ldap: Don't lowercase groups attached to users [[GH-2613](https://github.com/hashicorp/vault/pull/2613)] + * cli: Don't panic if `vault write` is used with the `force` flag but no path + [[GH-2674](https://github.com/hashicorp/vault/pull/2674)] + * 
core: Help operations now request forwarding since standbys may not have the
+   appropriate info [[GH-2677](https://github.com/hashicorp/vault/pull/2677)]
+ * replication: Fix enabling secondaries when certain mounts already existed on
+   the primary
+ * secret/mssql: Update mssql driver to support queries with colons [[GH-2610](https://github.com/hashicorp/vault/pull/2610)]
+ * secret/pki: Don't lowercase O/OU values in certs [[GH-2555](https://github.com/hashicorp/vault/pull/2555)]
+ * secret/pki: Don't attempt to validate IP SANs if none are provided [[GH-2574](https://github.com/hashicorp/vault/pull/2574)]
+ * secret/ssh: Don't automatically lowercase principals in issued SSH certs
+   [[GH-2591](https://github.com/hashicorp/vault/pull/2591)]
+ * storage/consul: Properly handle state events rather than timing out
+   [[GH-2548](https://github.com/hashicorp/vault/pull/2548)]
+ * storage/etcd3: Ensure locks are released if client is improperly shut down
+   [[GH-2526](https://github.com/hashicorp/vault/pull/2526)]
+
+## 0.7.0 (March 21st, 2017)
+
+SECURITY:
+
+ * Common name not being validated when `exclude_cn_from_sans` option used in
+   `pki` backend: When using a role in the `pki` backend that specified the
+   `exclude_cn_from_sans` option, the common name would not then be properly
+   validated against the role's constraints. This has been fixed. We recommend
+   that any users of this feature upgrade to 0.7 as soon as feasible.
+
+DEPRECATIONS/CHANGES:
+
+ * List Operations Always Use Trailing Slash: Any list operation, whether via
+   the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to
+   have a trailing slash. This makes policy writing more predictable, as it
+   means requests will no longer succeed or fail depending on which client or
+   HTTP verb is used. However, it also means that policies allowing `list`
+   capability must be carefully checked to ensure that they contain a trailing
+   slash; some policies may need to be split into multiple stanzas to
+   accommodate this.
+ * PKI Defaults to Unleased Certificates: When issuing certificates from the
+   PKI backend, by default, no leases will be issued. If you want to manually
+   revoke a certificate, its serial number can be used with the `pki/revoke`
+   endpoint. Issuing leases is still possible by enabling the `generate_lease`
+   toggle in PKI role entries (this will default to `true` for upgrades, to
+   keep existing behavior), which will allow using lease IDs to revoke
+   certificates. For installations issuing large numbers of certificates (tens
+   to hundreds of thousands, or millions), this will significantly improve
+   Vault startup time since leases associated with these certificates will not
+   have to be loaded; however note that it also means that revocation of a
+   token used to issue certificates will no longer add these certificates to a
+   CRL. If this behavior is desired or needed, consider keeping leases enabled
+   and ensuring lifetimes are reasonable, and issue long-lived certificates via
+   a different role with leases disabled.
+
+FEATURES:
+
+ * **Replication (Enterprise)**: Vault Enterprise now has support for creating
+   a multi-datacenter replication set between clusters. The current replication
+   offering is based on an asynchronous primary/secondary (1:N) model that
+   replicates static data while keeping dynamic data (leases, tokens)
+   cluster-local, focusing on horizontal scaling for high-throughput and
+   high-fanout deployments.
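+
+   A minimal sketch of wiring up such a replication set (Enterprise-only
+   endpoints; the paths and fields shown are assumptions for illustration):
+
+   ```
+   # On the primary cluster:
+   vault write -f sys/replication/primary/enable
+   vault write sys/replication/primary/secondary-token id=dc2-secondary
+
+   # On the secondary, using the activation token returned above:
+   vault write sys/replication/secondary/enable token=<activation-token>
+   ```
+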
+ * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault + Enterprise UI now supports looking up and rotating response wrapping tokens, + as well as creating tokens with arbitrary values inside. It also now + supports replication functionality, enabling the configuration of a + replication set in the UI. + * **Expanded Access Control Policies**: Access control policies can now + specify allowed and denied parameters -- and, optionally, their values -- to + control what a client can and cannot submit during an API call. Policies can + also specify minimum/maximum response wrapping TTLs to both enforce the use + of response wrapping and control the duration of resultant wrapping tokens. + See the [policies concepts + page](https://www.vaultproject.io/docs/concepts/policies.html) for more + information. + * **SSH Backend As Certificate Authority**: The SSH backend can now be + configured to sign host and user certificates. Each mount of the backend + acts as an independent signing authority. The CA key pair can be configured + for each mount and the public key is accessible via an unauthenticated API + call; additionally, the backend can generate a public/private key pair for + you. We recommend using separate mounts for signing host and user + certificates. + +IMPROVEMENTS: + + * api/request: Passing username and password information in API request + [GH-2469] + * audit: Logging the token's use count with authentication response and + logging the remaining uses of the client token with request [GH-2437] + * auth/approle: Support for restricting the number of uses on the tokens + issued [GH-2435] + * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID, + Subnet ID and Region [GH-2407] + * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the + username if not explicitly set on the command line when authenticating + [GH-2154] + * audit: Support adding a configurable prefix (such as `@cee`) before each + line [GH-2359] + * core: Canonicalize list operations to use a trailing slash [GH-2390] + * core: Add option to disable caching on a per-mount level [GH-2455] + * core: Add ability to require valid client certs in listener config [GH-2457] + * physical/dynamodb: Implement a session timeout to avoid having to use + recovery mode in the case of an unclean shutdown, which makes HA much safer + [GH-2141] + * secret/pki: O (Organization) values can now be set to role-defined values + for issued/signed certificates [GH-2369] + * secret/pki: Certificates issued/signed from PKI backend do not generate + leases by default [GH-2403] + * secret/pki: When using DER format, still return the private key type + [GH-2405] + * secret/pki: Add an intermediate to the CA chain even if it lacks an + authority key ID [GH-2465] + * secret/pki: Add role option to use CSR SANs [GH-2489] + * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208] + * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint + and also return it when CA key pair is generated [GH-2483] + +BUG FIXES: + + * audit: When auditing headers use case-insensitive comparisons [GH-2362] + * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374] + * auth/okta: Fix panic if user had no local groups and/or policies set + [GH-2367] + * command/server: Fix parsing of redirect address when port is not mentioned + [GH-2354] + * physical/postgresql: Fix listing returning incorrect results if there were + multiple levels of children [GH-2393] + +## 0.6.5 
(February 7th, 2017)
+
+FEATURES:
+
+ * **Okta Authentication**: A new Okta authentication backend allows you to use
+   Okta usernames and passwords to authenticate to Vault. If provided with an
+   appropriate Okta API token, group membership can be queried to assign
+   policies; users and groups can be defined locally as well.
+ * **RADIUS Authentication**: A new RADIUS authentication backend allows using
+   a RADIUS server to authenticate to Vault. Policies can be configured for
+   specific users or for any authenticated user.
+ * **Exportable Transit Keys**: Keys in `transit` can now be marked as
+   `exportable` at creation time. This allows a properly ACL'd user to retrieve
+   the associated signing key, encryption key, or HMAC key. The `exportable`
+   value is returned on a key policy read and cannot be changed, so if a key is
+   marked `exportable` it will always be exportable, and if it is not it will
+   never be exportable.
+ * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations
+   in the transit backend now support processing multiple input items in one
+   call, returning the output of each item in the response.
+ * **Configurable Audited HTTP Headers**: You can now specify headers that you
+   want to have included in each audit entry, along with whether each header
+   should be HMAC'd or kept plaintext. This can be useful for adding additional
+   client or network metadata to the audit logs.
+ * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
+   backend, allowing creation, viewing and editing of named keys as well as using
+   those keys to perform supported transit operations directly in the UI.
+ * **Socket Audit Backend**: A new socket audit backend allows audit logs to be sent
+   through TCP, UDP, or UNIX sockets.
+
+IMPROVEMENTS:
+
+ * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148]
+ * auth/aws-ec2: Support issuing periodic tokens [GH-2324]
+ * auth/github: Support listing teams and users [GH-2261]
+ * auth/ldap: Support adding policies to local users directly, in addition to
+   local groups [GH-2152]
+ * command/server: Add ability to select and prefer server cipher suites
+   [GH-2293]
+ * core: Add a nonce to unseal operations as a check (useful mostly for
+   support, not as a security principle) [GH-2276]
+ * duo: Added ability to supply extra context to Duo pushes [GH-2118]
+ * physical/consul: Add option for setting consistency mode on Consul gets
+   [GH-2282]
+ * physical/etcd: Full v3 API support; code will autodetect which API version
+   to use. The v3 code path is significantly less complicated and may be much
+   more stable. [GH-2168]
+ * secret/pki: Allow specifying OU entries in generated certificate subjects
+   [GH-2251]
+ * secret mount ui (Enterprise): the secret mount list now shows all mounted
+   backends even if the UI cannot browse them. Additional backends can now be
+   mounted from the UI as well.
+
+BUG FIXES:
+
+ * auth/token: Fix regression in 0.6.4 where using token store roles as a
+   blacklist (with only `disallowed_policies` set) would not work in most
+   circumstances [GH-2286]
+ * physical/s3: Page responses in client so list doesn't truncate [GH-2224]
+ * secret/cassandra: Stop a connection leak that could occur on active node
+   failover [GH-2313]
+ * secret/pki: When using `sign-verbatim`, don't require a role and use the
+   CSR's common name [GH-2243]
+
+## 0.6.4 (December 16, 2016)
+
+SECURITY:
+
+Further details about these security issues can be found in the 0.6.4 upgrade
+guide.
+
+ * `default` Policy Privilege Escalation: If a parent token did not have the
+   `default` policy attached, it could still create children with
+   the `default` policy. This is no longer allowed (unless the parent has
+   `sudo` capability for the creation path). In most cases this is low severity
+   since the access grants in the `default` policy are meant to be access
+   grants that are acceptable for all tokens to have.
+ * Leases Not Expired When Limited Use Token Runs Out of Uses: When using
+   limited-use tokens to create leased secrets, if the limited-use token was
+   revoked due to running out of uses (rather than due to TTL expiration or
+   explicit revocation) it would fail to revoke the leased secrets. These
+   secrets would still be revoked when their TTL expired, limiting the severity
+   of this issue. An endpoint has been added (`auth/token/tidy`) that can
+   perform housekeeping tasks on the token store; one of its tasks can detect
+   this situation and revoke the associated leases.
+
+FEATURES:
+
+ * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing,
+   creating, and editing policies.
+
+IMPROVEMENTS:
+
+ * http: Vault now sets a `no-store` cache control header to make it more
+   secure in setups that are not end-to-end encrypted [GH-2183]
+
+BUG FIXES:
+
+ * auth/ldap: Don't panic if dialing returns an error and starttls is enabled;
+   instead, return the error [GH-2188]
+ * ui (Enterprise): Submitting an unseal key now properly resets the
+   form so a browser refresh isn't required to continue.
+
+## 0.6.3 (December 6, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Request size limitation: A maximum request size of 32MB is imposed to
+   prevent a denial of service attack with arbitrarily large requests [GH-2108]
+ * LDAP denies passwordless binds by default: In new LDAP mounts, or when
+   existing LDAP mounts are rewritten, passwordless binds will be denied by
+   default. The new `deny_null_bind` parameter can be set to `false` to allow
+   these. [GH-2103]
+ * Any audit backend activated satisfies conditions: Previously, when a new
+   Vault node was taking over service in an HA cluster, all audit backends were
+   required to be loaded successfully to take over active duty. This behavior
+   now matches the behavior of the audit logging system itself: at least one
+   audit backend must successfully be loaded. The server log contains an error
+   when this occurs. This helps keep a Vault HA cluster working when there is a
+   misconfiguration on a standby node. [GH-2083]
+
+FEATURES:
+
+ * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI
+   that offers access to a number of features, including init/unsealing/sealing,
+   authentication via userpass or LDAP, and K/V reading/writing. The capability
+   set of the UI will be expanding rapidly in further releases. To enable it,
+   set `ui = true` in the top level of Vault's configuration file and point a
+   web browser at your Vault address.
+ * **Google Cloud Storage Physical Backend**: You can now use GCS for storing
+   Vault data [GH-2099]
+
+IMPROVEMENTS:
+
+ * auth/github: Policies can now be assigned to users as well as to teams
+   [GH-2079]
+ * cli: Set the number of retries on 500 down to 0 by default (no retrying).
+   The pause while retries happen can be very confusing to users who haven't
+   explicitly requested them. With request forwarding the need for this is
+   lessened anyway.
[GH-2093]
+ * core: Response wrapping is now allowed to be specified by backend responses
+   (requires backends gaining support) [GH-2088]
+ * physical/consul: When announcing service, use the scheme of the Vault server
+   rather than the Consul client [GH-2146]
+ * secret/consul: Added listing functionality to roles [GH-2065]
+ * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to
+   enable customization of user revocation SQL statements [GH-2033]
+ * secret/transit: Add listing of keys [GH-1987]
+
+BUG FIXES:
+
+ * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with
+   Vault 0.6.1 and older [GH-2014]
+ * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077]
+ * auth/approle: Properly create the index for the role_id [GH-2004]
+ * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the
+   instance-profile ARN [GH-2035]
+ * auth/ldap: Avoid leaking connections on login [GH-2130]
+ * command/path-help: Use the actual error generated by Vault rather than
+   always using 500 when there is a path help error [GH-2153]
+ * command/ssh: Use temporary file for identity and ensure its deletion before
+   the command returns [GH-2016]
+ * cli: Fix error printing values with `-field` if the values contained
+   formatting directives [GH-2109]
+ * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120]
+ * core: Fix bug where a failure to come up as active node (e.g. if an audit
+   backend failed) could lead to deadlock [GH-2083]
+ * physical/mysql: Fix potential crash during setup due to a query failure
+   [GH-2105]
+ * secret/consul: Fix panic on user error [GH-2145]
+
+## 0.6.2 (October 5, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Convergent Encryption v2: New keys in `transit` using convergent mode will
+   use a new nonce derivation mechanism rather than require the user to supply
+   a nonce. While not explicitly increasing security, it minimizes the
+   likelihood that a user will use the mode improperly and impact the security
+   of their keys. Keys in convergent mode that were created in v0.6.1 will
+   continue to work with the same mechanism (user-supplied nonce).
+ * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the
+   `etcd` storage backend now requires that `ha_enabled` be explicitly
+   specified in the configuration file. The backend currently has known broken
+   HA behavior, so this flag discourages use by default without explicitly
+   enabling it. If you are using this functionality, when upgrading, you should
+   set `ha_enabled` to `"true"` *before* starting the new versions of Vault.
+ * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault
+   the default was 30 days, but moving it to 32 days allows some operations
+   (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron
+   job.
+ * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are
+   no longer part of request URLs. The GET and DELETE operations have been moved
+   to new endpoints (`/lookup` and `/destroy`), which consume the input from
+   the body rather than the URL.
+ * AppRole requires at least one constraint: previously it was sufficient to
+   turn off all AppRole authentication constraints (secret ID, CIDR block) and
+   use the role ID only. It is now required that at least one additional
+   constraint is enabled. Existing roles are unaffected, but any new roles or
+   updated roles will require this.
+ * Reading wrapped responses from `cubbyhole/response` is deprecated.
The
+   `sys/wrapping/unwrap` endpoint should be used instead as it provides
+   additional security, auditing, and other benefits. The ability to read
+   directly will be removed in a future release.
+ * Request Forwarding is now on by default: in 0.6.1 this required toggling on,
+   but is now enabled by default. This can be disabled via the
+   `"disable_clustering"` parameter in Vault's
+   [config](https://www.vaultproject.io/docs/config/index.html), or per-request
+   with the `X-Vault-No-Request-Forwarding` header.
+ * In prior versions a bug caused the `bound_iam_role_arn` value in the
+   `aws-ec2` authentication backend to actually use the instance profile ARN.
+   This has been corrected, but as a result there is a behavior change. To
+   match using the instance profile ARN, a new parameter
+   `bound_iam_instance_profile_arn` has been added. Existing roles will
+   automatically transfer the value over to the correct parameter, but the next
+   time the role is updated, the new meanings will take effect.
+
+FEATURES:
+
+ * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an
+   approle can now specify a list of CIDR blocks from which requests to
+   generate secret IDs must originate. If an approle already has CIDR
+   restrictions specified, the CIDR restrictions on the secret ID should be a
+   subset of those specified on the role [GH-1910]
+ * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root
+   token created at initialization time can now be PGP encrypted [GH-1883]
+ * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows,
+   when a CA cert is being supplied as a signed root or intermediate, a trust
+   chain of arbitrary length. The chain is returned as a parameter at
+   certificate issue/sign time and is retrievable independently as well.
+   [GH-1694]
+ * **Response Wrapping Enhancements**: There are new endpoints to look up
+   response wrapped token parameters; wrap arbitrary values; rotate wrapping
+   tokens; and unwrap with enhanced validation. In addition, list operations
+   can now be response-wrapped.
[GH-1927]
+ * **Transit Features**: The `transit` backend now supports generating random
+   bytes and SHA sums; HMACs; and signing and verification functionality using
+   EC keys (P-256 curve)
+
+IMPROVEMENTS:
+
+ * api: Return error when an invalid (as opposed to incorrect) unseal key is
+   submitted, rather than ignoring it [GH-1782]
+ * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834]
+ * api: Rekey operation now redirects from standbys to master [GH-1862]
+ * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and
+   re-open the log file, making it easier to rotate audit logs [GH-1953]
+ * auth/aws-ec2: EC2 instances can now authenticate by presenting the identity
+   document and its SHA256 RSA digest [GH-1961]
+ * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a
+   prefix match instead of an exact match [GH-1943]
+ * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to
+   refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn`
+   to refer to IAM role ARN instead of the instance profile ARN [GH-1913]
+ * auth/aws-ec2: Backend generates the nonce by default and clients can
+   explicitly disable reauthentication by setting empty nonce [GH-1889]
+ * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806]
+ * command/format: The `format` flag on select CLI commands takes `yml` as an
+   alias for `yaml` [GH-1899]
+ * core: Allow the size of the read cache to be set via the config file, and
+   change the default value to 1MB (from 32KB) [GH-1784]
+ * core: Allow single and two-character path parameters for most places
+   [GH-1811]
+ * core: Allow list operations to be response-wrapped [GH-1814]
+ * core: Provide better protection against timing attacks in Shamir code
+   [GH-1877]
+ * core: Unmounting/disabling backends no longer returns an error if the mount
+   didn't exist. This is in line with elsewhere in Vault's API, where `DELETE`
+   is an idempotent operation.
[GH-1903]
+ * credential/approle: At least one constraint is required to be enabled while
+   creating and updating a role [GH-1882]
+ * secret/cassandra: Added consistency level for use with roles [GH-1931]
+ * secret/mysql: SQL for revoking a user can be configured on the role [GH-1914]
+ * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new
+   keys [GH-1812]
+ * secret/transit: Empty plaintext values are now allowed [GH-1874]
+
+BUG FIXES:
+
+ * audit: Fix panic being caused by some values logging as underlying Go types
+   instead of formatted strings [GH-1912]
+ * auth/approle: Fixed panic when deleting an approle that doesn't exist [GH-1920]
+ * auth/approle: No longer allow secret IDs and secret ID accessors to be
+   logged in plaintext in audit logs [GH-1947]
+ * auth/aws-ec2: Allow authentication if the underlying host is in a bad state
+   but the instance is running [GH-1884]
+ * auth/token: Fixed metadata being omitted from the token lookup response by
+   gracefully handling token entry upgrades [GH-1924]
+ * cli: Don't error on newline in token file [GH-1774]
+ * core: Pass back content-type header for forwarded requests [GH-1791]
+ * core: Fix panic if the same key was given twice to `generate-root` [GH-1827]
+ * core: Fix potential deadlock on unmount/remount [GH-1793]
+ * physical/file: Remove empty directories from the `file` storage backend [GH-1821]
+ * physical/zookeeper: Remove empty directories from the `zookeeper` storage
+   backend and add a fix to the `file` storage backend's logic [GH-1964]
+ * secret/aws: Added update operation to `aws/sts` path to consider `ttl`
+   parameter [39b75c6]
+ * secret/aws: Mark STS secrets as non-renewable [GH-1804]
+ * secret/cassandra: Properly store session for re-use [GH-1802]
+ * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781]
+ There are no plans to remove it, but we encourage using AppRole whenever + possible, as it offers enhanced functionality and can accommodate many more + types of authentication paradigms. + +FEATURES: + + * **AppRole Authentication Backend**: The `approle` backend is a + machine-oriented authentication backend that provides a similar concept to + App-ID while adding many missing features, including a pull model that + allows for the backend to generate authentication credentials rather than + requiring operators or other systems to push credentials in. It should be + useful in many more situations than App-ID. The inclusion of this backend + deprecates App-ID. [GH-1426] + * **Request Forwarding**: Vault servers can now forward requests to each other + rather than redirecting clients. This feature is off by default in 0.6.1 but + will be on by default in the next release. See the [HA concepts + page](https://www.vaultproject.io/docs/concepts/ha.html) for information on + enabling and configuring it. [GH-443] + * **Convergent Encryption in `Transit`**: The `transit` backend now supports a + convergent encryption mode where the same plaintext will produce the same + ciphertext. Although very useful in some situations, this has potential + security implications, which are mostly mitigated by requiring the use of + key derivation when convergent encryption is enabled. See [the `transit` + backend + documentation](https://www.vaultproject.io/docs/secrets/transit/index.html) + for more details. [GH-1537] + * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates + to define group filters, providing the capability to support some + directories that could not easily be supported before (especially specific + Active Directory setups with nested groups). [GH-1388] + * **Key Usage Control in `PKI`**: Issued certificates from roles created or + modified after upgrading contain a set of default key usages for increased + compatibility with OpenVPN and some other software. This set can be changed + when writing a role definition. Existing roles are unaffected. [GH-1552] + * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx` + error code will now retry after a backoff. The maximum total number of + retries (including disabling this functionality) can be set with an + environment variable. See the [environment variable + documentation](https://www.vaultproject.io/docs/commands/environment.html) + for more details. [GH-1594] + * **Service Discovery in `vault init`**: The new `-auto` option on `vault init` + will perform service discovery using Consul. When only one node is discovered, + it will be initialized and when more than one node is discovered, they will + be output for easy selection. See `vault init --help` for more details. [GH-1642] + * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database + credentials based on configured roles. Sponsored by + [CommerceHub](http://www.commercehub.com/). [GH-1414] + * **Circonus Metrics Integration**: Vault can now send metrics to + [Circonus](http://www.circonus.com/). See the [configuration + documentation](https://www.vaultproject.io/docs/config/index.html) for + details. [GH-1646] + +IMPROVEMENTS: + + * audit: Added a unique identifier to each request which will also be found in + the request portion of the response. 
[GH-1650] + * auth/aws-ec2: Added a new constraint `bound_account_id` to the role + [GH-1523] + * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role + [GH-1522] + * auth/aws-ec2: Added `ttl` field for the role [GH-1703] + * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config` + have the minimum TLS version set to 1.2 by default. This is configurable. + * auth/token: Added endpoint to list accessors [GH-1676] + * auth/token: Added `disallowed_policies` option to token store roles [GH-1681] + * auth/token: `root` or `sudo` tokens can now create periodic tokens via + `auth/token/create`; additionally, the same token can now be periodic and + have an explicit max TTL [GH-1725] + * build: Add support for building on Solaris/Illumos [GH-1726] + * cli: Output formatting in the presence of warnings in the response object + [GH-1533] + * cli: `vault auth` command supports a `-path` option to take in the path at + which the auth backend is enabled, thereby allowing authenticating against + different paths using the command options [GH-1532] + * cli: `vault auth -methods` will now display the config settings of the mount + [GH-1531] + * cli: `vault read/write/unwrap -field` now allows selecting token response + fields [GH-1567] + * cli: `vault write -field` now allows selecting wrapped response fields + [GH-1567] + * command/status: Version information and cluster details added to the output + of `vault status` command [GH-1671] + * core: Response wrapping is now enabled for login endpoints [GH-1588] + * core: The duration of leadership is now exported via events through + telemetry [GH-1625] + * core: `sys/capabilities-self` is now accessible as part of the `default` + policy [GH-1695] + * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701] + * core: Unseal keys will now be returned in both hex and base64 forms, and + either can be used [GH-1734] + * core: Responses from most `/sys` endpoints now return normal `api.Secret` + structs in addition to the values they carried before. 
This means that
+   response wrapping can now be used with most authenticated `/sys` operations
+   [GH-1699]
+ * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576]
+ * physical/consul: Allowing additional tags to be added to Consul service
+   registration via `service_tags` option [GH-1643]
+ * secret/aws: Listing of roles is supported now [GH-1546]
+ * secret/cassandra: Add `connect_timeout` value for Cassandra connection
+   configuration [GH-1581]
+ * secret/mssql,mysql,postgresql: Reading of connection settings is supported
+   in all the sql backends [GH-1515]
+ * secret/mysql: Added optional maximum idle connections value to MySQL
+   connection configuration [GH-1635]
+ * secret/mysql: Use a combination of the role name and token display name in
+   generated user names and allow the length to be controlled [GH-1604]
+ * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed
+   in via one of four ways: a semicolon-delimited string, a base64-encoded
+   semicolon-delimited string, a serialized JSON string array, or a
+   base64-encoded serialized JSON string array [GH-1686]
+ * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config; the role
+   name is now returned as part of the `verify` API response
+ * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680]
+ * sys/health: Added version information to the response of health status
+   endpoint [GH-1647]
+ * sys/health: Cluster information is now returned as part of health status when
+   Vault is unsealed [GH-1671]
+ * sys/mounts: MountTable data is compressed before serializing to accommodate
+   thousands of mounts [GH-1693]
+ * website: The [token
+   concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has
+   been completely rewritten [GH-1725]
+
+BUG FIXES:
+
+ * auth/aws-ec2: Added a nil check for stored whitelist identity object
+   during renewal [GH-1542]
+ * auth/cert: Fix panic if no client certificate is supplied [GH-1637]
+ * auth/token: Don't report that a non-expiring root token is renewable, as
+   attempting to renew it results in an error [GH-1692]
+ * cli: Don't retry a command when a redirection is received [GH-1724]
+ * core: Fix regression causing status codes to be `400` in most non-5xx error
+   cases [GH-1553]
+ * core: Fix panic that could occur during a leadership transition [GH-1627]
+ * physical/postgres: Remove use of prepared statements as this causes
+   connection multiplexing software to break [GH-1548]
+ * physical/consul: Multiple Vault nodes on the same machine could lead to
+   check ID collisions, resulting in incorrect health check responses [GH-1628]
+ * physical/consul: Fix deregistration of health checks on exit [GH-1678]
+ * secret/postgresql: Check for existence of role before attempting deletion
+   [GH-1575]
+ * secret/postgresql: Handle revoking roles that have privileges on sequences
+   [GH-1573]
+ * secret/postgresql(,mysql,mssql): Fix incorrect use of the database object
+   instead of the transaction object, which could lead to connection exhaustion
+   [GH-1572]
+ * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634]
+ * secret/pki: Fix adding email addresses as SANs [GH-1688]
+ * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727]
+ * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715]
+
+## 0.6.0 (June 14th, 2016)
+
+SECURITY:
+
+ * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via
+   lease IDs, which incorporate path information) and
+   `auth/token/revoke-prefix` was intended to revoke
prefixes of tokens (using + the tokens' paths and, since 0.5.2, role information), in implementation + they both behaved exactly the same way since a single component in Vault is + responsible for managing lifetimes of both, and the type of the tracked + lifetime was not being checked. The end result was that either endpoint + could revoke both secret leases and tokens. We consider this a very minor + security issue as there are a number of mitigating factors: both endpoints + require `sudo` capability in addition to write capability, preventing + blanket ACL path globs from providing access; both work by using the prefix + to revoke as a part of the endpoint path, allowing them to be properly + ACL'd; and both are intended for emergency scenarios and users should + already not generally have access to either one. In order to prevent + confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and + `sys/revoke-prefix` will be meant for both leases and tokens instead. + +DEPRECATIONS/CHANGES: + + * `auth/token/revoke-prefix` has been removed. See the security notice for + details. [GH-1280] + * Vault will now automatically register itself as the `vault` service when + using the `consul` backend and will perform its own health checks. See + the Consul backend documentation for information on how to disable + auto-registration and service checks. + * List operations that do not find any keys now return a `404` status code + rather than an empty response object [GH-1365] + * CA certificates issued from the `pki` backend no longer have associated + leases, and any CA certs already issued will ignore revocation requests from + the lease manager. This is to prevent CA certificates from being revoked + when the token used to issue the certificate expires; it was not obvious + to users that they needed to ensure that the token lifetime was at + least as long as that of a potentially very long-lived CA cert. + +FEATURES: + + * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS + EC2 instances allowing automated retrieval of Vault tokens. Unlike most + Vault authentication backends, this backend does not require first deploying + or provisioning security-sensitive credentials (tokens, username/password, + client certificates, etc). Instead, it treats AWS as a Trusted Third Party + and uses the cryptographically signed dynamic metadata information that + uniquely represents each EC2 instance. [Vault + Enterprise](https://www.hashicorp.com/vault.html) customers have access to a + turnkey client that speaks the backend API and makes access to a Vault token + easy. + * **Response Wrapping**: Nearly any response within Vault can now be wrapped + inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole + Authentication + Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) + mechanism to its logical conclusion. Retrieving the original response is as + simple as a single API command or the new `vault unwrap` command. This makes + secret distribution easier and more secure, including secure introduction. + * **Azure Physical Backend**: You can now use Azure blob object storage as + your Vault physical data store [GH-1266] + * **Swift Physical Backend**: You can now use Swift blob object storage as + your Vault physical data store [GH-1425] + * **Consul Backend Health Checks**: The Consul backend will automatically + register a `vault` service and perform its own health checking.
By default + the active node can be found at `active.vault.service.consul` and all + standby nodes at `standby.vault.service.consul`. Sealed vaults are marked + critical and are not listed by default in Consul's service discovery. See + the documentation for details. [GH-1349] + * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on + tokens that do not honor changes in the system- or mount-set values. This is + useful, for instance, when the max TTL of the system or the `auth/token` + mount must be set high to accommodate certain needs but you want more + granular restrictions on tokens being issued directly from the Token + authentication backend at `auth/token`. [GH-1399] + * **Non-Renewable Tokens**: When creating tokens directly through the token + authentication backend, you can now specify in both token store roles and + the API whether or not a token should be renewable, defaulting to `true`. + * **RabbitMQ Secret Backend**: Vault can now generate credentials for + RabbitMQ. Vhosts and tags can be defined within roles. [GH-788] + +IMPROVEMENTS: + + * audit: Add the DisplayName value to the copy of the Request object embedded + in the associated Response, to match the original Request object [GH-1387] + * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435] + * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms. + Notable exceptions are any current MFA paths. A few paths in `token` and + `sys` also require `root` or `sudo`. [GH-1478] + * command/auth: Restore the previous authenticated token if the `auth` command + fails to authenticate the provided token [GH-1233] + * command/write: `-format` and `-field` can now be used with the `write` + command [GH-1228] + * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297] + * core: Don't keep lease timers around when tokens are revoked [GH-1277] + * core: If using the `disable_cache` option, caches for the policy store and + the `transit` backend are now disabled as well [GH-1346] + * credential/cert: Renewal requests are rejected if the set of policies has + changed since the token was issued [GH-477] + * credential/cert: Check CRLs for specific non-CA certs configured in the + backend [GH-1404] + * credential/ldap: If `groupdn` is not configured, skip searching LDAP and + only return policies for local groups, plus a warning [GH-1283] + * credential/ldap: `vault list` support for users and groups [GH-1270] + * credential/ldap: Support for the `memberOf` attribute for group membership + searching [GH-1245] + * credential/userpass: Add list support for users [GH-911] + * credential/userpass: Remove user configuration paths from requiring sudo, in + favor of normal ACL mechanisms [GH-1312] + * credential/token: Sanitize policies and add `default` policies in appropriate + places [GH-1235] + * credential/token: Setting the renewable status of a token is now possible + via `vault token-create` and the API. The default is true, but tokens can be + specified as non-renewable. [GH-1499] + * secret/aws: Use chain credentials to allow environment/EC2 instance/shared + providers [GH-307] + * secret/aws: Support for STS AssumeRole functionality [GH-1318] + * secret/consul: Reading consul access configuration is now supported.
The response + will contain non-sensitive information only [GH-1445] + * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to + DNS or Email Subject Alternate Names [GH-1220] + * secret/pki: Added list support for certificates [GH-1466] + * sys/capabilities: Enforce ACL checks for requests that query the capabilities + of a token on a given path [GH-1221] + * sys/health: Status information can now be retrieved with `HEAD` [GH-1509] + +BUG FIXES: + + * command/read: Fix panic when using `-field` with a non-string value [GH-1308] + * command/token-lookup: Fix TTL showing as 0 depending on how a token was + created. This only affected the value shown at lookup, not the token + behavior itself. [GH-1306] + * command/various: Tell the JSON decoder to not convert all numbers to floats; + fixes various places where numbers were showing up in scientific + notation + * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags + over their respective env vars [GH-1480] + * command/ssh: Provided option to disable host key checking. The automated + variant of the `vault ssh` command uses `sshpass` which was failing to handle + host key checking presented by the `ssh` binary. [GH-1473] + * core: Properly persist mount-tuned TTLs for auth backends [GH-1371] + * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372] + * credential/github: Make organization comparison case-insensitive during + login [GH-1359] + * credential/github: Fix panic when renewing a token created with some earlier + versions of Vault [GH-1510] + * credential/github: The token used to log in via `vault auth` can now be + specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511] + * credential/ldap: Fix problem where certain error conditions when configuring + or opening LDAP connections would cause a panic instead of returning a useful + error message [GH-1262] + * credential/token: Fall back to normal parent-token semantics if + `allowed_policies` is empty for a role. Using `allowed_policies` of + `default` resulted in the same behavior anyway. [GH-1276] + * credential/token: Fix issues renewing tokens when using the "suffix" + capability of token roles [GH-1331] + * credential/token: Fix lookup via POST showing the request token instead of + the desired token [GH-1354] + * credential/various: Fix renewal conditions when `default` policy is not + contained in the backend config [GH-1256] + * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353] + * secret/consul: Use non-pooled Consul API client to avoid leaving files open + [GH-1428] + * secret/pki: Don't check whether a certificate is destined to be a CA + certificate if the sign-verbatim endpoint is used [GH-1250] + +## 0.5.3 (May 27th, 2016) + +SECURITY: + + * Consul ACL Token Revocation: An issue was reported to us indicating that + generated Consul ACL tokens were not being properly revoked. Upon + investigation, we found that this behavior was reproducible in a specific + scenario: when a generated lease for a Consul ACL token had been renewed + prior to revocation. In this case, the generated token was not being + properly persisted internally through the renewal function, leading to an + error during revocation due to the missing token. Unfortunately, this was + coded as a user error rather than an internal error, and the revocation + logic was expecting internal errors if revocation failed.
As a result, the + revocation logic believed the revocation to have succeeded when it in fact + failed, causing the lease to be dropped while the token was still valid + within Consul. In this release, the Consul backend properly persists the + token through renewals, and the revocation logic has been changed to + consider any error type to have been a failure to revoke, causing the lease + to persist and attempt to be revoked later. + +We have written an example shell script that searches through Consul's ACL +tokens and looks for those generated by Vault, which can be used as a template +for a revocation script as deemed necessary for any particular security +response. The script is available at +https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0 + +Please note that any outstanding leases for Consul tokens produced prior to +0.5.3 that have been renewed will continue to exhibit this behavior. As a +result, we recommend either revoking all tokens produced by the backend and +issuing new ones, or if needed, a more advanced variant of the provided example +could use the timestamp embedded in each generated token's name to decide which +tokens are too old and should be deleted. This could then be run periodically +up until the maximum lease time for any outstanding pre-0.5.3 tokens has +expired. + +This is a security-only release. There are no other code changes since 0.5.2. +The binaries have one additional change: they are built against Go 1.6.1 rather +than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming +language itself. + +## 0.5.2 (March 16th, 2016) + +FEATURES: + + * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based + on configured roles [GH-998] + * **Token Accessors**: Vault now provides an accessor with each issued token. + This accessor is an identifier that can be used for a limited set of + actions, notably for token revocation. This value can be logged in + plaintext to audit logs, and in combination with the plaintext metadata + logged to audit logs, provides a searchable and straightforward way to + revoke particular users' or services' tokens in many cases. To enable + plaintext audit logging of these accessors, set `hmac_accessor=false` when + enabling an audit backend. + * **Token Credential Backend Roles**: Roles can now be created in the `token` + credential backend that allow modifying token behavior in ways that are not + otherwise exposed or easily delegated. This allows creating tokens with a + fixed set (or subset) of policies (rather than a subset of the calling + token's), periodic tokens with a fixed TTL but no expiration, specified + prefixes, and orphans. + * **Listener Certificate Reloading**: Vault's configured listeners now reload + their TLS certificate and private key when the Vault process receives a + SIGHUP. 
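As a rough sketch of the accessor workflow described in the features above (the audit log path and accessor value are placeholders, and this assumes the 0.5.2-era CLI together with the `lookup-accessor`/`revoke-accessor` endpoints listed under IMPROVEMENTS below):

```shell
# Enable a file audit backend that logs token accessors in plaintext,
# using the hmac_accessor=false option described above.
vault audit-enable file file_path=/var/log/vault_audit.log hmac_accessor=false

# Create a token; the accessor is printed alongside the token itself.
vault token-create

# Look up, then revoke, the token using only its accessor.
ACCESSOR="<accessor from the token-create output>"
vault write auth/token/lookup-accessor accessor="$ACCESSOR"
vault write auth/token/revoke-accessor accessor="$ACCESSOR"
```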
+ +IMPROVEMENTS: + + * auth/token: Endpoints optionally accept tokens from the HTTP body rather + than just from the URLs [GH-1211] + * auth/token,sys/capabilities: Added new endpoints + `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and + `sys/capabilities-accessor`, which enable performing the respective actions + with just the accessor of a token, without having access to the actual + token [GH-1188] + * core: Ignore leading `/` in policy paths [GH-1170] + * core: Ignore leading `/` in mount paths [GH-1172] + * command/policy-write: Provided HCL is now validated for format violations + and provides helpful information around where the violation occurred + [GH-1200] + * command/server: The initial root token ID when running in `-dev` mode can + now be specified via `-dev-root-token-id` or the environment variable + `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162] + * command/server: The listen address when running in `-dev` mode can now be + specified via `-dev-listen-address` or the environment variable + `VAULT_DEV_LISTEN_ADDRESS` [GH-1169] + * command/server: The configured listeners now reload their TLS + certificates/keys when Vault is SIGHUP'd [GH-1196] + * command/step-down: New `vault step-down` command and API endpoint to force + the targeted node to give up active status, but without sealing. The node + will wait ten seconds before attempting to grab the lock again. [GH-1146] + * command/token-renew: Allow no token to be passed in, in which case + `renew-self` is used; any token explicitly passed in now uses `renew`. + [GH-1150] + * credential/app-id: Allow `app-id` parameter to be given in the login path; + this causes the `app-id` to be part of the token path, making it easier to + use with `revoke-prefix` [GH-424] + * credential/cert: Non-CA certificates can be used for authentication. They + must be matched exactly (issuer and serial number), and + the certificate must carry the client authentication or 'any' extended usage + attributes. [GH-1153] + * credential/cert: Subject and Authority key IDs are output in metadata; this + allows more flexible searching/revocation in the audit logs [GH-1183] + * credential/cert: Support listing configured certs [GH-1212] + * credential/userpass: Add support for `create`/`update` capability + distinction in user path, and add user-specific endpoints to allow changing + the password and policies [GH-1216] + * credential/token: Add roles [GH-1155] + * secret/mssql: Add MSSQL backend [GH-998] + * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL` + endpoint [GH-1180] + * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some + other formats [GH-1187] + * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint.
+ [GH-1154] + * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to + fetch the capabilities of a token on a given path [GH-1171] + * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors + when revoking a lease, necessary in some emergency/failure scenarios + [GH-1168] + * sys: The return codes from `sys/health` can now be user-specified via query + parameters [GH-1199] + +BUG FIXES: + + * logical/cassandra: Apply hyphen/underscore replacement to the entire + generated username, not just the UUID, in order to handle token display name + hyphens [GH-1140] + * physical/etcd: Output actual error when cluster sync fails [GH-1141] + * vault/expiration: Error responses from the backends are no longer skipped + during renewals [GH-1176] + +## 0.5.1 (February 25th, 2016) + +DEPRECATIONS/CHANGES: + + * RSA keys less than 2048 bits are no longer supported in the PKI backend. + 1024-bit keys are considered unsafe and are disallowed in the Internet PKI. + The `pki` backend has enforced SHA256 hashes in signatures from the + beginning, and software that can handle these hashes should be able to + handle larger key sizes. [GH-1095] + * The PKI backend now does not automatically delete expired certificates, + including from the CRL. Doing so could lead to a situation where a time + mismatch between the Vault server and clients could result in a certificate + that would not be considered expired by a client being removed from the CRL. + The new `pki/tidy` endpoint can be used to trigger removal of expired + certificates. [GH-1129] + * The `cert` backend now performs a variant of channel binding at renewal time + for increased security. In order to not overly burden clients, a notion of + identity is used. This functionality can be disabled. See the 0.5.1 upgrade + guide for more specific information [GH-1127] + +FEATURES: + + * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of + the audit contract do not allow us to make the results public.) [GH-220] + +IMPROVEMENTS: + + * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control + the SNI header during TLS connections [GH-1131] + * api/health: Add the server's time in UTC to health responses [GH-1117] + * command/rekey and command/generate-root: These now return the status at + attempt initialization time, rather than requiring a separate fetch for the + nonce [GH-1054] + * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/` + paths; use normal ACL behavior instead [GH-468] + * credential/github: The validity of the token used for login will be checked + at renewal time [GH-1047] + * credential/github: The `config` endpoint no longer requires a root token; + normal ACL path matching applies + * deps: Use the standardized Go 1.6 vendoring system + * secret/aws: Inform users of AWS-imposed policy restrictions around STS + tokens if they attempt to use an invalid policy [GH-1113] + * secret/mysql: The MySQL backend now allows disabling verification of the + `connection_url` [GH-1096] + * secret/pki: Submitted CSRs are now verified to have the correct key type and + minimum number of bits according to the role. The exception is intermediate + CA signing and the `sign-verbatim` path [GH-1104] + * secret/pki: New `tidy` endpoint to allow expunging expired certificates.
+ [GH-1129] + * secret/postgresql: The PostgreSQL backend now allows disabling verification + of the `connection_url` [GH-1096] + * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of + 204 [GH-1086] + * credential/app-id: App ID backend will check the validity of app-id and user-id + at renewal time [GH-1039] + * credential/cert: TLS Certificates backend, during renewal, will now match the + client identity with the client identity used during login [GH-1127] + +BUG FIXES: + + * credential/ldap: Properly escape values being provided to search filters + [GH-1100] + * secret/aws: Cap the length of usernames for both IAM and STS types + [GH-1102] + * secret/pki: If a cert is not found during lookup of a serial number, + respond with a 400 rather than a 500 [GH-1085] + * secret/postgresql: Add extra revocation statements to better handle more + permission scenarios [GH-1053] + * secret/postgresql: Make `connection_url` work properly [GH-1112] + +## 0.5.0 (February 10, 2016) + +SECURITY: + + * Previous versions of Vault could allow a malicious user to hijack the rekey + operation by canceling an operation in progress and starting a new one. The + practical application of this is very small. If the user was an unseal key + owner, they could attempt to do this in order to either receive unencrypted + reseal keys or to replace the PGP keys used for encryption with ones under + their control. However, since this would invalidate any rekey progress, they + would need other unseal key holders to resubmit, which would be rather + suspicious during this manual operation if they were not also the original + initiator of the rekey attempt. If the user was not an unseal key holder, + there is no benefit to be gained; the only outcome that could be attempted + would be a denial of service against a legitimate rekey operation by sending + cancel requests over and over. Thanks to Josh Snyder for the report! + +DEPRECATIONS/CHANGES: + + * `s3` physical backend: Environment variables are now preferred over + configuration values. This makes it behave similar to the rest of Vault, + which, in increasing order of preference, uses values from the configuration + file, environment variables, and CLI flags. [GH-871] + * `etcd` physical backend: `sync` functionality is now supported and turned on + by default. This can be disabled. [GH-921] + * `transit`: If a client attempts to encrypt a value with a key that does not + yet exist, what happens now depends on the capabilities set in the client's + ACL policies. If the client has `create` (or `create` and `update`) + capability, the key will upsert as in the past. If the client has `update` + capability, they will receive an error. [GH-1012] + * `token-renew` CLI command: If the token given for renewal is the same as the + client token, the `renew-self` endpoint will be used in the API. Given that + the `default` policy (by default) allows all clients access to the + `renew-self` endpoint, this makes it much more likely that the intended + operation will be successful. [GH-894] + * Token `lookup`: the `ttl` value in the response now reflects the actual + remaining TTL rather than the original TTL specified when the token was + created; this value is now located in `creation_ttl` [GH-986] + * Vault no longer uses grace periods on leases or token TTLs. Uncertainty + about the length of the grace period for any given backend could cause confusion + and uncertainty. [GH-1002] + * `rekey`: Rekey now requires a nonce to be supplied with key shares.
This + nonce is generated at the start of a rekey attempt and is unique for that + attempt. + * `status`: The exit code for the `status` CLI command is now `2` for an + uninitialized Vault instead of `1`. `1` is returned for errors. This better + matches the rest of the CLI. + +FEATURES: + + * **Split Data/High Availability Physical Backends**: You can now configure + two separate physical backends: one to be used for High Availability + coordination and another to be used for encrypted data storage. See the + [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + [GH-395] + * **Fine-Grained Access Control**: Policies can now use the `capabilities` set + to specify fine-grained control over operations allowed on a path, including + separation of `sudo` privileges from other privileges. These can be mixed + and matched in any way desired. The `policy` value is kept for backwards + compatibility. See the [updated policy + documentation](https://vaultproject.io/docs/concepts/policies.html) for + details, and the short policy sketch after this feature list. [GH-914] + * **List Support**: Listing is now supported via the API and the new `vault + list` command. This currently supports listing keys in the `generic` and + `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS + section below). Different parts of the API and backends will need to + implement list capabilities in ways that make sense to particular endpoints, + so further support will appear over time. [GH-617] + * **Root Token Generation via Unseal Keys**: You can now use the + `generate-root` CLI command to generate new orphaned, non-expiring root + tokens in case the original is lost or revoked (accidentally or + purposefully). This requires a quorum of unseal key holders. The output + value is protected via any PGP key of the initiator's choosing or a one-time + pad known only to the initiator (a suitable pad can be generated via the + `-genotp` flag to the command). [GH-915] + * **Unseal Key Archiving**: You can now optionally have Vault store your + unseal keys in your chosen physical store for disaster recovery purposes. + This option is only available when the keys are encrypted with PGP. [GH-907] + * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase + users when passing in PGP keys to the `init`, `rekey`, and `generate-root` + CLI commands. Public keys for these users will be fetched automatically. + [GH-901] + * **DynamoDB HA Physical Backend**: There is now a new, community-supported + HA-enabled physical backend using Amazon DynamoDB. See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + [GH-878] + * **PostgreSQL Physical Backend**: There is now a new, community-supported + physical backend using PostgreSQL. See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + [GH-945] + * **STS Support in AWS Secret Backend**: You can now use the AWS secret + backend to fetch STS tokens rather than IAM users. [GH-927] + * **Speedups in the transit backend**: The `transit` backend has gained a + cache, and now loads only the working set of keys (e.g. from the + `min_decryption_version` to the current key version) into memory. + This provides large speedups and potential memory savings when the `rotate` + feature of the backend is used heavily.
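As referenced in the Fine-Grained Access Control item above, here is a minimal policy sketch using the new `capabilities` set (the policy name and paths are illustrative, and `vault policy-write` is the CLI command of this era):

```shell
# A policy using the capabilities set rather than the legacy "policy"
# value; note that sudo is granted separately from other privileges.
cat > app-readonly.hcl <<'EOF'
path "secret/app/*" {
  capabilities = ["read", "list"]
}

path "sys/seal" {
  capabilities = ["update", "sudo"]
}
EOF

vault policy-write app-readonly app-readonly.hcl
```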
+ +IMPROVEMENTS: + + * cli: Output secrets sorted by key name [GH-830] + * cli: Support YAML as an output format [GH-832] + * cli: Show an error if the output format is incorrect, rather than falling + back to an empty table [GH-849] + * cli: Allow setting the `advertise_addr` for HA via the + `VAULT_ADVERTISE_ADDR` environment variable [GH-581] + * cli/generate-root: Add generate-root and associated functionality [GH-915] + * cli/init: Add `-check` flag that returns whether Vault is initialized + [GH-949] + * cli/server: Use internal functions for the token-helper rather than shelling + out, which fixes some problems with using a static binary in Docker or paths + with multiple spaces when launching in `-dev` mode [GH-850] + * cli/token-lookup: Add token-lookup command [GH-892] + * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for + `-pgp-keys` [GH-940] + * conf: Use normal bool values rather than empty/non-empty for the + `tls_disable` option [GH-802] + * credential/ldap: Add support for binding, both anonymously (to discover a + user DN) and via a username and password [GH-975] + * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] + * credential/token: Change `ttl` to reflect the current remaining TTL; the + original value is in `creation_ttl` [GH-1007] + * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] + * logical/aws: You can now get STS tokens instead of IAM users [GH-927] + * logical/cassandra: Add `protocol_version` parameter to set the CQL proto + version [GH-1005] + * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] + * logical/mysql: Add list support for roles path [GH-984] + * logical/pki: Fix up key usages being specified for CAs [GH-989] + * logical/pki: Add list support for roles path [GH-985] + * logical/pki: Allow `pem_bundle` to be specified as the format, which + provides a concatenated PEM bundle of returned values [GH-1008] + * logical/pki: Add 30 seconds of slack to the validity start period to + accommodate some clock skew in machines [GH-1036] + * logical/postgres: Add `max_idle_connections` parameter [GH-950] + * logical/postgres: Add list support for roles path + * logical/ssh: Add list support for roles path [GH-983] + * logical/transit: Keys are archived and only keys between the latest version + and `min_decryption_version` are loaded into the working set. This can + provide a very large speed increase when rotating keys very often. 
[GH-977] + * logical/transit: Keys are now cached, which should provide a large speedup + in most cases [GH-979] + * physical/cache: Use 2Q cache instead of straight LRU [GH-908] + * physical/etcd: Support basic auth [GH-859] + * physical/etcd: Support sync functionality and enable by default [GH-921] + +BUG FIXES: + + * api: Correct the HTTP verb used in the LookupSelf method [GH-887] + * api: Fix the output of `Sys().MountConfig(...)` to return proper values + [GH-1017] + * command/read: Fix panic when an empty argument was given [GH-923] + * command/ssh: Fix panic when username lookup fails [GH-886] + * core: When running in standalone mode, don't advertise that we are active + until post-unseal setup completes [GH-872] + * core: Update go-cleanhttp dependency to ensure idle connections aren't + leaked [GH-867] + * core: Don't allow tokens to have duplicate policies [GH-897] + * core: Fix regression in `sys/renew` that caused information stored in the + Secret part of the response to be lost [GH-912] + * physical: Use square brackets when setting an IPv6-based advertise address + as the auto-detected advertise address [GH-883] + * physical/s3: Use an initialized client when using IAM roles to fix a + regression introduced against newer versions of the AWS Go SDK [GH-836] + * secret/pki: Fix a condition where unmounting could fail if the CA + certificate was not properly loaded [GH-946] + * secret/ssh: Fix a problem where SSH connections were not always closed + properly [GH-942] + +MISC: + + * Clarified our stance on support for community-derived physical backends. + See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + * Add `vault-java` to libraries [GH-851] + * Various minor documentation fixes and improvements [GH-839] [GH-854] + [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] + [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] + +BUILD NOTE: + + * The HashiCorp-provided binary release of Vault 0.5.0 is built against a + patched version of Go 1.5.3 containing two specific bug fixes affecting TLS + certificate handling. These fixes are in the Go 1.6 tree and were + cherry-picked on top of stock Go 1.5.3. If you want to examine the way in + which the releases were built, please look at our [cross-compilation + Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). + +## 0.4.1 (January 13, 2016) + +SECURITY: + + * Build against Go 1.5.3 to mitigate a security vulnerability introduced in + Go 1.5. For more information, please see + https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 + +This is a security-only release; other than the version number and building +against Go 1.5.3, there are no changes from 0.4.0. + +## 0.4.0 (December 10, 2015) + +DEPRECATIONS/CHANGES: + + * Policy Name Casing: Policy names are now normalized to lower-case on write, + helping prevent accidental case mismatches. For backwards compatibility, + policy names are not currently normalized when reading or deleting. [GH-676] + * Default etcd port number: the default connection string for the `etcd` + physical store uses port 2379 instead of port 4001, which is the port used + by the supported version 2.x of etcd. [GH-753] + * As noted below in the FEATURES section, if your Vault installation contains + a policy called `default`, new tokens created will inherit this policy + automatically. 
+ * In the PKI backend there have been a few minor breaking changes: + * The token display name is no longer a valid option for providing a base + domain for issuance. Since this name is prepended with the name of the + authentication backend that issued it, it provided a faulty use-case at best + and a confusing experience at worst. We hope to figure out a better + per-token value in a future release. + * The `allowed_base_domain` parameter has been changed to `allowed_domains`, + which accepts a comma-separated list of domains. This allows issuing + certificates with DNS subjects across multiple domains. If you had a + configured `allowed_base_domain` parameter, it will be migrated + automatically when the role is read (either via a normal read, or via + issuing a certificate). + +FEATURES: + + * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate + and sign root CA certificates and intermediate CA CSRs. It can also now sign + submitted client CSRs, as well as a significant number of other + enhancements. See the updated documentation for the full API. [GH-666] + * **CRL Checking for Certificate Authentication**: The `cert` backend now + supports pushing CRLs into the mount and using the contained serial numbers + for revocation checking. See the documentation for the `cert` backend for + more info. [GH-330] + * **Default Policy**: Vault now ensures that a policy named `default` is added + to every token. This policy cannot be deleted, but it can be modified + (including to an empty policy). There are three endpoints allowed in the + default `default` policy, related to token self-management: `lookup-self`, + which allows a token to retrieve its own information, and `revoke-self` and + `renew-self`, which are self-explanatory. If your existing Vault + installation contains a policy called `default`, it will not be overridden, + but it will be added to each new token created. You can override this + behavior when using manual token creation (i.e. not via an authentication + backend) by setting the "no_default_policy" flag to true. [GH-732] + +IMPROVEMENTS: + + * api: API client now uses a 60 second timeout instead of indefinite [GH-681] + * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth + tokens [GH-739] + * api: Standardize environment variable reading logic inside the API; the CLI + now uses this but can still override via command-line parameters [GH-618] + * audit: HMAC-SHA256'd client tokens are now stored with each request entry. + Previously they were only displayed at creation time; this allows much + better traceability of client actions. 
[GH-713] + * audit: There is now a `sys/audit-hash` endpoint that can be used to generate + an HMAC-SHA256'd value from provided data using the given audit backend's + salt [GH-784] + * core: The physical storage read cache can now be disabled via + "disable_cache" [GH-674] + * core: The unsealing process can now be reset midway through (this feature + was documented before, but not enabled) [GH-695] + * core: Tokens can now renew themselves [GH-455] + * core: Base64-encoded PGP keys can be used with the CLI for `init` and + `rekey` operations [GH-653] + * core: Print version on startup [GH-765] + * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system + instead of requiring a root token [GH-769] + * credential/token: Display whether or not a token is an orphan in the output + of a lookup call [GH-766] + * logical: Allow `.` in path-based variables in many more locations [GH-244] + * logical: Responses now contain a "warnings" key containing a list of + warnings returned from the server. These are conditions that did not require + failing an operation, but of which the client should be aware. [GH-676] + * physical/(consul,etcd): Consul and etcd now use a connection pool to limit + the number of outstanding operations, improving behavior when a lot of + operations must happen at once [GH-677] [GH-780] + * physical/consul: The `datacenter` parameter was removed; it could not be + effective unless the Vault node (or the Consul node it was connecting to) + was in the datacenter specified, in which case it wasn't needed [GH-816] + * physical/etcd: Support TLS-encrypted connections and use a connection pool + to limit the number of outstanding operations [GH-780] + * physical/s3: The S3 endpoint can now be configured, allowing using + S3-API-compatible storage solutions [GH-750] + * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET` + environment variable [GH-758] + * secret/consul: Management tokens can now be created [GH-714] + +BUG FIXES: + + * api: API client now checks for a 301 response for redirects. Vault doesn't + generate these, but in certain conditions Go's internal HTTP handler can + generate them, leading to client errors. + * cli: `token-create` now supports the `ttl` parameter in addition to the + deprecated `lease` parameter. [GH-688] + * core: Return data from `generic` backends on the last use of a limited-use + token [GH-615] + * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673] + * core: Stale leader entries will now be reaped [GH-679] + * core: Using `mount-tune` on the auth/token path did not take effect.
+ [GH-688] + * core: Fix a potential race condition when (un)sealing the vault with metrics + enabled [GH-694] + * core: Fix an error that could happen in some failure scenarios where Vault + could fail to revert to a clean state [GH-733] + * core: Ensure secondary indexes are removed when a lease is expired [GH-749] + * core: Ensure rollback manager uses an up-to-date mounts table [GH-771] + * everywhere: Don't use http.DefaultClient, as it shares state implicitly and + is a source of hard-to-track-down bugs [GH-700] + * credential/token: Allow creating orphan tokens via an API path [GH-748] + * secret/generic: Validate given duration at write time, not just read time; + if stored durations are not parseable, return a warning and the default + duration rather than an error [GH-718] + * secret/generic: Return 400 instead of 500 when `generic` backend is written + to with no data fields [GH-825] + * secret/postgresql: Revoke permissions before dropping a user or revocation + may fail [GH-699] + +MISC: + + * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697] + [GH-710] [GH-715] [GH-831] + +## 0.3.1 (October 6, 2015) + +SECURITY: + + * core: In certain failure scenarios, the full values of requests and + responses would be logged [GH-665] + +FEATURES: + + * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends + now allow setting the number of maximum open connections to the database, + which was previously capped to 2. [GH-661] + * **Renewable Tokens for GitHub**: The `github` backend now supports + specifying a TTL, enabling renewable tokens. [GH-664] + +BUG FIXES: + + * dist: linux-amd64 distribution was dynamically linked [GH-656] + * credential/github: Fix acceptance tests [GH-651] + +MISC: + + * Various minor documentation fixes and improvements [GH-649] [GH-650] + [GH-654] [GH-663] + +## 0.3.0 (September 28, 2015) + +DEPRECATIONS/CHANGES: + +Note: deprecations and breaking changes in upcoming releases are announced +ahead of time on the "vault-tool" mailing list. + + * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is + via the X-Vault-Token header. Cookie authentication was hard to properly + test, could result in browsers/tools/applications saving tokens in plaintext + on disk, and other issues. [GH-564] + * **Terminology/Field Names**: Vault is transitioning from overloading the + term "lease" to mean both "a set of metadata" and "the amount of time the + metadata is valid". The latter is now being referred to as TTL (or + "lease_duration" for backwards-compatibility); some parts of Vault have + already switched to using "ttl" and others will follow in upcoming releases. + In particular, the "token", "generic", and "pki" backends accept both "ttl" + and "lease" but in 0.4 only "ttl" will be accepted. [GH-528] + * **Downgrade Not Supported**: Due to enhancements in the storage subsystem, + values written by Vault 0.3+ will not be able to be read by prior versions + of Vault. There are no expected upgrade issues, however, as with all + critical infrastructure it is recommended to back up Vault's physical + storage before upgrading. + +FEATURES: + + * **SSH Backend**: Vault can now be used to delegate SSH access to machines, + via a (recommended) One-Time Password approach or by issuing dynamic keys. + [GH-385] + * **Cubbyhole Backend**: This backend works similarly to the "generic" backend + but provides a per-token workspace. 
This enables some additional + authentication workflows (especially for containers) and can be useful for + applications to, e.g., store local credentials while being restarted or + upgraded, rather than persisting them to disk. [GH-612] + * **Transit Backend Improvements**: The transit backend now allows key + rotation and datakey generation. For rotation, data encrypted with previous + versions of the keys can still be decrypted, down to a (configurable) + minimum previous version; there is a rewrap function for manual upgrades of + ciphertext to newer versions. Additionally, the backend now allows + generating and returning high-entropy keys of a configurable bitsize + suitable for AES and other functions; this is returned wrapped by a named + key, or optionally both wrapped and plaintext for immediate use. [GH-626] + * **Global and Per-Mount Default/Max TTL Support**: You can now set the + default and maximum Time To Live for leases both globally and per-mount. + Per-mount settings override global settings. Not all backends honor these + settings yet, but the maximum is a hard limit enforced outside the backend. + See the documentation for "/sys/mounts/" for details on configuring + per-mount TTLs. [GH-469] + * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's + master key, PGP/GPG public keys can now be provided. The output keys will be + encrypted with the given keys, in order. [GH-570] + * **Duo Multifactor Authentication Support**: Backends that support MFA can + now use Duo as the mechanism. [GH-464] + * **Performance Improvements**: Users of the "generic" backend will see a + significant performance improvement as the backend no longer creates leases, + although it does return TTLs (global/mount default, or set per-item) as + before. [GH-631] + * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the + audit contract do not allow us to make the results public.) [GH-220] + +IMPROVEMENTS: + + * audit: Log entries now contain a time field [GH-495] + * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627] + * backends: Add ability for a cleanup function to be called on backend unmount + [GH-608] + * config: Allow specifying minimum acceptable TLS version [GH-447] + * core: If trying to mount in a location that is already mounted, be more + helpful about the error [GH-510] + * core: Be more explicit on failure if the issue is invalid JSON [GH-553] + * core: Tokens can now revoke themselves [GH-620] + * credential/app-id: Give a more specific error when sending a duplicate POST + to sys/auth/app-id [GH-392] + * credential/github: Support custom API endpoints (e.g. for Github Enterprise) + [GH-572] + * credential/ldap: Add per-user policies and option to login with + userPrincipalName [GH-420] + * credential/token: Allow root tokens to specify the ID of a token being + created from the CLI [GH-502] + * credential/userpass: Enable renewals for login tokens [GH-623] + * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446] + * scripts: Use godep for build scripts to use same environment as tests + [GH-404] + * secret/mysql: Allow reading configuration data [GH-529] + * secret/pki: Split "allow_any_name" logic into that and "enforce_hostnames", to + allow for non-hostname values (e.g.
for client certificates) [GH-555] + * storage/consul: Allow specifying certificates used to talk to Consul + [GH-384] + * storage/mysql: Allow SSL encrypted connections [GH-439] + * storage/s3: Allow using temporary security credentials [GH-433] + * telemetry: Put telemetry object in configuration to allow more flexibility + [GH-419] + * testing: Disable mlock for testing of logical backends so as not to require + root [GH-479] + +BUG FIXES: + + * audit/file: Do not enable auditing if file permissions are invalid [GH-550] + * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] + * cli: Fixed missing setup of client TLS certificates if no custom CA was + provided + * cli/read: Do not include a carriage return when using raw field output + [GH-624] + * core: Bad input data could lead to a panic for that session, rather than + returning an error [GH-503] + * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] + * core: Do not return a Secret if there are no uses left on a token (since it + will be unable to be used) [GH-615] + * core: Code paths that called lookup-self would decrement num_uses and + potentially immediately revoke a token [GH-552] + * core: Some /sys/ paths would not properly redirect from a standby to the + leader [GH-499] [GH-551] + * credential/aws: Translate spaces in a token's display name to avoid making + IAM unhappy [GH-567] + * credential/github: Integration failed if more than ten organizations or + teams [GH-489] + * credential/token: Tokens with sudo access to "auth/token/create" can now use + root-only options [GH-629] + * secret/cassandra: Work around backwards-incompatible change made in + Cassandra 2.2 preventing Vault from properly setting/revoking leases + [GH-549] + * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues + [GH-522] + * secret/postgres: Explicitly set timezone in connections [GH-597] + * storage/etcd: Renew semaphore periodically to prevent leadership flapping + [GH-606] + * storage/zk: Fix collisions in storage that could lead to data unavailability + [GH-411] + +MISC: + + * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] + [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] + [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] + * Less "armon" in stack traces [GH-453] + * Sourcegraph integration [GH-456] + +## 0.2.0 (July 13, 2015) + +FEATURES: + + * **Key Rotation Support**: The `rotate` command can be used to rotate the + master encryption key used to write data to the storage (physical) backend. + [GH-277] + * **Rekey Support**: Rekey can be used to rotate the master key and change the + configuration of the unseal keys (number of shares, threshold required). + [GH-277] + * **New secret backend: `pki`**: Enable Vault to be a certificate authority + and generate signed TLS certificates. [GH-310] + * **New secret backend: `cassandra`**: Generate dynamic credentials for + Cassandra [GH-363] + * **New storage backend: `etcd`**: store physical data in etcd [GH-259] + [GH-297] + * **New storage backend: `s3`**: store physical data in S3. Does not support + HA. [GH-242] + * **New storage backend: `MySQL`**: store physical data in MySQL. Does not + support HA. 
[GH-324] + * `transit` secret backend supports derived keys for per-transaction unique + keys [GH-399] + +IMPROVEMENTS: + + * cli/auth: Enable `cert` method [GH-380] + * cli/auth: read input from stdin [GH-250] + * cli/read: Ability to read a single field from a secret [GH-257] + * cli/write: Adding a force flag when no input required + * core: allow time duration format in place of seconds for some inputs + * core: audit log provides more useful information [GH-360] + * core: graceful shutdown for faster HA failover + * core: **change policy format** to use explicit globbing [GH-400] Any + existing policy in Vault is automatically upgraded to avoid issues. All + policy files must be updated for future writes. Adding the explicit glob + character `*` to the path specification is all that is required. + * core: policy merging to give deny highest precedence [GH-400] + * credential/app-id: Protect against timing attack on app-id + * credential/cert: Record the common name in the metadata [GH-342] + * credential/ldap: Allow TLS verification to be disabled [GH-372] + * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367] + * credential/userpass: Protect against timing attack on password + * credential/userpass: Use bcrypt for password matching + * http: response codes improved to reflect error [GH-366] + * http: the `sys/health` endpoint supports `?standbyok` to return 200 on + standby [GH-389] + * secret/app-id: Support deleting AppID and UserIDs [GH-200] + * secret/consul: Fine grained lease control [GH-261] + * secret/transit: Decouple raw key from key management endpoint [GH-355] + * secret/transit: Upsert named key when encrypt is used [GH-355] + * storage/zk: Support for HA configuration [GH-252] + * storage/zk: Changing node representation. **Backwards incompatible**. + [GH-416] + +BUG FIXES: + + * audit/file: fix removing TLS connection state + * audit/syslog: fix removing TLS connection state + * command/*: commands accepting `k=v` allow blank values + * core: Allow building on FreeBSD [GH-365] + * core: Fixed various panics when audit logging enabled + * core: Lease renewal does not create redundant lease + * core: fixed leases with negative duration [GH-354] + * core: token renewal does not create child token + * core: fixing panic when lease increment is null [GH-408] + * credential/app-id: Salt the paths in storage backend to avoid information + leak + * credential/cert: Fixing client certificate not being requested + * credential/cert: Fixing panic when no certificate match found [GH-361] + * http: Accept PUT as POST for sys/auth + * http: Accept PUT as POST for sys/mounts [GH-349] + * http: Return 503 when sealed [GH-225] + * secret/postgres: Username length is capped to avoid exceeding the limit + * server: Do not panic if backend not configured [GH-222] + * server: Explicitly check value of tls_disable [GH-201] + * storage/zk: Fixed issues with version conflicts [GH-190] + +MISC: + + * cli/path-help: renamed from `help` to avoid confusion + +## 0.1.2 (May 11, 2015) + +FEATURES: + + * **New physical backend: `zookeeper`**: store physical data in Zookeeper. + HA not supported yet. + * **New credential backend: `ldap`**: authenticate using LDAP credentials + (a short usage sketch follows).
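As referenced above, a minimal sketch of the new `ldap` backend with the CLI of this era (the server URL, user DN, and username are illustrative; consult the ldap backend documentation for the full option set):

```shell
# Enable and configure the ldap credential backend.
vault auth-enable ldap
vault write auth/ldap/config \
    url="ldap://ldap.example.com" \
    userdn="ou=Users,dc=example,dc=com"

# Log in with LDAP credentials; the CLI prompts for the password.
vault auth -method=ldap username=alice
```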
+ +IMPROVEMENTS: + + * core: Auth backends can store internal data about auth creds + * audit: display name for auth is shown in logs [GH-176] + * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130] + * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162] + * command/server: environment variables are copy-pastable + * credential/app-id: hash of app and user ID are in metadata [GH-176] + * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124] + * logical/*: Generate help output even if no synopsis specified + +BUG FIXES: + + * core: login endpoints should never return secrets + * core: Internal data should never be returned from core endpoints + * core: defer barrier initialization to as late as possible to avoid error + cases during init that corrupt data (no data loss) + * core: guard against invalid init config earlier + * audit/file: create file if it doesn't exist [GH-148] + * command/*: ignore directories when traversing CA paths [GH-181] + * credential/*: all policy mapping keys are case insensitive [GH-163] + * physical/consul: Fixing path for locking so HA works in every case + +## 0.1.1 (May 2, 2015) + +SECURITY CHANGES: + + * physical/file: create the storage with 0600 permissions [GH-102] + * token/disk: write the token to disk with 0600 perms + +IMPROVEMENTS: + + * core: Very verbose error if mlock fails [GH-59] + * command/*: On error with TLS oversized record, show more human-friendly + error message. [GH-123] + * command/read: `lease_renewable` is now output along with the secret to + show whether it is renewable or not + * command/server: Add configuration option to disable mlock + * command/server: Disable mlock for dev mode so it works on more systems + +BUG FIXES: + + * core: if token helper isn't absolute, prepend with path to Vault + executable, not "vault" (which requires PATH) [GH-60] + * core: Any "mapping" routes allow hyphens in keys [GH-119] + * core: Validate `advertise_addr` is a valid URL with scheme [GH-106] + * command/auth: Using an invalid token won't crash [GH-75] + * credential/app-id: app and user IDs can have hyphens in keys [GH-119] + * helper/password: import proper DLL for Windows to prompt for the password [GH-83] + +## 0.1.0 (April 28, 2015) + + * Initial release diff --git a/CHANGELOG.md b/CHANGELOG.md index a435d0fecc2b..3556b87346ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8517 +1,4764 @@ -## 1.13.0 -### March 01, 2023 +## Previous versions +- [v1.0.0 - v1.9.10](CHANGELOG-pre-v1.10.md) +- [v0.11.6 and earlier](CHANGELOG-v0.md) + +## 1.17.5 +### August 30, 2024 + +SECURITY: + +core/audit: fix regression where client tokens and token accessors were being +displayed in the audit log in plaintext [HCSEC-2024-18](https://discuss.hashicorp.com/t/hcsec-2024-18-vault-leaks-client-token-and-token-accessor-in-audit-devices) + +BUG FIXES: + +* proxy/cache (enterprise): Fixed an issue where Proxy with static secret caching enabled would not correctly handle requests to older secret versions for KVv2 secrets. Proxy's static secret cache now properly handles all requests relating to older versions for KVv2 secrets.
[[GH-28207](https://github.com/hashicorp/vault/pull/28207)] +* ui: fixes renew-self being called right after login for non-renewable tokens [[GH-28204](https://github.com/hashicorp/vault/pull/28204)] + +## 1.17.4 +### August 29, 2024 + +CHANGES: + +* activity (enterprise): filter all fields in client count responses by the request namespace [[GH-27790](https://github.com/hashicorp/vault/pull/27790)] +* core: Bump Go version to 1.22.6 +* secrets/terraform: Update plugin to v0.9.0 [[GH-28016](https://github.com/hashicorp/vault/pull/28016)] + +IMPROVEMENTS: + +* activity log: Changes how new client counts in the current month are estimated, in order to return more +visibly sensible totals. [[GH-27547](https://github.com/hashicorp/vault/pull/27547)] +* activity: `/sys/internal/counters/activity` will now include a warning if the specified usage period contains estimated client counts. [[GH-28068](https://github.com/hashicorp/vault/pull/28068)] +* audit: Adds TRACE logging to log request/response under certain circumstances, and further improvements to the audit subsystem. [[GH-28056](https://github.com/hashicorp/vault/pull/28056)] +* cli: `vault operator usage` will now include a warning if the specified usage period contains estimated client counts. [[GH-28068](https://github.com/hashicorp/vault/pull/28068)] +* core/activity: Ensure client count queries that include the current month return consistent results by sorting the clients before performing estimation [[GH-28062](https://github.com/hashicorp/vault/pull/28062)] +* raft-snapshot (enterprise): add support for managed identity credentials for azure snapshots + +BUG FIXES: + +* activity: The sys/internal/counters/activity endpoint will return current month data when the end_date parameter is set to a future date. [[GH-28042](https://github.com/hashicorp/vault/pull/28042)] +* auth/aws: fixes an issue where not supplying an external id was interpreted as an empty external id [[GH-27858](https://github.com/hashicorp/vault/pull/27858)] +* command: The `vault secrets move` and `vault auth move` commands will no longer attempt to write to storage on performance standby nodes. [[GH-28059](https://github.com/hashicorp/vault/pull/28059)] +* core (enterprise): Fix deletion of MFA login-enforcement configurations on standby nodes +* secrets/database: Skip connection verification on reading existing DB connection configuration [[GH-28139](https://github.com/hashicorp/vault/pull/28139)] +* ui: fixes toast (flash) alert message saying "created" when deleting a kv v2 secret [[GH-28093](https://github.com/hashicorp/vault/pull/28093)] + +## 1.17.3 +### August 07, 2024 + +CHANGES: + +* auth/cf: Update plugin to v0.18.0 [[GH-27724](https://github.com/hashicorp/vault/pull/27724)] + +IMPROVEMENTS: + +* audit: Ensure that any underlying errors from audit devices are logged even if we consider auditing to be a success. [[GH-27809](https://github.com/hashicorp/vault/pull/27809)] +* audit: Internal implementation changes to the audit subsystem which improve performance. [[GH-27952](https://github.com/hashicorp/vault/pull/27952)] +* audit: sinks (file, socket, syslog) will attempt to log errors to the server operational +log before returning (if there are errors to log, and the context is done). [[GH-27859](https://github.com/hashicorp/vault/pull/27859)] +* auth/cert: Cache full list of role trust information separately to avoid +eviction, and avoid duplicate loading during multiple simultaneous logins on +the same role.
[[GH-27902](https://github.com/hashicorp/vault/pull/27902)] +* license utilization reporting (enterprise): Auto-roll billing start date. [[GH-27656](https://github.com/hashicorp/vault/pull/27656)] +* website/docs: Added API documentation for Azure Secrets Engine delete role [[GH-27883](https://github.com/hashicorp/vault/pull/27883)] + +BUG FIXES: + +* auth/cert: Use subject's serial number, not issuer's, within error message text in OCSP request errors [[GH-27696](https://github.com/hashicorp/vault/pull/27696)] +* core (enterprise): Fix 500 errors that occurred querying `sys/internal/ui/mounts` for a mount prefixed by a namespace path when path filters are configured. [[GH-27939](https://github.com/hashicorp/vault/pull/27939)] +* core/identity: Fixed an issue where deleted/reassigned entity-aliases were not removed from in-memory database. [[GH-27750](https://github.com/hashicorp/vault/pull/27750)] +* proxy/cache (enterprise): Fixed an issue where Proxy would not correctly update KV secrets when talking to a perf standby. Proxy will now attempt to forward requests to update secrets triggered by events to the active node. Note that this requires `allow_forwarding_via_header` to be configured on the cluster. [[GH-27891](https://github.com/hashicorp/vault/pull/27891)] +* proxy/cache (enterprise): Fixed an issue where cached static secrets could fail to update if the secrets belonged to a non-root namespace. [[GH-27730](https://github.com/hashicorp/vault/pull/27730)] +* raft/autopilot: Fixed panic that may occur during shutdown [[GH-27726](https://github.com/hashicorp/vault/pull/27726)] +* secrets-sync (enterprise): Destination set/remove operations will no longer be blocked as "purge in progress" after a purge job ended in failure. +* secrets-sync (enterprise): Normalize custom_tag keys and values for recoverable invalid characters. +* secrets-sync (enterprise): Normalize secret key names before storing the external_name in a secret association. +* secrets-sync (enterprise): Patching github sync destination credentials will properly update and save the new credentials. +* secrets-sync (enterprise): Return an error immediately on destination creation when providing invalid custom_tags based on destination type. +* secrets/identity (enterprise): Fix a bug that can cause DR promotion to fail in rare cases where a PR secondary has inconsistent alias information in storage. +* sys: Fix a bug where mounts of external plugins that were registered before Vault v1.0.0 could not be tuned to +use versioned plugins. [[GH-27881](https://github.com/hashicorp/vault/pull/27881)] +* ui: Fix cursor jump on KVv2 json editor that would occur after pressing ENTER. [[GH-27569](https://github.com/hashicorp/vault/pull/27569)] +* ui: fix issue where enabling then disabling "Tidy ACME" in PKI results in failed API call. [[GH-27742](https://github.com/hashicorp/vault/pull/27742)] +* ui: fix namespace picker not working on small screens where the sidebar is collapsed by default. [[GH-27728](https://github.com/hashicorp/vault/pull/27728)] + + +## 1.17.2 +### July 10, 2024 + +CHANGES: + +* core: Bump Go version to 1.22.5 +* secrets/azure: Update plugin to v0.19.2 [[GH-27652](https://github.com/hashicorp/vault/pull/27652)] + +FEATURES: + +* **AWS secrets engine STS session tags support**: Adds support for setting STS +session tags when generating temporary credentials using the AWS secrets +engine.
[[GH-27620](https://github.com/hashicorp/vault/pull/27620)] + +BUG FIXES: + +* cli: Fixed issue with `vault hcp connect` where HCP resources with uppercase letters were inaccessible when entering the correct project name. [[GH-27694](https://github.com/hashicorp/vault/pull/27694)] +* core (enterprise): Fix HTTP redirects in namespaces to use the correct path and (in the case of event subscriptions) the correct URI scheme. [[GH-27660](https://github.com/hashicorp/vault/pull/27660)] +* core/config: fix issue when using `proxy_protocol_behavior` with `deny_unauthorized`, +which causes the Vault TCP listener to close after receiving an untrusted upstream proxy connection. [[GH-27589](https://github.com/hashicorp/vault/pull/27589)] +* core: Fixed an issue with performance standbys not being able to handle rotate root requests. [[GH-27631](https://github.com/hashicorp/vault/pull/27631)] +* secrets/transit (enterprise): Fix an issue that caused input data to be returned as part of generated CMAC values. +* ui: Display an error and force a timeout when TOTP passcode is incorrect [[GH-27574](https://github.com/hashicorp/vault/pull/27574)] +* ui: Ensure token expired banner displays when batch token expires [[GH-27479](https://github.com/hashicorp/vault/pull/27479)] + +## 1.17.1 +### June 26, 2024 + +CHANGES: + +* auth/jwt: Update plugin to v0.21.0 [[GH-27498](https://github.com/hashicorp/vault/pull/27498)] + +IMPROVEMENTS: + +* storage/raft: Improve autopilot logging on startup to show config values clearly and avoid spurious logs [[GH-27464](https://github.com/hashicorp/vault/pull/27464)] +* ui/secrets-sync: Hide Secrets Sync from the sidebar nav if user does not have access to the feature. [[GH-27262](https://github.com/hashicorp/vault/pull/27262)] + +BUG FIXES: + +* agent: Fixed an issue causing excessive CPU usage during normal operation [[GH-27518](https://github.com/hashicorp/vault/pull/27518)] +* config: Vault TCP listener config now correctly supports the documented proxy_protocol_behavior +setting of 'deny_unauthorized' [[GH-27459](https://github.com/hashicorp/vault/pull/27459)] +* core/audit: Audit logging a Vault request/response checks if the existing context +is cancelled and will now use a new context with a 5 second timeout. +If the existing context is cancelled, a new context will be used. [[GH-27531](https://github.com/hashicorp/vault/pull/27531)] +* helper/pkcs7: Fix parsing certain messages containing only certificates [[GH-27435](https://github.com/hashicorp/vault/pull/27435)] +* proxy: Fixed an issue causing excessive CPU usage during normal operation [[GH-27518](https://github.com/hashicorp/vault/pull/27518)] +* replication (enterprise): fix cache invalidation issue leading to namespace custom metadata not being shown correctly on performance secondaries +* secrets-sync (enterprise): Properly remove tags from secrets in AWS when they are removed from the source association +* secrets-sync (enterprise): Return more accurate error code for invalid connection details +* secrets-sync (enterprise): Skip invalid GitHub repository names when creating destinations +* storage/azure: Fix invalid account name initialization bug [[GH-27563](https://github.com/hashicorp/vault/pull/27563)] +* storage/raft (enterprise): Fix issue with namespace cache not getting cleared on snapshot restore, resulting in namespaces not found in the snapshot being inaccurately represented by API responses.
[[GH-27474](https://github.com/hashicorp/vault/pull/27474)] +* ui: Allow creation of session_token type roles for AWS secret backend [[GH-27424](https://github.com/hashicorp/vault/pull/27424)] + +## 1.17.0 +### June 12, 2024 SECURITY: -* secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)] +* auth/jwt: Update plugin to v0.20.3 that resolves a security issue with validating JWTs [[GH-26890](https://github.com/hashicorp/vault/pull/26890), [HCSEC-2024-11](https://discuss.hashicorp.com/t/hcsec-2024-11-vault-incorrectly-validated-json-web-tokens-jwt-audience-claims/67770)] CHANGES: -* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] -* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] -* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. -This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] -* core: Bump Go version to 1.20.1. -* core: Vault version has been moved out of sdk and into main vault module. -Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. [[GH-14229](https://github.com/hashicorp/vault/pull/14229)] -* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)] -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)] -* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] -* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] -* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)] -* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)] -* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)] -* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on.
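The `sys/internal/inspect` entry above relies on a SIGHUP-driven config reload. A minimal Go sketch for triggering such a reload from automation; the pid-file path is an assumption for illustration (Vault does not write one by default):

```go
// Send SIGHUP to a running Vault server to make it reload its configuration.
// Unix-only sketch; the pid-file location below is hypothetical.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	raw, err := os.ReadFile("/var/run/vault/vault.pid") // assumed location
	if err != nil {
		fmt.Fprintln(os.Stderr, "read pid file:", err)
		os.Exit(1)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse pid:", err)
		os.Exit(1)
	}
	// SIGHUP asks Vault to reload its configs in place.
	if err := syscall.Kill(pid, syscall.SIGHUP); err != nil {
		fmt.Fprintln(os.Stderr, "signal:", err)
		os.Exit(1)
	}
	fmt.Println("sent SIGHUP to vault pid", pid)
}
```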
-* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)] +* api: Upgrade from github.com/go-jose/go-jose/v3 v3.0.3 to github.com/go-jose/go-jose/v4 v4.0.1. [[GH-26527](https://github.com/hashicorp/vault/pull/26527)] +* audit: breaking change - Vault now allows audit logs to contain 'correlation-id' and 'x-correlation-id' headers when they +are present in the incoming request. By default they are not HMAC'ed (but can be configured to HMAC by Vault Operators). [[GH-26777](https://github.com/hashicorp/vault/pull/26777)] +* auth/alicloud: Update plugin to v0.18.0 [[GH-27133](https://github.com/hashicorp/vault/pull/27133)] +* auth/azure: Update plugin to v0.18.0 [[GH-27146](https://github.com/hashicorp/vault/pull/27146)] +* auth/centrify: Remove the deprecated Centrify auth method plugin [[GH-27130](https://github.com/hashicorp/vault/pull/27130)] +* auth/cf: Update plugin to v0.17.0 [[GH-27161](https://github.com/hashicorp/vault/pull/27161)] +* auth/gcp: Update plugin to v0.18.0 [[GH-27140](https://github.com/hashicorp/vault/pull/27140)] +* auth/jwt: Update plugin to v0.20.2 [[GH-26291](https://github.com/hashicorp/vault/pull/26291)] +* auth/kerberos: Update plugin to v0.12.0 [[GH-27177](https://github.com/hashicorp/vault/pull/27177)] +* auth/kubernetes: Update plugin to v0.19.0 [[GH-27186](https://github.com/hashicorp/vault/pull/27186)] +* auth/oci: Update plugin to v0.16.0 [[GH-27142](https://github.com/hashicorp/vault/pull/27142)] +* core (enterprise): Seal High Availability (HA) must be enabled by `enable_multiseal` in configuration. +* core/identity: improve performance for secondary nodes receiving identity-related updates through replication [[GH-27184](https://github.com/hashicorp/vault/pull/27184)] +* core: Bump Go version to 1.22.4 +* core: return an additional "invalid token" error message in 403 response when the provided request token is expired, +has exceeded the number of uses, or is a bogus value [[GH-25953](https://github.com/hashicorp/vault/pull/25953)] +* database/couchbase: Update plugin to v0.11.0 [[GH-27145](https://github.com/hashicorp/vault/pull/27145)] +* database/elasticsearch: Update plugin to v0.15.0 [[GH-27136](https://github.com/hashicorp/vault/pull/27136)] +* database/mongodbatlas: Update plugin to v0.12.0 [[GH-27143](https://github.com/hashicorp/vault/pull/27143)] +* database/redis-elasticache: Update plugin to v0.4.0 [[GH-27139](https://github.com/hashicorp/vault/pull/27139)] +* database/redis: Update plugin to v0.3.0 [[GH-27117](https://github.com/hashicorp/vault/pull/27117)] +* database/snowflake: Update plugin to v0.11.0 [[GH-27132](https://github.com/hashicorp/vault/pull/27132)] +* sdk: String templates now have a maximum size of 100,000 characters. [[GH-26110](https://github.com/hashicorp/vault/pull/26110)] +* secrets/ad: Update plugin to v0.18.0 [[GH-27172](https://github.com/hashicorp/vault/pull/27172)] +* secrets/alicloud: Update plugin to v0.17.0 [[GH-27134](https://github.com/hashicorp/vault/pull/27134)] +* secrets/azure: Update plugin to v0.17.1 [[GH-26528](https://github.com/hashicorp/vault/pull/26528)] +* secrets/azure: Update plugin to v0.19.0 [[GH-27141](https://github.com/hashicorp/vault/pull/27141)] +* secrets/gcp: Update plugin to v0.19.0 [[GH-27164](https://github.com/hashicorp/vault/pull/27164)] +* secrets/gcpkms: Update plugin to v0.17.0 [[GH-27163](https://github.com/hashicorp/vault/pull/27163)] +* secrets/keymgmt (enterprise): Removed `namespace` label on the `vault.kmse.key.count` metric.
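For the audit correlation headers noted above, a hedged sketch using the official Go client (`github.com/hashicorp/vault/api`); the header value and secret path are placeholders, and whether the value is HMAC'ed in the audit log is up to operator configuration:

```go
// Attach an x-correlation-id header to every request from this client so it
// can appear in audit device entries. Placeholder values throughout.
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig()) // honors VAULT_ADDR / VAULT_TOKEN
	if err != nil {
		log.Fatal(err)
	}
	client.AddHeader("x-correlation-id", "req-12345") // placeholder ID

	secret, err := client.Logical().Read("secret/data/app") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data)
	}
}
```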
+* secrets/kmip (enterprise): Update plugin to v0.15.0 +* secrets/kubernetes: Update plugin to v0.8.0 [[GH-27187](https://github.com/hashicorp/vault/pull/27187)] +* secrets/kv: Update plugin to v0.18.0 [[GH-26877](https://github.com/hashicorp/vault/pull/26877)] +* secrets/kv: Update plugin to v0.19.0 [[GH-27159](https://github.com/hashicorp/vault/pull/27159)] +* secrets/mongodbatlas: Update plugin to v0.12.0 [[GH-27149](https://github.com/hashicorp/vault/pull/27149)] +* secrets/openldap: Update plugin to v0.13.0 [[GH-27137](https://github.com/hashicorp/vault/pull/27137)] +* secrets/pki: sign-intermediate API will truncate notAfter if calculated to go beyond the signing issuer's notAfter. Previously the notAfter was permitted to go beyond, leading to invalid chains. [[GH-26796](https://github.com/hashicorp/vault/pull/26796)] +* secrets/terraform: Update plugin to v0.8.0 [[GH-27147](https://github.com/hashicorp/vault/pull/27147)] +* ui/kubernetes: Update the roles filter-input to use explicit search. [[GH-27178](https://github.com/hashicorp/vault/pull/27178)] +* ui: Update dependencies including D3 libraries [[GH-26346](https://github.com/hashicorp/vault/pull/26346)] +* ui: Upgrade Ember data from 4.11.3 to 4.12.4 [[GH-25272](https://github.com/hashicorp/vault/pull/25272)] +* ui: Upgrade Ember to version 5.4 [[GH-26708](https://github.com/hashicorp/vault/pull/26708)] +* ui: deleting a nested secret will no longer redirect you to the nearest path segment [[GH-26845](https://github.com/hashicorp/vault/pull/26845)] +* ui: flash messages render on right side of page [[GH-25459](https://github.com/hashicorp/vault/pull/25459)] FEATURES: -* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)] -* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] -* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)] -* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)] -* **PKI Cross-Cluster Revocations**: Revocation information can now be -synchronized across primary and performance replica clusters offering -a unified CRL/OCSP view of revocations across cluster boundaries.
[[GH-19196](https://github.com/hashicorp/vault/pull/19196)] -* **Server UDS Listener**: Adding listener to Vault server to serve HTTP requests via a unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)] -* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys -* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent -brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)] -* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing. -* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications. +* **PKI Certificate Metadata (enterprise)**: Add certificate metadata functionality to record and return client information about a certificate. +* **Adaptive Overload Protection (enterprise)**: Adds Adaptive Overload Protection +for write requests as a Beta feature (disabled by default). This automatically +prevents overloads caused by too many write requests while maintaining optimal +throughput for the hardware configuration and workload. +* **Audit Filtering (enterprise)**: Audit devices support expression-based filter rules (powered by go-bexpr) to determine which entries are written to the audit log. +* **LDAP Secrets engine hierarchical path support**: Hierarchical path handling is now supported for role and set APIs. [[GH-27203](https://github.com/hashicorp/vault/pull/27203)] +* **Plugin Identity Tokens**: Adds secret-less configuration of AWS auth engine using web identity federation. [[GH-26507](https://github.com/hashicorp/vault/pull/26507)] +* **Plugin Workload Identity** (enterprise): Vault can generate identity tokens for plugins to use in workload identity federation auth flows. +* **Transit AES-CMAC (enterprise)**: Added support to create and verify AES-backed cipher-based message authentication codes. IMPROVEMENTS: -* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. [[GH-18752](https://github.com/hashicorp/vault/pull/18752)] -* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)] -* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)] -* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. [[GH-18403](https://github.com/hashicorp/vault/pull/18403)] -* agent: Add note in logs when starting Vault Agent indicating if the version differs from the Vault Server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)] -* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent.
[[GH-18740](https://github.com/hashicorp/vault/pull/18740)] -* agent: Agent listeners can now be set to the `metrics_only` role, serving only metrics, as part of the listener's new top level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)] -* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)] -* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)] -* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)] -* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)] -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)] -* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] -* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)] -* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a -Virtual Machine Scale Set (VMSS) in flexible orchestration mode. [[GH-17540](https://github.com/hashicorp/vault/pull/17540)] -* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)] -* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)] -* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)] -* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)] -* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)] -* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] -* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] -* auth/token (enterprise): Allow batch token creation in perfStandby nodes -* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. -Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)] -* auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)] -* autopilot: Update version to v0.2.0 to add better support for respecting min quorum -* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)] -* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)] -* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance.
[[GH-17750](https://github.com/hashicorp/vault/pull/17750)] -* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)] -* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)] -* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)] -* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)] -* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)] -* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)] -* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)] -* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)] -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)] -* core/server: Added an environment variable to write goroutine stacktraces to a -temporary file for SIGUSR2 signals. [[GH-17929](https://github.com/hashicorp/vault/pull/17929)] -* core: Add RPCs to read and update userFailedLoginInfo map -* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)] -* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] -* core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)] -* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)] -* core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from -sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)] -* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock a user -with given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)] -* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)] -* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods.
[[GH-18673](https://github.com/hashicorp/vault/pull/18673)] -* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. [[GH-17265](https://github.com/hashicorp/vault/pull/17265)] -* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)] -* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)] -* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)] -* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)] -* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)] -* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)] -* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)] -* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)] -* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)] -* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)] -* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)] -* logging: Vault agent and server commands support log file and log rotation. [[GH-18031](https://github.com/hashicorp/vault/pull/18031)] -* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)] -* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configured -to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour.
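A hedged sketch of driving the `sys/config/group-policy-application` API mentioned above from the Go client; the request field name `group_policy_application_mode` is an assumption for illustration, while the two mode values come from the entry itself:

```go
// Switch group policy application from the default
// "within_namespace_hierarchy" to "any" (cross-namespace).
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	_, err = client.Logical().Write("sys/config/group-policy-application", map[string]interface{}{
		// Field name assumed; values per the changelog entry above.
		"group_policy_application_mode": "any",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```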
-* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)] -* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)] -* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)] -* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)] -* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)] -* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)] -* openapi: add openapi response definitions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)] -* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. [[GH-17289](https://github.com/hashicorp/vault/pull/17289)] -* plugins: Let Vault unseal and mount deprecated builtin plugins in a -deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] -* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] -* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)] -* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)] -* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)] -* secrets/azure: Adds ability to persist an application for the lifetime of a role.
[[GH-19096](https://github.com/hashicorp/vault/pull/19096)] -* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)] -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)] -* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)] -* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)] -* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)] -* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)] -* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] -* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)] -* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] -* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)] -* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when -allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)] -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)] -* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)] -* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. [[GH-18645](https://github.com/hashicorp/vault/pull/18645)] -* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)] -* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)] -* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)] -* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)] -* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)] -* secrets/transit: Add an optional reference field to batch operation items -which is repeated on batch responses to help more easily correlate inputs with outputs. 
[[GH-18243](https://github.com/hashicorp/vault/pull/18243)] -* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)] -* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)] -* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)] -* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* sys/internal/inspect: Creates an endpoint to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)] -* sys/internal/inspect: Creates an endpoint to inspect internal subsystems. -* ui: Add algorithm-signer as an SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)] -* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)] -* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)] -* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] -* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)] -* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)] -* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)] -* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] -* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)] -* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] -* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)] -* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)] +* activity (enterprise): Change minimum retention window in activity log to 48 months +* agent: Added a new config option, `lease_renewal_threshold`, that controls the refresh rate of non-renewable leases in Agent's template engine. [[GH-25212](https://github.com/hashicorp/vault/pull/25212)] +* agent: Agent will re-trigger auto auth if token used for rendering templates has been revoked, has exceeded the number of uses, or is a bogus value. [[GH-26172](https://github.com/hashicorp/vault/pull/26172)] +* api: Move CLI token helper functions to importable packages in `api` module.
[[GH-25744](https://github.com/hashicorp/vault/pull/25744)] +* audit: timestamps across multiple audit devices for an audit entry will now match. [[GH-26088](https://github.com/hashicorp/vault/pull/26088)] +* auth/aws: Add inferred_hostname metadata for IAM AWS authentication method. [[GH-25418](https://github.com/hashicorp/vault/pull/25418)] +* auth/aws: add canonical ARN as entity alias option [[GH-22460](https://github.com/hashicorp/vault/pull/22460)] +* auth/aws: add support for external_ids in AWS assume-role [[GH-26628](https://github.com/hashicorp/vault/pull/26628)] +* auth/cert: Adds support for TLS certificate authentication through a reverse proxy that terminates the SSL connection [[GH-17272](https://github.com/hashicorp/vault/pull/17272)] +* cli: Add events subscriptions commands +* command/server: Removed environment variable requirement to generate pprof +files using SIGUSR2. Added CPU profile support. [[GH-25391](https://github.com/hashicorp/vault/pull/25391)] +* core (enterprise): persist seal rewrap status, so rewrap status API is consistent on secondary nodes. +* core/activity: Include ACME client metrics to precomputed queries [[GH-26519](https://github.com/hashicorp/vault/pull/26519)] +* core/activity: Include ACME clients in activity log responses [[GH-26020](https://github.com/hashicorp/vault/pull/26020)] +* core/activity: Include ACME clients in vault operator usage response [[GH-26525](https://github.com/hashicorp/vault/pull/26525)] +* core/config: reload service registration configuration on SIGHUP [[GH-17598](https://github.com/hashicorp/vault/pull/17598)] +* core: add deadlock detection in barrier and sealwrap +* license utilization reporting (enterprise): Add retention months to license utilization reports. +* proxy/cache (enterprise): Support new configuration parameter for static secret caching, `static_secret_token_capability_refresh_behavior`, to control the behavior when the capability refresh request receives an error from Vault. +* proxy: Proxy will re-trigger auto auth if the token used for requests has been revoked, has exceeded the number of uses, +or is an otherwise invalid value. [[GH-26307](https://github.com/hashicorp/vault/pull/26307)] +* raft/snapshotagent (enterprise): upgrade raft-snapshotagent to v0.0.0-20221104090112-13395acd02c5 +* replication (enterprise): Add replication heartbeat metric to telemetry +* replication (enterprise): Periodically write current time on the primary to storage, use that downstream to measure replication lag in time, expose that in health and replication status endpoints. [[GH-26406](https://github.com/hashicorp/vault/pull/26406)] +* sdk/decompression: DecompressWithCanary will now chunk the decompression in memory to prevent loading it all at once. [[GH-26464](https://github.com/hashicorp/vault/pull/26464)] +* sdk/helper/testcluster: add some new helpers, improve some error messages. [[GH-25329](https://github.com/hashicorp/vault/pull/25329)] +* sdk/helper/testhelpers: add namespace helpers [[GH-25270](https://github.com/hashicorp/vault/pull/25270)] +* secrets-sync (enterprise): Added global config path to the administrative namespace.
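For the AWS auth external_ids entry above, a hedged sketch of what configuring an STS entry with an external ID might look like via the Go client; the `auth/aws/config/sts/:account_id` path and the `external_id` field name are assumptions here, and the account ID and role ARN are placeholders:

```go
// Configure an assume-role (STS) entry on the AWS auth method with an
// external ID. Path, field names, and values are illustrative.
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	_, err = client.Logical().Write("auth/aws/config/sts/123456789012", map[string]interface{}{
		"sts_role":    "arn:aws:iam::123456789012:role/vault-auth", // placeholder
		"external_id": "my-external-id",                            // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}
```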
+* secrets/pki (enterprise): Disable warnings about unknown parameters to the various CIEPS endpoints +* secrets/pki: Add a new ACME configuration parameter that allows increasing the maximum TTL for ACME leaf certificates [[GH-26797](https://github.com/hashicorp/vault/pull/26797)] +* secrets/transform (enterprise): Add delete by token and delete by plaintext operations to Tokenization. +* storage/azure: Perform validation on Azure account name and container name [[GH-26135](https://github.com/hashicorp/vault/pull/26135)] +* storage/raft (enterprise): add support for separate entry size limit for mount +and namespace table paths in storage to allow increased mount table size without +allowing other user storage entries to become larger. [[GH-25992](https://github.com/hashicorp/vault/pull/25992)] +* storage/raft: panic on unknown Raft operations [[GH-25991](https://github.com/hashicorp/vault/pull/25991)] +* ui (enterprise): Allow HVD users to access Secrets Sync. [[GH-26841](https://github.com/hashicorp/vault/pull/26841)] +* ui (enterprise): Update dashboard to make activity log query using the same start time as the metrics overview [[GH-26729](https://github.com/hashicorp/vault/pull/26729)] +* ui (enterprise): Update filters on the custom messages list view. [[GH-26653](https://github.com/hashicorp/vault/pull/26653)] +* ui: Allow users to wrap inputted data again instead of resetting form [[GH-27289](https://github.com/hashicorp/vault/pull/27289)] +* ui: Display ACME clients on a separate page in the UI. [[GH-26020](https://github.com/hashicorp/vault/pull/26020)] +* ui: Hide dashboard client count card if user does not have permission to view clients. [[GH-26848](https://github.com/hashicorp/vault/pull/26848)] +* ui: Show computed values from `sys/internal/ui/mounts` endpoint for auth mount configuration view [[GH-26663](https://github.com/hashicorp/vault/pull/26663)] +* ui: Update PGP display and show error for Generate Operation Token flow with PGP [[GH-26993](https://github.com/hashicorp/vault/pull/26993)] +* ui: Update language in Transit secret engine to reflect that not all keys are for encryption [[GH-27346](https://github.com/hashicorp/vault/pull/27346)] +* ui: Update userpass user form to allow setting `password_hash` field. [[GH-26577](https://github.com/hashicorp/vault/pull/26577)] +* ui: fixes cases where inputs did not have associated labels [[GH-26263](https://github.com/hashicorp/vault/pull/26263)] +* ui: show banner instead of permission denied error when batch token is expired [[GH-26396](https://github.com/hashicorp/vault/pull/26396)] +* website/docs: Add note about eventual consistency with the MongoDB Atlas database secrets engine [[GH-24152](https://github.com/hashicorp/vault/pull/24152)] DEPRECATIONS: -* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. [[GH-19334](https://github.com/hashicorp/vault/pull/19334)] +* Request Limiter Beta (enterprise): This Beta feature added in 1.16 has been +superseded by Adaptive Overload Protection and will be removed. +* secrets/azure: Deprecate field "password_policy" as we are not able to set it anymore with the new MS Graph API. [[GH-25637](https://github.com/hashicorp/vault/pull/25637)] + +BUG FIXES: + +* activity (enterprise): fix read-only storage error on upgrades +* agent: Correctly constructs kv-v2 secret paths in nested namespaces.
[[GH-26863](https://github.com/hashicorp/vault/pull/26863)] +* agent: Fixes a high Vault load issue by restarting the Consul template server after backing off instead of immediately. [[GH-25497](https://github.com/hashicorp/vault/pull/25497)] +* agent: `vault.namespace` no longer gets incorrectly overridden by `auto_auth.namespace`, if set [[GH-26427](https://github.com/hashicorp/vault/pull/26427)] +* api: fixed a bug where LifetimeWatcher routines weren't respecting exponential backoff in the presence of unexpected errors [[GH-26383](https://github.com/hashicorp/vault/pull/26383)] +* audit: Operator changes to configured audit headers (via `/sys/config/auditing`) +will now force invalidation and be reloaded from storage when data is replicated +to other nodes. +* auth/ldap: Fix login error for group search anonymous bind. [[GH-26200](https://github.com/hashicorp/vault/pull/26200)] +* auth/ldap: Fix login error missing entity alias attribute value. [[GH-26200](https://github.com/hashicorp/vault/pull/26200)] +* auto-auth: Addressed issue where having no permissions to renew a renewable token caused auto-auth to attempt to renew constantly with no backoff [[GH-26844](https://github.com/hashicorp/vault/pull/26844)] +* cli/debug: Fix resource leak in CLI debug command. [[GH-26167](https://github.com/hashicorp/vault/pull/26167)] +* cli: fixed a bug where the Vault CLI would error out if +HOME was not set. [[GH-26243](https://github.com/hashicorp/vault/pull/26243)] +* core (enterprise): Fix 403s returned when forwarding invalid token to active node from secondary. +* core (enterprise): Fix an issue that prevented the seal re-wrap status from reporting that a re-wrap is in progress for up to a second. +* core (enterprise): fix bug where raft followers disagree with the seal type after returning to one seal from two. [[GH-26523](https://github.com/hashicorp/vault/pull/26523)] +* core (enterprise): fix issue where the Seal HA rewrap system may remain running when an active node steps down. +* core/audit: Audit logging a Vault request/response will now use a minimum 5 second context timeout. +If the existing context deadline occurs later than 5s in the future, it will be used, otherwise a +new context, separate from the original, will be used. [[GH-26616](https://github.com/hashicorp/vault/pull/26616)] +* core/metrics: store cluster name in unencrypted storage to prevent blank cluster name [[GH-26878](https://github.com/hashicorp/vault/pull/26878)] +* core/namespace (enterprise): Privileged namespace paths provided in the `administrative_namespace_path` config will now be canonicalized. +* core/seal: During a seal reload through SIGHUP, only write updated seal barrier on an active node [[GH-26381](https://github.com/hashicorp/vault/pull/26381)] +* core/seal: allow overriding of VAULT_GCPCKMS_SEAL_KEY_RING and VAULT_GCPCKMS_SEAL_CRYPTO_KEY environment keys in seal-ha +* core: Add missing field delegated_auth_accessors to GET /sys/mounts/:path API response [[GH-26876](https://github.com/hashicorp/vault/pull/26876)] +* core: Address a data race updating a seal's last seen healthy time attribute [[GH-27014](https://github.com/hashicorp/vault/pull/27014)] +* core: Fix `redact_version` listener parameter being ignored for some OpenAPI related endpoints. [[GH-26607](https://github.com/hashicorp/vault/pull/26607)] +* core: Only reload seal configuration when enable_multiseal is set to true.
[[GH-26166](https://github.com/hashicorp/vault/pull/26166)] +* core: when listener configuration `chroot_namespace` is active, Vault will no longer report that the configuration is invalid when Vault is sealed +* events (enterprise): Fix bug preventing subscribing and receiving events within a namespace. +* events (enterprise): Terminate WebSocket connection when token is revoked. +* openapi: Fixing approle response duration types [[GH-25510](https://github.com/hashicorp/vault/pull/25510)] +* openapi: added the missing migrate parameter for the unseal endpoint in vault/logical_system_paths.go [[GH-25550](https://github.com/hashicorp/vault/pull/25550)] +* pki: Fix error in cross-signing using ed25519 keys [[GH-27093](https://github.com/hashicorp/vault/pull/27093)] +* plugin/wif: fix a bug where the namespace was not set for external plugins using workload identity federation [[GH-26384](https://github.com/hashicorp/vault/pull/26384)] +* replication (enterprise): fix "given mount path is not in the same namespace as the request" error that can occur when enabling replication for the first time on a secondary cluster +* replication (enterprise): fixed data integrity issue with the processing of identity aliases causing duplicates to occur in rare cases +* router: Fix missing lock in MatchingSystemView. [[GH-25191](https://github.com/hashicorp/vault/pull/25191)] +* secret/database: Fixed race condition where database mounts may leak connections [[GH-26147](https://github.com/hashicorp/vault/pull/26147)] +* secrets-sync (enterprise): Fixed an issue with syncing to target projects in GCP +* secrets/azure: Update vault-plugin-secrets-azure to 0.17.2 to include a bug fix for azure role creation [[GH-26896](https://github.com/hashicorp/vault/pull/26896)] +* secrets/pki (enterprise): cert_role parameter within authenticators.cert EST configuration handler could not be set +* secrets/pki: fixed validation bug which rejected ldap schemed URLs in crl_distribution_points. [[GH-26477](https://github.com/hashicorp/vault/pull/26477)] +* secrets/transform (enterprise): Fix a bug preventing the use of alternate schemas on PostgreSQL token stores. +* secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter. [[GH-27211](https://github.com/hashicorp/vault/pull/27211)] +* storage/raft (enterprise): Fix a bug where autopilot automated upgrades could fail due to using the wrong upgrade version +* storage/raft (enterprise): Fix a regression introduced in 1.15.8 that causes +autopilot to fail to discover new server versions and so not trigger an upgrade. [[GH-27277](https://github.com/hashicorp/vault/pull/27277)] +* storage/raft: prevent writes from impeding leader transfers, e.g. during automated upgrades [[GH-25390](https://github.com/hashicorp/vault/pull/25390)] +* transform (enterprise): guard against a panic looking up a token in exportable mode with barrier storage. +* ui: Do not show resultant-ACL banner when ancestor namespace grants wildcard access. [[GH-27263](https://github.com/hashicorp/vault/pull/27263)] +* ui: Fix KVv2 cursor jumping inside json editor after initial input. [[GH-27120](https://github.com/hashicorp/vault/pull/27120)] +* ui: Fix KVv2 json editor to allow null values.
[[GH-27094](https://github.com/hashicorp/vault/pull/27094)] +* ui: Fix a bug where disabling TTL on the AWS credential form would still send TTL value [[GH-27366](https://github.com/hashicorp/vault/pull/27366)] +* ui: Fix broken help link in console for the web command. [[GH-26858](https://github.com/hashicorp/vault/pull/26858)] +* ui: Fix configuration link from Secret Engine list view for Ember engines. [[GH-27131](https://github.com/hashicorp/vault/pull/27131)] +* ui: Fix link to v2 generic secrets engine from secrets list page. [[GH-27019](https://github.com/hashicorp/vault/pull/27019)] +* ui: Prevent perpetual loading screen when Vault needs initialization [[GH-26985](https://github.com/hashicorp/vault/pull/26985)] +* ui: Refresh model within a namespace on the Secrets Sync overview page. [[GH-26790](https://github.com/hashicorp/vault/pull/26790)] +* ui: Remove possibility of returning an undefined timezone from date-format helper [[GH-26693](https://github.com/hashicorp/vault/pull/26693)] +* ui: Resolved accessibility issues with Web REPL. Associated label and help text with input, added a conditional to show the console/ui-panel only when toggled open, added keyboard focus trap. [[GH-26872](https://github.com/hashicorp/vault/pull/26872)] +* ui: fix issue where a month without new clients breaks the client count dashboard [[GH-27352](https://github.com/hashicorp/vault/pull/27352)] +* ui: fixed a bug where the replication pages did not update display when navigating between DR and performance [[GH-26325](https://github.com/hashicorp/vault/pull/26325)] +* ui: fixes undefined start time in filename for downloaded client count attribution csv [[GH-26485](https://github.com/hashicorp/vault/pull/26485)] + +## 1.16.9 Enterprise +### August 30, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. + +SECURITY: + +core/audit: fix regression where client tokens and token accessors were being +displayed in the audit log in plaintext [HCSEC-2024-18](https://discuss.hashicorp.com/t/hcsec-2024-18-vault-leaks-client-token-and-token-accessor-in-audit-devices) + +BUG FIXES: + +* proxy/cache (enterprise): Fixed an issue where Proxy with static secret caching enabled would not correctly handle requests to older secret versions for KVv2 secrets. Proxy's static secret cache now properly handles all requests relating to older versions for KVv2 secrets. [[GH-28207](https://github.com/hashicorp/vault/pull/28207)] +## 1.16.8 Enterprise +### August 29, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. + +CHANGES: + +* activity (enterprise): filter all fields in client count responses by the request namespace [[GH-27790](https://github.com/hashicorp/vault/pull/27790)] +* core: Bump Go version to 1.22.6 + +IMPROVEMENTS: + +* activity log: Changes how new client counts in the current month are estimated, in order to return more +visibly sensible totals. [[GH-27547](https://github.com/hashicorp/vault/pull/27547)] +* activity: `/sys/internal/counters/activity` will now include a warning if the specified usage period contains estimated client counts. [[GH-28068](https://github.com/hashicorp/vault/pull/28068)] +* audit: Adds TRACE logging to log request/response under certain circumstances, and further improvements to the audit subsystem. 
[[GH-28056](https://github.com/hashicorp/vault/pull/28056)] +* cli: `vault operator usage` will now include a warning if the specified usage period contains estimated client counts. [[GH-28068](https://github.com/hashicorp/vault/pull/28068)] +* core/activity: Ensure client count queries that include the current month return consistent results by sorting the clients before performing estimation [[GH-28062](https://github.com/hashicorp/vault/pull/28062)] BUG FIXES: -* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)] -* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] -* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)] -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)] -* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)] -* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)] -* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] -* core (enterprise): Fix missing quotation mark in error message -* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation. 
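The estimated-client-count warnings described above surface through the standard warnings field on API responses. A minimal Go sketch that queries the activity endpoint and prints any warnings; the date range is a placeholder:

```go
// Read /sys/internal/counters/activity and surface warnings about
// estimated (not yet finalized) client counts.
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	secret, err := client.Logical().ReadWithData("sys/internal/counters/activity", map[string][]string{
		"start_time": {"2024-01-01T00:00:00Z"}, // placeholder range
		"end_time":   {"2024-08-31T23:59:59Z"},
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		for _, w := range secret.Warnings {
			fmt.Println("warning:", w)
		}
	}
}
```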
-* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. -* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] -* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] -* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] -* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] -* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] -* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] -* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)] -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)] -* core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: Linux packages now have vendor label and set the default label to HashiCorp. -This fix is implemented for any future releases, but will not be updated for historical releases. 
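A rough illustration of the wrapping-request behavior noted above ("Return a 403 instead of a 500 for wrapping requests when token is not provided"); the endpoint and TTL are arbitrary examples:

```
$ curl -s -o /dev/null -w '%{http_code}\n' \
    --request POST \
    --header 'X-Vault-Wrap-TTL: 60s' \
    "$VAULT_ADDR/v1/auth/token/create"
403
```

With no `X-Vault-Token` supplied, the wrapping request is now rejected as unauthorized (403) instead of surfacing an internal error (500).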
-* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] -* core: fix GPG encryption to support subkeys. [[GH-16224](https://github.com/hashicorp/vault/pull/16224)] -* core: fix a start up race condition where performance standbys could go into a -mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] -* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] -* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. -* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. -* license (enterprise): Fix bug where license would update even if the license didn't change. -* licensing (enterprise): update autoloaded license cache after reload -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] -* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] -* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] -* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] -* plugins: Only report deprecation status for builtin plugins. 
[[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)] -* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] -* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)] -* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)] -* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] -* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)] -* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] -* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issueing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. 
[[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)] -* ui: Remove default value of 30 to TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)] -* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)] -* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] -* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.12.4 -### March 01, 2023 +* activity: The sys/internal/counters/activity endpoint will return current month data when the end_date parameter is set to a future date. [[GH-28042](https://github.com/hashicorp/vault/pull/28042)] +* command: The `vault secrets move` and `vault auth move` commands will no longer attempt to write to storage on performance standby nodes. [[GH-28059](https://github.com/hashicorp/vault/pull/28059)] +* core (enterprise): Fix deletion of MFA login-enforcement configurations on standby nodes +* secrets/database: Skip connection verification on reading existing DB connection configuration [[GH-28139](https://github.com/hashicorp/vault/pull/28139)] +* ui: fixes toast (flash) alert message saying "created" when deleting a kv v2 secret [[GH-28093](https://github.com/hashicorp/vault/pull/28093)] + +## 1.16.7 Enterprise +### August 07, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. CHANGES: -* core: Bump Go version to 1.19.6. +* auth/cf: Update plugin to v0.18.0 [[GH-27724](https://github.com/hashicorp/vault/pull/27724)] IMPROVEMENTS: -* secrets/database: Adds error message requiring password on root credential rotation.
[[GH-19103](https://github.com/hashicorp/vault/pull/19103)] -* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] +* audit: Ensure that any underlying errors from audit devices are logged even if we consider auditing to be a success. [[GH-27809](https://github.com/hashicorp/vault/pull/27809)] +* audit: Internal implementation changes to the audit subsystem which improve performance. [[GH-27952](https://github.com/hashicorp/vault/pull/27952)] +* audit: sinks (file, socket, syslog) will attempt to log errors to the server operational +log before returning (if there are errors to log, and the context is done). [[GH-27859](https://github.com/hashicorp/vault/pull/27859)] +* auth/cert: Cache full list of role trust information separately to avoid +eviction, and avoid duplicate loading during multiple simultaneous logins on +the same role. [[GH-27902](https://github.com/hashicorp/vault/pull/27902)] +* license utilization reporting (enterprise): Auto-roll billing start date. [[GH-27656](https://github.com/hashicorp/vault/pull/27656)] + +BUG FIXES: + +* auth/cert: Use subject's serial number, not issuer's within error message text in OCSP request errors [[GH-27696](https://github.com/hashicorp/vault/pull/27696)] +* cli: Fixed issue with `vault hcp connect` where HCP resources with uppercase letters were inaccessible when entering the correct project name. [[GH-27694](https://github.com/hashicorp/vault/pull/27694)] +* core (enterprise): Fix 500 errors that occurred querying `sys/internal/ui/mounts` for a mount prefixed by a namespace path when path filters are configured. [[GH-27939](https://github.com/hashicorp/vault/pull/27939)] +* core/identity: Fixed an issue where deleted/reassigned entity-aliases were not removed from in-memory database. [[GH-27750](https://github.com/hashicorp/vault/pull/27750)] +* proxy/cache (enterprise): Fixed an issue where Proxy would not correctly update KV secrets when talking to a perf standby. Proxy will now attempt to forward requests to update secrets triggered by events to the active node. Note that this requires `allow_forwarding_via_header` to be configured on the cluster. [[GH-27891](https://github.com/hashicorp/vault/pull/27891)] +* raft/autopilot: Fixed panic that may occur during shutdown [[GH-27726](https://github.com/hashicorp/vault/pull/27726)] +* secrets-sync (enterprise): Destination set/remove operations will no longer be blocked as "purge in progress" after a purge job ended in failure. +* secrets-sync (enterprise): Normalize custom_tag keys and values for recoverable invalid characters. +* secrets-sync (enterprise): Normalize secret key names before storing the external_name in a secret association. +* secrets-sync (enterprise): Patching github sync destination credentials will properly update and save the new credentials. +* secrets-sync (enterprise): Return an error immediately on destination creation when providing invalid custom_tags based on destination type. +* secrets/identity (enterprise): Fix a bug that can cause DR promotion to fail in rare cases where a PR secondary has inconsistent alias information in storage. +* sys: Fix a bug where mounts of external plugins that were registered before Vault v1.0.0 could not be tuned to +use versioned plugins. [[GH-27881](https://github.com/hashicorp/vault/pull/27881)] +* ui: Fix cursor jump on KVv2 json editor that would occur after pressing ENTER.
[[GH-27569](https://github.com/hashicorp/vault/pull/27569)] +* ui: fix issue where enabling then disabling "Tidy ACME" in PKI results in failed API call. [[GH-27742](https://github.com/hashicorp/vault/pull/27742)] +* ui: fix namespace picker not working on small screens where the sidebar is collapsed by default. [[GH-27728](https://github.com/hashicorp/vault/pull/27728)] + + +## 1.16.6 Enterprise +### July 10, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. + +CHANGES: + +* core: Bump Go version to 1.22.5. +* auth/jwt: Revert [GH-295](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/295) which changed the way JWT `aud` claims were validated. BUG FIXES: -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18207](https://github.com/hashicorp/vault/pull/18207)] -* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] -* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] +* agent: Correctly constructs kv-v2 secret paths in nested namespaces. [[GH-26863](https://github.com/hashicorp/vault/pull/26863)] +* core (enterprise): Fix HTTP redirects in namespaces to use the correct path and (in the case of event subscriptions) the correct URI scheme. [[GH-27660](https://github.com/hashicorp/vault/pull/27660)] +* core/config: fix issue when using `proxy_protocol_behavior` with `deny_unauthorized`, +which causes the Vault TCP listener to close after receiving an untrusted upstream proxy connection (see the listener sketch below). [[GH-27589](https://github.com/hashicorp/vault/pull/27589)] +* core: Fixed an issue with performance standbys not being able to handle rotate root requests.
[[GH-27631](https://github.com/hashicorp/vault/pull/27631)] +* ui: Display an error and force a timeout when TOTP passcode is incorrect [[GH-27574](https://github.com/hashicorp/vault/pull/27574)] +* ui: Ensure token expired banner displays when batch token expires [[GH-27479](https://github.com/hashicorp/vault/pull/27479)] + +## 1.16.5 Enterprise +### June 26, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. + +BUG FIXES: + +* cli/debug: Fix resource leak in CLI debug command. [[GH-26167](https://github.com/hashicorp/vault/pull/26167)] +* config: Vault TCP listener config now correctly supports the documented proxy_protocol_behavior +setting of 'deny_unauthorized' [[GH-27459](https://github.com/hashicorp/vault/pull/27459)] +* core/audit: Audit logging a Vault request/response checks if the existing context +is cancelled and will now use a new context with a 5 second timeout. +If the existing context is cancelled, a new context will be used. [[GH-27531](https://github.com/hashicorp/vault/pull/27531)] +* helper/pkcs7: Fix parsing certain messages containing only certificates [[GH-27435](https://github.com/hashicorp/vault/pull/27435)] +* replication (enterprise): fix cache invalidation issue leading to namespace custom metadata not being shown correctly on performance secondaries +* secrets-sync (enterprise): Properly remove tags from secrets in AWS when they are removed from the source association +* secrets-sync (enterprise): Return more accurate error code for invalid connection details +* secrets-sync (enterprise): Skip invalid GitHub repository names when creating destinations +* storage/raft (enterprise): Fix issue with namespace cache not getting cleared on snapshot restore, resulting in namespaces not found in the snapshot being inaccurately represented by API responses. [[GH-27474](https://github.com/hashicorp/vault/pull/27474)] +* ui: Allow creation of session_token type roles for AWS secret backend [[GH-27424](https://github.com/hashicorp/vault/pull/27424)] + +## 1.16.4 Enterprise +### June 12, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. -## 1.12.3 -### February 6, 2023 - CHANGES: -* core: Bump Go version to 1.19.4. +* core: Bump Go version to 1.22.4. +* ui/kubernetes: Update the roles filter-input to use explicit search. [[GH-27178](https://github.com/hashicorp/vault/pull/27178)] IMPROVEMENTS: -* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] -* plugins: Let Vault unseal and mount deprecated builtin plugins in a -deactivated state if this is not the first unseal after an upgrade.
[[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] +* ui: Allow users to wrap inputted data again instead of resetting form [[GH-27289](https://github.com/hashicorp/vault/pull/27289)] +* ui: Update language in Transit secret engine to reflect that not all keys are for encryption [[GH-27346](https://github.com/hashicorp/vault/pull/27346)] BUG FIXES: -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses.
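The two `proxy_protocol_behavior` fixes in the 1.16.5 and 1.16.6 entries above concern listener configuration along these lines; a minimal sketch, with the address and authorized proxy IPs as placeholder assumptions:

```
$ cat listener.hcl
listener "tcp" {
  address                         = "0.0.0.0:8200"
  proxy_protocol_behavior         = "deny_unauthorized"
  proxy_protocol_authorized_addrs = ["10.0.1.10", "10.0.1.11"]
}
```

Before those fixes, a listener configured this way could close entirely after receiving a connection from an untrusted upstream proxy rather than rejecting just that connection.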
-* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. -* licensing (enterprise): update autoloaded license cache after reload -* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] -* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] -* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] -* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] -* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issueing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] -## 1.12.2 -### November 30, 2022 +* secrets/transform (enterprise): Fix a bug preventing the use of alternate schemas on PostgreSQL token stores. +* storage/raft (enterprise): Fix a regression introduced in 1.15.8 that causes +autopilot to fail to discover new server versions and so not trigger an upgrade. [[GH-27277](https://github.com/hashicorp/vault/pull/27277)] +* ui: Do not show resultant-ACL banner when ancestor namespace grants wildcard access. [[GH-27263](https://github.com/hashicorp/vault/pull/27263)] +* ui: Fix a bug where disabling TTL on the AWS credential form would still send TTL value [[GH-27366](https://github.com/hashicorp/vault/pull/27366)] +* ui: fix issue where a month with total clients but no new clients breaks the client count dashboard [[GH-5962](https://github.com/hashicorp/vault/pull/5962)] + +## 1.16.3 +### May 30, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. 
+ +SECURITY: + +* auth/jwt: Update plugin to v0.20.3 that resolves a security issue with validating JWTs [[GH-26890](https://github.com/hashicorp/vault/pull/26890), [HCSEC-2024-11](https://discuss.hashicorp.com/t/hcsec-2024-11-vault-incorrectly-validated-json-web-tokens-jwt-audience-claims/67770)] CHANGES: -* core: Bump Go version to 1.19.3. -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* core/identity: improve performance for secondary nodes receiving identity related updates through replication [[GH-27184](https://github.com/hashicorp/vault/pull/27184)] +* core: Bump Go version to 1.22.2. IMPROVEMENTS: -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)] - -BUG FIXES: +* secrets/pki (enterprise): Disable warnings about unknown parameters to the various CIEPS endpoints +* ui: Update PGP display and show error for Generate Operation Token flow with PGP [[GH-26993](https://github.com/hashicorp/vault/pull/26993)] + +BUG FIXES: + +* activity (enterprise): fix read-only storage error on upgrades +* auto-auth: Addressed issue where having no permissions to renew a renewable token caused auto-auth to attempt to renew constantly with no backoff [[GH-26844](https://github.com/hashicorp/vault/pull/26844)] +* core (enterprise): Fix an issue that prevented the seal re-wrap status from reporting that a re-wrap is in progress for up to a second. +* core/audit: Audit logging a Vault request/response will now use a minimum 5 second context timeout. +If the existing context deadline occurs later than 5s in the future, it will be used, otherwise a new context, separate from the original, will be used. [[GH-26616](https://github.com/hashicorp/vault/pull/26616)] +* core: Add missing field delegated_auth_accessors to GET /sys/mounts/:path API response (see the example below) [[GH-26876](https://github.com/hashicorp/vault/pull/26876)] +* core: Address a data race updating a seal's last seen healthy time attribute [[GH-27014](https://github.com/hashicorp/vault/pull/27014)] +* core: Fix `redact_version` listener parameter being ignored for some OpenAPI related endpoints. [[GH-26607](https://github.com/hashicorp/vault/pull/26607)] +* events (enterprise): Fix bug preventing subscribing and receiving events within a namespace. +* pki: Fix error in cross-signing using ed25519 keys [[GH-27093](https://github.com/hashicorp/vault/pull/27093)] +* replication (enterprise): fix "given mount path is not in the same namespace as the request" error that can occur when enabling replication for the first time on a secondary cluster +* secrets-sync (enterprise): Secondary nodes in a cluster now properly check activation-flags values.
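A quick way to see the restored `delegated_auth_accessors` field from the 1.16.3 fix above; the `secret/` mount and the exact JSON placement of the field are assumptions for illustration:

```
$ curl -s --header "X-Vault-Token: $VAULT_TOKEN" \
    "$VAULT_ADDR/v1/sys/mounts/secret" | jq '.data.config'
```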
+* secrets/azure: Update vault-plugin-secrets-azure to 0.17.2 to include a bug fix for azure role creation [[GH-26896](https://github.com/hashicorp/vault/pull/26896)] +* secrets/pki (enterprise): cert_role parameter within authenticators.cert EST configuration handler could not be set +* secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter. [[GH-27211](https://github.com/hashicorp/vault/pull/27211)] +* ui: Fix KVv2 cursor jumping inside json editor after initial input. [[GH-27120](https://github.com/hashicorp/vault/pull/27120)] +* ui: Fix KVv2 json editor to allow null values. [[GH-27094](https://github.com/hashicorp/vault/pull/27094)] +* ui: Fix broken help link in console for the web command. [[GH-26858](https://github.com/hashicorp/vault/pull/26858)] +* ui: Fix link to v2 generic secrets engine from secrets list page. [[GH-27019](https://github.com/hashicorp/vault/pull/27019)] +* ui: Prevent perpetual loading screen when Vault needs initialization [[GH-26985](https://github.com/hashicorp/vault/pull/26985)] +* ui: Refresh model within a namespace on the Secrets Sync overview page. [[GH-26790](https://github.com/hashicorp/vault/pull/26790)] + +## 1.16.2 +### April 24, 2024 + +**Enterprise LTS:** Vault Enterprise 1.16 is a [Long-Term Support (LTS)](https://developer.hashicorp.com/vault/docs/enterprise/lts) release. -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. 
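To illustrate the `hash_algorithm` fallback described in the 1.16.3 transit fix above, a hedged sketch of an HMAC verify call (the key name and HMAC value are placeholders):

```
$ vault write transit/verify/my-key \
    input="$(printf 'hello world' | base64)" \
    hmac='vault:v1:<hmac-from-transit/hmac>' \
    hash_algorithm=sha2-256
```

Per the entry, Vault now prefers `hash_algorithm` when present and only falls back to the deprecated `algorithm` parameter otherwise.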
[[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +CHANGES: -## 1.12.1 -### November 2, 2022 +* auth/jwt: Update plugin to v0.20.2 [[GH-26291](https://github.com/hashicorp/vault/pull/26291)] +* core: Bump Go version to 1.21.9. +* secrets/azure: Update plugin to v0.17.1 [[GH-26528](https://github.com/hashicorp/vault/pull/26528)] +* ui: Update dependencies including D3 libraries [[GH-26346](https://github.com/hashicorp/vault/pull/26346)] IMPROVEMENTS: -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] +* activity (enterprise): Change minimum retention window in activity log to 48 months +* audit: timestamps across multiple audit devices for an audit entry will now match. [[GH-26088](https://github.com/hashicorp/vault/pull/26088)] +* license utilization reporting (enterprise): Add retention months to license utilization reports. +* sdk/decompression: DecompressWithCanary will now chunk the decompression in memory to prevent loading it all at once. [[GH-26464](https://github.com/hashicorp/vault/pull/26464)] +* ui: fixes cases where inputs did not have associated labels [[GH-26263](https://github.com/hashicorp/vault/pull/26263)] +* ui: show banner instead of permission denied error when batch token is expired [[GH-26396](https://github.com/hashicorp/vault/pull/26396)] BUG FIXES: -* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. -* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. 
[[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] +* agent: `vault.namespace` no longer gets incorrectly overridden by `auto_auth.namespace`, if set [[GH-26427](https://github.com/hashicorp/vault/pull/26427)] +* api: fixed a bug where LifetimeWatcher routines weren't respecting exponential backoff in the presence of unexpected errors [[GH-26383](https://github.com/hashicorp/vault/pull/26383)] +* core (enterprise): fix bug where raft followers disagree with the seal type after returning to one seal from two. [[GH-26523](https://github.com/hashicorp/vault/pull/26523)] +* core/seal: During a seal reload through SIGHUP, only write updated seal barrier on an active node [[GH-26381](https://github.com/hashicorp/vault/pull/26381)] +* core/seal: allow overriding of VAULT_GCPCKMS_SEAL_KEY_RING and VAULT_GCPCKMS_SEAL_CRYPTO_KEY environment keys in seal-ha +* events (enterprise): Terminate WebSocket connection when token is revoked. +* secrets/pki: fixed validation bug which rejected ldap schemed URLs in crl_distribution_points. [[GH-26477](https://github.com/hashicorp/vault/pull/26477)] +* storage/raft (enterprise): Fix a bug where autopilot automated upgrades could fail due to using the wrong upgrade version +* ui: fixed a bug where the replication pages did not update display when navigating between DR and performance [[GH-26325](https://github.com/hashicorp/vault/pull/26325)] +* ui: fixes undefined start time in filename for downloaded client count attribution csv [[GH-26485](https://github.com/hashicorp/vault/pull/26485)] -## 1.12.0 -### October 13, 2022 +## 1.16.1 +### April 04, 2024 + +**Please note that Vault 1.16.1 is the first Enterprise release of the Vault Enterprise 1.16 series.** + +BUG FIXES: + +* auth/ldap: Fix login error for group search anonymous bind. [[GH-26200](https://github.com/hashicorp/vault/pull/26200)] +* auth/ldap: Fix login error missing entity alias attribute value. [[GH-26200](https://github.com/hashicorp/vault/pull/26200)] +* cli: fixed a bug where the Vault CLI would error out if HOME was not set. [[GH-26243](https://github.com/hashicorp/vault/pull/26243)] +* core: Only reload seal configuration when enable_multiseal is set to true. [[GH-26166](https://github.com/hashicorp/vault/pull/26166)] +* secret/database: Fixed race condition where database mounts may leak connections [[GH-26147](https://github.com/hashicorp/vault/pull/26147)] + +## 1.16.0 +### March 26, 2024 + +SECURITY: + +* auth/cert: compare public keys of trusted non-CA certificates with incoming +client certificates to prevent trusting certs with the same serial number +but not the same public/private key (CVE-2024-2048). 
[[GH-25649](https://github.com/hashicorp/vault/pull/25649), [HCSEC-2024-05](https://discuss.hashicorp.com/t/hcsec-2024-05-vault-cert-auth-method-did-not-correctly-validate-non-ca-certificates/63382)] +* auth/cert: validate OCSP response was signed by the expected issuer and serial number matched request (CVE-2024-2660) [[GH-26091](https://github.com/hashicorp/vault/pull/26091), [HCSEC-2024-07](https://discuss.hashicorp.com/t/hcsec-2024-07-vault-tls-cert-auth-method-did-not-correctly-validate-ocsp-responses/64573)] +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption (CVE-2023-4680) [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HCSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] CHANGES: -* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] -* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] -* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] -* core: Bump Go version to 1.19.2. -* core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] -* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] -* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary. -* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `plugin info` displays deprecation status for builtin plugins.
[[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `plugin list` now accepts a `-detailed` flag, which display deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)] -* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)] -* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] +* Upgrade grpc to v1.58.3 [[GH-23703](https://github.com/hashicorp/vault/pull/23703)] +* Upgrade x/net to v0.17.0 [[GH-23703](https://github.com/hashicorp/vault/pull/23703)] +* api: add the `enterprise` parameter to the `/sys/health` endpoint [[GH-24270](https://github.com/hashicorp/vault/pull/24270)] +* auth/alicloud: Update plugin to v0.16.1 [[GH-25014](https://github.com/hashicorp/vault/pull/25014)] +* auth/alicloud: Update plugin to v0.17.0 [[GH-25217](https://github.com/hashicorp/vault/pull/25217)] +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* auth/azure: Update plugin to v0.16.1 [[GH-22795](https://github.com/hashicorp/vault/pull/22795)] +* auth/azure: Update plugin to v0.17.0 [[GH-25258](https://github.com/hashicorp/vault/pull/25258)] +* auth/cf: Update plugin to v0.16.0 [[GH-25196](https://github.com/hashicorp/vault/pull/25196)] +* auth/gcp: Update plugin to v0.16.2 [[GH-25233](https://github.com/hashicorp/vault/pull/25233)] +* auth/jwt: Update plugin to v0.19.0 [[GH-24972](https://github.com/hashicorp/vault/pull/24972)] +* auth/jwt: Update plugin to v0.20.0 [[GH-25326](https://github.com/hashicorp/vault/pull/25326)] +* auth/jwt: Update plugin to v0.20.1 [[GH-25937](https://github.com/hashicorp/vault/pull/25937)] +* auth/kerberos: Update plugin to v0.10.1 [[GH-22797](https://github.com/hashicorp/vault/pull/22797)] +* auth/kerberos: Update plugin to v0.11.0 [[GH-25232](https://github.com/hashicorp/vault/pull/25232)] +* auth/kubernetes: Update plugin to v0.18.0 [[GH-25207](https://github.com/hashicorp/vault/pull/25207)] +* auth/oci: Update plugin to v0.14.1 [[GH-22774](https://github.com/hashicorp/vault/pull/22774)] +* auth/oci: Update plugin to v0.15.1 [[GH-25245](https://github.com/hashicorp/vault/pull/25245)] +* cli: Using `vault plugin reload` with `-plugin` in the root namespace will now reload the plugin across all namespaces instead of just the root namespace. [[GH-24878](https://github.com/hashicorp/vault/pull/24878)] +* cli: `vault plugin info` and `vault plugin deregister` now require 2 positional arguments instead of accepting either 1 or 2. 
[[GH-24250](https://github.com/hashicorp/vault/pull/24250)] +* core (enterprise): Seal High Availability (HA) must be enabled by `enable_multiseal` in configuration. +* core: Bump Go version to 1.21.8. +* database/couchbase: Update plugin to v0.10.1 [[GH-25275](https://github.com/hashicorp/vault/pull/25275)] +* database/elasticsearch: Update plugin to v0.14.0 [[GH-25263](https://github.com/hashicorp/vault/pull/25263)] +* database/mongodbatlas: Update plugin to v0.11.0 [[GH-25264](https://github.com/hashicorp/vault/pull/25264)] +* database/redis-elasticache: Update plugin to v0.3.0 [[GH-25296](https://github.com/hashicorp/vault/pull/25296)] +* database/redis: Update plugin to v0.2.3 [[GH-25289](https://github.com/hashicorp/vault/pull/25289)] +* database/snowflake: Update plugin to v0.10.0 [[GH-25143](https://github.com/hashicorp/vault/pull/25143)] +* database/snowflake: Update plugin to v0.9.1 [[GH-25020](https://github.com/hashicorp/vault/pull/25020)] +* events: Remove event notifications websocket endpoint in non-Enterprise [[GH-25640](https://github.com/hashicorp/vault/pull/25640)] +* events: Source URL is now `vault://{vault node}` [[GH-24201](https://github.com/hashicorp/vault/pull/24201)] +* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)] +* plugins/database: Reading connection config at `database/config/:name` will now return a computed `running_plugin_version` field if a non-builtin version is running. [[GH-25105](https://github.com/hashicorp/vault/pull/25105)] +* plugins: Add a warning to the response from sys/plugins/reload/backend if no plugins were reloaded. [[GH-24512](https://github.com/hashicorp/vault/pull/24512)] +* plugins: By default, environment variables provided during plugin registration will now take precedence over system environment variables. +Use the environment variable `VAULT_PLUGIN_USE_LEGACY_ENV_LAYERING=true` to opt out and keep higher preference for system environment +variables. When this flag is set, Vault will check during unseal for conflicts and print warnings for any plugins with environment +variables that conflict with system environment variables. [[GH-25128](https://github.com/hashicorp/vault/pull/25128)] +* plugins: `/sys/plugins/runtimes/catalog` response will always include a list of "runtimes" in the response, even if empty. [[GH-24864](https://github.com/hashicorp/vault/pull/24864)] +* sdk: Upgrade dependent packages by sdk. +This includes github.com/docker/docker to v24.0.7+incompatible, +google.golang.org/grpc to v1.57.2 and golang.org/x/net to v0.17.0.
[[GH-23913](https://github.com/hashicorp/vault/pull/23913)] +* secrets/ad: Update plugin to v0.16.2 [[GH-25058](https://github.com/hashicorp/vault/pull/25058)] +* secrets/ad: Update plugin to v0.17.0 [[GH-25187](https://github.com/hashicorp/vault/pull/25187)] +* secrets/alicloud: Update plugin to v0.16.0 [[GH-25257](https://github.com/hashicorp/vault/pull/25257)] +* secrets/azure: Update plugin to v0.17.0 [[GH-25189](https://github.com/hashicorp/vault/pull/25189)] +* secrets/gcp: Update plugin to v0.18.0 [[GH-25173](https://github.com/hashicorp/vault/pull/25173)] +* secrets/gcpkms: Update plugin to v0.16.0 [[GH-25231](https://github.com/hashicorp/vault/pull/25231)] +* secrets/keymgmt: Update plugin to v0.10.0 +* secrets/kubernetes: Update plugin to v0.7.0 [[GH-25204](https://github.com/hashicorp/vault/pull/25204)] +* secrets/kv: Update plugin to v0.16.2 [[GH-22790](https://github.com/hashicorp/vault/pull/22790)] +* secrets/kv: Update plugin to v0.17.0 [[GH-25277](https://github.com/hashicorp/vault/pull/25277)] +* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] +* secrets/mongodbatlas: Update plugin to v0.11.0 [[GH-25253](https://github.com/hashicorp/vault/pull/25253)] +* secrets/openldap: Update plugin to v0.11.3 [[GH-25040](https://github.com/hashicorp/vault/pull/25040)] +* secrets/openldap: Update plugin to v0.12.0 [[GH-25251](https://github.com/hashicorp/vault/pull/25251)] +* secrets/openldap: Update plugin to v0.12.1 [[GH-25524](https://github.com/hashicorp/vault/pull/25524)] +* secrets/terraform: Update plugin to v0.7.5 [[GH-25288](https://github.com/hashicorp/vault/pull/25288)] +* telemetry: Seal wrap encrypt/decrypt metrics now differentiate between seals using a metrics label of seal name rather than separate metric names. [[GH-23837](https://github.com/hashicorp/vault/pull/23837)] +* ui: Update icons to use Flight icons where available. [[GH-24823](https://github.com/hashicorp/vault/pull/24823)] +* ui: add subnav for replication items [[GH-24283](https://github.com/hashicorp/vault/pull/24283)] FEATURES: -* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys -* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)] -* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] -* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a stand alone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)] -* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. 
[[GH-17075](https://github.com/hashicorp/vault/pull/17075)] -* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] -* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations -* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature -* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)] -* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. +* **Add Snapshot Inspector Tool**: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] +* **Audit Filtering**: Audit devices support expression-based filter rules (powered by go-bexpr) to determine which entries are written to the audit log. [[GH-24558](https://github.com/hashicorp/vault/pull/24558)] +* **Controlled Access to Unauthenticated Endpoints (enterprise)**: Gives admins more control over how unauthenticated endpoints in Vault can be accessed and in some cases what information they return. [[GH-23547](https://github.com/hashicorp/vault/pull/23547)] [[GH-23534](https://github.com/hashicorp/vault/pull/23534)] [[GH-23740](https://github.com/hashicorp/vault/pull/23740)] +* **Custom messages (enterprise)**: Introduces custom messages settings, allowing users to view, and operators to configure system-wide messages. +* **Database Event Notifications**: The database plugin now emits event notifications. [[GH-24718](https://github.com/hashicorp/vault/pull/24718)] +* **Default Lease Count Quota (enterprise)**: Apply a new global default lease count quota of 300k leases for all +new installs of Vault. [[GH-24382](https://github.com/hashicorp/vault/pull/24382)] +* **Experimental Raft-WAL Option**: Reduces risk of infinite snapshot loops for follower nodes in large-scale Integrated Storage deployments. [[GH-21460](https://github.com/hashicorp/vault/pull/21460)] +* **Manual License Utilization Reporting**: Added manual license +utilization reporting, which allows users to create manual exports of product-license metering +data to report to HashiCorp. +* **Plugin Identity Tokens**: Adds secret-less configuration of AWS secret engine using web identity federation. [[GH-24987](https://github.com/hashicorp/vault/pull/24987)] +* **Plugin Workload Identity** (enterprise): Vault can generate identity tokens for plugins to use in workload identity federation auth flows. +* **Quotas in Privileged Namespaces**: Enable creation/update/deletion of quotas from the privileged namespace +* **Reload seal configuration on SIGHUP**: Seal configuration is reloaded on SIGHUP so that seal configuration can +be changed without shutting down vault [[GH-23571](https://github.com/hashicorp/vault/pull/23571)] +* **Request Limiter (enterprise)**: Add adaptive concurrency limits to +write-based HTTP methods and special-case `pki/issue` requests to prevent +overloading the Vault server. [[GH-25093](https://github.com/hashicorp/vault/pull/25093)] +* **Rotate Root for LDAP auth**: Rotate root operations are now supported for the LDAP auth engine.
+* **Controlled Access to Unauthenticated Endpoints (enterprise)**: Gives admins more control over how unauthenticated endpoints in Vault can be accessed and in some cases what information they return. [[GH-23547](https://github.com/hashicorp/vault/pull/23547)] [[GH-23534](https://github.com/hashicorp/vault/pull/23534)] [[GH-23740](https://github.com/hashicorp/vault/pull/23740)]
+* **Custom messages (enterprise)**: Introduces custom messages settings, allowing users to view, and operators to configure, system-wide messages.
+* **Database Event Notifications**: The database plugin now emits event notifications. [[GH-24718](https://github.com/hashicorp/vault/pull/24718)]
+* **Default Lease Count Quota (enterprise)**: Apply a new global default lease count quota of 300k leases for all
+new installs of Vault. [[GH-24382](https://github.com/hashicorp/vault/pull/24382)]
+* **Experimental Raft-WAL Option**: Reduces risk of infinite snapshot loops for follower nodes in large-scale Integrated Storage deployments. [[GH-21460](https://github.com/hashicorp/vault/pull/21460)]
+* **Manual License Utilization Reporting**: Added manual license
+utilization reporting, which allows users to create manual exports of product-license [metering
+data] to report to HashiCorp.
+* **Plugin Identity Tokens**: Adds secret-less configuration of AWS secret engine using web identity federation. [[GH-24987](https://github.com/hashicorp/vault/pull/24987)]
+* **Plugin Workload Identity** (enterprise): Vault can generate identity tokens for plugins to use in workload identity federation auth flows.
+* **Quotas in Privileged Namespaces**: Enable creation/update/deletion of quotas from the privileged namespace
+* **Reload seal configuration on SIGHUP**: Seal configuration is reloaded on SIGHUP so that seal configuration can
+be changed without shutting down Vault [[GH-23571](https://github.com/hashicorp/vault/pull/23571)]
+* **Request Limiter (enterprise)**: Add adaptive concurrency limits to
+write-based HTTP methods and special-case `pki/issue` requests to prevent
+overloading the Vault server. [[GH-25093](https://github.com/hashicorp/vault/pull/25093)]
+* **Rotate Root for LDAP auth**: Rotate root operations are now supported for the LDAP auth engine. [[GH-24099](https://github.com/hashicorp/vault/pull/24099)]
+* **Seal High Availability (enterprise)**: Operators can configure more than one automatic seal for resilience against seal provider outages.
+* **Secrets Sync UI (enterprise)**: Adds secret syncing for KV v2 secrets to external destinations using the UI. [[GH-23667](https://github.com/hashicorp/vault/pull/23667)]
+* **Vault PKI EST Server (Enterprise/Beta)**: Beta support for the PKI Enrollment over Secure Transport (EST) certificate management protocol has been added to the Vault PKI Plugin. This allows standard EST clients to request certificates from a Vault server with no knowledge of Vault APIs.
+* **Vault Proxy Static Secret Caching (enterprise)**: Adds support for static secret (KVv1 and KVv2) caching to Vault Proxy. [[GH-23621](https://github.com/hashicorp/vault/pull/23621)]
+* **secrets-import (enterprise)**: Support importing secrets from external sources into KVv2
+* **secrets/aws**: Support issuing an STS Session Token directly from the root credential. [[GH-23690](https://github.com/hashicorp/vault/pull/23690)]
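+
+  A minimal sketch, assuming the `session_token` credential type this entry
+  introduces and an AWS secrets engine mounted at `aws/` (the role name is
+  hypothetical):
+
+  ```shell
+  # Create a role that issues STS session tokens from the root credential,
+  # then generate credentials against it.
+  $ vault write aws/roles/session-role credential_type=session_token
+  $ vault read aws/creds/session-role
+  ```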

IMPROVEMENTS:

-* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api
-* activity (enterprise): Added new clients unit tests to test accuracy of estimates
-* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)]
-* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)]
-* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)]
-* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)]
-* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)]
-* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)]
-* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)]
-* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)]
-* auth/approle: SecretIDs can now be generated with an per-request specified TTL and num_uses.
-When either the ttl and num_uses fields are not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)]
-* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)]
-* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. [[GH-17194](https://github.com/hashicorp/vault/pull/17194)]
-* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)]
-* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)]
-* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)]
-* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)]
-* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)]
-* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)]
-* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)]
-* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)]
-* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)]
-* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
-* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)]
-* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)]
-* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)]
-* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
-* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)]
-* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)]
-* command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)]
-* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)]
-* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command
-* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported.
-* core (enterprise): Add custom metadata support for namespaces
-* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)]
-* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)]
-* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)]
-* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas
-* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role
-* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)]
-* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)]
-* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)]
-* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)]
-* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
-* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)]
-* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)]
-* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)]
-* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)]
-* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)]
-* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)]
-* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)]
-* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)]
-* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)]
-* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)]
-* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)]
-* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)]
-* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)]
-* plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
-* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer.
-* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)]
-* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)]
-* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)]
-* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)]
-* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)]
-* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)]
-* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)]
-* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)]
-* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)]
-* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)]
-* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)]
-* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. [[GH-17199](https://github.com/hashicorp/vault/pull/17199)]
-* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)]
-* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)]
-* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)]
-* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)]
-* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)]
-* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)]
-* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)]
-* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)]
-* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)]
-* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)]
-* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). [[GH-16564](https://github.com/hashicorp/vault/pull/16564)]
-* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)]
-* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)]
-* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)]
-* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)]
-* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)]
-* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)]
-* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)]
-* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)]
-* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)]
-* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)]
-* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. [[GH-16549](https://github.com/hashicorp/vault/pull/16549)]
-* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)]
-* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)]
-* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)]
-* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)]
-* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)]
-* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)]
-* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)]
-* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)]
-* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)]
-* website/docs: API generate-recovery-token documentation. [[GH-16213](https://github.com/hashicorp/vault/pull/16213)]
-* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)]
-* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)]
-* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)]
-* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)]
+* .release/linux: add LimitCORE=0 to vault.service [[GH-23272](https://github.com/hashicorp/vault/pull/23272)]
+* agent/template: Added max_connections_per_host to limit total number of connections per Vault host. [[GH-24548](https://github.com/hashicorp/vault/pull/24548)]
+* agent: Added new namespace top level configuration parameter, which can be used to make requests made by Agent go to that namespace. [[GH-24667](https://github.com/hashicorp/vault/pull/24667)]
+* agent: allow users to specify files for child process stdout/stderr [[GH-22812](https://github.com/hashicorp/vault/pull/22812)]
+* api (enterprise): Enable the sys/license/features endpoint from any namespace
+* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)]
+* api: Add wrapper functions for GET /sys/mounts/:path and GET /sys/auth/:path [[GH-25499](https://github.com/hashicorp/vault/pull/25499)]
+* api: Do not require sudo for API wrapper functions GetAuth and GetAuthWithContext [[GH-25968](https://github.com/hashicorp/vault/pull/25968)]
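+
+  A minimal Go sketch of the new wrappers, assuming a KV mount at `secret/`
+  and the token auth method at `token/`:
+
+  ```go
+  package main
+
+  import (
+      "fmt"
+      "log"
+
+      vault "github.com/hashicorp/vault/api"
+  )
+
+  func main() {
+      // DefaultConfig honors VAULT_ADDR; the client also reads VAULT_TOKEN.
+      client, err := vault.NewClient(vault.DefaultConfig())
+      if err != nil {
+          log.Fatal(err)
+      }
+
+      // GET /sys/mounts/secret: fetch a single mount instead of listing all.
+      mount, err := client.Sys().GetMount("secret")
+      if err != nil {
+          log.Fatal(err)
+      }
+      fmt.Println("mount type:", mount.Type)
+
+      // GET /sys/auth/token: GetAuth no longer requires sudo capabilities.
+      auth, err := client.Sys().GetAuth("token")
+      if err != nil {
+          log.Fatal(err)
+      }
+      fmt.Println("auth type:", auth.Type)
+  }
+  ```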
+* api: added new API field to Vault responses, `mount_type`, returning mount information (e.g. `kv` for KVV1/KVV2) for mount when appropriate. [[GH-23047](https://github.com/hashicorp/vault/pull/23047)]
+* api: sys/health and sys/ha-status now expose information about how long
+the last heartbeat took, and the estimated clock skew between standby and
+active node based on that heartbeat duration. [[GH-24343](https://github.com/hashicorp/vault/pull/24343)]
+* auth/cert: Allow validation with OCSP responses with no NextUpdate time [[GH-25912](https://github.com/hashicorp/vault/pull/25912)]
+* auth/cert: Cache trusted certs to reduce memory usage and improve performance of logins. [[GH-25421](https://github.com/hashicorp/vault/pull/25421)]
+* auth/ldap: introduce cap/ldap.Client for LDAP authentication
+auth/ldap: deprecates `connection_timeout` in favor of `request_timeout` for timeouts
+sdk/ldaputil: deprecates Client in favor of cap/ldap.Client [[GH-22185](https://github.com/hashicorp/vault/pull/22185)]
+* auth/saml: Update plugin to v0.2.0
+* auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. [[GH-22996](https://github.com/hashicorp/vault/pull/22996)]
+* cli: introduce new command group hcp which groups subcommands for authentication of users or machines to HCP using
+either provided arguments or an HCP token retrieved through browser login. [[GH-23897](https://github.com/hashicorp/vault/pull/23897)]
+* cli: Improved error messages for `vault plugin` sub-commands. [[GH-24250](https://github.com/hashicorp/vault/pull/24250)]
+* cli: adds plugin identity token to enable and tune commands for secret engines and auth methods [[GH-24980](https://github.com/hashicorp/vault/pull/24980)]
+* cli: include secret syncs counts in the `vault operator usage` command output [[GH-25751](https://github.com/hashicorp/vault/pull/25751)]
+* command/server: display logs on startup immediately if disable-gated-logs flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)]
+* command/token-capabilities: allow using accessor when listing token capabilities on a path [[GH-24479](https://github.com/hashicorp/vault/pull/24479)]
+* core (enterprise): Avoid seal rewrapping in some specific unnecessary cases.
+* core (enterprise): Improve seal unwrap performance when in degraded mode with one or more unhealthy seals. [[GH-25171](https://github.com/hashicorp/vault/pull/25171)]
+* core (enterprise): Speed up unseal when using namespaces
+* core (enterprise): persist seal rewrap status, so rewrap status API is consistent on secondary nodes.
+* core/activity: Include secret_syncs in activity log responses [[GH-24710](https://github.com/hashicorp/vault/pull/24710)]
+* core/cli: Warning related to VAULT_ADDR & -address not set with CLI requests. [[GH-17076](https://github.com/hashicorp/vault/pull/17076)]
+* core/metrics: add metrics for secret sync client count [[GH-25713](https://github.com/hashicorp/vault/pull/25713)]
+* core: Added new `plugin_tmpdir` config option for containerized plugins, in addition to the existing `VAULT_PLUGIN_TMPDIR` environment variable. [[GH-24978](https://github.com/hashicorp/vault/pull/24978)]
+* core: make the best effort timeout for encryption count tracking persistence configurable via an environment variable. [[GH-25636](https://github.com/hashicorp/vault/pull/25636)]
+* core: update sys/seal-status (and CLI vault status) to report the type of
+the seal when unsealed, as well as the type of the recovery seal if an
+auto-seal. [[GH-23022](https://github.com/hashicorp/vault/pull/23022)]
+* events: Add support for event subscription plugins, including SQS [[GH-24352](https://github.com/hashicorp/vault/pull/24352)]
+* identity/tokens: adds plugin issuer with openid-configuration and keys APIs [[GH-24898](https://github.com/hashicorp/vault/pull/24898)]
+* limits: Add a listener configuration option `disable_request_limiter` to allow
+disabling the request limiter per-listener. [[GH-25098](https://github.com/hashicorp/vault/pull/25098)]
+* limits: Introduce a reloadable opt-in configuration for the Request Limiter. [[GH-25095](https://github.com/hashicorp/vault/pull/25095)]
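+
+  A minimal server-config sketch of the per-listener opt-out, assuming an
+  otherwise standard TCP listener stanza:
+
+  ```hcl
+  # Opt this listener out of the Request Limiter (enterprise).
+  listener "tcp" {
+    address                 = "127.0.0.1:8200"
+    tls_disable             = true
+    disable_request_limiter = true
+  }
+  ```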
+* oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata [[GH-24979](https://github.com/hashicorp/vault/pull/24979)]
+* plugins: Add new pin version APIs to enforce all plugins of a specific type and name to run the same version. [[GH-25105](https://github.com/hashicorp/vault/pull/25105)]
+* plugins: Containerized plugins can be run fully rootless with the runsc runtime. [[GH-24236](https://github.com/hashicorp/vault/pull/24236)]
+* plugins: New API `sys/plugins/reload/:type/:name` available in the root namespace for reloading a specific plugin across all namespaces. [[GH-24878](https://github.com/hashicorp/vault/pull/24878)]
+* proxy: Added new namespace top level configuration parameter, and prepend_configured_namespace API Proxy configuration parameter, which can be used to make requests made to Proxy be proxied to that namespace. [[GH-24667](https://github.com/hashicorp/vault/pull/24667)]
+* raft/snapshotagent (enterprise): upgrade raft-snapshotagent to v0.0.0-20221104090112-13395acd02c5
+* replication (enterprise): Add last_upstream_remote_wal metric to telemetry and stop emitting last_remote_wal on standby nodes
+* replication (enterprise): Add re-index status metric to telemetry
+* replication: Add re-index status metric to telemetry [[GH-23160](https://github.com/hashicorp/vault/pull/23160)]
+* sdk/plugin: Fix an issue where external plugins were not reporting logs below INFO level [[GH-23771](https://github.com/hashicorp/vault/pull/23771)]
+* sdk: Add identity token helpers to consistently apply new plugin WIF fields across integrations. [[GH-24925](https://github.com/hashicorp/vault/pull/24925)]
+* sdk: adds new method to system view to allow plugins to request identity tokens [[GH-24929](https://github.com/hashicorp/vault/pull/24929)]
+* secrets-sync (enterprise): Add ability to turn the sync system on and off
+* secrets-sync (enterprise): Add reconciliation loop
+* secrets-sync (enterprise): Added PATCH request on the sync destinations API
+* secrets-sync (enterprise): Added delete request to reset global config to factory defaults
+* secrets-sync (enterprise): Added field to define custom tags to add on synced secrets
+* secrets-sync (enterprise): Added global config path to the administrative namespace.
+* secrets-sync (enterprise): Added telemetry on number of destinations and associations per type.
+* secrets-sync (enterprise): Adds ability to set target GCP project ID to sync secrets with
+* secrets-sync (enterprise): Adjusted associations list responses to be more CLI-friendly
+* secrets-sync (enterprise): Adjusted destination list responses to be more CLI-friendly & added endpoint to list destinations by type.
+* secrets-sync (enterprise): Clean up membdb tests
+* secrets-sync (enterprise): Support AWS IAM assume role and external ID
+* secrets-sync (enterprise): Support custom GitHub apps
+* secrets-sync (enterprise): Support custom templating of external secret names
+* secrets-sync (enterprise): Support granular secrets syncing
+* secrets-sync (enterprise): add purge field to the destination delete endpoint
+* secrets/database: Add new reload/:plugin_name API to reload database plugins by name for a specific mount. [[GH-24472](https://github.com/hashicorp/vault/pull/24472)]
+* secrets/database: Support reloading named database plugins using the sys/plugins/reload/backend API endpoint. [[GH-24512](https://github.com/hashicorp/vault/pull/24512)]
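+
+  A minimal sketch of both reload paths, assuming a database secrets engine
+  mounted at `database/` that uses the built-in PostgreSQL plugin:
+
+  ```shell
+  # Reload the named plugin for this mount only.
+  $ vault write -f database/reload/postgresql-database-plugin
+
+  # Or reload it by type and name across all namespaces (root namespace only).
+  $ vault write -f sys/plugins/reload/database/postgresql-database-plugin
+  ```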
+* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)]
+* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)]
+* storage/raft (enterprise): Replication WAL batches may now contain up to 4096
+entries rather than being limited to 62 like Consul is. Performance testing
+shows improvements in throughput and latency under some write-heavy workloads.
+* storage/raft: Add support for larger transactions when using raft storage. [[GH-24991](https://github.com/hashicorp/vault/pull/24991)]
+* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)]
+* sys (enterprise): Enable sys/config/group-application-policy in privileged namespace
+* sys (enterprise): Adds the chroot_namespace field to the sys/internal/ui/resultant-acl endpoint, which exposes the value of the chroot namespace from the
+listener config.
+* sys: adds configuration of the key used to sign plugin identity tokens during mount enable and tune [[GH-24962](https://github.com/hashicorp/vault/pull/24962)]
+* ui: Add `deletion_allowed` param to transformations and include `tokenization` as a type option [[GH-25436](https://github.com/hashicorp/vault/pull/25436)]
+* ui: Add warning message to the namespace picker warning users about the behavior when logging in with a root token. [[GH-23277](https://github.com/hashicorp/vault/pull/23277)]
+* ui: Adds a warning when whitespace is detected in a key of a KV secret [[GH-23702](https://github.com/hashicorp/vault/pull/23702)]
+* ui: Adds allowed_response_headers, plugin_version and user_lockout_config params to auth method configuration [[GH-25646](https://github.com/hashicorp/vault/pull/25646)]
+* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)]
+* ui: Allow users in userpass auth mount to update their own password [[GH-23797](https://github.com/hashicorp/vault/pull/23797)]
+* ui: Implement Helios Design System Breadcrumbs [[GH-24387](https://github.com/hashicorp/vault/pull/24387)]
+* ui: Implement Helios Design System copy button component making copy buttons accessible [[GH-22333](https://github.com/hashicorp/vault/pull/22333)]
+* ui: Implement Helios Design System footer component [[GH-24191](https://github.com/hashicorp/vault/pull/24191)]
+* ui: Implement Helios Design System pagination component [[GH-23169](https://github.com/hashicorp/vault/pull/23169)]
+* ui: Increase base font-size from 14px to 16px and update use of rem vs pixels for size variables [[GH-23994](https://github.com/hashicorp/vault/pull/23994)]
+* ui: Makes modals accessible by implementing Helios Design System modal component [[GH-23382](https://github.com/hashicorp/vault/pull/23382)]
+* ui: Replace inline confirm alert inside a popup-menu dropdown with confirm alert modal [[GH-21520](https://github.com/hashicorp/vault/pull/21520)]
+* ui: Separates out client counts dashboard to overview and entity/non-entity tabs [[GH-24752](https://github.com/hashicorp/vault/pull/24752)]
+* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)]
+* ui: Surface warning banner if UI has stopped auto-refreshing token [[GH-23143](https://github.com/hashicorp/vault/pull/23143)]
+* ui: Update AlertInline component to use Helios Design System Alert component [[GH-24299](https://github.com/hashicorp/vault/pull/24299)]
+* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)]
+* ui: Update mount backend form to use selectable cards [[GH-14998](https://github.com/hashicorp/vault/pull/14998)]
+* ui: Update sidebar Secrets engine to title case. [[GH-23964](https://github.com/hashicorp/vault/pull/23964)]
+* ui: Use Hds::Dropdown component to replace list view popup menus [[GH-25321](https://github.com/hashicorp/vault/pull/25321)]
+* ui: add error message when copy action fails [[GH-25479](https://github.com/hashicorp/vault/pull/25479)]
+* ui: add granularity param to sync destinations [[GH-25500](https://github.com/hashicorp/vault/pull/25500)]
+* ui: capabilities-self is always called in the user's root namespace [[GH-24168](https://github.com/hashicorp/vault/pull/24168)]
+* ui: improve accessibility - color contrast, labels, and automatic testing [[GH-24476](https://github.com/hashicorp/vault/pull/24476)]
+* ui: the latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, hence added a link to redirect back to the app. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)]
+* ui: obscure JSON values when KV v2 secret has nested objects [[GH-24530](https://github.com/hashicorp/vault/pull/24530)]
+* ui: redirect back to current route after reauthentication when token expires [[GH-25335](https://github.com/hashicorp/vault/pull/25335)]
+* ui: remove leading slash from KV version 2 secret paths [[GH-25874](https://github.com/hashicorp/vault/pull/25874)]
+* ui: remove unnecessary OpenAPI calls for unmanaged auth methods [[GH-25364](https://github.com/hashicorp/vault/pull/25364)]
+* ui: replace popup menu on list items (namespaces, auth items, KMIP, K8S, LDAP) [[GH-25588](https://github.com/hashicorp/vault/pull/25588)]
+* ui: show banner when resultant-acl check fails due to permissions or wrong namespace. [[GH-23503](https://github.com/hashicorp/vault/pull/23503)]
+* website/docs: Update references to Key Value secrets engine from 'K/V' to 'KV' [[GH-24529](https://github.com/hashicorp/vault/pull/24529)]
+* website/docs: fix inaccuracies with unauthenticated_in_flight_requests_access parameter [[GH-23287](https://github.com/hashicorp/vault/pull/23287)]
+
+BUG FIXES:
+
+* Seal HA (enterprise/beta): Fix rejection of a seal configuration change
+from two to one auto seal due to persistence of the previous seal type being
+"multiseal". [[GH-23573](https://github.com/hashicorp/vault/pull/23573)]
+* activity log (enterprise): De-duplicate client count estimates for license utilization reporting.
+* agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. [[GH-24252](https://github.com/hashicorp/vault/pull/24252)]
+* agent: Fix issue where Vault Agent was unable to render KVv2 secrets with delete_version_after set. [[GH-25387](https://github.com/hashicorp/vault/pull/25387)]
+* agent: Fixed incorrect parsing of boolean environment variables for configuration. [[GH-24790](https://github.com/hashicorp/vault/pull/24790)]
+* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured
+on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)]
+* api: Fix deadlock on calls to sys/leader with a namespace configured
+on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)]
+* api: sys/leader ActiveTime field no longer gets reset when we do an internal state change that doesn't change our active status. [[GH-24549](https://github.com/hashicorp/vault/pull/24549)]
+* audit/socket: Provide socket based audit backends with 'prefix' configuration option when supplied. [[GH-25004](https://github.com/hashicorp/vault/pull/25004)]
+* audit: Fix bug reopening 'file' audit devices on SIGHUP. [[GH-23598](https://github.com/hashicorp/vault/pull/23598)]
+* audit: Fix bug where use of 'log_raw' option could result in other devices logging raw audit data [[GH-24968](https://github.com/hashicorp/vault/pull/24968)]
+* audit: Handle a potential panic while formatting audit entries for an audit log [[GH-25605](https://github.com/hashicorp/vault/pull/25605)]
+* audit: Operator changes to configured audit headers (via `/sys/config/auditing`)
+will now force invalidation and be reloaded from storage when data is replicated
+to other nodes.
+* audit: Resolve potential race condition when auditing entries which use SSCT. [[GH-25443](https://github.com/hashicorp/vault/pull/25443)]
+* auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. [[GH-23555](https://github.com/hashicorp/vault/pull/23555)]
+* auth/cert: Address an issue in which OCSP query responses were not cached [[GH-25986](https://github.com/hashicorp/vault/pull/25986)]
+* auth/cert: Allow cert auth login attempts if ocsp_fail_open is true and OCSP servers are unreachable [[GH-25982](https://github.com/hashicorp/vault/pull/25982)]
+* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)]
+* auth/saml (enterprise): Fixes support for Microsoft Entra ID enterprise applications
+* cap/ldap: Downgrade go-ldap client from v3.4.5 to v3.4.4 due to race condition found [[GH-23103](https://github.com/hashicorp/vault/pull/23103)]
+* cassandra: Update Cassandra to set consistency prior to calling CreateSession, ensuring consistency setting is correct when opening connection. [[GH-24649](https://github.com/hashicorp/vault/pull/24649)]
+* cli/kv: Undelete now properly handles KV-V2 mount paths that are more than one layer deep. [[GH-19811](https://github.com/hashicorp/vault/pull/19811)]
+* cli: fixes plugin register CLI failure to error when plugin image doesn't exist [[GH-24990](https://github.com/hashicorp/vault/pull/24990)]
+* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)]
+* core (Enterprise): Treat multiple disabled HA seals as a migration to Shamir.
+* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue.
+* core (enterprise): Fix a deadlock that can occur on performance secondary clusters when there are many mounts and a mount is deleted or filtered [[GH-25448](https://github.com/hashicorp/vault/pull/25448)]
+* core (enterprise): Fix a panic that can occur if only one seal exists but is unhealthy on the non-first restart of Vault.
+* core (enterprise): fix a potential deadlock if an error is received twice from underlying storage for the same key
+* core (enterprise): fix issue where the Seal HA rewrap system may remain running when an active node steps down.
+* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)]
+* core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. [[GH-24238](https://github.com/hashicorp/vault/pull/24238)]
+* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24056](https://github.com/hashicorp/vault/pull/24056)]
+* core/ha: fix panic that can occur when an HA cluster contains an active node with version >=1.12.0 and another node with version <1.10 [[GH-24441](https://github.com/hashicorp/vault/pull/24441)]
+* core/login: Fixed a potential deadlock when a login fails and user lockout is enabled. [[GH-25697](https://github.com/hashicorp/vault/pull/25697)]
+* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)]
+* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)]
+* core/quotas: Deleting a namespace that contains a rate limit quota no longer breaks replication [[GH-25439](https://github.com/hashicorp/vault/pull/25439)]
+* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)]
+* core: Fix an error that resulted in the wrong seal type being returned by sys/seal-status while
+Vault is in seal migration mode. [[GH-24165](https://github.com/hashicorp/vault/pull/24165)]
+* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)]
+* core: fix bug where deadlock detection was always on for expiration and quotas.
+These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)]
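+
+  A minimal server-config sketch, assuming `detect_deadlocks` accepts a
+  comma-separated list of subsystems:
+
+  ```hcl
+  # Enable deadlock detection only for the expiration and quotas subsystems.
+  detect_deadlocks = "expiration,quotas"
+  ```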
+* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)]
+* core: fix rare panic due to a race condition with metrics collection during seal [[GH-23906](https://github.com/hashicorp/vault/pull/23906)]
+* core: upgrade github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 to
+support azure workload identities. [[GH-24954](https://github.com/hashicorp/vault/pull/24954)]
+* eventlogger: Update library to v0.2.7 to address race condition [[GH-24305](https://github.com/hashicorp/vault/pull/24305)]
+* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)]
+* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)]
+* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)]
+* fairshare: fix a race condition in JobManager.GetWorkerCounts [[GH-24616](https://github.com/hashicorp/vault/pull/24616)]
+* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)]
+* http: Include PATCH in the list of allowed CORS methods [[GH-24373](https://github.com/hashicorp/vault/pull/24373)]
+* kmip (enterprise): Improve handling of failures due to storage replication issues.
+* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4.
+* kmip (enterprise): Return a structure in the response for query function Query Server Information.
+* ldaputil: Disable tests for ARM64 [[GH-23118](https://github.com/hashicorp/vault/pull/23118)]
+* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)]
+* openapi: Fixing response fields for rekey operations [[GH-25509](https://github.com/hashicorp/vault/pull/25509)]
+* plugins: Fix panic when querying plugin runtimes from a performance secondary follower node.
+* proxy: Fixed incorrect parsing of boolean environment variables for configuration. [[GH-24790](https://github.com/hashicorp/vault/pull/24790)]
+* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster.
+* replication (enterprise): Fix a missing unlock when changing replication state
+* replication (enterprise): disallow configuring paths filter for a mount path that does not exist
+* replication (enterprise): fixed data integrity issue with the processing of identity aliases causing duplicates to occur in rare cases
+* sdk: Return error when failure occurs setting up node 0 in NewDockerCluster, instead of ignoring it. [[GH-24136](https://github.com/hashicorp/vault/pull/24136)]
+* secrets-sync (enterprise): Allow unsyncing secrets from an unmounted secrets engine
+* secrets-sync (enterprise): Fix panic when setting usage_gauge_period to none
+* secrets-sync (enterprise): Fixed an issue with syncing to target projects in GCP
+* secrets-sync (enterprise): Fixed issue where we could sync a deleted secret
+* secrets-sync (enterprise): Unsync secret when metadata is deleted
+* secrets/aws: fix requeueing of rotation entry in cases where rotation fails [[GH-23673](https://github.com/hashicorp/vault/pull/23673)]
+* secrets/aws: update credential rotation deadline when static role rotation period is updated [[GH-23528](https://github.com/hashicorp/vault/pull/23528)]
+* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)]
+* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)]
+* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)]
+* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key
+* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time, all encryption operations for the cloud providers have been disabled; only signing operations are supported.
+* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations
+* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)]
+* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)]
+* secrets/transit: When provided an invalid input with hash_algorithm=none, a lock was not released properly before reporting an error leading to deadlocks on a subsequent key configuration update. [[GH-25336](https://github.com/hashicorp/vault/pull/25336)]
+* storage/consul: fix a bug where an active node in a specific sort of network
+partition could continue to write data to Consul after a new leader is elected
+potentially causing data loss or corruption for keys with many concurrent
+writers. For Enterprise clusters this could cause corruption of the merkle trees
+leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)]
+* storage/file: Fixing spuriously deleting storage keys ending with .temp [[GH-25395](https://github.com/hashicorp/vault/pull/25395)]
+* storage/raft: Fix a race whereby a new leader may present inconsistent node data to Autopilot. [[GH-24246](https://github.com/hashicorp/vault/pull/24246)]
+* transform (enterprise): guard against a panic looking up a token in exportable mode with barrier storage.
+* ui: Allows users to dismiss the resultant-acl banner. [[GH-25106](https://github.com/hashicorp/vault/pull/25106)]
+* ui: Allows users to search within KV v2 directories from the Dashboard's quick action card. [[GH-25001](https://github.com/hashicorp/vault/pull/25001)]
+* ui: Assumes version 1 for kv engines when options are null because no version is specified [[GH-23585](https://github.com/hashicorp/vault/pull/23585)]
+* ui: Correctly handle directory redirects from pre 1.15.0 Kv v2 list view urls. [[GH-24281](https://github.com/hashicorp/vault/pull/24281)]
+* ui: Correctly handle redirects from pre 1.15.0 Kv v2 edit, create, and show urls. [[GH-24339](https://github.com/hashicorp/vault/pull/24339)]
+* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)]
+* ui: Do not disable JSON display toggle for KV version 2 secrets [[GH-25235](https://github.com/hashicorp/vault/pull/25235)]
+* ui: Do not show resultant-acl banner on namespaces a user has access to [[GH-25256](https://github.com/hashicorp/vault/pull/25256)]
+* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)]
+* ui: Fix JSON editor in KV V2 unable to handle pasted values [[GH-24224](https://github.com/hashicorp/vault/pull/24224)]
+* ui: Fix PKI ca_chain display so value can be copied to clipboard [[GH-25399](https://github.com/hashicorp/vault/pull/25399)]
+* ui: Fix bug where a change on OpenAPI added a double forward slash on some LIST endpoints. [[GH-23446](https://github.com/hashicorp/vault/pull/23446)]
+* ui: Fix copy button not working on masked input when value is not a string [[GH-25269](https://github.com/hashicorp/vault/pull/25269)]
+* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)]
+* ui: Fix inconsistent empty state action link styles [[GH-25209](https://github.com/hashicorp/vault/pull/25209)]
+* ui: Fix kubernetes auth method roles tab [[GH-25999](https://github.com/hashicorp/vault/pull/25999)]
+* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)]
+* ui: Fix regression that broke the oktaNumberChallenge on the ui. [[GH-23565](https://github.com/hashicorp/vault/pull/23565)]
+* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)]
+* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)]
+* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)]
+* ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. [[GH-23620](https://github.com/hashicorp/vault/pull/23620)]
+* ui: Fixes issue with no active tab when viewing transit encryption key [[GH-25614](https://github.com/hashicorp/vault/pull/25614)]
+* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)]
+* ui: Fixes issues displaying accurate TLS state in dashboard configuration details [[GH-23726](https://github.com/hashicorp/vault/pull/23726)]
+* ui: Fixes policy input toolbar scrolling by default [[GH-23297](https://github.com/hashicorp/vault/pull/23297)]
+* ui: The UI can now be used to create or update database roles by an operator without permission on the database connection. [[GH-24660](https://github.com/hashicorp/vault/pull/24660)]
+* ui: Update the KV secret data when you change the version you're viewing of a nested secret. [[GH-25152](https://github.com/hashicorp/vault/pull/25152)]
+* ui: Updates OIDC/JWT login error handling to surface all role related errors [[GH-23908](https://github.com/hashicorp/vault/pull/23908)]
+* ui: Upgrade HDS version to fix sidebar navigation issues when it collapses in smaller viewports. [[GH-23580](https://github.com/hashicorp/vault/pull/23580)]
+* ui: When Kv v2 secret is an object, fix so details view defaults to readOnly JSON editor. [[GH-24290](https://github.com/hashicorp/vault/pull/24290)]
+* ui: call resultant-acl without namespace header when user mounted at root namespace [[GH-25766](https://github.com/hashicorp/vault/pull/25766)]
+* ui: fix KV v2 details view defaulting to JSON view when secret value includes `{` [[GH-24513](https://github.com/hashicorp/vault/pull/24513)]
+* ui: fix broken GUI when accessing from listener with chroot_namespace defined [[GH-23942](https://github.com/hashicorp/vault/pull/23942)]
+* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)]
+* ui: fix issue where kv v2 capabilities checks were not passing in the full secret path if secret was inside a directory. [[GH-24404](https://github.com/hashicorp/vault/pull/24404)]
+* ui: fix navigation items shown to user when chroot_namespace configured [[GH-24492](https://github.com/hashicorp/vault/pull/24492)]
+* ui: remove user_lockout_config settings for unsupported methods [[GH-25867](https://github.com/hashicorp/vault/pull/25867)]
+* ui: show error from API when seal fails [[GH-23921](https://github.com/hashicorp/vault/pull/23921)]
+
+## 1.15.14 Enterprise
+### August 29, 2024
+
+CHANGES:
+
+* activity (enterprise): filter all fields in client count responses by the request namespace [[GH-27790](https://github.com/hashicorp/vault/pull/27790)]
+* core: Bump Go version to 1.22.6
+
+IMPROVEMENTS:
+
+* activity log: Changes how new client counts in the current month are estimated, in order to return more
+visibly sensible totals. [[GH-27547](https://github.com/hashicorp/vault/pull/27547)]
+* activity: `/sys/internal/counters/activity` will now include a warning if the specified usage period contains estimated client counts. [[GH-28068](https://github.com/hashicorp/vault/pull/28068)]
+* cli: `vault operator usage` will now include a warning if the specified usage period contains estimated client counts. [[GH-28068](https://github.com/hashicorp/vault/pull/28068)]
+* core/activity: Ensure client count queries that include the current month return consistent results by sorting the clients before performing estimation [[GH-28062](https://github.com/hashicorp/vault/pull/28062)]
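+
+  A minimal sketch, assuming RFC3339 values for the usage-window flags:
+
+  ```shell
+  # A window that includes the current month now carries a warning when
+  # client counts are estimated.
+  $ vault operator usage -start-time=2024-06-01T00:00:00Z -end-time=2024-08-31T23:59:59Z
+  ```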

BUG FIXES:

-* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)]
-* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)]
-* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)]
-* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)]
-* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)]
-* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
-* api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)]
-* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)]
-* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)]
-* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
-* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)]
-* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)]
-* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)]
-* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts.
-* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
-* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)]
-* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails
-* core/managed-keys (enterprise): fix panic when having `cache_disable` true
-* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases
-* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)]
-* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)]
-* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty
-* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)]
-* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)]
-* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)]
-* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
-* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)]
-* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)]
-* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)]
-* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)]
-* identity (enterprise): Fix a data race when creating an entity for a local alias.
-* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)]
-* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)]
-* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)]
-* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)]
-* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
-* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)]
-* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)]
-* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)]
-* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)]
-* replication (enterprise): Fix data race in SaveCheckpoint()
-* replication (enterprise): Fix data race in saveCheckpoint.
-* replication (enterprise): Fix possible data race during merkle diff/sync
-* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)]
-* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)]
-* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)]
-* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)]
-* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)]
-* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)]
-* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)]
-* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)]
-* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)]
-* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)]
-* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations.
-* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials.
-* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state.
-* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)]
-* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)]
-* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. [[GH-17019](https://github.com/hashicorp/vault/pull/17019)]
-* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
-* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)]
-* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)]
-* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)]
-* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)]
-* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)]
-* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)]
-* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)]
-* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)]
-* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)]
+* activity: The sys/internal/counters/activity endpoint will return current month data when the end_date parameter is set to a future date (see the example below). [[GH-28042](https://github.com/hashicorp/vault/pull/28042)]
+* command: The `vault secrets move` and `vault auth move` commands will no longer attempt to write to storage on performance standby nodes. [[GH-28059](https://github.com/hashicorp/vault/pull/28059)]
+* core (enterprise): Fix deletion of MFA login-enforcement configurations on standby nodes

-## 1.11.8
-### March 01, 2023
+## 1.15.13 Enterprise
+### August 07, 2024

CHANGES:

-* core: Bump Go version to 1.19.6.
+* auth/cf: Update plugin to v0.18.0 [[GH-27724](https://github.com/hashicorp/vault/pull/27724)]

IMPROVEMENTS:

-* secrets/database: Adds error message requiring password on root crednetial rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)]
+* audit: Ensure that any underlying errors from audit devices are logged even if we consider auditing to be a success. [[GH-27809](https://github.com/hashicorp/vault/pull/27809)]
+* auth/cert: Cache full list of role trust information separately to avoid
+eviction, and avoid duplicate loading during multiple simultaneous logins on
+the same role. [[GH-27902](https://github.com/hashicorp/vault/pull/27902)]

BUG FIXES:

-* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)]
-* core (enterprise): Fix panic when using invalid accessor for control-group request
-* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time.
-* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)]
-* license (enterprise): Fix bug where license would update even if the license didn't change.
-* replication (enterprise): Fix bug where reloading external plugin on a secondary would
-break replication.
-* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18208](https://github.com/hashicorp/vault/pull/18208)]
-* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)]
-* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)]
-* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)]
+* auth/cert: Use subject's serial number, not issuer's within error message text in OCSP request errors [[GH-27696](https://github.com/hashicorp/vault/pull/27696)]
+* core (enterprise): Fix 500 errors that occurred querying `sys/internal/ui/mounts` for a mount prefixed by a namespace path when path filters are configured. [[GH-27939](https://github.com/hashicorp/vault/pull/27939)]
+* raft/autopilot: Fixed panic that may occur during shutdown [[GH-27726](https://github.com/hashicorp/vault/pull/27726)]
+* secrets/identity (enterprise): Fix a bug that can cause DR promotion to fail in rare cases where a PR secondary has inconsistent alias information in storage.
+* ui: Fix cursor jump on KVv2 json editor that would occur after pressing ENTER. [[GH-27569](https://github.com/hashicorp/vault/pull/27569)]
+* ui: fix issue where enabling then disabling "Tidy ACME" in PKI results in failed API call. [[GH-27742](https://github.com/hashicorp/vault/pull/27742)]
+* ui: fix namespace picker not working on small screens where the sidebar is collapsed by default. [[GH-27728](https://github.com/hashicorp/vault/pull/27728)]
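As a concrete illustration of the `sys/internal/counters/activity` change noted above, a minimal query using the official Go client (`github.com/hashicorp/vault/api`) might look like the following. This is a sketch, not Vault's own code; the date range is illustrative only, and the client assumes `VAULT_ADDR` and `VAULT_TOKEN` are set in the environment.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR; NewClient picks up VAULT_TOKEN.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Per the changelog entry above, an end_date in the future now also
	// returns data for the current (partial) month. Dates are RFC3339.
	secret, err := client.Logical().ReadWithData("sys/internal/counters/activity", map[string][]string{
		"start_date": {"2024-01-01T00:00:00Z"}, // illustrative range only
		"end_date":   {"2099-01-01T00:00:00Z"},
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no activity data returned")
	}
	fmt.Println(secret.Data["months"])
}
```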
-## 1.11.7
-### February 6, 2023
-
-CHANGES:
-* core: Bump Go version to 1.19.4.
+## 1.15.12 Enterprise
+### July 10, 2024

-IMPROVEMENTS:
+CHANGES:

-* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)]
-* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs.
-* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)]
-* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)]
-* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)]
+* core: Bump Go version to 1.22.5.
+* auth/jwt: Revert [GH-295](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/295) which changed the way JWT `aud` claims were validated.

BUG FIXES:

-* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)]
-* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)]
-* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency.
-* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace
-* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas.
-* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. -* licensing (enterprise): update autoloaded license cache after reload -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issueing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] +* core (enterprise): Fix HTTP redirects in namespaces to use the correct path and (in the case of event subscriptions) the correct URI scheme. [[GH-27660](https://github.com/hashicorp/vault/pull/27660)] +* core/config: fix issue when using `proxy_protocol_behavior` with `deny_unauthorized`, +which causes the Vault TCP listener to close after receiving an untrusted upstream proxy connection. [[GH-27589](https://github.com/hashicorp/vault/pull/27589)] +* core: Fixed an issue with performance standbys not being able to handle rotate root requests. [[GH-27631](https://github.com/hashicorp/vault/pull/27631)] +* ui: Display an error and force a timeout when TOTP passcode is incorrect [[GH-27574](https://github.com/hashicorp/vault/pull/27574)] +* ui: Ensure token expired banner displays when batch token expires [[GH-27479](https://github.com/hashicorp/vault/pull/27479)] -## 1.11.6 -### November 30, 2022 +## 1.15.11 Enterprise +### June 26, 2024 -IMPROVEMENTS: +BUG FIXES: -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* cli/debug: Fix resource leak in CLI debug command. 
[[GH-26167](https://github.com/hashicorp/vault/pull/26167)]
+* helper/pkcs7: Fix parsing certain messages containing only certificates [[GH-27435](https://github.com/hashicorp/vault/pull/27435)]
+* replication (enterprise): fix cache invalidation issue leading to namespace custom metadata not being shown correctly on performance secondaries
+* storage/raft (enterprise): Fix issue with namespace cache not getting cleared on snapshot restore, resulting in namespaces not found in the snapshot being inaccurately represented by API responses. [[GH-27474](https://github.com/hashicorp/vault/pull/27474)]

-BUG FIXES:
+## 1.15.10 Enterprise
+### June 12, 2024

-* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)]
-* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
-* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)]
-* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas.
-* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)]
-* core: fix a start up race condition where performance standbys could go into a
- mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)]
-* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)]
-* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)]
-* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)]
-* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)]
-* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)]
+CHANGES:

-## 1.11.5
-### November 2, 2022
+* core: Bump Go version to 1.22.4.

IMPROVEMENTS:

-* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)]
-* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)]
+* ui: Allow users to wrap inputted data again instead of resetting form [[GH-27289](https://github.com/hashicorp/vault/pull/27289)]
+* ui: Update language in Transit secret engine to reflect that not all keys are for encryption [[GH-27346](https://github.com/hashicorp/vault/pull/27346)]

BUG FIXES:

-* core/managed-keys (enterprise): Return better error messages when encountering key creation failures
-* core/managed-keys (enterprise): fix panic when having `cache_disable` true
-* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)]
-* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)]
-* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail.
-* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)]
-* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)]
-* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)]
-* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
-* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
-* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
-
-## 1.11.4
-### September 30, 2022
+* secrets/transform (enterprise): Fix a bug preventing the use of alternate schemas on PostgreSQL token stores.
+* storage/raft (enterprise): Fix a regression introduced in 1.15.8 that causes
+autopilot to fail to discover new server versions and so not trigger an upgrade. [[GH-27277](https://github.com/hashicorp/vault/pull/27277)]
+* ui: Do not show resultant-ACL banner when ancestor namespace grants wildcard access. [[GH-27263](https://github.com/hashicorp/vault/pull/27263)]
+* ui: Fix a bug where disabling TTL on the AWS credential form would still send TTL value [[GH-27366](https://github.com/hashicorp/vault/pull/27366)]

-IMPROVEMENTS:
+## 1.15.9 Enterprise
+### May 30, 2024

-* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)]
-* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)]
+CHANGES:
+* auth/jwt: Update plugin to v0.17.3 [[GH-27063](https://github.com/hashicorp/vault/pull/27063)]
+* core: Bump Go version to 1.22.2.
+
+IMPROVEMENTS:
+* secrets/pki (enterprise): Disable warnings about unknown parameters to the various CIEPS endpoints
+* website/docs: Add note about eventual consistency with the MongoDB Atlas database secrets engine [[GH-24152](https://github.com/hashicorp/vault/pull/24152)]

BUG FIXES:

+* activity (enterprise): fix read-only storage error on upgrades
+* core: Address a data race updating a seal's last seen healthy time attribute [[GH-27014](https://github.com/hashicorp/vault/pull/27014)]
+* pki: Fix error in cross-signing using ed25519 keys [[GH-27093](https://github.com/hashicorp/vault/pull/27093)]
+* replication (enterprise): fix "given mount path is not in the same namespace as the request" error that can occur when enabling replication for the first time on a secondary cluster
+* secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter. [[GH-27211](https://github.com/hashicorp/vault/pull/27211)]
+* ui: Fix KVv2 cursor jumping inside json editor after initial input. [[GH-27120](https://github.com/hashicorp/vault/pull/27120)]

BUG FIXES:

-* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit.
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] +## 1.15.8 Enterprise +### April 24, 2024 -## 1.11.3 -### August 31, 2022 +SECURITY: + +* core (enterprise): fix bug where http headers were displayed in the audit log of a performance standby node [HCSEC-2024-10](https://discuss.hashicorp.com/t/hcsec-2024-10-vault-enterprise-leaks-sensitive-http-request-headers-in-audit-log-when-deployed-with-a-performance-standby-node) CHANGES: -* core: Bump Go version to 1.17.13. +* core: Bump Go version to 1.21.9. +* ui: Update dependencies including D3 libraries [[GH-26346](https://github.com/hashicorp/vault/pull/26346)] IMPROVEMENTS: -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the -Kerberos config in Vault. This removes any instance names found in the keytab -service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] +* activity (enterprise): Change minimum retention window in activity log to 48 months +* core: make the best effort timeout for encryption count tracking persistence configurable via an environment variable. [[GH-25636](https://github.com/hashicorp/vault/pull/25636)] +* license utilization reporting (enterprise): Add retention months to license utilization reports. +* sdk/decompression: DecompressWithCanary will now chunk the decompression in memory to prevent loading it all at once. 
[[GH-26464](https://github.com/hashicorp/vault/pull/26464)] +* ui: show banner instead of permission denied error when batch token is expired [[GH-26396](https://github.com/hashicorp/vault/pull/26396)] BUG FIXES: -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] +* core (enterprise): fix bug where raft followers disagree with the seal type after returning to one seal from two. [[GH-26523](https://github.com/hashicorp/vault/pull/26523)] +* secrets/pki: fixed validation bug which rejected ldap schemed URLs in crl_distribution_points. 
[[GH-26477](https://github.com/hashicorp/vault/pull/26477)]
+* storage/raft (enterprise): Fix a bug where autopilot automated upgrades could fail due to using the wrong upgrade version
+* ui: fixed a bug where the replication pages did not update display when navigating between DR and performance [[GH-26325](https://github.com/hashicorp/vault/pull/26325)]
+
+## 1.15.7 Enterprise
+### March 28, 2024

SECURITY:

-* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)]
+* auth/cert: validate OCSP response was signed by the expected issuer and serial number matched request (CVE-2024-2660) [[GH-26091](https://github.com/hashicorp/vault/pull/26091), [HSEC-2024-07](https://discuss.hashicorp.com/t/hcsec-2024-07-vault-tls-cert-auth-method-did-not-correctly-validate-ocsp-responses/64573)]

-## 1.11.2
-### August 2, 2022
+IMPROVEMENTS:
+
+* auth/cert: Allow validation with OCSP responses with no NextUpdate time [[GH-25912](https://github.com/hashicorp/vault/pull/25912)]
+* core (enterprise): Avoid seal rewrapping in some specific unnecessary cases.
+* core (enterprise): persist seal rewrap status, so rewrap status API is consistent on secondary nodes.
+* ui: remove leading slash from KV version 2 secret paths [[GH-25874](https://github.com/hashicorp/vault/pull/25874)]
+
+BUG FIXES:
+
+* audit: Operator changes to configured audit headers (via `/sys/config/auditing`)
+will now force invalidation and be reloaded from storage when data is replicated
+to other nodes.
+* auth/cert: Address an issue in which OCSP query responses were not cached [[GH-25986](https://github.com/hashicorp/vault/pull/25986)]
+* auth/cert: Allow cert auth login attempts if ocsp_fail_open is true and OCSP servers are unreachable [[GH-25982](https://github.com/hashicorp/vault/pull/25982)]
+* cli: fix the plugin register CLI failing to return an error when the referenced plugin image doesn't exist [[GH-24990](https://github.com/hashicorp/vault/pull/24990)]
+* core (enterprise): fix issue where the Seal HA rewrap system may remain running when an active node steps down.
+* core/login: Fixed a potential deadlock when a login fails and user lockout is enabled. [[GH-25697](https://github.com/hashicorp/vault/pull/25697)]
+* replication (enterprise): fixed data integrity issue with the processing of identity aliases causing duplicates to occur in rare cases
+* ui: Fix kubernetes auth method roles tab [[GH-25999](https://github.com/hashicorp/vault/pull/25999)]
+* ui: call resultant-acl without namespace header when user mounted at root namespace [[GH-25766](https://github.com/hashicorp/vault/pull/25766)]

+## 1.15.6
+### February 29, 2024

SECURITY:

+* auth/cert: compare public keys of trusted non-CA certificates with incoming
+client certificates to prevent trusting certs with the same serial number
+but not the same public/private key (CVE-2024-2048). [[GH-25649](https://github.com/hashicorp/vault/pull/25649), [HSEC-2024-05](https://discuss.hashicorp.com/t/hcsec-2024-05-vault-cert-auth-method-did-not-correctly-validate-non-ca-certificates/63382)]
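The CVE-2024-2048 entry above hinges on comparing public keys rather than serial numbers. The following Go sketch, using only the standard library, shows that kind of comparison; it is an illustration of the technique, not Vault's actual implementation, and the file-path arguments are placeholders.

```go
package main

import (
	"crypto"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

// samePublicKey reports whether two PEM-encoded certificates carry the same
// public key. The standard library key types (*rsa.PublicKey, *ecdsa.PublicKey,
// ed25519.PublicKey) all provide the Equal method asserted here.
func samePublicKey(pemA, pemB []byte) (bool, error) {
	certA, err := parseCert(pemA)
	if err != nil {
		return false, err
	}
	certB, err := parseCert(pemB)
	if err != nil {
		return false, err
	}
	pub, ok := certA.PublicKey.(interface{ Equal(crypto.PublicKey) bool })
	if !ok {
		return false, fmt.Errorf("unsupported public key type %T", certA.PublicKey)
	}
	return pub.Equal(certB.PublicKey), nil
}

func parseCert(pemBytes []byte) (*x509.Certificate, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return nil, fmt.Errorf("no PEM block found")
	}
	return x509.ParseCertificate(block.Bytes)
}

func main() {
	a, err := os.ReadFile(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	b, err := os.ReadFile(os.Args[2])
	if err != nil {
		log.Fatal(err)
	}
	same, err := samePublicKey(a, b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("same public key:", same)
}
```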
+CHANGES:
+
+* core: Bump Go version to 1.21.7.
+* secrets/openldap: Update plugin to v0.12.1 [[GH-25524](https://github.com/hashicorp/vault/pull/25524)]
+
+FEATURES:
+
+* **Manual License Utilization Reporting**: Added manual license
+utilization reporting, which allows users to create manual exports of product-license metering
+data to report to HashiCorp.

IMPROVEMENTS:

-* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)]
+* auth/cert: Cache trusted certs to reduce memory usage and improve performance of logins. [[GH-25421](https://github.com/hashicorp/vault/pull/25421)]
+* ui: Add `deletion_allowed` param to transformations and include `tokenization` as a type option [[GH-25436](https://github.com/hashicorp/vault/pull/25436)]
+* ui: redirect back to current route after reauthentication when token expires [[GH-25335](https://github.com/hashicorp/vault/pull/25335)]
+* ui: remove unnecessary OpenAPI calls for unmanaged auth methods [[GH-25364](https://github.com/hashicorp/vault/pull/25364)]

BUG FIXES:

-* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
-* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
-* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)]
-* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)]
+* agent: Fix issue where Vault Agent was unable to render KVv2 secrets with delete_version_after set. [[GH-25387](https://github.com/hashicorp/vault/pull/25387)]
+* audit: Handle a potential panic while formatting audit entries for an audit log [[GH-25605](https://github.com/hashicorp/vault/pull/25605)]
+* core (enterprise): Fix a deadlock that can occur on performance secondary clusters when there are many mounts and a mount is deleted or filtered [[GH-25448](https://github.com/hashicorp/vault/pull/25448)]
+* core (enterprise): Fix a panic that can occur if only one seal exists but is unhealthy on the non-first restart of Vault.
+* core/quotas: Deleting a namespace that contains a rate limit quota no longer breaks replication [[GH-25439](https://github.com/hashicorp/vault/pull/25439)]
+* openapi: Fixing response fields for rekey operations [[GH-25509](https://github.com/hashicorp/vault/pull/25509)]
+* secrets/transit: When provided an invalid input with hash_algorithm=none, a lock was not released properly before reporting an error leading to deadlocks on a subsequent key configuration update (see the example after this section). [[GH-25336](https://github.com/hashicorp/vault/pull/25336)]
+* storage/file: Fixing spuriously deleting storage keys ending with .temp [[GH-25395](https://github.com/hashicorp/vault/pull/25395)]
+* transform (enterprise): guard against a panic looking up a token in exportable mode with barrier storage.
+* ui: Do not disable JSON display toggle for KV version 2 secrets [[GH-25235](https://github.com/hashicorp/vault/pull/25235)]
+* ui: Do not show resultant-acl banner on namespaces a user has access to [[GH-25256](https://github.com/hashicorp/vault/pull/25256)]
+* ui: Fix copy button not working on masked input when value is not a string [[GH-25269](https://github.com/hashicorp/vault/pull/25269)]
+* ui: Update the KV secret data when you change the version you're viewing of a nested secret. [[GH-25152](https://github.com/hashicorp/vault/pull/25152)]
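The transit `hash_algorithm` entries above (here and in 1.15.9) both concern how clients select a hash function. As a hedged sketch of passing the newer parameter explicitly on an HMAC verify call with the official Go client: the mount path `transit`, the key name `my-key`, and the HMAC value are all placeholders.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Reads VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Verify an HMAC, passing hash_algorithm explicitly instead of the
	// deprecated algorithm parameter.
	secret, err := client.Logical().Write("transit/verify/my-key", map[string]interface{}{
		"input":          "aGVsbG8gd29ybGQ=",     // base64("hello world")
		"hmac":           "vault:v1:placeholder", // hypothetical HMAC value
		"hash_algorithm": "sha2-256",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", secret.Data["valid"])
}
```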
-## 1.11.1
-### July 21, 2022
+## 1.15.5
+### January 31, 2024
+
+SECURITY:
+
+* audit: Fix bug where use of 'log_raw' option could result in other devices logging raw audit data [[GH-24968](https://github.com/hashicorp/vault/pull/24968)] [[HCSEC-2024-01](https://discuss.hashicorp.com/t/hcsec-2024-01-vault-may-expose-sensitive-information-when-configuring-an-audit-log-device/62311)]

CHANGES:

-* core: Bump Go version to 1.17.12.
+* core: Bump Go version to 1.21.5.
+* database/snowflake: Update plugin to v0.9.1 [[GH-25020](https://github.com/hashicorp/vault/pull/25020)]
+* secrets/ad: Update plugin to v0.16.2 [[GH-25058](https://github.com/hashicorp/vault/pull/25058)]
+* secrets/openldap: Update plugin to v0.11.3 [[GH-25040](https://github.com/hashicorp/vault/pull/25040)]

IMPROVEMENTS:

-* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)]
-* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)]
-* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)]
+* command/server: display logs on startup immediately if disable-gated-logs flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)]
+* core/activity: Include secret_syncs in activity log responses [[GH-24710](https://github.com/hashicorp/vault/pull/24710)]
+* oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata (see the example at the end of this section) [[GH-24979](https://github.com/hashicorp/vault/pull/24979)]
+* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)]
+* sys (enterprise): Adds the chroot_namespace field to the sys/internal/ui/resultant-acl endpoint, which exposes the value of the chroot namespace from the
+listener config.
+* ui: The latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)]

BUG FIXES:

+* audit/socket: Provide socket based audit backends with 'prefix' configuration option when supplied. [[GH-25004](https://github.com/hashicorp/vault/pull/25004)]
+* auth/saml (enterprise): Fixes support for Microsoft Entra ID enterprise applications
+* core (enterprise): fix a potential deadlock if an error is received twice from underlying storage for the same key
+* core: upgrade github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 to
+support azure workload identities. [[GH-24954](https://github.com/hashicorp/vault/pull/24954)]
+* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)]
+* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4.
+* plugins: fix panic when registering containerized plugin with a custom runtime on a perf standby
+* ui: Allows users to dismiss the resultant-acl banner. [[GH-25106](https://github.com/hashicorp/vault/pull/25106)]
+* ui: Correctly handle redirects from pre 1.15.0 Kv v2 edit, create, and show urls. [[GH-24339](https://github.com/hashicorp/vault/pull/24339)]
+* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)]
+* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)]
+* ui: Fixes policy input toolbar scrolling by default [[GH-23297](https://github.com/hashicorp/vault/pull/23297)]
+* ui: The UI can now be used to create or update database roles by an operator without permission on the database connection. [[GH-24660](https://github.com/hashicorp/vault/pull/24660)]
+* ui: fix KV v2 details view defaulting to JSON view when secret value includes `{` [[GH-24513](https://github.com/hashicorp/vault/pull/24513)]
+* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)]
+* ui: fix issue where kv v2 capabilities checks were not passing in the full secret path if secret was inside a directory. [[GH-24404](https://github.com/hashicorp/vault/pull/24404)]
+* ui: fix navigation items shown to user when chroot_namespace configured [[GH-24492](https://github.com/hashicorp/vault/pull/24492)]
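To see the `code_challenge_methods_supported` field noted above, you can fetch a provider's discovery document over plain HTTP, since that endpoint is unauthenticated. The sketch below assumes the built-in provider name `default`; substitute your own provider name as needed.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
)

func main() {
	// The provider name "default" is an assumption; substitute your own.
	url := os.Getenv("VAULT_ADDR") +
		"/v1/identity/oidc/provider/default/.well-known/openid-configuration"

	resp, err := http.Get(url) // the discovery endpoint is unauthenticated
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var doc map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
		log.Fatal(err)
	}
	fmt.Println("code_challenge_methods_supported:", doc["code_challenge_methods_supported"])
}
```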
+## 1.15.4
+### December 06, 2023
+
+SECURITY:
+
+* core: Fixes an issue present in both Vault and Vault Enterprise since Vault 1.12.0, where Vault is vulnerable to a denial of service through memory exhaustion of the host when handling large HTTP requests from a client. (see [CVE-2023-6337](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-6337) & [HCSEC-2023-34](https://discuss.hashicorp.com/t/hcsec-2023-34-vault-vulnerable-to-denial-of-service-through-memory-exhaustion-when-handling-large-http-requests/60741))
+
+CHANGES:
+
+* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint
+are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)]

BUG FIXES:

+* agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library.
[[GH-24252](https://github.com/hashicorp/vault/pull/24252)] +* api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)] +* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)] +* ui: Correctly handle directory redirects from pre 1.15.0 Kv v2 list view urls. [[GH-24281](https://github.com/hashicorp/vault/pull/24281)] +* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)] +* ui: When Kv v2 secret is an object, fix so details view defaults to readOnly JSON editor. [[GH-24290](https://github.com/hashicorp/vault/pull/24290)] + +## 1.15.3 +### November 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.21.4. + +IMPROVEMENTS: + +* core (enterprise): Speed up unseal when using namespaces +* core: update sys/seal-status (and CLI vault status) to report the type of +the seal when unsealed, as well as the type of the recovery seal if an +auto-seal. [[GH-23022](https://github.com/hashicorp/vault/pull/23022)] +* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)] +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] +* ui: capabilities-self is always called in the user's root namespace [[GH-24168](https://github.com/hashicorp/vault/pull/24168)] + +BUG FIXES: + +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core (Enterprise): Treat multiple disabled HA seals as a migration to Shamir. +* core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. [[GH-24238](https://github.com/hashicorp/vault/pull/24238)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24059](https://github.com/hashicorp/vault/pull/24059)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* core: Fix an error that resulted in the wrong seal type being returned by sys/seal-status while +Vault is in seal migration mode. [[GH-24165](https://github.com/hashicorp/vault/pull/24165)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets-sync (enterprise): Fix panic when setting usage_gauge_period to none +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)] +* ui: Fix JSON editor in KV V2 unable to handle pasted values [[GH-24224](https://github.com/hashicorp/vault/pull/24224)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] +* ui: show error from API when seal fails [[GH-23921](https://github.com/hashicorp/vault/pull/23921)] + +## 1.15.2 +### November 09, 2023 SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. 
A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)]
-* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)]

+CHANGES:

-## 1.11.0
-### June 20, 2022

+* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)]
+* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)]
+
+FEATURES:
+
+* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)]
+
+IMPROVEMENTS:
+
+* api (enterprise): Enable the sys/license/features endpoint from any namespace
+* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)]
+* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)]
+* ui: Update sidebar Secrets engine to title case. [[GH-23964](https://github.com/hashicorp/vault/pull/23964)]
+
+BUG FIXES:
+
+* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured
+on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)]
+* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue.
+* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)]
+* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)]
+* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)]
+* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)]
+* core: fix bug where deadlock detection was always on for expiration and quotas.
+These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)]
+* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)]
+* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases.
[[GH-24027](https://github.com/hashicorp/vault/pull/24027)] +* ui: fix broken GUI when accessing from listener with chroot_namespace defined [[GH-23942](https://github.com/hashicorp/vault/pull/23942)] + +## 1.15.1 +### October 25, 2023 CHANGES: -* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)] -* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth: Remove support for legacy MFA -(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)] -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] -* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)] -* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed` -endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading). -* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead. -* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)] -* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)] -* secrets/pki: A new aliased api path (/pki/issuer/:issuer_ref/sign-self-issued) -providing the same functionality as the existing API(/pki/root/sign-self-issued) -does not require sudo capabilities but the latter still requires it in an -effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)] -* secrets/pki: Err on unknown role during sign-verbatim. [[GH-15543](https://github.com/hashicorp/vault/pull/15543)] -* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead -of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] -* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) -and signing APIs will now include the root CA certificate if the mount is -aware of it. [[GH-15155](https://github.com/hashicorp/vault/pull/15155)] -* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers -and keys within the mount path. 
[[GH-15004](https://github.com/hashicorp/vault/pull/15004)] -* secrets/pki: existing Generate Root (pki/root/generate/:type), -Set Signed Intermediate (/pki/intermediate/set-signed) APIs will -add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] -* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain -response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] -* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] +* core: Bump Go version to 1.21.3. + +IMPROVEMENTS: + +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. [[GH-22996](https://github.com/hashicorp/vault/pull/22996)] +* secrets-sync (enterprise): Added telemetry on number of destinations and associations per type. +* ui: Adds a warning when whitespace is detected in a key of a KV secret [[GH-23702](https://github.com/hashicorp/vault/pull/23702)] +* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] +* ui: Surface warning banner if UI has stopped auto-refreshing token [[GH-23143](https://github.com/hashicorp/vault/pull/23143)] +* ui: show banner when resultant-acl check fails due to permissions or wrong namespace. [[GH-23503](https://github.com/hashicorp/vault/pull/23503)] + +BUG FIXES: + +* Seal HA (enterprise/beta): Fix rejection of a seal configuration change +from two to one auto seal due to persistence of the previous seal type being +"multiseal". [[GH-23573](https://github.com/hashicorp/vault/pull/23573)] +* audit: Fix bug reopening 'file' audit devices on SIGHUP. [[GH-23598](https://github.com/hashicorp/vault/pull/23598)] +* auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. [[GH-23555](https://github.com/hashicorp/vault/pull/23555)] +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. 
+* replication (enterprise): Fix a missing unlock when changing replication state
+* secrets-sync (enterprise): Fixed issue where we could sync a deleted secret
+* secrets/aws: update credential rotation deadline when static role rotation period is updated [[GH-23528](https://github.com/hashicorp/vault/pull/23528)]
+* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)]
+* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)]
+* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key
+* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported.
+* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations
+* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)]
+* storage/consul: fix a bug where an active node in a specific sort of network
+partition could continue to write data to Consul after a new leader is elected,
+potentially causing data loss or corruption for keys with many concurrent
+writers. For Enterprise clusters this could cause corruption of the merkle trees
+leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)]
+* ui: Assumes version 1 for kv engines when options are null because no version is specified [[GH-23585](https://github.com/hashicorp/vault/pull/23585)]
+* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)]
+* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)]
+* ui: Fix bug where auth items were not listed when within a namespace. [[GH-23446](https://github.com/hashicorp/vault/pull/23446)]
+* ui: Fix regression that broke the oktaNumberChallenge on the ui. [[GH-23565](https://github.com/hashicorp/vault/pull/23565)]
+* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)]
+* ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. [[GH-23620](https://github.com/hashicorp/vault/pull/23620)]
+* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)]
+* ui: Fixes issues displaying accurate TLS state in dashboard configuration details [[GH-23726](https://github.com/hashicorp/vault/pull/23726)]

+## 1.15.0
+### September 27, 2023
+
+SECURITY:
+
+* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)]
+* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)]
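For context on the transit nonce regression above (CVE-2023-4680): outside of convergent encryption, callers should let Vault generate the nonce rather than supply one. A minimal sketch with the official Go client follows; the mount path `transit` and key name `my-key` are placeholders.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Reads VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// In non-convergent mode, omit the nonce entirely and let Vault
	// generate it; client-supplied nonces are what the fix above
	// stops honoring.
	secret, err := client.Logical().Write("transit/encrypt/my-key", map[string]interface{}{
		"plaintext": base64.StdEncoding.EncodeToString([]byte("s3cr3t")),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ciphertext:", secret.Data["ciphertext"])
}
```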
+CHANGES:
+
+* auth/alicloud: Update plugin to v0.16.0 [[GH-22646](https://github.com/hashicorp/vault/pull/22646)]
+* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)]
+* auth/azure: Update plugin to v0.16.1 [[GH-22795](https://github.com/hashicorp/vault/pull/22795)]
+* auth/azure: Update plugin to v0.16.2 [[GH-23060](https://github.com/hashicorp/vault/pull/23060)]
+* auth/cf: Update plugin to v0.15.1 [[GH-22758](https://github.com/hashicorp/vault/pull/22758)]
+* auth/gcp: Update plugin to v0.16.1 [[GH-22612](https://github.com/hashicorp/vault/pull/22612)]
+* auth/jwt: Update plugin to v0.17.0 [[GH-22678](https://github.com/hashicorp/vault/pull/22678)]
+* auth/kerberos: Update plugin to v0.10.1 [[GH-22797](https://github.com/hashicorp/vault/pull/22797)]
+* auth/kubernetes: Update plugin to v0.17.0 [[GH-22709](https://github.com/hashicorp/vault/pull/22709)]
+* auth/kubernetes: Update plugin to v0.17.1 [[GH-22879](https://github.com/hashicorp/vault/pull/22879)]
+* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)]
+* auth/oci: Update plugin to v0.14.2 [[GH-22805](https://github.com/hashicorp/vault/pull/22805)]
+* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy
+* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace),
+which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)]
+* core: Bump Go version to 1.21.1.
+* database/couchbase: Update plugin to v0.9.3 [[GH-22854](https://github.com/hashicorp/vault/pull/22854)]
+* database/couchbase: Update plugin to v0.9.4 [[GH-22871](https://github.com/hashicorp/vault/pull/22871)]
+* database/elasticsearch: Update plugin to v0.13.3 [[GH-22696](https://github.com/hashicorp/vault/pull/22696)]
+* database/mongodbatlas: Update plugin to v0.10.1 [[GH-22655](https://github.com/hashicorp/vault/pull/22655)]
+* database/redis-elasticache: Update plugin to v0.2.2 [[GH-22584](https://github.com/hashicorp/vault/pull/22584)]
+* database/redis-elasticache: Update plugin to v0.2.3 [[GH-22598](https://github.com/hashicorp/vault/pull/22598)]
+* database/redis: Update plugin to v0.2.2 [[GH-22654](https://github.com/hashicorp/vault/pull/22654)]
+* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)]
+* events: Log level for processing an event dropped from info to debug. [[GH-22997](https://github.com/hashicorp/vault/pull/22997)]
+* events: `data_path` will include full data path of secret, including name.
[[GH-22487](https://github.com/hashicorp/vault/pull/22487)]
* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host
* sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`. [[GH-22487](https://github.com/hashicorp/vault/pull/22487)]
* secrets/ad: Update plugin to v0.16.1 [[GH-22856](https://github.com/hashicorp/vault/pull/22856)]
* secrets/alicloud: Update plugin to v0.15.1 [[GH-22533](https://github.com/hashicorp/vault/pull/22533)]
* secrets/azure: Update plugin to v0.16.2 [[GH-22799](https://github.com/hashicorp/vault/pull/22799)]
* secrets/azure: Update plugin to v0.16.3 [[GH-22824](https://github.com/hashicorp/vault/pull/22824)]
* secrets/gcp: Update plugin to v0.17.0 [[GH-22746](https://github.com/hashicorp/vault/pull/22746)]
* secrets/gcpkms: Update plugin to v0.15.1 [[GH-22757](https://github.com/hashicorp/vault/pull/22757)]
* secrets/keymgmt: Update plugin to v0.9.3
* secrets/kubernetes: Update plugin to v0.6.0 [[GH-22823](https://github.com/hashicorp/vault/pull/22823)]
* secrets/kv: Update plugin to v0.16.1 [[GH-22716](https://github.com/hashicorp/vault/pull/22716)]
* secrets/mongodbatlas: Update plugin to v0.10.1 [[GH-22748](https://github.com/hashicorp/vault/pull/22748)]
* secrets/openldap: Update plugin to v0.11.2 [[GH-22734](https://github.com/hashicorp/vault/pull/22734)]
* secrets/terraform: Update plugin to v0.7.3 [[GH-22907](https://github.com/hashicorp/vault/pull/22907)]
* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.
* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)]
* telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback` metrics by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes Vault to emit the metrics with mount points in their names. [[GH-22400](https://github.com/hashicorp/vault/pull/22400)]

FEATURES:

-* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage.
-* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)]
-* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)]
-* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows
-import, generation and configuration of any number of keys and/or issuers
-within a PKI mount, providing operators the ability to rotate certificates
-in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)]
-* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. [[GH-14899](https://github.com/hashicorp/vault/pull/14899)]
-* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. 
[[GH-15376](https://github.com/hashicorp/vault/pull/15376)]
-* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)]
-* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)]
-* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. [[GH-15054](https://github.com/hashicorp/vault/pull/15054)]
+* **Certificate Issuance External Policy Service (CIEPS) (enterprise)**: Allow highly-customizable operator control of certificate validation and generation through the PKI Secrets Engine.
+* **Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls [[GH-22551](https://github.com/hashicorp/vault/pull/22551)]
+* **Dashboard UI**: Dashboard is now available in the UI as the new landing page. [[GH-21057](https://github.com/hashicorp/vault/pull/21057)]
+* **Database Static Role Advanced TTL Management**: Adds the ability to rotate
static roles on a defined schedule. [[GH-22484](https://github.com/hashicorp/vault/pull/22484)]
+* **Event System**: Add subscribe capability and subscribe_event_types to policies for events. [[GH-22474](https://github.com/hashicorp/vault/pull/22474)]
+* **GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. [[GH-22445](https://github.com/hashicorp/vault/pull/22445)]
+* **Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) [[GH-22559](https://github.com/hashicorp/vault/pull/22559)]
+* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption.
+* **Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux. [[GH-22712](https://github.com/hashicorp/vault/pull/22712)]
+* **SAML Auth Method (enterprise)**: Enable users to authenticate with Vault using their identity in a SAML Identity Provider.
+* **Seal High Availability Beta (enterprise)**: Operators can try out configuring more than one automatic seal for resilience against seal provider outages. Not for production use at this time.
+* **Secrets Sync (enterprise)**: Add the ability to synchronize KVv2 secrets with external secrets manager solutions.
+* **UI LDAP secrets engine**: Add LDAP secrets engine to the UI. [[GH-20790](https://github.com/hashicorp/vault/pull/20790)]

IMPROVEMENTS:

-* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)]
-* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. [[GH-15204](https://github.com/hashicorp/vault/pull/15204)]
-* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)]
-* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)]
-* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)]
-* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)]
-* api: Add context-aware functions to vault/api for each API wrapper function. 
[[GH-14388](https://github.com/hashicorp/vault/pull/14388)] -* api: Added MFALogin() for handling MFA flow when using login helpers. [[GH-14900](https://github.com/hashicorp/vault/pull/14900)] -* api: If the parameters supplied over the API payload are ignored due to not -being what the endpoints were expecting, or if the parameters supplied get -replaced by the values in the endpoint's path itself, warnings will be added to -the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] -* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] -* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] -* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] -* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] -* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] -* audit: Add a policy_results block into the audit log that contains the set of -policies that granted this request access. [[GH-15457](https://github.com/hashicorp/vault/pull/15457)] -* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] -* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] -* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] -* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. 
[[GH-15593](https://github.com/hashicorp/vault/pull/15593)] -* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] -* auth/okta: Add support for performing [the number -challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) -during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] -* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] -* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] -* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] -* core (enterprise): Include `termination_time` in `sys/license/status` response -* core (enterprise): Include termination time in `license inspect` command output -* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. [[GH-15213](https://github.com/hashicorp/vault/pull/15213)] -* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] -* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] -* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. -* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] -* core: Add new DB methods that do not prepare statements. [[GH-15166](https://github.com/hashicorp/vault/pull/15166)] -* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. 
[[GH-14957](https://github.com/hashicorp/vault/pull/14957)] -* core: Upgrade github.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)] -* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes. -* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)] -* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)] -* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)] -* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)] -* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)] -* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. [[GH-14178](https://github.com/hashicorp/vault/pull/14178)] -* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)] -* secrets/pki: Allow operators to control the issuing certificate behavior when -the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)] -* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)] -* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)] -* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)] -* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)] -* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)] -* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)] -* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)] -* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)]
-* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)]
-* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)]
-* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)]
-* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)]
-* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)]
-* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)]
-* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)]
-* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)]
+* Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 [[GH-20966](https://github.com/hashicorp/vault/pull/20966)]
+* api: add support for cloning a Client's tls.Config. [[GH-21424](https://github.com/hashicorp/vault/pull/21424)]
+* api: adding a new api sys method for replication status [[GH-20995](https://github.com/hashicorp/vault/pull/20995)]
+* audit: add core audit events experiment [[GH-21628](https://github.com/hashicorp/vault/pull/21628)]
+* auth/aws: Added support for signed GET requests for authenticating to Vault using the AWS IAM method. [[GH-10961](https://github.com/hashicorp/vault/pull/10961)]
+* auth/azure: Add support for azure workload identity authentication (see issue
#18257). Update go-kms-wrapping dependency to include [PR
#155](https://github.com/hashicorp/go-kms-wrapping/pull/155) [[GH-22994](https://github.com/hashicorp/vault/pull/22994)]
+* auth/azure: Added Azure API configurable retry options [[GH-23059](https://github.com/hashicorp/vault/pull/23059)]
+* auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values [[GH-21830](https://github.com/hashicorp/vault/pull/21830)]
+* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)]
+* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)]
+* auto-auth: added support for LDAP auto-auth [[GH-21641](https://github.com/hashicorp/vault/pull/21641)]
+* aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional STS endpoints based on the Authorization header when using IAM-based authentication (see the sketch below). [[GH-21960](https://github.com/hashicorp/vault/pull/21960)]
+* command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. [[GH-22657](https://github.com/hashicorp/vault/pull/22657)]
+* core (ent): Add field that allows lease-count namespace quotas to be inherited by child namespaces.
+* core: Add field that allows rate-limit namespace quotas to be inherited by child namespaces. [[GH-22452](https://github.com/hashicorp/vault/pull/22452)]
+* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. 
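As a quick illustration of the `use_sts_region_from_client` entry above, here is a minimal sketch using the official Go client. It assumes the AWS auth method is mounted at the default `auth/aws` path and that `VAULT_ADDR` and `VAULT_TOKEN` are set in the environment; it is not the canonical setup procedure.

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR from the environment; NewClient also
	// picks up VAULT_TOKEN.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Assumed default mount path "auth/aws"; the field name comes from the
	// changelog entry above.
	_, err = client.Logical().Write("auth/aws/config/client", map[string]interface{}{
		"use_sts_region_from_client": true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("enabled use_sts_region_from_client on auth/aws")
}
```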
+* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)]
+* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)]
+* core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths [[GH-21772](https://github.com/hashicorp/vault/pull/21772)]
+* core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions [[GH-21760](https://github.com/hashicorp/vault/pull/21760)]
+* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)]
+* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)]
+* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy [[GH-22304](https://github.com/hashicorp/vault/pull/22304)]
+* core: remove unnecessary *BarrierView field from backendEntry struct [[GH-20933](https://github.com/hashicorp/vault/pull/20933)]
+* core: use Go stdlib functionalities instead of explicit byte/string conversions [[GH-21854](https://github.com/hashicorp/vault/pull/21854)]
+* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)]
+* events: Allow subscriptions to multiple namespaces [[GH-22540](https://github.com/hashicorp/vault/pull/22540)]
+* events: Enabled by default [[GH-22815](https://github.com/hashicorp/vault/pull/22815)]
+* events: WebSocket subscriptions add support for boolean filter expressions [[GH-22835](https://github.com/hashicorp/vault/pull/22835)]
+* framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI [[GH-18492](https://github.com/hashicorp/vault/pull/18492)]
+* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)]
+* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)]
+* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)]
+* openapi: Fix generation of correct fields in some rarer cases [[GH-21942](https://github.com/hashicorp/vault/pull/21942)]
+* openapi: Fix response definitions for list operations [[GH-21934](https://github.com/hashicorp/vault/pull/21934)]
+* openapi: List operations are now given first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path [[GH-21723](https://github.com/hashicorp/vault/pull/21723)]
+* plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)]
+* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary
+* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. 
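For the events entries just above: with the event system enabled by default, a client can watch a stream over the WebSocket API. A rough sketch follows, assuming a local dev server, the documented `sys/events/subscribe/<eventType>` path with `json=true`, and the illustrative `kv-v2/data-write` event type; the boolean filter expressions mentioned above can further narrow the stream.

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/gorilla/websocket"
)

func main() {
	// Assumptions: a local dev server, the sys/events/subscribe/<eventType>
	// path, and the illustrative kv-v2/data-write event type; json=true asks
	// for JSON-rendered event notifications.
	url := "ws://127.0.0.1:8200/v1/sys/events/subscribe/kv-v2/data-write?json=true"

	header := http.Header{}
	header.Set("X-Vault-Token", os.Getenv("VAULT_TOKEN"))

	conn, _, err := websocket.DefaultDialer.Dial(url, header)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for {
		// Each received message is one event notification.
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("event: %s", msg)
	}
}
```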
+* sdk/framework: Adds replication state helper for backends to check for read-only storage [[GH-21743](https://github.com/hashicorp/vault/pull/21743)] +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* secrets/db: Remove the `service_account_json` parameter when reading DB connection details [[GH-23256](https://github.com/hashicorp/vault/pull/23256)] +* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* secrets/transit: Add support to create CSRs from keys in transit engine and import/export x509 certificates [[GH-21081](https://github.com/hashicorp/vault/pull/21081)] +* storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. [[GH-21742](https://github.com/hashicorp/vault/pull/21742)] +* storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable [[GH-12666](https://github.com/hashicorp/vault/pull/12666)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] +* ui: Add API Explorer link to Sidebar, under Tools. [[GH-21578](https://github.com/hashicorp/vault/pull/21578)] +* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] +* ui: Adds mount configuration details to Kubernetes secrets engine configuration view [[GH-22926](https://github.com/hashicorp/vault/pull/22926)] +* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] +* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] +* ui: Display minus icon for empty MaskedInput value. 
Show MaskedInput for KV secrets without values [[GH-22039](https://github.com/hashicorp/vault/pull/22039)]
+* ui: JSON diff view available in "Create New Version" form for KV v2 [[GH-22593](https://github.com/hashicorp/vault/pull/22593)]
+* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)]
+* ui: Move access to KV V2 version diff view to toolbar in Version History [[GH-23200](https://github.com/hashicorp/vault/pull/23200)]
+* ui: Update pki mount configuration details to match the new mount configuration details pattern [[GH-23166](https://github.com/hashicorp/vault/pull/23166)]
+* ui: add example modal to policy form [[GH-21583](https://github.com/hashicorp/vault/pull/21583)]
+* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)]
+* ui: display CertificateCard instead of MaskedInput for certificates in PKI [[GH-22160](https://github.com/hashicorp/vault/pull/22160)]
+* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)]
+* ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component [[GH-21375](https://github.com/hashicorp/vault/pull/21375)]
+* ui: update detail views that render ttl durations to display full unit instead of letter (e.g. 'days' instead of 'd') [[GH-20697](https://github.com/hashicorp/vault/pull/20697)]
+* ui: update unseal and DR operation token flow components [[GH-21871](https://github.com/hashicorp/vault/pull/21871)]
+* ui: upgrade Ember to 4.12 [[GH-22122](https://github.com/hashicorp/vault/pull/22122)]

DEPRECATIONS:

-* docs: Document removal of X.509 certificates with signatures who use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)]
-* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)]
-* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)]
+* auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 [[GH-23050](https://github.com/hashicorp/vault/pull/23050)]
+
+BUG FIXES:
+
+* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)]
+* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)]
+* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)]
+* api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. [[GH-22410](https://github.com/hashicorp/vault/pull/22410)]
+* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)]
+* audit: Prevent panic due to nil pointer receiver for audit header formatting. [[GH-22694](https://github.com/hashicorp/vault/pull/22694)]
+* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21800](https://github.com/hashicorp/vault/pull/21800)]
+* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)]
+* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)]
+* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer
respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)]
+* cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. [[GH-21546](https://github.com/hashicorp/vault/pull/21546)]
+* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)]
+* core (enterprise): Fix sentinel policy check logic so that sentinel
policies are not used when the Sentinel feature isn't licensed.
+* core (enterprise): Remove MFA Configuration for namespace when deleting namespace
+* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC.
+* core/metrics: `vault.raft_storage.bolt.write.time` should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)]
+* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context.
Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)]
+* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)]
+* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)]
+* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have an expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)]
+* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)]
+* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)]
+* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)]
+* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)]
+* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)]
+* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)]
+* core: fix race when updating a mount's route entry tainted status and incoming requests [[GH-21640](https://github.com/hashicorp/vault/pull/21640)]
+* events: Ensure subscription resources are cleaned up on close. [[GH-23042](https://github.com/hashicorp/vault/pull/23042)]
+* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. 
[[GH-22374](https://github.com/hashicorp/vault/pull/22374)]
+* identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs [[GH-20879](https://github.com/hashicorp/vault/pull/20879)]
+* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)]
+* license: Add autoloaded license path to the cache exempt list. This is to ensure license changes on the active node are observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)]
+* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)]
+* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)]
+* plugins: Containerized plugins can be run with mlock enabled. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)]
+* plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)]
+* plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)]
+* plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist [[GH-23171](https://github.com/hashicorp/vault/pull/23171)]
+* plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET [[GH-23171](https://github.com/hashicorp/vault/pull/23171)]
+* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)]
+* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath
+* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable
+* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs
+* replication (enterprise): Fix a bug affecting the atomicity of a merkle diff result, which could cause a merkle-diff & sync process to fail to switch into stream-wal mode afterwards.
+* replication (enterprise): Sort cluster addresses returned by echo requests, so that primary-addrs only gets persisted when the
set of addrs changes.
+* replication (enterprise): update primary cluster address after DR failover
+* sdk/ldaputil: Properly escape user filters when using UPN domains
+* sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)]
+* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)]
+* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22330](https://github.com/hashicorp/vault/pull/22330)]
+* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)]
+* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)]
+* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. 
[[GH-23007](https://github.com/hashicorp/vault/pull/23007)]
+* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)]
+* secrets/pki: allowed_domains are now compared in a case-insensitive manner if they use glob patterns [[GH-22126](https://github.com/hashicorp/vault/pull/22126)]
+* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element
+* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present
+* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node
+* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required
+* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute
+* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)]
+* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)]
+* storage/consul: Consul service registration tags are now case-sensitive. [[GH-6483](https://github.com/hashicorp/vault/pull/6483)]
+* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)]
+* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)]
+* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)]
+* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)]
+* ui: Fix display for "Last Vault Rotation" timestamp for static database roles which was not rendering or copyable [[GH-22519](https://github.com/hashicorp/vault/pull/22519)]
+* ui: Fix styling for username input when editing a user [[GH-21771](https://github.com/hashicorp/vault/pull/21771)]
+* ui: Fix styling for viewing certificate in kubernetes configuration [[GH-21968](https://github.com/hashicorp/vault/pull/21968)]
+* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)]
+* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. 
[[GH-21739](https://github.com/hashicorp/vault/pull/21739)]
+* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)]
+* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)]
+* ui: Fixes form field label tooltip alignment [[GH-22832](https://github.com/hashicorp/vault/pull/22832)]
+* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)]
+* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)]
+* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (e.g. ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)]
+* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)]
+* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)]
+* ui: correct doctype for index.html [[GH-22153](https://github.com/hashicorp/vault/pull/22153)]
+* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)]
+* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)]
+* ui: fixes long namespace names overflow in the sidebar
+* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)]
+* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)]
+
+## 1.14.13 Enterprise
+### May 30, 2024
+
+CHANGES:
+
+* auth/jwt: Update plugin to v0.16.1 [[GH-27122](https://github.com/hashicorp/vault/pull/27122)]
+* core: Bump Go version to 1.22.2.
+
+IMPROVEMENTS:
+
+* website/docs: Add note about eventual consistency with the MongoDB Atlas database secrets engine [[GH-24152](https://github.com/hashicorp/vault/pull/24152)]
+
+BUG FIXES:
+
+* activity (enterprise): fix read-only storage error on upgrades
+* pki: Fix error in cross-signing using ed25519 keys [[GH-27093](https://github.com/hashicorp/vault/pull/27093)]
+* replication (enterprise): fix "given mount path is not in the same namespace as the request" error that can occur when enabling replication for the first time on a secondary cluster
+* secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter. 
[[GH-27211](https://github.com/hashicorp/vault/pull/27211)] -* Fixed panic when adding or modifying a Duo MFA Method in Enterprise -* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Fixes bug where OutputCurlString field was unintentionally being copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA56 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] -* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] -* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core (enterprise): Fix some races in merkle index flushing code found in testing -* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. 
-* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. [[GH-15579](https://github.com/hashicorp/vault/pull/15579)] -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. 
[[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] -* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] -* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] -* secrets/pki: CRLs on performance secondary clusters are now automatically -rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] -* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] -* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] -* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] -* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Fix issue with KV not recomputing model when you changed versions. 
[[GH-14941](https://github.com/hashicorp/vault/pull/14941)]
-* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)]
-* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)]
-* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)]
-* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)]
-* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)]
-* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)]
-* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)]
-* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)]
-* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)]
-* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)]
-
-## 1.10.11
-### March 01, 2023
+## 1.14.12 Enterprise
+### April 24, 2024

CHANGES:

-* core: Bump Go version to 1.19.6.
+* core: Bump Go version to 1.21.9.
+* ui: Update dependencies including D3 libraries [[GH-26346](https://github.com/hashicorp/vault/pull/26346)]

IMPROVEMENTS:

-* secrets/database: Adds error message requiring password on root crednetial rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)]
+* activity (enterprise): Change minimum retention window in activity log to 48 months
+* core: make the best effort timeout for encryption count tracking persistence configurable via an environment variable. [[GH-25636](https://github.com/hashicorp/vault/pull/25636)]
+* license utilization reporting (enterprise): Add retention months to license utilization reports.
+* sdk/decompression: DecompressWithCanary will now chunk the decompression in memory to prevent loading it all at once. [[GH-26464](https://github.com/hashicorp/vault/pull/26464)]
+* ui: show banner instead of permission denied error when batch token is expired [[GH-26396](https://github.com/hashicorp/vault/pull/26396)]

BUG FIXES:

-* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)]
-* core (enterprise): Fix panic when using invalid accessor for control-group request
-* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)]
-* replication (enterprise): Fix bug where reloading external plugin on a secondary would
-break replication.
-* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. 
[[GH-18209](https://github.com/hashicorp/vault/pull/18209)]
-* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)]
-* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)]
+* secrets/pki: fixed validation bug which rejected ldap-schemed URLs in crl_distribution_points. [[GH-26477](https://github.com/hashicorp/vault/pull/26477)]
+* storage/raft (enterprise): Fix a bug where autopilot automated upgrades could fail due to using the wrong upgrade version
+
+## 1.14.11 Enterprise
+### March 28, 2024
+
+SECURITY:
+
+* auth/cert: validate OCSP response was signed by the expected issuer and serial number matched request (CVE-2024-2660) [[GH-26091](https://github.com/hashicorp/vault/pull/26091), [HSEC-2024-07](https://discuss.hashicorp.com/t/hcsec-2024-07-vault-tls-cert-auth-method-did-not-correctly-validate-ocsp-responses/64573)]
-## 1.10.10
-### February 6, 2023
-
CHANGES:

-* core: Bump Go version to 1.19.4.
+* core: Bump Go version to 1.21.8.

IMPROVEMENTS:

-* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)]
-* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs.
-* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)]
-* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)]
-* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)]
+* auth/cert: Allow validation with OCSP responses with no NextUpdate time [[GH-25912](https://github.com/hashicorp/vault/pull/25912)]
+* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)]
+* raft/snapshotagent (enterprise): upgrade raft-snapshotagent to v0.0.0-20221104090112-13395acd02c5

BUG FIXES:

-* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)]
-* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)]
-* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)]
-* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency.
-* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace
-* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas.
-* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)]
-* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. 
-* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* licensing (enterprise): update autoloaded license cache after reload -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - -## 1.10.9 -### November 30, 2022 +* auth/cert: Address an issue in which OCSP query responses were not cached [[GH-25986](https://github.com/hashicorp/vault/pull/25986)] +* auth/cert: Allow cert auth login attempts if ocsp_fail_open is true and OCSP servers are unreachable [[GH-25982](https://github.com/hashicorp/vault/pull/25982)] +* core/login: Fixed a potential deadlock when a login fails and user lockout is enabled. [[GH-25697](https://github.com/hashicorp/vault/pull/25697)] +* openapi: Fixing response fields for rekey operations [[GH-25509](https://github.com/hashicorp/vault/pull/25509)] +* ui: Fix kubernetes auth method roles tab [[GH-25999](https://github.com/hashicorp/vault/pull/25999)] -BUG FIXES: +## 1.14.10 +### February 29, 2024 -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +SECURITY: -## 1.10.8 -### November 2, 2022 - -BUG FIXES: +* auth/cert: compare public keys of trusted non-CA certificates with incoming +client certificates to prevent trusting certs with the same serial number +but not the same public/private key (CVE-2024-2048). 
[[GH-25649](https://github.com/hashicorp/vault/pull/25649), [HSEC-2024-05](https://discuss.hashicorp.com/t/hcsec-2024-05-vault-cert-auth-method-did-not-correctly-validate-non-ca-certificates/63382)] -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] +CHANGES: -## 1.10.7 -### September 30, 2022 +* core: Bump Go version to 1.20.14. + +FEATURES: + +* **Manual License Utilization Reporting**: Added manual license +utilization reporting, which allows users to create manual exports of product-license metering +data to report to HashiCorp. + +IMPROVEMENTS: + +* auth/cert: Cache trusted certs to reduce memory usage and improve performance of logins. [[GH-25421](https://github.com/hashicorp/vault/pull/25421)] +* ui: redirect back to current route after reauthentication when token expires [[GH-25335](https://github.com/hashicorp/vault/pull/25335)] +* ui: remove unnecessary OpenAPI calls for unmanaged auth methods [[GH-25364](https://github.com/hashicorp/vault/pull/25364)] BUG FIXES: -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] +* core (enterprise): Fix a deadlock that can occur on performance secondary clusters when there are many mounts and a mount is deleted or filtered [[GH-25448](https://github.com/hashicorp/vault/pull/25448)] +* core/quotas: Deleting a namespace that contains a rate limit quota no longer breaks replication [[GH-25439](https://github.com/hashicorp/vault/pull/25439)] +* secrets/transform (enterprise): guard against a panic looking up a token in exportable mode with barrier storage.
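The 1.14.11 entries above all touch cert-auth OCSP handling (issuer and serial validation, missing NextUpdate times, response caching, fail-open behavior). As a hedged sketch of where those knobs sit, a trusted certificate can opt into OCSP checking while still permitting logins when responders are unreachable; the mount path, cert name, and file are hypothetical:

```
$ vault write auth/cert/certs/web \
    certificate=@ca.pem \
    ocsp_enabled=true \
    ocsp_fail_open=true
```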
+* secrets/transit: When provided an invalid input with hash_algorithm=none, a lock was not released properly before reporting an error, leading to deadlocks on a subsequent key configuration update. [[GH-25336](https://github.com/hashicorp/vault/pull/25336)] +* storage/file: Fixed spurious deletion of storage keys ending with .temp [[GH-25395](https://github.com/hashicorp/vault/pull/25395)] -## 1.10.6 -### August 31, 2022 +## 1.14.9 +### January 31, 2024 CHANGES: -* core: Bump Go version to 1.17.13. +* core: Bump Go version to 1.20.12. +* database/snowflake: Update plugin to v0.9.2 [[GH-25057](https://github.com/hashicorp/vault/pull/25057)] IMPROVEMENTS: -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* command/server: display logs on startup immediately if disable-gated-logs flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)] +* oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata [[GH-24979](https://github.com/hashicorp/vault/pull/24979)] +* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)] +* ui: the latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)] BUG FIXES: -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate).
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)] +* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)] +* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4. +* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)] +* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)] +* ui: The UI can now be used to create or update database roles by operator without permission on the database connection. [[GH-24660](https://github.com/hashicorp/vault/pull/24660)] +* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)] + +## 1.14.8 +### December 06, 2023 SECURITY: -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.10.5 -### July 21, 2022 +* core: Fixes an issue present in both Vault and Vault Enterprise since Vault 1.12.0, where Vault is vulnerable to a denial of service through memory exhaustion of the host when handling large HTTP requests from a client. (see [CVE-2023-6337](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-6337) & [HCSEC-2023-34](https://discuss.hashicorp.com/t/hcsec-2023-34-vault-vulnerable-to-denial-of-service-through-memory-exhaustion-when-handling-large-http-requests/60741)) CHANGES: -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: Bump Go version to 1.17.12. - -IMPROVEMENTS: - -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)] BUG FIXES: -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. 
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] +* agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. [[GH-24252](https://github.com/hashicorp/vault/pull/24252)] +* api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)] +* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)] +* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)] -## 1.10.4 -### June 10, 2022 +## 1.14.7 +### November 30, 2023 CHANGES: -* core: Bump Go version to 1.17.11. [[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] +* core: Bump Go version to 1.20.11. IMPROVEMENTS: -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] +* core (enterprise): Speed up unseal when using namespaces +* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)] +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] +* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)] BUG FIXES: -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. 
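On the 1.14.8 note above that POSTs to `/identity/entity/merge` are now always forwarded from standbys to the active node, this is a minimal sketch of the call in question; the entity IDs are hypothetical:

```
$ vault write identity/entity/merge \
    from_entity_ids="11111111-2222-3333-4444-555555555555" \
    to_entity_id="99999999-8888-7777-6666-555555555555"
```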
-* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24058](https://github.com/hashicorp/vault/pull/24058)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] -## 1.10.3 -### May 11, 2022 +## 1.14.6 +### November 09, 2023 SECURITY: -* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] -BUG FIXES: +CHANGES: -* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... 
}} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] -## 1.10.2 -### April 29, 2022 +FEATURES: + +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] + +IMPROVEMENTS: + +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] BUG FIXES: -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] -## 1.10.1 -### April 22, 2022 +## 1.14.5 +### October 25, 2023 CHANGES: -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.9. [[GH-15044](https://github.com/hashicorp/vault/pull/15044)] +* core: Bump Go version to 1.20.10. 
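On the 1.14.6 `detect_deadlocks` entry above: the option takes a comma-separated list of internal subsystems to instrument, so detection can now be enabled selectively rather than globally. A hedged server-config sketch; the exact accepted values should be confirmed against the configuration docs:

```
# vault.hcl (fragment)
detect_deadlocks = "statelock,expiration,quotas"
```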
+* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host IMPROVEMENTS: -* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] -* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] -* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)] +* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] + +BUG FIXES: + +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. +* replication (enterprise): Fix a missing unlock when changing replication state +* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)] +* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)] +* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about the key not containing a private key +* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled; only signing operations are supported. +* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] +* storage/consul: fix a bug where an active node in a specific sort of network +partition could continue to write data to Consul after a new leader is elected, +potentially causing data loss or corruption for keys with many concurrent +writers.
For Enterprise clusters this could cause corruption of the merkle trees +leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)] +* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)] +* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)] +* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)] +* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)] + +## 1.14.4 +### September 27, 2023 -BUG FIXES: +SECURITY: -* Fixed panic when adding or modifying a Duo MFA Method in Enterprise -* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA56 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata [[GH-changelog:_2747](https://github.com/hashicorp/vault/pull/changelog:_2747)] -* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. 
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] -* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not excepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] - -## 1.10.0 -### March 23, 2022 +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] CHANGES: -* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x. -* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by -the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)] -* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)] -* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] -* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft -Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* storage/etcd: Remove support for v2. 
[[GH-14193](https://github.com/hashicorp/vault/pull/14193)] -* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)] - -FEATURES: - -* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] -* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault. [[GH-14025](https://github.com/hashicorp/vault/pull/14025)] -* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. -* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] -* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)] -* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)] -* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)] -* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)] -* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. [[GH-client-counts](https://github.com/hashicorp/vault/pull/client-counts)] -* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint. +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy IMPROVEMENTS: -* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. [[GH-14214](https://github.com/hashicorp/vault/pull/14214)] -* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)] -* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. 
[[GH-13515](https://github.com/hashicorp/vault/pull/13515)] -* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)] -* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)] -* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)] -* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)] -* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)] -* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)] -* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)] -* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)] -* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)] -* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] -* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] -* auth/ldap: Add a response warning and server log whenever the config is accessed -if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] -* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] -* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] -* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] -* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will -not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] -* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] -* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] -* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status -* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] -* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] -* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] -* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] -* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] -* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] -* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] -* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] -* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] -* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] -* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] -* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] -* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] -* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] -* core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections -has also been added to the PROXY protocol v1. [[GH-13540](https://github.com/hashicorp/vault/pull/13540)] -* http (enterprise): Serve /sys/license/status endpoint within namespaces -* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)] -* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)] -* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] -* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)] -* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)] -* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)] -* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. 
[[GH-13414](https://github.com/hashicorp/vault/pull/13414)] -* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)] -* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)] -* secrets/kv: add subkeys endpoint to retrieve a secret's stucture without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)] -* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)] -* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)] -* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)] -* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)] -* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)] -* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)] -* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] -* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)] -* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)] -* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] -* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)] -* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)] -* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)] -* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)] -* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] -* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)] -* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)] -* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json. 
[[GH-13537](https://github.com/hashicorp/vault/pull/13537)] -* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)] -* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)] -* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)] -* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)] -* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)] -* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)] -* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)] -* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)] -* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases -for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)] -* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)] -* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)] -* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)] +* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] +* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] +* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] BUG FIXES: -* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] -* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] -* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token -* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)] -* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] -* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)] -* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. 
[[GH-13332](https://github.com/hashicorp/vault/pull/13332)] -* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] -* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] -* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] -* core (enterprise): Fix a data race in logshipper. -* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions -* core/api: Fix overwriting of request headers when using JSONMergePatch. [[GH-14222](https://github.com/hashicorp/vault/pull/14222)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] -* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)] -* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)] -* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] -* core: `-output-curl-string` now properly sets cURL options for client and CA -certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)] -* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] -* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)] -* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* http:Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] -* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] -* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] -* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] -* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)] -* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. 
[[GH-13231](https://github.com/hashicorp/vault/pull/13231)] -* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] -* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)] -* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)] -* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] -* kmip (enterprise): Fix locate by name operations fail to find key after a rekey operation. -* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. -* metrics/autosnapshots (enterprise) : Fix bug that could cause -vault.autosnapshots.save.errors to not be incremented when there is an -autosnapshot save error. -* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)] -* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] -* replication (enterprise): When using encrypted secondary tokens, only clear the -private key after a successful connection to the primary cluster -* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)] -* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)] -* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] -* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)] -* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] -* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] -* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) -operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* secrets/database/cassandra: change connect_timeout to 5s as documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)] -* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] -* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. 
[[GH-13548](https://github.com/hashicorp/vault/pull/13548)] -* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] -* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] -* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] -* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. [[GH-12872](https://github.com/hashicorp/vault/pull/12872)] -* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] -* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] -* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] -* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] -* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] -* storage/raft: On linux, use map_populate for bolt files to improve startup time. 
[[GH-13573](https://github.com/hashicorp/vault/pull/13573)] -* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] -* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] -* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] -* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] -* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] -* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] -* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] -* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] -* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] -* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] -* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] -* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] -* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] -* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] -* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] -* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] -* ui: Fixes node-forge error when parsing EC (elliptical curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] -* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] -* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] -* ui: trigger token renewal 
if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] +* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] -## 1.9.10 -### September 30, 2022 +## 1.14.3 +### September 13, 2023 -BUG FIXES: +SECURITY: -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - -## 1.9.9 -### August 31, 2022 +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] CHANGES: -* core: Bump Go version to 1.17.13. +* core: Bump Go version to 1.20.8. -BUG FIXES: +FEATURES: -* core (enterprise): Fix some races in merkle index flushing code found in testing -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. -SECURITY: +IMPROVEMENTS: -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. 
[[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* kmip (enterprise): reduce latency of KMIP operation handling -## 1.9.8 -### July 21, 2022 +BUG FIXES: -CHANGES: +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* kmip (enterprise): fix date handling error with some re-key operations +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable +* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] +* ui: fixes long namespace names overflow in the sidebar -* core: Bump Go version to 1.17.12. +## 1.14.2 +### August 30, 2023 -IMPROVEMENTS: +CHANGES: -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)] +* core: Bump Go version to 1.20.7. +* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)] -BUG FIXES: +IMPROVEMENTS: -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - -## 1.9.7 -### June 10, 2022 +* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). 
[[GH-22264](https://github.com/hashicorp/vault/pull/22264)] +* core: Log rollback manager failures during unmount and remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] +* website/docs: Fix link formatting in Vault lambda extension docs [[GH-22396](https://github.com/hashicorp/vault/pull/22396)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have an expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node are observed on the perfStandby node. 
[[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix a bug with sync invalidation of CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fix a bug that could affect the atomicity of a merkle diff result, which could in turn cause a merkle-diff & sync process to fail to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22330](https://github.com/hashicorp/vault/pull/22330)] +* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element +* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] +* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)] + +## 1.14.1 +### July 25, 2023 + +SECURITY: + +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] CHANGES: -* core: Bump Go version to 1.17.11. [[GH-go-ver-197](https://github.com/hashicorp/vault/pull/go-ver-197)] +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. 
[[GH-20825](https://github.com/hashicorp/vault/pull/20825)] IMPROVEMENTS: -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)] +* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] + +BUG FIXES: + +* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21800](https://github.com/hashicorp/vault/pull/21800)] +* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] +* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] +* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)] +* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)] +* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)] +* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. 
[[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] +* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] +* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] +* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (e.g. ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] +* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] + +## 1.14.0 +### June 21, 2023 -BUG FIXES: +SECURITY: -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. 
[[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. -* ui: Fixes client count timezone bug [[GH-15743](https://github.com/hashicorp/vault/pull/15743)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-15666](https://github.com/hashicorp/vault/pull/15666)] +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] -## 1.9.6 -### April 29, 2022 +BREAKING CHANGES: -BUG FIXES: +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config; to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics. [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +CHANGES: +* auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 [[GH-20758](https://github.com/hashicorp/vault/pull/20758)] +* auth/azure: Updated plugin from v0.13.0 to v0.15.0 [[GH-20816](https://github.com/hashicorp/vault/pull/20816)] +* auth/centrify: Updated plugin from v0.14.0 to v0.15.1 [[GH-20745](https://github.com/hashicorp/vault/pull/20745)] +* auth/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20725](https://github.com/hashicorp/vault/pull/20725)] +* auth/jwt: Updated plugin from v0.15.0 to v0.16.0 [[GH-20799](https://github.com/hashicorp/vault/pull/20799)] +* auth/kubernetes: Update plugin to v0.16.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] +* core: Bump Go version to 1.20.5. +* core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. [[GH-20834](https://github.com/hashicorp/vault/pull/20834)] +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] +* database/couchbase: Updated plugin from v0.9.0 to v0.9.2 [[GH-20764](https://github.com/hashicorp/vault/pull/20764)] +* database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 [[GH-20751](https://github.com/hashicorp/vault/pull/20751)] +* replication (enterprise): Add a new parameter for the update-primary API call +that allows for setting of the primary cluster addresses directly, instead of
+via a token (see the sketch below).
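To make the new update-primary parameter concrete, here is a minimal sketch using the official Go client (`github.com/hashicorp/vault/api`). It assumes a performance secondary and reuses the `update_primary_addrs` field named elsewhere in these notes; the endpoint path and address values are illustrative, not authoritative.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Client pointed at the performance secondary (VAULT_ADDR, VAULT_TOKEN).
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical call: pass the primary's cluster addresses directly via
	// update_primary_addrs instead of supplying a secondary activation token.
	_, err = client.Logical().Write(
		"sys/replication/performance/secondary/update-primary",
		map[string]interface{}{
			"update_primary_addrs": []string{"https://primary-1:8201", "https://primary-2:8201"},
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("update-primary submitted")
}
```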
+* secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 [[GH-20750](https://github.com/hashicorp/vault/pull/20750)] +* secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 [[GH-20787](https://github.com/hashicorp/vault/pull/20787)] +* secrets/azure: Updated plugin from v0.15.0 to v0.16.0 [[GH-20777](https://github.com/hashicorp/vault/pull/20777)] +* secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] +* secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 [[GH-20807](https://github.com/hashicorp/vault/pull/20807)] +* secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20818](https://github.com/hashicorp/vault/pull/20818)] +* secrets/keymgmt: Updated plugin to v0.9.1 +* secrets/kubernetes: Update plugin to v0.5.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] +* secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 [[GH-20742](https://github.com/hashicorp/vault/pull/20742)] +* secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id. [[GH-21209](https://github.com/hashicorp/vault/pull/21209)] +* secrets/pki: Warning when issuing leafs from CSRs with basic constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. [[GH-20654](https://github.com/hashicorp/vault/pull/20654)] -## 1.9.5 -### April 22, 2022 +FEATURES: + +* **AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. [[GH-20536](https://github.com/hashicorp/vault/pull/20536)] +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering
data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* **Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. [[GH-20530](https://github.com/hashicorp/vault/pull/20530)] +* **MongoDB Atlas Database Secrets**: Adds support for client certificate credentials [[GH-20425](https://github.com/hashicorp/vault/pull/20425)] +* **MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] +* **NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience [[GH-pki-ui-improvements](https://github.com/hashicorp/vault/pull/pki-ui-improvements)] +* **Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run +as an external plugin by vault versions that support secrets/auth plugin +multiplexing (> 1.12) [[GH-19215](https://github.com/hashicorp/vault/pull/19215)] +* **Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. [[GH-19296](https://github.com/hashicorp/vault/pull/19296)] +* **Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. 
This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. [[GH-20752](https://github.com/hashicorp/vault/pull/20752)] +* **Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. [[GH-20548](https://github.com/hashicorp/vault/pull/20548)] +* **OCI Auto-Auth**: Add OCI (Oracle Cloud Infrastructure) auto-auth method [[GH-19260](https://github.com/hashicorp/vault/pull/19260)] + IMPROVEMENTS: + +* api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. [[GH-20265](https://github.com/hashicorp/vault/pull/20265)] +* physical/etcd: Upgrade etcd3 client to v3.5.7 [[GH-20261](https://github.com/hashicorp/vault/pull/20261)] +* activitylog: EntityRecord protobufs now contain a ClientType field for +distinguishing client sources. [[GH-20626](https://github.com/hashicorp/vault/pull/20626)] +* agent: Add integration tests for agent running in process supervisor mode [[GH-20741](https://github.com/hashicorp/vault/pull/20741)] +* agent: Add logic to validate env_template entries in configuration [[GH-20569](https://github.com/hashicorp/vault/pull/20569)] +* agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs. [[GH-19002](https://github.com/hashicorp/vault/pull/19002)] +* agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false): if set to true, and a symlink to a JWT has been provided in the `path` option with the `remove_jwt_after_reading` config option set to true (the default), the JWT itself is removed instead of the symlink to it. [[GH-18863](https://github.com/hashicorp/vault/pull/18863)] +* agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. [[GH-19776](https://github.com/hashicorp/vault/pull/19776)] +* agent: initial implementation of a process runner for injecting secrets via environment variables via vault agent [[GH-20628](https://github.com/hashicorp/vault/pull/20628)] +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. 
[[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* api: property based testing for LifetimeWatcher sleep duration calculation [[GH-17919](https://github.com/hashicorp/vault/pull/17919)] +* audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging [[GH-19814](https://github.com/hashicorp/vault/pull/19814)] +* audit: forwarded requests can now contain host metadata on the node it was sent 'from' or a flag to indicate that it was forwarded. +* auth/cert: Better return OCSP validation errors during login to the caller. [[GH-20234](https://github.com/hashicorp/vault/pull/20234)] +* auth/kerberos: Enable plugin multiplexing +auth/kerberos: Upgrade plugin dependencies [[GH-20771](https://github.com/hashicorp/vault/pull/20771)] +* auth/ldap: allow configuration of alias dereferencing in LDAP search [[GH-18230](https://github.com/hashicorp/vault/pull/18230)] +* auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI [[GH-18225](https://github.com/hashicorp/vault/pull/18225)] +* auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. [[GH-19247](https://github.com/hashicorp/vault/pull/19247)] +* build: Prefer GOBIN when set over GOPATH/bin when building the binary [[GH-19862](https://github.com/hashicorp/vault/pull/19862)] +* cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path [[GH-20464](https://github.com/hashicorp/vault/pull/20464)] +* cli: Improve addPrefixToKVPath helper [[GH-20488](https://github.com/hashicorp/vault/pull/20488)] +* command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. [[GH-20629](https://github.com/hashicorp/vault/pull/20629)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. [[GH-20224](https://github.com/hashicorp/vault/pull/20224)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation. 
[[GH-20559](https://github.com/hashicorp/vault/pull/20559)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* core: Add possibility to decode a generated encoded root token via the rest API (see the sketch below) [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] +* core: include namespace path in granting_policies block of audit log +* core: include reason for ErrReadOnly on PBPWF writing failures +* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] +* core: provide more descriptive error message when calling enterprise feature paths in open-source [[GH-18870](https://github.com/hashicorp/vault/pull/18870)] +* database/elasticsearch: Upgrade plugin dependencies [[GH-20767](https://github.com/hashicorp/vault/pull/20767)] +* database/mongodb: upgrade mongo driver to 1.11 [[GH-19954](https://github.com/hashicorp/vault/pull/19954)] +* database/redis: Upgrade plugin dependencies [[GH-20763](https://github.com/hashicorp/vault/pull/20763)] +* http: Support responding to HEAD operation from plugins [[GH-19520](https://github.com/hashicorp/vault/pull/19520)] +* openapi: Add openapi response definitions to /sys defined endpoints. [[GH-18633](https://github.com/hashicorp/vault/pull/18633)] +* openapi: Add openapi response definitions to pki/config_*.go [[GH-18376](https://github.com/hashicorp/vault/pull/18376)] +* openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints. [[GH-18515](https://github.com/hashicorp/vault/pull/18515)] +* openapi: Consistently stop Vault server on exit in gen_openapi.sh [[GH-19252](https://github.com/hashicorp/vault/pull/19252)] +* openapi: Improve operationId/request/response naming strategy [[GH-19319](https://github.com/hashicorp/vault/pull/19319)] +* openapi: add openapi response definitions to /sys/internal endpoints [[GH-18542](https://github.com/hashicorp/vault/pull/18542)] +* openapi: add openapi response definitions to /sys/rotate endpoints [[GH-18624](https://github.com/hashicorp/vault/pull/18624)] +* openapi: add openapi response definitions to /sys/seal endpoints [[GH-18625](https://github.com/hashicorp/vault/pull/18625)] +* openapi: add openapi response definitions to /sys/tool endpoints [[GH-18626](https://github.com/hashicorp/vault/pull/18626)] +* openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req [[GH-18628](https://github.com/hashicorp/vault/pull/18628)] +* openapi: add openapi response definitions to /sys/wrapping endpoints [[GH-18627](https://github.com/hashicorp/vault/pull/18627)] +* openapi: add openapi response definitions to /sys/auth endpoints [[GH-18465](https://github.com/hashicorp/vault/pull/18465)] +* openapi: add openapi response definitions to /sys/capabilities endpoints [[GH-18468](https://github.com/hashicorp/vault/pull/18468)] +* openapi: add openapi response definitions to /sys/config and /sys/generate-root endpoints [[GH-18472](https://github.com/hashicorp/vault/pull/18472)] +* openapi: added ability to validate response structures against openapi schema for test clusters [[GH-19043](https://github.com/hashicorp/vault/pull/19043)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)]
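As a quick illustration of the root-token decode entry above, the following hedged Go sketch posts the encoded token and OTP back to Vault. The `sys/decode-token` path, its `encoded_token`/`otp` parameters, and the `token` response field are recalled from the API docs rather than taken from this changelog, so verify them before relying on this.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder values: in practice these come from a completed
	// `vault operator generate-root` attempt and its initial OTP.
	resp, err := client.Logical().Write("sys/decode-token", map[string]interface{}{
		"encoded_token": "Bxg9JQQqOCNKBRICNwMIRzo2J3cWCBRi", // placeholder
		"otp":           "3JhHkONiyiaNYj14nnD9xZQS",         // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded root token:", resp.Data["token"]) // response field name assumed
}
```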
+* sdk: Add new docker-based cluster testing framework to the sdk. [[GH-20247](https://github.com/hashicorp/vault/pull/20247)] +* secrets/ad: upgrades dependencies [[GH-19829](https://github.com/hashicorp/vault/pull/19829)] +* secrets/alicloud: upgrades dependencies [[GH-19846](https://github.com/hashicorp/vault/pull/19846)] +* secrets/consul: Improve error message when ACL bootstrapping fails. [[GH-20891](https://github.com/hashicorp/vault/pull/20891)] +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] +* secrets/gcpkms: Enable plugin multiplexing +secrets/gcpkms: Upgrade plugin dependencies [[GH-20784](https://github.com/hashicorp/vault/pull/20784)] +* secrets/mongodbatlas: upgrades dependencies [[GH-19861](https://github.com/hashicorp/vault/pull/19861)] +* secrets/openldap: upgrades dependencies [[GH-19993](https://github.com/hashicorp/vault/pull/19993)] +* secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. [[GH-20442](https://github.com/hashicorp/vault/pull/20442)] +* secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation. [[GH-20253](https://github.com/hashicorp/vault/pull/20253)] +* secrets/pki: Allow determining existing issuers and keys on import. [[GH-20441](https://github.com/hashicorp/vault/pull/20441)] +* secrets/pki: Include CA serial number, key UUID on issuers list endpoint. [[GH-20276](https://github.com/hashicorp/vault/pull/20276)] +* secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days [[GH-20981](https://github.com/hashicorp/vault/pull/20981)] +* secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. [[GH-20943](https://github.com/hashicorp/vault/pull/20943)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* secrets/postgresql: Add configuration to encrypt passwords with SCRAM-SHA-256 in Vault before sending them to PostgreSQL (see the sketch below) [[GH-19616](https://github.com/hashicorp/vault/pull/19616)] +* secrets/terraform: upgrades dependencies [[GH-19798](https://github.com/hashicorp/vault/pull/19798)] +* secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data [[GH-17934](https://github.com/hashicorp/vault/pull/17934)] +* secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. [[GH-19519](https://github.com/hashicorp/vault/pull/19519)] +* secrets/transit: Respond to writes with updated key policy, cache configuration. [[GH-20652](https://github.com/hashicorp/vault/pull/20652)] +* secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and version across clusters. [[GH-20736](https://github.com/hashicorp/vault/pull/20736)] +* ui: Add download button for each secret value in KV v2 [[GH-20431](https://github.com/hashicorp/vault/pull/20431)] +* ui: Add filtering by auth type and auth name to the Authentication Method list view. [[GH-20747](https://github.com/hashicorp/vault/pull/20747)] +* ui: Add filtering by engine type and engine name to the Secret Engine list view. [[GH-20481](https://github.com/hashicorp/vault/pull/20481)]
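For the secrets/postgresql entry above, here is a minimal sketch of what enabling SCRAM-SHA-256 password encryption might look like via the Go client. The `password_authentication` field name is an assumption based on the plugin's documentation; the mount path, role, and connection details are placeholders.

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical connection config on a database mount at "database/".
	// With password_authentication set to "scram-sha-256" (assumed field
	// name), Vault hashes passwords before sending them to PostgreSQL.
	_, err = client.Logical().Write("database/config/my-postgres", map[string]interface{}{
		"plugin_name":             "postgresql-database-plugin",
		"connection_url":          "postgresql://{{username}}:{{password}}@localhost:5432/postgres",
		"username":                "vault-admin",      // placeholder
		"password":                "example-password", // placeholder
		"allowed_roles":           "readonly",
		"password_authentication": "scram-sha-256",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```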
+* ui: Adds whitespace warning to secrets engine and auth method path inputs [[GH-19913](https://github.com/hashicorp/vault/pull/19913)] +* ui: Remove the Bulma CSS framework. [[GH-19878](https://github.com/hashicorp/vault/pull/19878)] +* ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata [[GH-20590](https://github.com/hashicorp/vault/pull/20590)] +* ui: Updates UI javascript dependencies [[GH-19901](https://github.com/hashicorp/vault/pull/19901)] +* ui: add allowed_managed_keys field to secret engine mount options [[GH-19791](https://github.com/hashicorp/vault/pull/19791)] +* ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation [[GH-20163](https://github.com/hashicorp/vault/pull/20163)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] +* website/docs: Add rotate root documentation for azure secrets engine [[GH-19187](https://github.com/hashicorp/vault/pull/19187)] +* website/docs: fix database static-user sample payload [[GH-19170](https://github.com/hashicorp/vault/pull/19170)] + +BUG FIXES: + +* agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. [[GH-21297](https://github.com/hashicorp/vault/pull/21297)] +* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] +* api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. [[GH-19160](https://github.com/hashicorp/vault/pull/19160)] +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* auth/token: Fix cubbyhole and revocation for legacy service tokens [[GH-19416](https://github.com/hashicorp/vault/pull/19416)] +* cli/kv: add -mount flag to kv list [[GH-19378](https://github.com/hashicorp/vault/pull/19378)] +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. 
+* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" [[GH-20285](https://github.com/hashicorp/vault/pull/20285)] +* plugin/reload: Fix a possible data race with rollback manager and plugin reload [[GH-19468](https://github.com/hashicorp/vault/pull/19468)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and an "invalid value" error being returned from the API. +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. +* sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. [[GH-18729](https://github.com/hashicorp/vault/pull/18729)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config (see the sketch below). [[GH-20664](https://github.com/hashicorp/vault/pull/20664)]
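To illustrate the tidy-config entry above, a short sketch that sets both flags explicitly on a PKI mount's auto-tidy configuration. The two field names come from the entry itself; the mount path and the `config/auto-tidy` endpoint (referenced in the 1.14.1 notes above) are assumptions.

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Explicitly opt out of both certificate-count features on a PKI
	// mount assumed to live at "pki/".
	_, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
		"enabled":                                  true, // keep auto-tidy itself running
		"maintain_stored_certificate_counts":       false,
		"publish_stored_certificate_count_metrics": false,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```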
+* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +* secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +* sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* shamir: change mul and div implementations to be constant-time [[GH-19495](https://github.com/hashicorp/vault/pull/19495)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix secret render when path includes %. Resolves #11616. [[GH-20430](https://github.com/hashicorp/vault/pull/20430)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] +* ui: fixes auto_rotate_period ttl input for transit keys [[GH-20731](https://github.com/hashicorp/vault/pull/20731)] +* ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes [[GH-19139](https://github.com/hashicorp/vault/pull/19139)] +* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] +* ui: wait for wanted message event during OIDC callback instead of using the first message event [[GH-18521](https://github.com/hashicorp/vault/pull/18521)] + +## 1.13.13 +### January 31, 2024 CHANGES: -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.9. [[GH-15045](https://github.com/hashicorp/vault/pull/15045)] +* core: Bump Go version to 1.20.12. +* database/snowflake: Update plugin to v0.7.4 [[GH-25059](https://github.com/hashicorp/vault/pull/25059)] IMPROVEMENTS: -* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] -* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer -* website/docs: added a link to an Enigma secret plugin. 
[[GH-14389](https://github.com/hashicorp/vault/pull/14389)] +* command/server: display logs on startup immediately if disable-gated-logs flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)] +* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)] +* ui: the latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)] BUG FIXES: -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] -* metrics/autosnapshots (enterprise): Fix bug that could cause -vault.autosnapshots.save.errors to not be incremented when there is an -autosnapshot save error. -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. 
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] -* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] -* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] -* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] - - -## 1.9.4 -### March 3, 2022 +* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)] +* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4. +* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)] +* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)] +* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)] + +## 1.13.12 +### December 06, 2023 SECURITY: -* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +* core: Fixes an issue present in both Vault and Vault Enterprise since Vault 1.12.0, where Vault is vulnerable to a denial of service through memory exhaustion of the host when handling large HTTP requests from a client. (see [CVE-2023-6337](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-6337) & [HCSEC-2023-34](https://discuss.hashicorp.com/t/hcsec-2023-34-vault-vulnerable-to-denial-of-service-through-memory-exhaustion-when-handling-large-http-requests/60741)) CHANGES: -* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft -Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)] -IMPROVEMENTS: +BUG FIXES: -* core: Bump Go version to 1.17.7. 
[[GH-14232](https://github.com/hashicorp/vault/pull/14232)] -* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] +* api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)] +* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)] +* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)] -BUG FIXES: +## 1.13.11 +### November 30, 2023 -* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] -* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] -* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] -* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] -* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] -* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] -* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) -operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] -* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] -* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] -* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] -* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] -* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] -* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] -* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] +CHANGES: -## 1.9.3 -### January 27, 2022 +* core: Bump Go version to 1.20.11. IMPROVEMENTS: -* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13698](https://github.com/hashicorp/vault/pull/13698)] -* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] -* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] -* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] -* http (enterprise): Serve /sys/license/status endpoint within namespaces +* core (enterprise): Speed up unseal when using namespaces +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] BUG FIXES: -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] -* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] -* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] -* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] -* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] -* kmip (enterprise): Fix locate by name operations fail to find key after a rekey operation. -* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] -* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] -* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24057](https://github.com/hashicorp/vault/pull/24057)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] + +## 1.13.10 +### November 09, 2023 -## 1.9.2 -### December 21, 2021 +SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. 
This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] CHANGES: -* go: Update go version to 1.17.5 [[GH-13408](https://github.com/hashicorp/vault/pull/13408)] +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.9.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] + +FEATURES: + +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] IMPROVEMENTS: -* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] BUG FIXES: -* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] -## 1.9.1 -### December 9, 2021 +## 1.13.9 +### October 25, 2023 -SECURITY: +CHANGES: -* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. +* core: Bump Go version to 1.20.10. +* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host IMPROVEMENTS: -* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. 
[[GH-22567](https://github.com/hashicorp/vault/pull/22567)] BUG FIXES: -* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] -* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] -* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] -* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] -* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] -* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] -* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] -* ui: Fixes node-forge error when parsing EC (elliptic curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster.
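A generic Go sketch of the worker-pool pattern named in the rollback-manager improvement above (GH-22567). This is an illustration of the pattern only, not Vault's actual implementation:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	tasks := make(chan func(), 8)
	var wg sync.WaitGroup
	// A fixed pool of workers drains a shared queue instead of spawning
	// one goroutine per task.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for task := range tasks {
				task() // a queued rollback operation would run here
			}
		}()
	}
	for i := 0; i < 8; i++ {
		n := i
		tasks <- func() { fmt.Println("rollback task", n) }
	}
	close(tasks)
	wg.Wait()
}
```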
+* replication (enterprise): Fix a missing unlock when changing replication state +* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key +* secrets/transit (enterprise): Address panic when using GCP, AWS, Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. +* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] -## 1.9.0 -### November 17, 2021 CHANGES: -* auth/kubernetes: `disable_iss_validation` defaults to true. [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] -* expiration: VAULT_16_REVOKE_PERMITPOOL environment variable has been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] -* expiration: VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY environment variable has -been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] -* go: Update go version to 1.17.2 -* secrets/ssh: Roles with empty allowed_extensions will now forbid end-users -specifying extensions when requesting ssh key signing. Update roles setting -allowed_extensions to `*` to permit any extension to be specified by an end-user. [[GH-12847](https://github.com/hashicorp/vault/pull/12847)] -FEATURES: -* **Customizable HTTP Headers**: Add support to define custom HTTP headers for root path (`/`) and also on API endpoints (`/v1/*`) [[GH-12485](https://github.com/hashicorp/vault/pull/12485)] -* **Deduplicate Token With Entities in Activity Log**: Vault tokens without entities are now tracked with client IDs and deduplicated in the Activity Log [[GH-12820](https://github.com/hashicorp/vault/pull/12820)] -* **Elasticsearch Database UI**: The UI now supports adding and editing Elasticsearch connections in the database secret engine. [[GH-12672](https://github.com/hashicorp/vault/pull/12672)] -* **KV Custom Metadata**: Add ability in kv-v2 to specify version-agnostic custom key metadata via the -metadata endpoint. The data will be present in responses made to the data endpoint independent of the -calling token's `read` access to the metadata endpoint. [[GH-12907](https://github.com/hashicorp/vault/pull/12907)] -* **KV patch (Tech Preview)**: Add partial update support for the `//data/:path` kv-v2 -endpoint through HTTP `PATCH`. A new `patch` ACL capability has been added and -is required to make such requests. [[GH-12687](https://github.com/hashicorp/vault/pull/12687)] -* **Key Management Secrets Engine (Enterprise)**: Adds support for distributing and managing keys in GCP Cloud KMS. -* **Local Auth Mount Entities (enterprise)**: Logins on `local` auth mounts will -generate identity entities for the tokens issued. The aliases of the entity -resulting from local auth mounts (local-aliases), will be scoped by the cluster. -This means that the local-aliases will never leave the geographical boundary of -the cluster where they were issued. This is something to be mindful about for -those who have implemented local auth mounts for complying with GDPR guidelines. -* **Namespaces (Enterprise)**: Adds support for locking Vault API for particular namespaces.
-* **OIDC Identity Provider (Tech Preview)**: Adds support for Vault to be an OpenID Connect (OIDC) provider. [[GH-12932](https://github.com/hashicorp/vault/pull/12932)] -* **Oracle Database UI**: The UI now supports adding and editing Oracle connections in the database secret engine. [[GH-12752](https://github.com/hashicorp/vault/pull/12752)] -* **Postgres Database UI**: The UI now supports adding and editing Postgres connections in the database secret engine. [[GH-12945](https://github.com/hashicorp/vault/pull/12945)]
+## 1.13.8 +### September 27, 2023 SECURITY: -* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5, 1.8.4, and 1.9.0. -* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] -IMPROVEMENTS: +CHANGES: -* agent/cache: Process persistent cache leases in dependency order during restore to ensure child leases are always correctly restored [[GH-12843](https://github.com/hashicorp/vault/pull/12843)] -* agent/cache: Use an in-process listener between consul-template and vault-agent when caching is enabled and either templates or a listener is defined [[GH-12762](https://github.com/hashicorp/vault/pull/12762)] -* agent/cache: tolerate partial restore failure from persistent cache [[GH-12718](https://github.com/hashicorp/vault/pull/12718)] -* agent/template: add support for new 'writeToFile' template function [[GH-12505](https://github.com/hashicorp/vault/pull/12505)] -* api: Add configuration option for ensuring isolated read-after-write semantics for all Client requests.
[[GH-12814](https://github.com/hashicorp/vault/pull/12814)] -* api: adds native Login method to Go client module with different auth method interfaces to support easier authentication [[GH-12796](https://github.com/hashicorp/vault/pull/12796)] -* api: Move mergeStates and other required utils from agent to api module [[GH-12731](https://github.com/hashicorp/vault/pull/12731)] -* api: Support VAULT_HTTP_PROXY environment variable to allow overriding the Vault client's HTTP proxy [[GH-12582](https://github.com/hashicorp/vault/pull/12582)] -* auth/approle: The `role/:name/secret-id-accessor/lookup` endpoint now returns a 404 status code when the `secret_id_accessor` cannot be found [[GH-12788](https://github.com/hashicorp/vault/pull/12788)] -* auth/approle: expose secret_id_accessor as WrappedAccessor when creating wrapped secret-id. [[GH-12425](https://github.com/hashicorp/vault/pull/12425)] -* auth/aws: add profile support for AWS credentials when using the AWS auth method [[GH-12621](https://github.com/hashicorp/vault/pull/12621)] -* auth/kubernetes: validate JWT against the provided role on alias look ahead operations [[GH-12688](https://github.com/hashicorp/vault/pull/12688)] -* auth/kubernetes: Add ability to configure entity alias names based on the serviceaccount's namespace and name. [#110](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/110) [#112](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/112) [[GH-12633](https://github.com/hashicorp/vault/pull/12633)] -* auth/ldap: include support for an optional user filter field when searching for users [[GH-11000](https://github.com/hashicorp/vault/pull/11000)] -* auth/oidc: Adds the `skip_browser` CLI option to allow users to skip opening the default browser during the authentication flow. [[GH-12876](https://github.com/hashicorp/vault/pull/12876)] -* auth/okta: Send x-forwarded-for in Okta Push Factor request [[GH-12320](https://github.com/hashicorp/vault/pull/12320)] -* auth/token: Add `allowed_policies_glob` and `disallowed_policies_glob` fields to token roles to allow glob matching of policies [[GH-7277](https://github.com/hashicorp/vault/pull/7277)] -* cli: Operator diagnose now tests for missing or partial telemetry configurations. [[GH-12802](https://github.com/hashicorp/vault/pull/12802)] -* cli: add new http option `-header`, which enables sending arbitrary headers with the cli [[GH-12508](https://github.com/hashicorp/vault/pull/12508)] -* command: operator generate-root -decode: allow passing encoded token via stdin [[GH-12881](https://github.com/hashicorp/vault/pull/12881)] -* core/token: Return the token_no_default_policy config on token role read if set [[GH-12565](https://github.com/hashicorp/vault/pull/12565)] -* core: Add support for go-sockaddr templated addresses in config. [[GH-9109](https://github.com/hashicorp/vault/pull/9109)] -* core: adds custom_metadata field for aliases [[GH-12502](https://github.com/hashicorp/vault/pull/12502)] -* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] -* core: Update github.com/ulikunitz/xz to fix security vulnerability GHSA-25xm-hr59-7c27.
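A sketch of the native Go client Login flow referenced above (GH-12796), using the AppRole auth helper package; the role ID and secret ID values are placeholders:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder credentials; in practice read these from a secure source.
	auth, err := approle.NewAppRoleAuth("my-role-id", &approle.SecretID{FromString: "my-secret-id"})
	if err != nil {
		log.Fatal(err)
	}
	// Login authenticates and sets the returned token on the client.
	if _, err := client.Auth().Login(context.Background(), auth); err != nil {
		log.Fatal(err)
	}
	log.Println("authenticated via AppRole")
}
```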
[[GH-12253](https://github.com/hashicorp/vault/pull/12253)] -* core: Upgrade github.com/gogo/protobuf [[GH-12255](https://github.com/hashicorp/vault/pull/12255)] -* core: build with Go 1.17, and mitigate a breaking change they made that could impact how approle and ssh interpret IPs/CIDRs [[GH-12868](https://github.com/hashicorp/vault/pull/12868)] -* core: observe the client counts broken down by namespace for partial month client count [[GH-12393](https://github.com/hashicorp/vault/pull/12393)] -* core: Artifact builds will now only run on merges to the release branches or to `main` -* core: The [dockerfile](https://github.com/hashicorp/vault/blob/main/Dockerfile) that is used to build the vault docker image available at [hashicorp/vault](https://hub.docker.com/repository/docker/hashicorp/vault) now lives in the root of this repo, and the entrypoint is available under [.release/docker/docker-entrypoint.sh](https://github.com/hashicorp/vault/blob/main/.release/docker/docker-entrypoint.sh) -* core: The vault linux packaging service configs and pre/post install scripts are now available under [.release/linux](https://github.com/hashicorp/vault/blob/main/.release/linux) -* core: Vault linux packages are now available for all supported linux architectures including arm, arm64, 386, and amd64 -* db/cassandra: make the connect_timeout config option actually apply to connection timeouts, in addition to non-connection operations [[GH-12903](https://github.com/hashicorp/vault/pull/12903)] -* identity/token: Only return keys from the `.well-known/keys` endpoint that are being used by roles to sign/verify tokens. [[GH-12780](https://github.com/hashicorp/vault/pull/12780)] -* identity: fix issue where Cache-Control header causes stampede of requests for JWKS keys [[GH-12414](https://github.com/hashicorp/vault/pull/12414)] -* physical/etcd: Upgrade etcd3 client to v3.5.0 and etcd2 to v2.305.0. [[GH-11980](https://github.com/hashicorp/vault/pull/11980)] -* pki: adds signature_bits field to customize signature algorithm on CAs and certs signed by Vault [[GH-11245](https://github.com/hashicorp/vault/pull/11245)] -* plugin: update the couchbase gocb version in the couchbase plugin [[GH-12483](https://github.com/hashicorp/vault/pull/12483)] -* replication (enterprise): Add merkle.flushDirty.num_pages_outstanding metric which specifies number of -outstanding dirty pages that were not flushed. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] -* sdk/framework: The '+' wildcard is now supported for parameterizing unauthenticated paths. [[GH-12668](https://github.com/hashicorp/vault/pull/12668)] -* secrets/aws: Add conditional template that allows custom usernames for both STS and IAM cases [[GH-12185](https://github.com/hashicorp/vault/pull/12185)] -* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] -* secrets/azure: Adds support for using Microsoft Graph API since Azure Active Directory API is being removed in 2022. [#67](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/67) [[GH-12629](https://github.com/hashicorp/vault/pull/12629)] -* secrets/database: Update MSSQL dependency github.com/denisenkom/go-mssqldb to v0.11.0 and include support for contained databases in MSSQL plugin [[GH-12839](https://github.com/hashicorp/vault/pull/12839)] -* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. 
[[GH-12514](https://github.com/hashicorp/vault/pull/12514)] -* secrets/pki: Use entropy augmentation when available when generating root and intermediate CA key material. [[GH-12559](https://github.com/hashicorp/vault/pull/12559)] -* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] -* secrets/pki: Support ed25519 as a key for the pki backend [[GH-11780](https://github.com/hashicorp/vault/pull/11780)] -* secrets/rabbitmq: Update dependency github.com/michaelklishin/rabbit-hole to v2 and resolve UserInfo.tags regression from RabbitMQ v3.9 [[GH-12877](https://github.com/hashicorp/vault/pull/12877)] -* secrets/ssh: Let allowed_users template mix templated and non-templated parts. [[GH-10886](https://github.com/hashicorp/vault/pull/10886)] -* secrets/ssh: Use entropy augmentation when available for generation of the signing key. [[GH-12560](https://github.com/hashicorp/vault/pull/12560)] -* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] -* storage/raft: Best-effort handling of cancelled contexts. [[GH-12162](https://github.com/hashicorp/vault/pull/12162)] -* transform (enterprise): Add advanced features for encoding and decoding for Transform FPE -* transform (enterprise): Add a `reference` field to batch items, and propagate it to the response -* ui: Add KV secret search box when no metadata list access. [[GH-12626](https://github.com/hashicorp/vault/pull/12626)] -* ui: Add custom metadata to KV secret engine and metadata to config [[GH-12169](https://github.com/hashicorp/vault/pull/12169)] -* ui: Creates new StatText component [[GH-12295](https://github.com/hashicorp/vault/pull/12295)] -* ui: client count monthly view [[GH-12554](https://github.com/hashicorp/vault/pull/12554)] -* ui: creates bar chart component for displaying client count data by namespace [[GH-12437](https://github.com/hashicorp/vault/pull/12437)] -* ui: Add creation time to KV 2 version history and version view [[GH-12663](https://github.com/hashicorp/vault/pull/12663)] -* ui: Added resize for JSON editor [[GH-12906](https://github.com/hashicorp/vault/pull/12906)] -* ui: Adds warning about white space in KV secret engine.
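A hedged sketch of the `signature_bits` role field added above (GH-11245). The `pki` mount path and role name are illustrative, and an initialized `client` (as in the AppRole sketch earlier) is assumed:

```go
// Ask Vault to sign certificates issued under this role with a 384-bit
// signature; the concrete accepted values are per the PKI role docs.
_, err := client.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{
	"allowed_domains":  "example.com",
	"allow_subdomains": true,
	"signature_bits":   384,
})
if err != nil {
	log.Fatal(err)
}
```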
[[GH-12921](https://github.com/hashicorp/vault/pull/12921)] -* ui: Click to copy database static role last rotation value in tooltip [[GH-12890](https://github.com/hashicorp/vault/pull/12890)] -* ui: Filter DB connection attributes so only relevant attrs POST to backend [[GH-12770](https://github.com/hashicorp/vault/pull/12770)] -* ui: Removes empty rows from DB config views [[GH-12819](https://github.com/hashicorp/vault/pull/12819)] -* ui: Standardizes toolbar presentation of destructive actions [[GH-12895](https://github.com/hashicorp/vault/pull/12895)] -* ui: Updates font for table row value fields [[GH-12908](https://github.com/hashicorp/vault/pull/12908)] -* ui: namespace search in client count views [[GH-12577](https://github.com/hashicorp/vault/pull/12577)] -* ui: parse and display pki cert metadata [[GH-12541](https://github.com/hashicorp/vault/pull/12541)] -* ui: replaces Vault's use of elazarl/go-bindata-assetfs in building the UI with Go's native Embed package [[GH-11208](https://github.com/hashicorp/vault/pull/11208)] -* ui: updated client tracking config view [[GH-12422](https://github.com/hashicorp/vault/pull/12422)] +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy -DEPRECATIONS: +IMPROVEMENTS: -* auth/kubernetes: deprecate `disable_iss_validation` and `issuer` configuration fields [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] BUG FIXES: -* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] -* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] -* api: Fixes storage APIs returning incorrect error when parsing responses [[GH-12338](https://github.com/hashicorp/vault/pull/12338)] -* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] -* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] -* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] -* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12265](https://github.com/hashicorp/vault/pull/12265)] -* cli/api: Providing consistency for the use of comma separated parameters in auth/secret enable/tune [[GH-12126](https://github.com/hashicorp/vault/pull/12126)] -* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12720](https://github.com/hashicorp/vault/pull/12720)] -* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12911](https://github.com/hashicorp/vault/pull/12911)] -* cli: vault debug now puts newlines after every captured log line. 
[[GH-12175](https://github.com/hashicorp/vault/pull/12175)] -* core (enterprise): Allow deletion of stored licenses on DR secondary nodes -* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified -* core (enterprise): Fix bug where password generation through password policies do not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] -* core (enterprise): Fix data race during perf standby sealing -* core (enterprise): Fixes reading raft auto-snapshot configuration from performance standby node [[GH-12317](https://github.com/hashicorp/vault/pull/12317)] -* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] -* core (enterprise): namespace header included in responses, Go client uses it when displaying error messages [[GH-12196](https://github.com/hashicorp/vault/pull/12196)] -* core/api: Fix an arm64 bug converting a negative int to an unsigned int [[GH-12372](https://github.com/hashicorp/vault/pull/12372)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] -* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] -* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] -* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] -* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] -* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] -* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] -* database/couchbase: change default template to truncate username at 128 characters [[GH-12301](https://github.com/hashicorp/vault/pull/12301)] -* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* http: removed unpublished true from logical_system path, making openapi spec consistent with documentation [[GH-12713](https://github.com/hashicorp/vault/pull/12713)] -* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] -* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] -* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] -* identity: Fix regression preventing startup when aliases were created pre-1.9. 
[[GH-13169](https://github.com/hashicorp/vault/pull/13169)] -* identity: dedup from_entity_ids when merging two entities [[GH-10101](https://github.com/hashicorp/vault/pull/10101)] -* identity: disallow creation of role without a key parameter [[GH-12208](https://github.com/hashicorp/vault/pull/12208)] -* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] -* identity: merge associated entity groups when merging entities [[GH-10085](https://github.com/hashicorp/vault/pull/10085)] -* identity: suppress duplicate policies on entities [[GH-12812](https://github.com/hashicorp/vault/pull/12812)] -* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests -* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls -* kmip (enterprise): Forward KMIP register operations to the active node -* license: ignore stored terminated license while autoloading is enabled [[GH-2104](https://github.com/hashicorp/vault/pull/2104)] -* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. -* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] -* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] -* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] -* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] -* raft (enterprise): Fix panic when updating auto-snapshot config -* replication (enterprise): Fix issue where merkle.flushDirty.num_pages metric is not emitted if number -of dirty pages is 0. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] -* replication (enterprise): Fix merkle.saveCheckpoint.num_dirty metric to accurately specify the number -of dirty pages in the merkle tree at time of checkpoint creation. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] -* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] -* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] -* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] -* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12934](https://github.com/hashicorp/vault/pull/12934)] -* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12600](https://github.com/hashicorp/vault/pull/12600)] -* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] -* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. 
[[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* storage/raft (enterprise): Ensure that raft autosnapshot backoff retry duration never hits 0s -* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] -* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] -* storage/raft: Support `addr_type=public_v6` in auto-join [[GH-12366](https://github.com/hashicorp/vault/pull/12366)] -* transform (enterprise): Enforce minimum cache size for Transform backend and reset cache size without a restart -* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. -* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] -* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)] -* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] -* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] -* ui: Fixed text overflow in flash messages [[GH-12357](https://github.com/hashicorp/vault/pull/12357)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] -* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] -* ui: Remove spinner after token renew [[GH-12887](https://github.com/hashicorp/vault/pull/12887)] -* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] -* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] -* ui: fix issue where on MaskedInput on auth methods if tab it would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] -* ui: fix missing navbar items on login to namespace [[GH-12478](https://github.com/hashicorp/vault/pull/12478)] -* ui: update bar chart when model changes [[GH-12622](https://github.com/hashicorp/vault/pull/12622)] -* ui: updating database TTL picker help text. [[GH-12212](https://github.com/hashicorp/vault/pull/12212)] +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Fixes old pki's filter and search roles page bug [[GH-22810](https://github.com/hashicorp/vault/pull/22810)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] + +## 1.13.7 +### September 13, 2023 -## 1.8.12 -### June 10, 2022 +SECURITY: -BUG FIXES: +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. 
This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. CHANGES: -## 1.8.11 -### April 29, 2022 +* core: Bump Go version to 1.20.8. +* database/snowflake: Update plugin to v0.7.3 [[GH-22591](https://github.com/hashicorp/vault/pull/22591)] + +FEATURES: + +* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. + +IMPROVEMENTS: + +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* kmip (enterprise): reduce latency of KMIP operation handling BUG FIXES: -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* kmip (enterprise): fix date handling error with some re-key operations +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable -## 1.8.10 -### April 22, 2022 +## 1.13.6 +### August 30, 2023 CHANGES: -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500.
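A hedged sketch relating to the transit nonce regression above (CVE-2023-4680): in non-convergent mode the client should omit any nonce and let Vault generate one. An initialized `client` is assumed, along with a transit key named `my-key`:

```go
plaintext := base64.StdEncoding.EncodeToString([]byte("hello world"))
// No "nonce" field is sent; Vault derives one itself for non-convergent
// encryption.
secret, err := client.Logical().Write("transit/encrypt/my-key", map[string]interface{}{
	"plaintext": plaintext,
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(secret.Data["ciphertext"])
```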
[[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.16.15. [[GH-go-ver-1810](https://github.com/hashicorp/vault/pull/go-ver-1810)] +* core: Bump Go version to 1.20.7. IMPROVEMENTS: -* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] -* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer - -BUG FIXES: +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. 
[[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug with sync invalidation of CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22331](https://github.com/hashicorp/vault/pull/22331)] +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] + +## 1.13.5 +### July 25, 2023 -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* metrics/autosnapshots (enterprise): Fix bug that could cause -vault.autosnapshots.save.errors to not be incremented when there is an -autosnapshot save error. -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field.
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] -* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] +SECURITY: +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC_2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] -## 1.8.9 -### March 3, 2022 +CHANGES: -* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. IMPROVEMENTS: -* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. 
[[GH-21681](https://github.com/hashicorp/vault/pull/21681)] BUG FIXES: -* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] -* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] -* secrets/openldap: Fix panic from nil logger in backend [[GH-14170](https://github.com/hashicorp/vault/pull/14170)] -* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] -* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21799](https://github.com/hashicorp/vault/pull/21799)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21632](https://github.com/hashicorp/vault/pull/21632)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] -## 1.8.8 -### January 27, 2022 +## 1.13.4 +### June 21, 2023 +BREAKING CHANGES: -IMPROVEMENTS: +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config, to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics . 
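A hedged sketch for the certificate-count defaults described in the 1.13.4 breaking change above: explicitly opting back in on a PKI mount. The `pki/config/auto-tidy` path is an assumption based on the tidy configuration endpoint, and an initialized `client` is assumed:

```go
// Re-enable stored-certificate counts and their metrics, per the field
// names in the breaking-change entry.
_, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
	"maintain_stored_certificate_counts":       true,
	"publish_stored_certificate_count_metrics": true,
})
if err != nil {
	log.Fatal(err)
}
```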
-* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +CHANGES: -BUG FIXES: +* core: Bump Go version to 1.20.5. -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13494](https://github.com/hashicorp/vault/pull/13494)] -* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions -* kmip (enterprise): Fix locate by name operations failing to find key after a rekey operation. -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13549](https://github.com/hashicorp/vault/pull/13549)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] -* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] -* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] -* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] +FEATURES: + +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* core (enterprise): Add background worker for automatic reporting of billing +information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] + +IMPROVEMENTS: -## 1.8.7 -### December 21, 2021 +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)]
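The `current_billing_period=true` behavior noted above can be exercised directly from the Go client; a minimal sketch, assuming a token with read access to `sys/internal/counters/activity`:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to GET /v1/sys/internal/counters/activity?current_billing_period=true
	secret, err := client.Logical().ReadWithData("sys/internal/counters/activity",
		map[string][]string{"current_billing_period": {"true"}})
	if err != nil {
		log.Fatal(err)
	}

	// With current_billing_period=true the response covers the full billing
	// period rather than requiring explicit start_time/end_time parameters.
	fmt.Println("start:", secret.Data["start_time"])
	fmt.Println("end:  ", secret.Data["end_time"])
}
```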
+* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] + +BUG FIXES: + +* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] + +## 1.13.3 ### June 08, 2023 CHANGES: -* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)] +* core: Bump Go version to 1.20.4. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] +* replication (enterprise): Add a new parameter for the update-primary API call +that allows for setting of the primary cluster addresses directly, instead of +via a token. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] -## 1.8.6 -### December 9, 2021 +IMPROVEMENTS: + +* Add debug symbols back to builds to fix Dynatrace support [[GH-20519](https://github.com/hashicorp/vault/pull/20519)] +* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] +* autopilot: Update version to v0.2.0 to add better support for respecting min quorum [[GH-19472](https://github.com/hashicorp/vault/pull/19472)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server.
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] +* core: include namespace path in granting_policies block of audit log +* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] +* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] + +BUG FIXES: + +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] +* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] +* command/server: fixes panic in Vault server command when running in recovery mode [[GH-20418](https://github.com/hashicorp/vault/pull/20418)] +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core/identity: Allow updates of only the custom-metadata for entity alias. [[GH-20368](https://github.com/hashicorp/vault/pull/20368)] +* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. +* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] +* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. +* secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. [[GH-20354](https://github.com/hashicorp/vault/pull/20354)] +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing.
+sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] +* ui: fixes issue creating mfa login enforcement from method enforcements tab [[GH-20603](https://github.com/hashicorp/vault/pull/20603)] +* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] + +## 1.13.2 +### April 26, 2023 CHANGES: -* go: Update go version to 1.16.9 [[GH-13029](https://github.com/hashicorp/vault/pull/13029)] +* core: Bump Go version to 1.20.3. SECURITY: -* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. +* core/seal: Fix handling of HMACing of seal-wrapped storage entries from HSMs using CKM_AES_CBC or CKM_AES_CBC_PAD which may have allowed an attacker to conduct a padding oracle attack. This vulnerability, CVE-2023-2197, affects Vault from 1.13.0 up to 1.13.1 and was fixed in 1.13.2. [[HCSEC-2023-14](https://discuss.hashicorp.com/t/hcsec-2023-14-vault-enterprise-vulnerable-to-padding-oracle-attacks-when-using-a-cbc-based-encryption-mechanism-with-a-hsm/53322)] -BUG FIXES: +IMPROVEMENTS: -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] -* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] - -## 1.8.5 -### November 4, 2021 - -SECURITY: - -* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. 
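Consumers affected by the transit HMAC export fix in the 1.13.3 bug fixes above can enumerate exported key versions to see which material they had been using. A minimal sketch with the Go client, assuming a transit mount at `transit/` and a hypothetical key named `app-hmac` that was created with `exportable=true`:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Plaintext export of all versions of an exportable HMAC key.
	secret, err := client.Logical().Read("transit/export/hmac-key/app-hmac")
	if err != nil {
		log.Fatal(err)
	}

	// data.keys maps version -> base64 key material; after upgrading past the
	// fix, compare these against the values your consumers currently hold.
	keys, _ := secret.Data["keys"].(map[string]interface{})
	for version, material := range keys {
		fmt.Printf("version %s: %v\n", version, material)
	}
}
```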
- -BUG FIXES: - -* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] -* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] -* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] -* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests -* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls -* kmip (enterprise): Forward KMIP register operations to the active node -* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12952](https://github.com/hashicorp/vault/pull/12952)] -* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. - -## 1.8.4 -### 6 October 2021 +* Add debug symbols back to builds to fix Dynatrace support [[GH-20294](https://github.com/hashicorp/vault/pull/20294)] +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* core: include reason for ErrReadOnly on PBPWF writing failures +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] +* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] +* sys/wrapping: Add example how to unwrap without authentication in Vault [[GH-20109](https://github.com/hashicorp/vault/pull/20109)] +* ui: Allows license-banners to be dismissed. Saves preferences in localStorage. [[GH-19116](https://github.com/hashicorp/vault/pull/19116)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. 
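The `sys/wrapping` improvement in the 1.13.2 notes above concerns unwrapping without authenticating first; the same flow is available from the Go client, since a response-wrapping token is itself sufficient credential for a single call to `sys/wrapping/unwrap`. A minimal sketch, with the wrapping token value as a hypothetical placeholder:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Deliberately drop any VAULT_TOKEN from the environment: when the client
	// has no token, Unwrap uses the wrapping token itself for the request.
	client.ClearToken()

	wrappingToken := "hvs.EXAMPLE" // hypothetical single-use wrapping token

	secret, err := client.Logical().Unwrap(wrappingToken)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data)
}
```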
+* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. +* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and an "invalid value" error being returned from the API. +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. [[GH-20181](https://github.com/hashicorp/vault/pull/20181)] +* secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. [[GH-20034](https://github.com/hashicorp/vault/pull/20034)] +* secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. [[GH-20057](https://github.com/hashicorp/vault/pull/20057)] +* secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. [[GH-20058](https://github.com/hashicorp/vault/pull/20058)] +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: fixes remaining doc links to include /vault in path [[GH-20070](https://github.com/hashicorp/vault/pull/20070)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] +* website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. [[GH-20216](https://github.com/hashicorp/vault/pull/20216)] + +## 1.13.1 +### March 29, 2023 SECURITY: -* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4.
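The `leaf_not_after_behavior` patching fix in the 1.13.2 bug fixes above relates to PKI issuers accepting HTTP PATCH. A minimal sketch of patching that field on the default issuer using the Go client's JSON merge patch helper, assuming a PKI mount at `pki/`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// PATCH pki/issuer/default with a JSON merge patch; "truncate" caps leaf
	// certificate lifetimes at the issuer's own expiry instead of erroring.
	secret, err := client.Logical().JSONMergePatch(context.Background(),
		"pki/issuer/default", map[string]interface{}{
			"leaf_not_after_behavior": "truncate",
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["leaf_not_after_behavior"])
}
```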
- -IMPROVEMENTS: - -* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] - -BUG FIXES: - -* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] -* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] -* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] - -## 1.8.3 -### 29 September 2021 - -IMPROVEMENTS: - -* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] - -BUG FIXES: - -* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] -* core (enterprise): Allow deletion of stored licenses on DR secondary nodes -* core (enterprise): Fix bug where password generation through password policies do not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] -* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] -* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] -* raft (enterprise): Fix panic when updating auto-snapshot config -* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] -* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12599](https://github.com/hashicorp/vault/pull/12599)] -* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] -* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] -* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. 
[[GH-12550](https://github.com/hashicorp/vault/pull/12550)] -* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] - -## 1.8.2 -### 26 August 2021 - -CHANGES: - -* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 -* go: Update go version to 1.16.7 [[GH-12408](https://github.com/hashicorp/vault/pull/12408)] - -BUG FIXES: - -* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] -* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] -* database/couchbase: change default template to truncate username at 128 characters [[GH-12300](https://github.com/hashicorp/vault/pull/12300)] -* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] -* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] -* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] -* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] -* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] -* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] -* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] -* ui: fix issue where on MaskedInput on auth methods if tab it would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] - -## 1.8.1 -### August 5th, 2021 - -CHANGES: - -* go: Update go version to 1.16.6 [[GH-12245](https://github.com/hashicorp/vault/pull/12245)] +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. 
This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] IMPROVEMENTS: -* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] +* database/elasticsearch: Update error messages resulting from Elasticsearch API errors [[GH-19545](https://github.com/hashicorp/vault/pull/19545)] +* events: Suppress log warnings triggered when events are sent but the events system is not enabled. [[GH-19593](https://github.com/hashicorp/vault/pull/19593)] + +BUG FIXES: + +* agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. [[GH-19483](https://github.com/hashicorp/vault/pull/19483)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. +* kmip (enterprise): Fix a problem forwarding some requests to the active node. +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19640](https://github.com/hashicorp/vault/pull/19640)] +* secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug [[GH-19624](https://github.com/hashicorp/vault/pull/19624)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library [[GH-19428](https://github.com/hashicorp/vault/pull/19428)] +* ui: fixes SSH engine config deletion [[GH-19448](https://github.com/hashicorp/vault/pull/19448)] +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted [[GH-19541](https://github.com/hashicorp/vault/pull/19541)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex.
ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] -BUG FIXES: +## 1.13.0 +### March 01, 2023 -* auth/aws: Remove warning stating AWS Token TTL will be capped by the Default Lease TTL. [[GH-12026](https://github.com/hashicorp/vault/pull/12026)] -* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12258](https://github.com/hashicorp/vault/pull/12258)] -* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified -* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] -* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] -## 1.8.0 -### July 28th, 2021 +SECURITY: +* secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)] +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] CHANGES: -* agent: Errors in the template engine will no longer cause agent to exit unless -explicitly defined to do so. A new configuration parameter, -`exit_on_retry_failure`, within the new top-level stanza, `template_config`, can -be set to `true` in order to cause agent to exit. Note that for agent to exit if -`template.error_on_missing_key` is set to `true`, `exit_on_retry_failure` must -be also set to `true`. Otherwise, the template engine will log an error but then -restart its internal runner. [[GH-11775](https://github.com/hashicorp/vault/pull/11775)] -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* core (enterprise): License/EULA changes that ensure the presence of a valid HashiCorp license to -start Vault. More information is available in the [Vault License FAQ](https://www.vaultproject.io/docs/enterprise/license/faqs) +* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] +* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] +* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. +This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] +* core: Bump Go version to 1.20.1. +* core: Vault version has been moved out of sdk and into main vault module. +Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString.
[[GH-14229](https://github.com/hashicorp/vault/pull/14229)] +* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)] +* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)] +* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] +* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] +* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)] +* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)] +* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)] +* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on. +* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)] FEATURES: +* **User lockout**: Ignore repeated bad credentials from the same user for a configured period of time. Enabled by default.
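The lockout behaviour is tunable per auth mount (see also the **User Lockout** feature entry further down this list). A minimal sketch tuning a userpass mount via `sys/auth/<path>/tune`, with illustrative threshold values; the `user_lockout_config` field names follow the documented tune parameters, but treat them as assumptions:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Logical().Write("sys/auth/userpass/tune", map[string]interface{}{
		"user_lockout_config": map[string]interface{}{
			"lockout_threshold":     "5",   // lock after 5 consecutive failures
			"lockout_duration":      "15m", // stay locked for 15 minutes
			"lockout_counter_reset": "15m", // reset the failure counter after 15m
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lockout tuned on auth/userpass")
}
```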
+* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)] +* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] +* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)] +* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)] +* **PKI Cross-Cluster Revocations**: Revocation information can now be +synchronized across primary and performance replica clusters offering +a unified CRL/OCSP view of revocations across cluster boundaries. [[GH-19196](https://github.com/hashicorp/vault/pull/19196)] +* **Server UDS Listener**: Adding listener to Vault server to serve http request via unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)] +* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys +* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent +brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)] +* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing. +* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications. IMPROVEMENTS: -* agent/template: Added static_secret_render_interval to specify how often to fetch non-leased secrets [[GH-11934](https://github.com/hashicorp/vault/pull/11934)] -* agent: Allow Agent auto auth to read symlinked JWT files [[GH-11502](https://github.com/hashicorp/vault/pull/11502)] -* api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`. [[GH-11696](https://github.com/hashicorp/vault/pull/11696)] -* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] -* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)] -* core: Add `prefix_filter` to telemetry config [[GH-12025](https://github.com/hashicorp/vault/pull/12025)] -* core: Add a darwin/arm64 binary release supporting the Apple M1 CPU [[GH-12071](https://github.com/hashicorp/vault/pull/12071)] -* core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. [[GH-11588](https://github.com/hashicorp/vault/pull/11588)] -* core (enterprise): Add controlled capabilities to control group policy stanza -* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] -* core: Add metrics to report if a node is a perf standby, if a node is a dr secondary or primary, and if a node is a perf secondary or primary. [[GH-11472](https://github.com/hashicorp/vault/pull/11472)] -* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)] -* core: add irrevocable lease list and count apis [[GH-11607](https://github.com/hashicorp/vault/pull/11607)] -* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] -* core: Improve renew/revoke performance using per-lease locks [[GH-11122](https://github.com/hashicorp/vault/pull/11122)] -* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] -* go: Update to Go 1.16.5 [[GH-11802](https://github.com/hashicorp/vault/pull/11802)] -* replication: Delay evaluation of X-Vault-Index headers until merkle sync completes. -* secrets/rabbitmq: Add ability to customize dynamic usernames [[GH-11899](https://github.com/hashicorp/vault/pull/11899)] -* secrets/ad: Add `rotate-role` endpoint to allow rotations of service accounts. [[GH-11942](https://github.com/hashicorp/vault/pull/11942)] -* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] -* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] -* secrets/database/elasticsearch: Add ability to customize dynamic usernames [[GH-11957](https://github.com/hashicorp/vault/pull/11957)] -* secrets/database/influxdb: Add ability to customize dynamic usernames [[GH-11796](https://github.com/hashicorp/vault/pull/11796)] -* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database/mongodbatlas: Adds the ability to customize username generation for dynamic users in MongoDB Atlas. 
[[GH-11956](https://github.com/hashicorp/vault/pull/11956)] -* secrets/database/redshift: Add ability to customize dynamic usernames [[GH-12016](https://github.com/hashicorp/vault/pull/12016)] -* secrets/database/snowflake: Add ability to customize dynamic usernames [[GH-11997](https://github.com/hashicorp/vault/pull/11997)] -* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)] -* storage/raft: Improve raft batch size selection [[GH-11907](https://github.com/hashicorp/vault/pull/11907)] -* storage/raft: change freelist type to map and set nofreelistsync to true [[GH-11895](https://github.com/hashicorp/vault/pull/11895)] -* storage/raft: Switch to shared raft-boltdb library and add boltdb metrics [[GH-11269](https://github.com/hashicorp/vault/pull/11269)] -* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] -* storage/raft (enterprise): Enable Autopilot on DR secondary clusters -* ui: Add Validation to KV secret engine [[GH-11785](https://github.com/hashicorp/vault/pull/11785)] -* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)] -* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)] -* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)] -* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] -* ui: Add validation support for open api form fields [[GH-11963](https://github.com/hashicorp/vault/pull/11963)] -* ui: Added auth method descriptions to UI login page [[GH-11795](https://github.com/hashicorp/vault/pull/11795)] -* ui: JSON fields on database can be cleared on edit [[GH-11708](https://github.com/hashicorp/vault/pull/11708)] -* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)] -* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)] -* ui: Replace tool partials with components. 
[[GH-11672](https://github.com/hashicorp/vault/pull/11672)] -* ui: Show description on secret engine list [[GH-11995](https://github.com/hashicorp/vault/pull/11995)] -* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)] -* ui: Update partials to components [[GH-11680](https://github.com/hashicorp/vault/pull/11680)] -* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)] -* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] -* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)] -* ui: add transform secrets engine to features list [[GH-12003](https://github.com/hashicorp/vault/pull/12003)] -* ui: add validations for duplicate path kv engine [[GH-11878](https://github.com/hashicorp/vault/pull/11878)] -* ui: show site-wide banners for license warnings if applicable [[GH-11759](https://github.com/hashicorp/vault/pull/11759)] -* ui: update license page with relevant autoload info [[GH-11778](https://github.com/hashicorp/vault/pull/11778)] +* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. [[GH-18752](https://github.com/hashicorp/vault/pull/18752)] +* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)] +* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)] +* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. [[GH-18403](https://github.com/hashicorp/vault/pull/18403)] +* agent: Add note in logs when starting Vault Agent indicating if the version differs to the Vault Server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)] +* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. [[GH-18740](https://github.com/hashicorp/vault/pull/18740)] +* agent: Agent listeners can now be configured to be the `metrics_only` role, serving only metrics, as part of the listener's new top-level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)] +* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)] +* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)] +* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)] +* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)] +* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] +* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)] +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)]
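The `elide_list_responses` option above is set per audit device. A minimal sketch enabling a file audit device with the option via the Go client, assuming a writable log path (the path here is hypothetical):

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// With elide_list_responses, LIST response bodies are replaced by their
	// item counts in the audit log, avoiding oversized entries.
	err = client.Sys().EnableAuditWithOptions("file", &vault.EnableAuditOptions{
		Type: "file",
		Options: map[string]string{
			"file_path":            "/var/log/vault_audit.log", // hypothetical path
			"elide_list_responses": "true",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```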
+* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)] +* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a +Virtual Machine Scale Set (VMSS) in flexible orchestration mode. [[GH-17540](https://github.com/hashicorp/vault/pull/17540)] +* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)] +* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)] +* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)] +* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)] +* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)] +* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] +* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] +* auth/token (enterprise): Allow batch token creation in perfStandby nodes +* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. +Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)] +* auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)] +* autopilot: Update version to v0.2.0 to add better support for respecting min quorum +* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)] +* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)] +* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. [[GH-17750](https://github.com/hashicorp/vault/pull/17750)] +* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)] +* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)] +* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)] +* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)] +* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)] +* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)] +* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)] +* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)]
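The OCSP validation support for cert auth above is configured per trusted certificate entry. A minimal sketch, assuming cert auth mounted at `auth/cert/` and a CA bundle on disk (the file path and role name are hypothetical); the `ocsp_*` field names follow the cert auth API documentation, but treat them as assumptions:

```go
package main

import (
	"log"
	"os"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	caPEM, err := os.ReadFile("ca.pem") // hypothetical trusted CA bundle
	if err != nil {
		log.Fatal(err)
	}

	// Trusted-cert entry that also checks client certificates against OCSP.
	_, err = client.Logical().Write("auth/cert/certs/web", map[string]interface{}{
		"display_name":   "web",
		"certificate":    string(caPEM),
		"ocsp_enabled":   true,
		"ocsp_fail_open": false, // reject logins when no OCSP response is available
	})
	if err != nil {
		log.Fatal(err)
	}
}
```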
+* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)] +* core/server: Added an environment variable to write goroutine stacktraces to a +temporary file for SIGUSR2 signals. [[GH-17929](https://github.com/hashicorp/vault/pull/17929)] +* core: Add RPCs to read and update userFailedLoginInfo map +* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)] +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)] +* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)] +* core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from +sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)] +* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock a user +with given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)] +* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)] +* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. [[GH-18673](https://github.com/hashicorp/vault/pull/18673)] +* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. [[GH-17265](https://github.com/hashicorp/vault/pull/17265)] +* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)] +* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)] +* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)] +* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)] +* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)]
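The locked-user endpoints above pair naturally: one to list, one to unlock. A minimal sketch with the Go client; the mount accessor and alias identifier below are hypothetical placeholders for values you would read from the list response:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List currently locked users across auth mounts.
	locked, err := client.Logical().Read("sys/locked-users")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(locked.Data)

	// Unlock a specific user; both path segments are placeholders here.
	mountAccessor := "auth_userpass_12345678"
	aliasID := "alice"
	_, err = client.Logical().Write(
		fmt.Sprintf("sys/locked-users/%s/unlock/%s", mountAccessor, aliasID), nil)
	if err != nil {
		log.Fatal(err)
	}
}
```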
+* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)] +* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] +* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)] +* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)] +* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)] +* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)] +* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)] +* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)] +* logging: Vault agent and server commands support log file and log rotation. [[GH-18031](https://github.com/hashicorp/vault/pull/18031)] +* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)] +* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configurable +to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour. +* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)] +* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)] +* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)] +* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)] +* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)] +* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)] +* openapi: add openapi response definitions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)] +* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)] +* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] +* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. [[GH-17289](https://github.com/hashicorp/vault/pull/17289)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade.
+* plugins: Let Vault unseal and mount deprecated builtin plugins in a
+deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)]
+* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)]
+* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)]
+* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)]
+* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)]
+* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)]
+* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)]
+* secrets/azure: Adds ability to persist an application for the lifetime of a role. [[GH-19096](https://github.com/hashicorp/vault/pull/19096)]
+* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)]
+* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] (example below)
+* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)]
+* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)]
+* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)]
+* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)]
+* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)]
+* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)]
+* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)]
+* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)]
+* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)]
+* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when
+allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)]
+* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)]
+* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)]
+* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)]
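+
+For the new MySQL `tls_server_name`/`tls_skip_verify` parameters, a hedged example configuring a database connection with the Go client; the mount path `database`, connection name, host, and credentials are placeholders:
+
+```go
+package main
+
+import (
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := vault.NewClient(vault.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Configure a MySQL connection, pinning the expected TLS server name
+	// rather than skipping verification. Host and credentials are fake.
+	_, err = client.Logical().Write("database/config/my-mysql",
+		map[string]interface{}{
+			"plugin_name":     "mysql-database-plugin",
+			"connection_url":  "{{username}}:{{password}}@tcp(db.example.com:3306)/",
+			"username":        "vault-admin",
+			"password":        "changeme",
+			"allowed_roles":   "my-role",
+			"tls_server_name": "db.example.com",
+			"tls_skip_verify": false,
+		})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```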
+* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. [[GH-18645](https://github.com/hashicorp/vault/pull/18645)]
+* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)]
+* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)]
+* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)]
+* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)]
+* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)]
+* secrets/transit: Add an optional reference field to batch operation items
+which is repeated on batch responses to help more easily correlate inputs with outputs. [[GH-18243](https://github.com/hashicorp/vault/pull/18243)] (example below)
+* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)]
+* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)]
+* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)]
+* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)]
+* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)]
+* sys/internal/inspect: Creates an endpoint to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)]
+* ui: Add algorithm-signer as an SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)]
+* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)]
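+
+The transit `reference` and `associated_data` batch fields above can be exercised together; a sketch assuming a transit mount at `transit/` and an existing AEAD key named `orders` (both placeholders):
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"fmt"
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := vault.NewClient(vault.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	b64 := func(s string) string {
+		return base64.StdEncoding.EncodeToString([]byte(s))
+	}
+
+	// Each batch item carries a caller-chosen reference, echoed back in
+	// the matching result, plus associated data bound into the AEAD.
+	resp, err := client.Logical().Write("transit/encrypt/orders",
+		map[string]interface{}{
+			"batch_input": []map[string]interface{}{
+				{"plaintext": b64("item-1"), "reference": "a", "associated_data": b64("ctx-1")},
+				{"plaintext": b64("item-2"), "reference": "b", "associated_data": b64("ctx-2")},
+			},
+		})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if resp != nil {
+		fmt.Printf("%#v\n", resp.Data["batch_results"])
+	}
+}
+```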
+* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)]
+* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)]
+* ui: Prepends "passcode=" if not provided in user input for Duo TOTP MFA method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)]
+* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)]
+* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)] (example below)
+* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)]
+* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)]
+* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)]
+* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)]
+* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)]
+* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)]
+* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)]
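+
+The `allowed_response_headers` mount-config parameter surfaced by the UI change above can also be set directly through a mount tune; a sketch against a KV mount at `secret/` (the mount path and header name are placeholders):
+
+```go
+package main
+
+import (
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := vault.NewClient(vault.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Allow the mount to pass an extra header through on responses.
+	_, err = client.Logical().Write("sys/mounts/secret/tune",
+		map[string]interface{}{
+			"allowed_response_headers": []string{"X-Custom-Header"},
+		})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```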
DEPRECATIONS:
-* secrets/gcp: Deprecated the `/gcp/token/:roleset` and `/gcp/key/:roleset` paths for generating
- secrets for rolesets. Use `/gcp/roleset/:roleset/token` and `/gcp/roleset/:roleset/key` instead. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)]
-
-BUG FIXES:
-
-* activity: Omit wrapping tokens and control groups from client counts [[GH-11826](https://github.com/hashicorp/vault/pull/11826)]
-* agent/cert: Fix issue where the API client on agent was not honoring certificate
- information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)]
-* agent/template: fix command shell quoting issue [[GH-11838](https://github.com/hashicorp/vault/pull/11838)]
-* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)]
-* agent: fix timestamp format in log messages from the templating engine [[GH-11838](https://github.com/hashicorp/vault/pull/11838)]
-* auth/approle: fixing dereference of nil pointer [[GH-11864](https://github.com/hashicorp/vault/pull/11864)]
-* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to
- bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)]
-* auth/kubernetes: Fix AliasLookahead to correctly extract ServiceAccount UID when using ephemeral JWTs [[GH-12073](https://github.com/hashicorp/vault/pull/12073)]
-* auth/ldap: Fix a bug where the LDAP auth method does not return the request_timeout configuration parameter on config read. [[GH-11975](https://github.com/hashicorp/vault/pull/11975)]
-* cli: Add support for response wrapping in `vault list` and `vault kv list` with output format other than `table`. [[GH-12031](https://github.com/hashicorp/vault/pull/12031)]
-* cli: vault delete and vault kv delete should support the same output options (e.g. -format) as vault write. [[GH-11992](https://github.com/hashicorp/vault/pull/11992)]
-* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes.
-* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
-* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation.
-* core/metrics: Add generic KV mount support for vault.kv.secret.count telemetry metric [[GH-12020](https://github.com/hashicorp/vault/pull/12020)]
-* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
-* core: Fix edge cases in the configuration endpoint for barrier key autorotation. [[GH-11541](https://github.com/hashicorp/vault/pull/11541)]
-* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
-* core (enterprise): Fix panic on DR secondary when there are lease count quotas [[GH-11742](https://github.com/hashicorp/vault/pull/11742)]
-* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)]
-* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
-* core: Fixed double counting of http requests after operator stepdown [[GH-11970](https://github.com/hashicorp/vault/pull/11970)]
-* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
-* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)]
-* mongo-db: default username template now strips invalid '.' characters [[GH-11872](https://github.com/hashicorp/vault/pull/11872)]
-* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
-* replication: Fix panic trying to update walState during identity group invalidation.
-* replication: Fix: mounts created within a namespace that was part of an Allow
- filtering rule would not appear on performance secondary if created after rule
- was defined.
-* secret/pki: use case insensitive domain name comparison as per RFC1035 section 2.3.3
-* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)]
-* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)]
-* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
-* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)]
-* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)]
-* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins.
[[GH-11451](https://github.com/hashicorp/vault/pull/11451)] -* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. [[GH-12087](https://github.com/hashicorp/vault/pull/12087)] -* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] -* secrets/openldap: Fix bug where schema was not compatible with rotate-root [#24](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/24) [[GH-12019](https://github.com/hashicorp/vault/pull/12019)] -* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] -* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] -* storage/raft: Tweak creation of vault.db file [[GH-12034](https://github.com/hashicorp/vault/pull/12034)] -* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] -* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] -* transform (enterprise): Fix an issue with malformed transform configuration - storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. -* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] -* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] -* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] -* ui: Fix Version History queryParams on LinkedBlock [[GH-12079](https://github.com/hashicorp/vault/pull/12079)] -* ui: Fix bug where database secret engines with custom names cannot delete connections [[GH-11127](https://github.com/hashicorp/vault/pull/11127)] -* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] -* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] -* ui: Fix date display on expired token notice [[GH-11142](https://github.com/hashicorp/vault/pull/11142)] -* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] -* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)] -* ui: Fix footer URL linking to the correct version changelog. 
[[GH-11283](https://github.com/hashicorp/vault/pull/11283)] -* ui: Fix issue where logging in without namespace input causes error [[GH-11094](https://github.com/hashicorp/vault/pull/11094)] -* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] -* ui: Fix status menu no showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] -* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] -* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)] -* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] -* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] -* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] - -## 1.7.10 -### March 3, 2022 - -SECURITY: - -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. - -BUG FIXES: - -* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] -* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] - -## 1.7.9 -### January 27, 2022 - -IMPROVEMENTS: - -* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] - -BUG FIXES: - -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13493](https://github.com/hashicorp/vault/pull/13493)] -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13735](https://github.com/hashicorp/vault/pull/13735)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] - -## 1.7.8 -### December 21, 2021 - -CHANGES: - -* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)] - -BUG FIXES: - -* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] -* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] - -## 1.7.7 -### December 9, 2021 - -SECURITY: - -* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. - -BUG FIXES: - -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. 
[[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] - -## 1.7.6 -### November 4, 2021 - -SECURITY: - -* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. +* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. [[GH-19334](https://github.com/hashicorp/vault/pull/19334)] BUG FIXES: -* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] -* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] -* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] -* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests -* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls -* kmip (enterprise): Forward KMIP register operations to the active node -* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12957](https://github.com/hashicorp/vault/pull/12957)] -* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. - -## 1.7.5 -### 29 September 2021 - -SECURITY: - -* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4. - -IMPROVEMENTS: - -* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] - -BUG FIXES: - -* agent: Avoid possible `unexpected fault address` panic when using persistent cache. 
[[GH-12534](https://github.com/hashicorp/vault/pull/12534)] -* core (enterprise): Fix bug where password generation through password policies do not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] -* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] -* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] -* raft (enterprise): Fix panic when updating auto-snapshot config -* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] -* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12598](https://github.com/hashicorp/vault/pull/12598)] -* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] -* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] - -## 1.7.4 -### 26 August 2021 - -SECURITY: - -* *UI Secret Caching*: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. - -CHANGES: - -* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 -* go: Update go version to 1.15.15 [[GH-12411](https://github.com/hashicorp/vault/pull/12411)] - -IMPROVEMENTS: - -* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] - -BUG FIXES: - -* replication (enterprise): Fix a panic that could occur when checking the last wal and the log shipper buffer is empty. -* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] -* database/couchbase: change default template to truncate username at 128 characters [[GH-12299](https://github.com/hashicorp/vault/pull/12299)] -* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] -* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] -* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] -* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] -* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] -* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] -* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] -* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] - -## 1.7.3 -### June 16th, 2021 - -CHANGES: - -* go: Update go version to 1.15.13 [[GH-11857](https://github.com/hashicorp/vault/pull/11857)] - -IMPROVEMENTS: - -* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] -* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] - -BUG FIXES: - -* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to -bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] -* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. -* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] -* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] -* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] - -## 1.7.2 -### May 20th, 2021 - -SECURITY: - -* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token -leases and dynamic secret leases with a zero-second TTL, causing them to be -treated as non-expiring, and never revoked. This issue affects Vault and Vault -Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and -1.7.2 (CVE-2021-32923). - -CHANGES: - -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for -signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)] - -IMPROVEMENTS: - -* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] -* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] -* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] -* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] -* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] - -BUG FIXES: - -* agent/cert: Fix issue where the API client on agent was not honoring certificate -information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] -* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] -* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] -* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] -* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] -* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] -* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] -* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] -* secrets/keymgmt (enterprise): Fixes audit logging for the read key response. -* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] -* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] -* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] - -## 1.7.1 -### 21 April 2021 - -SECURITY: - -* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the - Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions - 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) -* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all - versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) - -CHANGES: - -* go: Update to Go 1.15.11 [[GH-11395](https://github.com/hashicorp/vault/pull/11395)] - -IMPROVEMENTS: - -* auth/jwt: Adds ability to directly provide service account JSON in G Suite provider config. [[GH-11388](https://github.com/hashicorp/vault/pull/11388)] -* core: Add tls_max_version listener config option. 
[[GH-11226](https://github.com/hashicorp/vault/pull/11226)] -* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] -* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] -* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] - -BUG FIXES: - -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] -* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] -* core: requests forwarded by standby weren't always timed out. [[GH-11322](https://github.com/hashicorp/vault/pull/11322)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* replication: Fix: mounts created within a namespace that was part of an Allow - filtering rule would not appear on performance secondary if created after rule - was defined. -* replication: Perf standby nodes on newly enabled DR secondary sometimes couldn't connect to active node with TLS errors. [[GH-1823](https://github.com/hashicorp/vault/pull/1823)] -* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] -* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] -* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] -* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] -* storage/raft: using raft for ha_storage with a different storage backend was broken in 1.7.0, now fixed. [[GH-11340](https://github.com/hashicorp/vault/pull/11340)] -* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] -* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] -* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)] -* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] -* ui: Fix status menu no showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] -* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] - -## 1.7.0 -### 24 March 2021 - -CHANGES: - -* agent: Failed auto-auth attempts are now throttled by an exponential backoff instead of the -~2 second retry delay. The maximum backoff may be configured with the new `max_backoff` parameter, -which defaults to 5 minutes. 
[[GH-10964](https://github.com/hashicorp/vault/pull/10964)] -* aws/auth: AWS Auth concepts and endpoints that use the "whitelist" and "blacklist" terms -have been updated to more inclusive language (e.g. `/auth/aws/identity-whitelist` has been -updated to`/auth/aws/identity-accesslist`). The old and new endpoints are aliases, -sharing the same underlying data. The legacy endpoint names are considered **deprecated** -and will be removed in a future release (not before Vault 1.9). The complete list of -endpoint changes is available in the [AWS Auth API docs](/api-docs/auth/aws#deprecations-effective-in-vault-1-7). -* go: Update Go version to 1.15.10 [[GH-11114](https://github.com/hashicorp/vault/pull/11114)] [[GH-11173](https://github.com/hashicorp/vault/pull/11173)] - -FEATURES: - -* **Aerospike Storage Backend**: Add support for using Aerospike as a storage backend [[GH-10131](https://github.com/hashicorp/vault/pull/10131)] -* **Autopilot for Integrated Storage**: A set of features has been added to allow for automatic operator-friendly management of Vault servers. This is only applicable when integrated storage is in use. - * **Dead Server Cleanup**: Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections. - * **Server Health Checking**: An API has been added to track the state of servers, including their health. - * **New Server Stabilization**: When a new server is added to the cluster, there will be a waiting period where it must be healthy and stable for a certain amount of time before being promoted to a full, voting member. -* **Tokenization Secrets Engine (Enterprise)**: The Tokenization Secrets Engine is now generally available. We have added support for MySQL, key rotation, and snapshot/restore. -* replication (enterprise): The log shipper is now memory as well as length bound, and length and size can be separately configured. -* agent: Support for persisting the agent cache to disk [[GH-10938](https://github.com/hashicorp/vault/pull/10938)] -* auth/jwt: Adds `max_age` role parameter and `auth_time` claim validation. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] -* core (enterprise): X-Vault-Index and related headers can be used by clients to manage eventual consistency. -* kmip (enterprise): Use entropy augmentation to generate kmip certificates -* sdk: Private key generation in the certutil package now allows custom io.Readers to be used. 
[[GH-10653](https://github.com/hashicorp/vault/pull/10653)] -* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] -* secrets/database/cassandra: Add ability to customize dynamic usernames [[GH-10906](https://github.com/hashicorp/vault/pull/10906)] -* secrets/database/couchbase: Add ability to customize dynamic usernames [[GH-10995](https://github.com/hashicorp/vault/pull/10995)] -* secrets/database/mongodb: Add ability to customize dynamic usernames [[GH-10858](https://github.com/hashicorp/vault/pull/10858)] -* secrets/database/mssql: Add ability to customize dynamic usernames [[GH-10767](https://github.com/hashicorp/vault/pull/10767)] -* secrets/database/mysql: Add ability to customize dynamic usernames [[GH-10834](https://github.com/hashicorp/vault/pull/10834)] -* secrets/database/postgresql: Add ability to customize dynamic usernames [[GH-10766](https://github.com/hashicorp/vault/pull/10766)] -* secrets/db/snowflake: Added support for Snowflake to the Database Secret Engine [[GH-10603](https://github.com/hashicorp/vault/pull/10603)] -* secrets/keymgmt (enterprise): Adds beta support for distributing and managing keys in AWS KMS. -* secrets/keymgmt (enterprise): Adds general availability for distributing and managing keys in Azure Key Vault. -* secrets/openldap: Added dynamic roles to OpenLDAP similar to the combined database engine [[GH-10996](https://github.com/hashicorp/vault/pull/10996)] -* secrets/terraform: New secret engine for managing Terraform Cloud API tokens [[GH-10931](https://github.com/hashicorp/vault/pull/10931)] -* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)] -* ui: Adds the wizard to the Database Secret Engine [[GH-10982](https://github.com/hashicorp/vault/pull/10982)] -* ui: Database secrets engine, supporting MongoDB only [[GH-10655](https://github.com/hashicorp/vault/pull/10655)] - -IMPROVEMENTS: - -* agent: Add a `vault.retry` stanza that allows specifying number of retries on failure; this applies both to templating and proxied requests. [[GH-11113](https://github.com/hashicorp/vault/pull/11113)] -* agent: Agent can now run as a Windows service. [[GH-10231](https://github.com/hashicorp/vault/pull/10231)] -* agent: Better concurrent request handling on identical requests proxied through Agent. [[GH-10705](https://github.com/hashicorp/vault/pull/10705)] -* agent: Route templating server through cache when persistent cache is enabled. [[GH-10927](https://github.com/hashicorp/vault/pull/10927)] -* agent: change auto-auth to preload an existing token on start [[GH-10850](https://github.com/hashicorp/vault/pull/10850)] -* auth/approle: Secrets ID generation endpoint now returns `secret_id_ttl` as part of its response. [[GH-10826](https://github.com/hashicorp/vault/pull/10826)] -* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] -* auth/okta: Adds support for Okta Verify TOTP MFA. [[GH-10942](https://github.com/hashicorp/vault/pull/10942)] -* changelog: Add dependencies listed in dependencies/2-25-21 [[GH-11015](https://github.com/hashicorp/vault/pull/11015)] -* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)] -* core (enterprise): "vault status" command works when a namespace is set. 
[[GH-10725](https://github.com/hashicorp/vault/pull/10725)] -* core (enterprise): Update Trial Enterprise license from 30 minutes to 6 hours -* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] -* core/metrics: New telemetry metrics reporting lease expirations by time interval and namespace [[GH-10375](https://github.com/hashicorp/vault/pull/10375)] -* core: Added active since timestamp to the status output of active nodes. [[GH-10489](https://github.com/hashicorp/vault/pull/10489)] -* core: Check audit device with a test message before adding it. [[GH-10520](https://github.com/hashicorp/vault/pull/10520)] -* core: Track barrier encryption count and automatically rotate after a large number of operations or on a schedule [[GH-10774](https://github.com/hashicorp/vault/pull/10774)] -* core: add metrics for active entity count [[GH-10514](https://github.com/hashicorp/vault/pull/10514)] -* core: add partial month client count api [[GH-11022](https://github.com/hashicorp/vault/pull/11022)] -* core: dev mode listener allows unauthenticated sys/metrics requests [[GH-10992](https://github.com/hashicorp/vault/pull/10992)] -* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] -* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] -* storage/raft (enterprise): Listing of peers is now allowed on DR secondary -cluster nodes, as an update operation that takes in DR operation token for -authenticating the request. -* transform (enterprise): Improve FPE transformation performance -* transform (enterprise): Use transactions with batch tokenization operations for improved performance -* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] -* ui: Customize MongoDB input fields on Database Secrets Engine [[GH-10949](https://github.com/hashicorp/vault/pull/10949)] -* ui: Upgrade Ember-cli from 3.8 to 3.22. [[GH-9972](https://github.com/hashicorp/vault/pull/9972)] -* ui: Upgrade Storybook from 5.3.19 to 6.1.17. [[GH-10904](https://github.com/hashicorp/vault/pull/10904)] -* ui: Upgrade date-fns from 1.3.0 to 2.16.1. [[GH-10848](https://github.com/hashicorp/vault/pull/10848)] -* ui: Upgrade dependencies to resolve potential JS vulnerabilities [[GH-10677](https://github.com/hashicorp/vault/pull/10677)] -* ui: better errors on Database secrets engine role create [[GH-10980](https://github.com/hashicorp/vault/pull/10980)] - -BUG FIXES: - -* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] -* agent: Set TokenParent correctly in the Index to be cached. [[GH-10833](https://github.com/hashicorp/vault/pull/10833)] -* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] -* api/sys/config/ui: Fixes issue where multiple UI custom header values are ignored and only the first given value is used [[GH-10490](https://github.com/hashicorp/vault/pull/10490)] -* api: Fixes CORS API methods that were outdated and invalid [[GH-10444](https://github.com/hashicorp/vault/pull/10444)] -* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] -* auth/jwt: Fixes an issue where JWT verification keys weren't updated after a `jwks_url` change. 
[[GH-10919](https://github.com/hashicorp/vault/pull/10919)] -* auth/jwt: Fixes an issue where `jwt_supported_algs` were not being validated for JWT auth using -`jwks_url` and `jwt_validation_pubkeys`. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] -* auth/oci: Fixes alias name to use the role name, and not the literal string `name` [[GH-10](https://github.com/hashicorp/vault-plugin-auth-oci/pull/10)] [[GH-10952](https://github.com/hashicorp/vault/pull/10952)] -* consul-template: Update consul-template vendor version and associated dependencies to master, -pulling in https://github.com/hashicorp/consul-template/pull/1447 [[GH-10756](https://github.com/hashicorp/vault/pull/10756)] -* core (enterprise): Limit entropy augmentation during token generation to root tokens. [[GH-10487](https://github.com/hashicorp/vault/pull/10487)] -* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. -* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] -* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] -* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] -* core: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] -* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and -`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] -* core: Make all APIs that report init status consistent, and make them report -initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)] -* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] -* core: Turn off case sensitivity for allowed entity alias check during token create operation. 
[[GH-10743](https://github.com/hashicorp/vault/pull/10743)] -* http: change max_request_size to be unlimited when the config value is less than 0 [[GH-10072](https://github.com/hashicorp/vault/pull/10072)] -* license: Fix license caching issue that prevents new licenses to get picked up by the license manager [[GH-10424](https://github.com/hashicorp/vault/pull/10424)] -* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)] -* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)] -* replication (enterprise): Fix bug with not starting merkle sync while requests are in progress -* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)] -* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)] -* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)] -* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)] -* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)] -* serviceregistration: Fix race during shutdown of Consul service registration. [[GH-10901](https://github.com/hashicorp/vault/pull/10901)] -* storage/raft (enterprise): Automated snapshots with Azure required specifying -`azure_blob_environment`, which should have had as a default `AZUREPUBLICCLOUD`. -* storage/raft (enterprise): Reading a non-existent auto snapshot config now returns 404. -* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and -didn't work. Renamed to aws_s3_kms_key, and make it work so that when provided -the given key will be used to encrypt the snapshot using AWS KMS. -* transform (enterprise): Fix bug tokenization handling metadata on exportable stores -* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect -* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path -* transform (enterprise): Make expiration timestamps human readable -* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error -* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] -* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)] -* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)] -* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)] -* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)] - -DEPRECATIONS: -* aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated. 
-Refer to the CHANGES section for additional details. - -## 1.6.7 -### 29 September 2021 - -BUG FIXES: - -* core (enterprise): Fix bug where password generation through password policies do not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] -* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] -* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] -* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12597](https://github.com/hashicorp/vault/pull/12597)] - -## 1.6.6 -### 26 August 2021 - -SECURITY: - -* *UI Secret Caching*: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. - -CHANGES: - -* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 -* go: Update go version to 1.15.15 [[GH-12423](https://github.com/hashicorp/vault/pull/12423)] - -IMPROVEMENTS: - -* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] - -BUG FIXES: - -* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] -* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] -* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. [[GH-12087](https://github.com/hashicorp/vault/pull/12087)] -* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] -* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] -* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] - -## 1.6.5 -### May 20th, 2021 - -SECURITY: - -* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token -leases and dynamic secret leases with a zero-second TTL, causing them to be -treated as non-expiring, and never revoked. This issue affects Vault and Vault -Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and -1.7.2 (CVE-2021-32923). - -CHANGES: - -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for -signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)] - -BUG FIXES: - -* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] -* core: correct logic for renewal of leases nearing their expiration time. 
[[GH-11650](https://github.com/hashicorp/vault/pull/11650)] -* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] -* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] -* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] - -## 1.6.4 -### 21 April 2021 - -SECURITY: - -* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the - Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions - 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) -* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all - versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) - -CHANGES: - -* go: Update to Go 1.15.11 [[GH-11396](https://github.com/hashicorp/vault/pull/11396)] - -IMPROVEMENTS: - -* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)] -* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)] -* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] - -BUG FIXES: - -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] -* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* pki: Preserve ordering of all DN attribute values when issuing certificates [[GH-11259](https://github.com/hashicorp/vault/pull/11259)] -* replication: Fix: mounts created within a namespace that was part of an Allow - filtering rule would not appear on performance secondary if created after rule - was defined. -* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] -* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] - - -## 1.6.3 -### February 25, 2021 - -SECURITY: - -* Limited Unauthenticated License Metadata Read: We addressed a security vulnerability that allowed for the unauthenticated -reading of Vault license metadata from DR Secondaries. This vulnerability affects Vault Enterprise and is -fixed in 1.6.3 (CVE-2021-27668). 
- -CHANGES: - -* secrets/mongodbatlas: Move from whitelist to access list API [[GH-10966](https://github.com/hashicorp/vault/pull/10966)] - -IMPROVEMENTS: - -* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] - -BUG FIXES: - -* auth/kubernetes: Cancel API calls to TokenReview endpoint when request context -is closed [[GH-10930](https://github.com/hashicorp/vault/pull/10930)] -* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] -* quotas: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] -* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)] -* replication (enterprise): Don't write request count data on DR Secondaries. -Fixes DR Secondaries becoming out of sync approximately every 30s. [[GH-10970](https://github.com/hashicorp/vault/pull/10970)] -* secrets/azure (enterprise): Forward service principal credential creation to the -primary cluster if called on a performance standby or performance secondary. [[GH-10902](https://github.com/hashicorp/vault/pull/10902)] - -## 1.6.2 -### January 29, 2021 - -SECURITY: - -* IP Address Disclosure: We fixed a vulnerability where, under some error -conditions, Vault would return an error message disclosing internal IP -addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in -1.6.2 (CVE-2021-3024). -* Limited Unauthenticated Remove Peer: As of Vault 1.6, the remove-peer command -on DR secondaries did not require authentication. This issue impacts the -stability of HA architecture, as a bad actor could remove all standby -nodes from a DR -secondary. This issue affects Vault Enterprise 1.6.0 and 1.6.1, and is fixed in -1.6.2 (CVE-2021-3282). -* Mount Path Disclosure: Vault previously returned different HTTP status codes for -existent and non-existent mount paths. This behavior would allow unauthenticated -brute force attacks to reveal which paths had valid mounts. This issue affects -Vault and Vault Enterprise and is fixed in 1.6.2 (CVE-2020-25594). - -CHANGES: - -* go: Update go version to 1.15.7 [[GH-10730](https://github.com/hashicorp/vault/pull/10730)] - -FEATURES: - -* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)] - -IMPROVEMENTS: - -* core (enterprise): "vault status" command works when a namespace is set. [[GH-10725](https://github.com/hashicorp/vault/pull/10725)] -* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] -* storage/raft (enterprise): Listing of peers is now allowed on DR secondary -cluster nodes, as an update operation that takes in DR operation token for -authenticating the request. -* core: allow setting tls_servername for raft retry/auto-join [[GH-10698](https://github.com/hashicorp/vault/pull/10698)] - -BUG FIXES: - -* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] -* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. 
[[GH-10650](https://github.com/hashicorp/vault/pull/10650)] -* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)] -* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)] -* storage/raft (enterprise): Automated snapshots with Azure required specifying -`azure_blob_environment`, which should have had as a default `AZUREPUBLICCLOUD`. -* storage/raft (enterprise): Autosnapshots config and storage weren't excluded from -performance replication, causing conflicts and errors. -* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)] -* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)] - -## 1.6.1 -### December 16, 2020 - -SECURITY: - -* LDAP Auth Method: We addressed an issue where error messages returned by the - LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault - Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177). -* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent - users within namespaces from applying Sentinel EGP policies to paths above - their namespace. This vulnerability affects Vault Enterprise and is fixed in - 1.5.6 and 1.6.1 (CVE-2020-35453). - -IMPROVEMENTS: - -* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] -* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] -* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] - -BUG FIXES: - -* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] -* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] -* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. -* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)] -* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] -* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and -`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] -* core: Make all APIs that report init status consistent, and make them report -initialized=true when a Raft join is in progress. 
[[GH-10498](https://github.com/hashicorp/vault/pull/10498)]
-* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
-* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
-* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
-* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
-* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and didn't work. Renamed to aws_s3_kms_key, and made it work so that, when provided, the given key is used to encrypt the snapshot using AWS KMS.
-* transform (enterprise): Fix bug in tokenization's handling of metadata on exportable stores
-* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
-* transform (enterprise): Make expiration timestamps human readable
-* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
-* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
-* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
-* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
-* ui: Fix radio click on secrets and auth list pages. [[GH-10586](https://github.com/hashicorp/vault/pull/10586)]
-
-## 1.6.0
-### November 11th, 2020
-
-NOTE:
-
-Binaries for 32-bit macOS (i.e. the `darwin_386` build) will no longer be published. This target was dropped in the latest version of the Go compiler.
-
-CHANGES:
-
-* agent: Agent now properly returns a non-zero exit code on error, such as one due to template rendering failure. Using `error_on_missing_key` in the template config will cause agent to immediately exit on failure. In order to make agent properly exit due to continuous failure from template rendering errors, the old behavior of indefinitely restarting the template server is now changed to exit once the default retry attempt of 12 times (with exponential backoff) gets exhausted. [[GH-9670](https://github.com/hashicorp/vault/pull/9670)]
-* token: Periodic tokens generated by auth methods will have the period value stored in their token entries. [[GH-7885](https://github.com/hashicorp/vault/pull/7885)]
-* core: New telemetry metrics reporting mount table size and number of entries [[GH-10201](https://github.com/hashicorp/vault/pull/10201)]
-* go: Updated Go version to 1.15.4 [[GH-10366](https://github.com/hashicorp/vault/pull/10366)]
-
-FEATURES:
-
-* **Couchbase Secrets**: Vault can now manage static and dynamic credentials for Couchbase. [[GH-9664](https://github.com/hashicorp/vault/pull/9664)]
-* **Expanded Password Policy Support**: Custom password policies are now supported for all database engines.
-* **Integrated Storage Auto Snapshots (Enterprise)**: This feature enables an operator to schedule snapshots of the integrated storage backend and ensure those snapshots are persisted elsewhere.
-* **Integrated Storage Cloud Auto Join**: This feature for integrated storage enables Vault nodes running in the cloud to automatically discover and join a Vault cluster via operator-supplied metadata.
-* **Key Management Secrets Engine (Enterprise; Tech Preview)**: This new secret engine allows securely distributing and managing keys in Azure cloud KMS services.
-* **Seal Migration**: With Vault 1.6, we will support migrating from an auto unseal mechanism to a different mechanism of the same type. For example, if you were using an AWS KMS key to automatically unseal, you can now migrate to a different AWS KMS key.
-* **Tokenization (Enterprise; Tech Preview)**: Tokenization supports creating irreversible “tokens” from sensitive data. Tokens can be used in less secure environments, protecting the original data.
-* **Vault Client Count**: Vault now counts the number of active entities (and non-entity tokens) per month and makes this information available via the "Metrics" section of the UI.
-
-IMPROVEMENTS:
-
-* auth/approle: Role names can now be referenced in templated policies through the `approle.metadata.role_name` property [[GH-9529](https://github.com/hashicorp/vault/pull/9529)]
-* auth/aws: Improve logic check on wildcard `BoundIamPrincipalARNs` and include role name on error messages on check failure [[GH-10036](https://github.com/hashicorp/vault/pull/10036)]
-* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-123](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/123)]
-* auth/jwt: Adding EdDSA (ed25519) to supported algorithms [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
-* auth/jwt: Improve cli authorization error [[GH-137](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/137)]
-* auth/jwt: Add OIDC namespace_in_state option [[GH-140](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/140)]
-* secrets/transit: fix missing plaintext in bulk decrypt response [[GH-9991](https://github.com/hashicorp/vault/pull/9991)]
-* command/server: Delay informational messages in -dev mode until logs have settled. [[GH-9702](https://github.com/hashicorp/vault/pull/9702)]
-* command/server: Add environment variable support for `disable_mlock`. [[GH-9931](https://github.com/hashicorp/vault/pull/9931)]
-* core/metrics: Add metrics for storage cache [[GH-10079](https://github.com/hashicorp/vault/pull/10079)]
-* core/metrics: Add metrics for leader status [[GH-10147](https://github.com/hashicorp/vault/pull/10147)]
-* physical/azure: Add the ability to use Azure Instance Metadata Service to set the credentials for Azure Blob storage on the backend. [[GH-10189](https://github.com/hashicorp/vault/pull/10189)]
-* sdk/framework: Add a time type for API fields.
[[GH-9911](https://github.com/hashicorp/vault/pull/9911)] -* secrets/database: Added support for password policies to all databases [[GH-9641](https://github.com/hashicorp/vault/pull/9641), - [and more](https://github.com/hashicorp/vault/pulls?q=is%3Apr+is%3Amerged+dbpw)] -* secrets/database/cassandra: Added support for static credential rotation [[GH-10051](https://github.com/hashicorp/vault/pull/10051)] -* secrets/database/elasticsearch: Added support for static credential rotation [[GH-19](https://github.com/hashicorp/vault-plugin-database-elasticsearch/pull/19)] -* secrets/database/hanadb: Added support for root credential & static credential rotation [[GH-10142](https://github.com/hashicorp/vault/pull/10142)] -* secrets/database/hanadb: Default password generation now includes dashes. Custom statements may need to be updated - to include quotes around the password field [[GH-10142](https://github.com/hashicorp/vault/pull/10142)] -* secrets/database/influxdb: Added support for static credential rotation [[GH-10118](https://github.com/hashicorp/vault/pull/10118)] -* secrets/database/mongodbatlas: Added support for root credential rotation [[GH-14](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/14)] -* secrets/database/mongodbatlas: Support scopes field in creations statements for MongoDB Atlas database plugin [[GH-15](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/15)] -* seal/awskms: Add logging during awskms auto-unseal [[GH-9794](https://github.com/hashicorp/vault/pull/9794)] -* storage/azure: Update SDK library to use [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) since previous library has been deprecated. [[GH-9577](https://github.com/hashicorp/vault/pull/9577/)] -* secrets/ad: `rotate-root` now supports POST requests like other secret engines [[GH-70](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/70)] -* ui: Add ui functionality for the Transform Secret Engine [[GH-9665](https://github.com/hashicorp/vault/pull/9665)] -* ui: Pricing metrics dashboard [[GH-10049](https://github.com/hashicorp/vault/pull/10049)] - -BUG FIXES: - -* auth/jwt: Fix bug preventing config edit UI from rendering [[GH-141](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/141)] -* cli: Don't open or overwrite a raft snapshot file on an unsuccessful `vault operator raft snapshot` [[GH-9894](https://github.com/hashicorp/vault/pull/9894)] -* core: Implement constant time version of shamir GF(2^8) math [[GH-9932](https://github.com/hashicorp/vault/pull/9932)] -* core: Fix resource leak in plugin API (plugin-dependent, not all plugins impacted) [[GH-9557](https://github.com/hashicorp/vault/pull/9557)] -* core: Fix race involved in enabling certain features via a license change -* core: Fix error handling in HCL parsing of objects with invalid syntax [[GH-410](https://github.com/hashicorp/hcl/pull/410)] -* identity: Check for timeouts in entity API [[GH-9925](https://github.com/hashicorp/vault/pull/9925)] -* secrets/database: Fix handling of TLS options in mongodb connection strings [[GH-9519](https://github.com/hashicorp/vault/pull/9519)] -* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. 
[[GH-93](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/93)] -* ui: Mask LDAP bindpass while typing [[GH-10087](https://github.com/hashicorp/vault/pull/10087)] -* ui: Update language in promote dr modal flow [[GH-10155](https://github.com/hashicorp/vault/pull/10155)] -* ui: Update language on replication primary dashboard for clarity [[GH-10205](https://github.com/hashicorp/vault/pull/10217)] -* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)] - -## 1.5.9 -### May 20th, 2021 - -SECURITY: - -* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token -leases and dynamic secret leases with a zero-second TTL, causing them to be -treated as non-expiring, and never revoked. This issue affects Vault and Vault -Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and -1.7.2 (CVE-2021-32923). - -CHANGES: - -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for -signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)] - -BUG FIXES: - -* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] - -## 1.5.8 -### 21 April 2021 - -SECURITY: - -* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the - Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions - 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) - -CHANGES: - -* go: Update to Go 1.14.15 [[GH-11397](https://github.com/hashicorp/vault/pull/11397)] - -IMPROVEMENTS: - -* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)] - -BUG FIXES: - -* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)] - -## 1.5.7 -### January 29, 2021 - -SECURITY: - -* IP Address Disclosure: We fixed a vulnerability where, under some error -conditions, Vault would return an error message disclosing internal IP -addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in -1.6.2 and 1.5.7 (CVE-2021-3024). -* Mount Path Disclosure: Vault previously returned different HTTP status codes for -existent and non-existent mount paths. This behavior would allow unauthenticated -brute force attacks to reveal which paths had valid mounts. This issue affects -Vault and Vault Enterprise and is fixed in 1.6.2 and 1.5.7 (CVE-2020-25594). - -IMPROVEMENTS: - -* storage/raft (enterprise): Listing of peers is now allowed on DR secondary -cluster nodes, as an update operation that takes in DR operation token for -authenticating the request. 
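As a rough illustration of that DR-secondary peer listing, a minimal sketch using the Go API client follows; the endpoint path (`sys/storage/raft/configuration`) and the `dr_operation_token` field name are assumptions made for illustration and are not specified by these release notes:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	cfg := vault.DefaultConfig()
	// Hypothetical DR secondary node address.
	cfg.Address = "https://dr-secondary.example.com:8200"

	client, err := vault.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// On a DR secondary, listing raft peers is an update (write) operation
	// authenticated with a DR operation token rather than a regular token.
	// Both the path and the field name below are assumptions.
	secret, err := client.Logical().Write("sys/storage/raft/configuration", map[string]interface{}{
		"dr_operation_token": "<your DR operation token>",
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no data returned")
	}
	fmt.Printf("raft configuration: %#v\n", secret.Data)
}
```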
- -BUG FIXES: - -* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] -* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] - -## 1.5.6 -### December 16, 2020 - -SECURITY: - -* LDAP Auth Method: We addressed an issue where error messages returned by the - LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault - Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177). -* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent - users within namespaces from applying Sentinel EGP policies to paths above - their namespace. This vulnerability affects Vault Enterprise and is fixed in - 1.5.6 and 1.6.1. - -IMPROVEMENTS: - -* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] - -BUG FIXES: - -* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. -* core: Fix bug where updating an existing path quota could introduce a conflict [[GH-10285](https://github.com/hashicorp/vault/pull/10285)] -* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] -* quotas (enterprise): Reset cache before loading quotas in the db during startup -* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)] - -## 1.5.5 -### October 21, 2020 - -IMPROVEMENTS: - -* auth/aws, core/seal, secret/aws: Set default IMDS timeouts to match AWS SDK [[GH-10133](https://github.com/hashicorp/vault/pull/10133)] - -BUG FIXES: - -* auth/aws: Restrict region selection when in the aws-us-gov partition to avoid IAM errors [[GH-9947](https://github.com/hashicorp/vault/pull/9947)] -* core (enterprise): Allow operators to add and remove (Raft) peers in a DR secondary cluster using Integrated Storage. -* core (enterprise): Add DR operation token to the remove peer API and CLI command (when DR secondary). -* core (enterprise): Fix deadlock in handling EGP policies -* core (enterprise): Fix extraneous error messages in DR Cluster -* secrets/mysql: Conditionally overwrite TLS parameters for MySQL secrets engine [[GH-9729](https://github.com/hashicorp/vault/pull/9729)] -* secrets/ad: Fix bug where `password_policy` setting was not using correct key when `ad/config` was read [[GH-71](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/71)] -* ui: Fix issue with listing roles and methods on the same auth methods with different names [[GH-10122](https://github.com/hashicorp/vault/pull/10122)] - -## 1.5.4 -### September 24th, 2020 - -SECURITY: - -* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816). 
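A minimal Go sketch for sanity-checking the batch token fix is shown below: it creates a short-lived batch token and prints the lease duration, which on patched versions should match the requested TTL. The parent token and policy name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("<parent token>") // placeholder

	// Create a batch token with a 5-minute TTL; its lease should expire on
	// schedule rather than outliving the TTL (CVE-2020-25816).
	secret, err := client.Auth().Token().Create(&vault.TokenCreateRequest{
		Type:     "batch",
		TTL:      "5m",
		Policies: []string{"default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("batch token lease duration: %d seconds\n", secret.Auth.LeaseDuration)
}
```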
-
-IMPROVEMENTS:
-
-* secrets/pki: Handle expiration of a cert not in storage as a success [[GH-9880](https://github.com/hashicorp/vault/pull/9880)]
-* auth/kubernetes: Add an option to disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod [[GH-97](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/97)]
-* secrets/gcp: Add check for 403 during rollback to prevent repeated deletion calls [[GH-97](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/97)]
-* core: Disable usage metrics collection on performance standby nodes. [[GH-9966](https://github.com/hashicorp/vault/pull/9966)]
-* credential/aws: Added X-Amz-Content-Sha256 as a default STS request header [[GH-10009](https://github.com/hashicorp/vault/pull/10009)]
-
-BUG FIXES:
-
-* agent: Fix `disable_fast_negotiation` not being set on the auth method when configured by user. [[GH-9892](https://github.com/hashicorp/vault/pull/9892)]
-* core (enterprise): Fix hang when cluster-wide plugin reload cleanup is slow on unseal
-* core (enterprise): Fix an error in cluster-wide plugin reload cleanup following such a reload
-* core: Fix crash when metrics collection encounters zero-length keys in KV store [[GH-9811](https://github.com/hashicorp/vault/pull/9881)]
-* mfa (enterprise): Fix incorrect handling of PingID responses that could result in auth requests failing
-* replication (enterprise): Improve handling of a race condition when using a newly created token on a performance standby node
-* replication (enterprise): Only write failover cluster addresses if they've changed
-* ui: fix bug where dropdown for identity/entity management is not reflective of actual policy [[GH-9958](https://github.com/hashicorp/vault/pull/9958)]
-
-## 1.5.3
-### August 27th, 2020
-
-NOTE:
-
-All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
-
-BUG FIXES:
-
-* auth/aws: Made header handling for IAM authentication more robust
-* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing
-
-## 1.5.2.1
-### August 21st, 2020
-### Enterprise Only
-
-NOTE:
-
-Includes correct license in the HSM binary.
-
-## 1.5.2
-### August 20th, 2020
-
-NOTE:
-
-OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
-
-KNOWN ISSUES:
-
-* AWS IAM logins may return an error depending on the headers sent with the request.
-  For more details and a workaround, see the [1.5.2 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.2)
-* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise
-  customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
-
+* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)]
+* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)]
+* auth/approle: Add nil check for the secret ID entry when deleting via secret ID accessor, preventing cross-role secret ID deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)]
+* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)]
+* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)]
+* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)]
+* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)]
+* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)]
+* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
+* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)]
+* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)]
+* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)]
+* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)]
+* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)]
+* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)]
+* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)]
+* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)]
+* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)]
+* core (enterprise): Fix missing quotation mark in error message
+* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation.
+* core (enterprise): Fix panic when using invalid accessor for control-group request
+* core (enterprise): Fix perf standby WAL streaming silently failing when replication setup happens at a bad time.
+* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use a storage backend other than `raft` or `consul`.
+* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month.
[[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)] +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)] +* core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] +* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] +* core: Linux packages now have vendor label and set the default label to HashiCorp. +This fix is implemented for any future releases, but will not be updated for historical releases. +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] +* core: fix GPG encryption to support subkeys. 
[[GH-16224](https://github.com/hashicorp/vault/pull/16224)] +* core: fix a start up race condition where performance standbys could go into a +mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] +* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. +* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. +* license (enterprise): Fix bug where license would update even if the license didn't change. +* licensing (enterprise): update autoloaded license cache after reload +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] +* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. 
[[GH-18051](https://github.com/hashicorp/vault/pull/18051)]
+* replication (enterprise): Fix bug where reloading external plugin on a secondary would
+break replication.
+* sdk: Don't panic if system view or storage methods are called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)]
+* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)]
+* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)]
+* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)]
+* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)]
+* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)]
+* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)]
+* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)]
+* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)]
+* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)]
+* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)]
+* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
+* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)]
+* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)]
+* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)]
+* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)]
+* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)]
+* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)]
+* storage/raft (enterprise): An already joined node can rejoin by wiping storage
+and re-issuing a join request, but in doing so could transiently become a
+non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)]
+* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)]
+* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)]
+* ui/keymgmt: Sets the defaultValue for type when creating a key.
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
+* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)]
+* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)]
+* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
+* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)]
+* ui: Remove default value of 30 from TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)]
+* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)]
+* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)]
+* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)]
+* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)]
+* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)]
+* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)]

-## 1.5.1
-### August 20th, 2020
+## 1.12.11
+### September 13, 2023

SECURITY:

-* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
-* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
-* When using Vault Agent with cert auto-auth and caching enabled, under certain circumstances, clients without permission to access agent's token may retrieve the token without login credentials. This vulnerability affects Vault Agent 1.1.0 and newer and is fixed in 1.5.1 (CVE-2020-17455)
-
-KNOWN ISSUES:
-
-* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
-* AWS IAM logins may return an error depending on the headers sent with the request.
-  For more details and a workaround, see the [1.5.1 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.1)
-
-CHANGES:
-
-* pki: The tidy operation will now remove revoked certificates if the parameter `tidy_revoked_certs` is set to `true`. This will result in certificate entries being immediately removed, as opposed to waiting until their NotAfter time. Note that this only affects certificates that have already been revoked.
[[GH-9609](https://github.com/hashicorp/vault/pull/9609)] -* go: Updated Go version to 1.14.7 +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. [[GH-22852](https://github.com/hashicorp/vault/pull/22852)] IMPROVEMENTS: -* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-9574](https://github.com/hashicorp/vault/pull/9574)] -* auth/jwt: Add EdDSA to supported algorithms. [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)] -* secrets/openldap: Add "ad" schema that allows the engine to correctly rotate AD passwords. [[GH-9740](https://github.com/hashicorp/vault/pull/9740)] -* pki: Add a `allowed_domains_template` parameter that enables the use of identity templating within the `allowed_domains` parameter. [[GH-8509](https://github.com/hashicorp/vault/pull/8509)] -* secret/azure: Use write-ahead-logs to cleanup any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)] -* ui: Wrap TTL option on transit engine export action is updated to a new component. [[GH-9632](https://github.com/hashicorp/vault/pull/9632)] -* ui: Wrap Tool uses newest version of TTL Picker component. [[GH-9691](https://github.com/hashicorp/vault/pull/9691)] +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* kmip (enterprise): reduce latency of KMIP operation handling BUG FIXES: -* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-9603](https://github.com/hashicorp/vault/pull/9603)] -* replication (enterprise): Fix status API output incorrectly stating replication is in `idle` state. -* replication (enterprise): Use PrimaryClusterAddr if it's been set -* core: Fix panic when printing over-long info fields at startup [[GH-9681](https://github.com/hashicorp/vault/pull/9681)] -* core: Seal migration using the new minimal-downtime strategy didn't work properly with performance standbys. [[GH-9690](https://github.com/hashicorp/vault/pull/9690)] -* core: Vault failed to start when there were non-string values in seal configuration [[GH-9555](https://github.com/hashicorp/vault/pull/9555)] -* core: Handle a trailing slash in the API address used for enabling replication +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable -## 1.5.0 -### July 21st, 2020 +## 1.12.10 +### August 30, 2023 CHANGES: -* audit: Token TTL and issue time are now provided in the auth portion of audit logs. [[GH-9091](https://github.com/hashicorp/vault/pull/9091)] -* auth/gcp: Changes the default name of the entity alias that gets created to be the role ID for both IAM and GCE authentication. 
[[GH-99](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/99)]
-* core: Remove the addition of newlines to parsed configuration when using integer/boolean values [[GH-8928](https://github.com/hashicorp/vault/pull/8928)]
-* cubbyhole: Reject reads and writes to an empty ("") path. [[GH-8971](https://github.com/hashicorp/vault/pull/8971)]
-* secrets/azure: Default password generation changed from uuid to cryptographically secure randomized string [[GH-40](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/40)]
-* storage/gcs: The `credentials_file` config option has been removed. The `GOOGLE_APPLICATION_CREDENTIALS` environment variable
-  or default credentials may be used instead [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
-* storage/raft: The storage configuration now accepts a new `max_entry_size` config that will limit
-  the total size in bytes of any entry committed via raft. It defaults to `"1048576"` (1MiB). [[GH-9027](https://github.com/hashicorp/vault/pull/9027)]
-* token: Token creation with custom token ID via `id` will no longer allow periods (`.`) as part of the input string.
-  The final generated token value may contain periods, such as the `s.` prefix for service token
-  indication. [[GH-8646](https://github.com/hashicorp/vault/pull/8646/files)]
-* token: Token renewals will now return token policies within the `token_policies`, identity policies within `identity_policies`, and the full policy set within `policies`. [[GH-8535](https://github.com/hashicorp/vault/pull/8535)]
-* go: Updated Go version to 1.14.4
-
-FEATURES:
-
-* **Monitoring**: We have released a Splunk App for Enterprise customers. The app is accompanied by an updated monitoring guide and a few new metrics to enable OSS users to effectively monitor Vault.
-* **Password Policies**: Allows operators to customize how passwords are generated for select secret engines (OpenLDAP, Active Directory, Azure, and RabbitMQ).
-* **Replication UI Improvements**: We have redesigned the replication UI to highlight the state and relationship between primaries and secondaries and improved management workflows, enabling a more holistic understanding of multiple Vault clusters.
-* **Resource Quotas**: As of 1.5, Vault supports specifying a quota to rate limit requests on OSS and Enterprise. Enterprise customers also have access to set quotas on the number of leases that can be generated on a path.
-* **OpenShift Support**: We have updated the Helm charts to allow users to install Vault onto their OpenShift clusters.
-* **Seal Migration**: We have made updates to allow migrations from auto unseal to Shamir unseal on Enterprise.
-* **AWS Auth Web Identity Support**: We've added support for AWS Web Identities, which will be used in the credentials chain if present.
-* **Vault Monitor**: Similar to the monitor command for Consul and Nomad, we have added the ability for Vault to stream logs from other Vault servers at varying log levels.
-* **AWS Secrets Groups Support**: IAM users generated by Vault may now be added to IAM Groups.
-* **Integrated Storage as HA Storage**: In Vault 1.5, it is possible to use Integrated Storage as HA Storage with a different storage backend as regular storage.
-* **OIDC Auth Provider Extensions**: We've added support to OIDC Auth to incorporate IdP-specific extensions. Currently this includes expanded Azure AD groups support.
-* **GCP Secrets**: Support BigQuery dataset ACLs in absence of IAM endpoints.
-* **KMIP**: Add support for signing client certificate requests (CSRs) rather than having them be generated entirely within Vault.
-
-IMPROVEMENTS:
-
-* audit: Replication status requests are no longer audited. [[GH-8877](https://github.com/hashicorp/vault/pull/8877)]
-* audit: Added mount_type field to requests and responses. [[GH-9167](https://github.com/hashicorp/vault/pull/9167)]
-* auth/aws: Add support for Web Identity credentials [[GH-7738](https://github.com/hashicorp/vault/pull/7738)]
-* auth/jwt: Support users that are members of more than 200 groups on Azure [[GH-120](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/120)]
-* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)]
-* auth/kubernetes: Allow disabling `iss` validation [[GH-91](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/91)]
-* auth/kubernetes: Try reading the ca.crt and TokenReviewer JWT from the default service account [[GH-83](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/83)]
-* cli: Support reading TLS parameters from file for the `vault operator raft join` command. [[GH-9060](https://github.com/hashicorp/vault/pull/9060)]
-* cli: Add a new subcommand, `vault monitor`, for tailing server logs in the console. [[GH-8477](https://github.com/hashicorp/vault/pull/8477)]
-* core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
-* core: Added Password Policies for user-configurable password generation [[GH-8637](https://github.com/hashicorp/vault/pull/8637)]
-* core: New telemetry metrics covering token counts, token creation, KV secret counts, lease creation. [[GH-9239](https://github.com/hashicorp/vault/pull/9239)] [[GH-9250](https://github.com/hashicorp/vault/pull/9250)] [[GH-9244](https://github.com/hashicorp/vault/pull/9244)] [[GH-9052](https://github.com/hashicorp/vault/pull/9052)]
-* physical/gcs: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
-* physical/spanner: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9423](https://github.com/hashicorp/vault/pull/9423)]
-* plugin: Add SDK method, `Sys.ReloadPlugin`, and CLI command, `vault plugin reload`, for reloading plugins. [[GH-8777](https://github.com/hashicorp/vault/pull/8777)]
-* plugin (enterprise): Add a scope field to plugin reload, which, when global, reloads the plugin anywhere in a cluster.
[[GH-9347](https://github.com/hashicorp/vault/pull/9347)]
-* sdk/framework: Support accepting TypeFloat parameters over the API [[GH-8923](https://github.com/hashicorp/vault/pull/8923)]
-* secrets/aws: Add iam_groups parameter to role create/update [[GH-8811](https://github.com/hashicorp/vault/pull/8811)]
-* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-11](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/11)]
-* secrets/database: Add static role rotation for MSSQL database plugin [[GH-9062](https://github.com/hashicorp/vault/pull/9062)]
-* secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
-* secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
-* secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)]
-* secrets/ssh: Add a CA-mode role option to specify signing algorithm [[GH-9096](https://github.com/hashicorp/vault/pull/9096)]
-* secrets/ssh: The [Vault SSH Helper](https://github.com/hashicorp/vault-ssh-helper) can now be configured to reference a mount in a namespace [[GH-44](https://github.com/hashicorp/vault-ssh-helper/pull/44)]
-* secrets/transit: Transit requests that make use of keys now include a new field `key_version` in their responses [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
-* secrets/transit: Improving transit batch encrypt and decrypt latencies [[GH-8775](https://github.com/hashicorp/vault/pull/8775)]
-* sentinel: Add a sentinel config section, and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults.
-* ui: Update TTL picker styling on SSH secret engine [[GH-8891](https://github.com/hashicorp/vault/pull/8891)]
-* ui: Only render the JWT input field of the Vault login form on mounts configured for JWT auth [[GH-8952](https://github.com/hashicorp/vault/pull/8952)]
-* ui: Add replication dashboards. Improve replication management workflows. [[GH-8705](https://github.com/hashicorp/vault/pull/8705)]
-* ui: Update alert banners to match design system's black text. [[GH-9463](https://github.com/hashicorp/vault/pull/9463)]
-
-BUG FIXES:
-
-* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path.
[[GH-7](https://github.com/hashicorp/vault-plugin-auth-oci/pull/7)]
-* core: Extend replicated cubbyhole fix in 1.4.0 to cover case where a performance primary is also a DR primary [[GH-9148](https://github.com/hashicorp/vault/pull/9148)]
-* replication (enterprise): Use the PrimaryClusterAddr if it's been set
-* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
-* sentinel: fix panic due to concurrent map access when rules iterate over metadata maps
-* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
-* secrets/database: Fix issue where rotating root database credentials while Vault's storage backend is unavailable causes Vault to lose access to the database [[GH-8782](https://github.com/hashicorp/vault/pull/8782)]
-* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
-* secrets/database: Fix parsing of multi-line PostgreSQL statements [[GH-8512](https://github.com/hashicorp/vault/pull/8512)]
-* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-90](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/90)]
-* secrets/kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default (see the sketch below). [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
-* ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines. [[GH-8913](https://github.com/hashicorp/vault/pull/8913)]
-* ui: Disallow max versions value larger than 9999999999999999 on kv2 secrets engine. [[GH-9242](https://github.com/hashicorp/vault/pull/9242)]
-* ui: Add and upgrade missing dependencies to resolve a failure with `make static-dist`. [[GH-9277](https://github.com/hashicorp/vault/pull/9371)]
-
-## 1.4.7.1
-### October 15th, 2020
-### Enterprise Only
-
-BUG FIXES:
-* replication (enterprise): Fix panic when old filter path evaluation fails
-
-## 1.4.7
-### September 24th, 2020
-
-SECURITY:
-
-* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
-
-IMPROVEMENTS:
-
-* secret/azure: Use write-ahead-logs to cleanup any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
-
-BUG FIXES:
-* replication (enterprise): Don't stop replication if old filter path evaluation fails
-
-## 1.4.6
-### August 27th, 2020
-
-NOTE:
-
-All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
-
-BUG FIXES:
-
-* auth/aws: Made header handling for IAM authentication more robust
-* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing [[GH-9824](https://github.com/hashicorp/vault/pull/9824)]
-
-## 1.4.5.1
-### August 21st, 2020
-### Enterprise Only
-
-NOTE:
-
-Includes correct license in the HSM binary.
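The delete_version_after fix noted under 1.5.0 above can be observed with a short read of the KV v2 config endpoint, sketched here in Go; the `secret/` mount path and the token are assumptions for illustration:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("<token with read access>") // placeholder

	// KV v2 exposes engine configuration at <mount>/config; after the fix,
	// delete_version_after is returned even when left at its default.
	secret, err := client.Logical().Read("secret/config")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no config returned; is secret/ a KV v2 mount?")
	}
	fmt.Println("delete_version_after:", secret.Data["delete_version_after"])
}
```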
-
-## 1.4.5
-### August 20th, 2020
-
-NOTE:
-
-OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
-
-KNOWN ISSUES:
-
-* AWS IAM logins may return an error depending on the headers sent with the request. For more details and a workaround, see the [1.4.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.5)
-* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
-
-## 1.4.4
-### August 20th, 2020
+* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)]
+* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase.
+* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)]
+* ui: enable the create and update KV secret workflow when a control group is present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)]
+
+BUG FIXES:
+
+* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)]
+* core (enterprise): Remove MFA Configuration for namespace when deleting namespace
+* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)]
+* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have an expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)]
+* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)]
+* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)]
+* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)]
+* license: Add autoloaded license path to the cache exempt list. This is to ensure license changes on the active node are observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)]
+* replication (enterprise): Fix bug when syncing invalidations for CoreReplicatedClusterInfoPath
+* replication (enterprise): Fix a bug by which the atomicity of a merkle diff result could be affected, which could cause the merkle-diff & sync process to fail to switch into stream-wal mode afterwards.
+* sdk/ldaputil: Properly escape user filters when using UPN domains
+sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)]
+* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config.
[[GH-22332](https://github.com/hashicorp/vault/pull/22332)]
+* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute
+* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)]
+* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)]
+
+## 1.12.9
+### July 25, 2023

SECURITY:

-* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
-* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
-
-KNOWN ISSUES:
-
-* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
-* AWS IAM logins may return an error depending on the headers sent with the request. For more details and a workaround, see the [1.4.4 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.4)
+* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)]
-
-BUG FIXES:
-
-* auth/okta: fix bug introduced in 1.4.0: only 200 external groups were fetched even if user belonged to more [[GH-9580](https://github.com/hashicorp/vault/pull/9580)]
-* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
-* secrets/aws: Fix possible issue creating access keys when using Performance Standbys [[GH-9606](https://github.com/hashicorp/vault/pull/9606)]
-
-IMPROVEMENTS:
-* auth/aws: Retry on transient failures during AWS IAM auth login attempts [[GH-8727](https://github.com/hashicorp/vault/pull/8727)]
-* ui: Add transit key algorithms aes128-gcm96, ecdsa-p384, ecdsa-p521 to the UI. [[GH-9070](https://github.com/hashicorp/vault/pull/9070)] & [[GH-9520](https://github.com/hashicorp/vault/pull/9520)]
+
+CHANGES:
-
-## 1.4.3
-### July 2nd, 2020
+
+* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.

IMPROVEMENTS:

-* auth/aws: Add support for Web Identity credentials [[GH-9251](https://github.com/hashicorp/vault/pull/9251)]
-* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)]
-* core: Add the Go version used to build a Vault binary to the server message output.
[[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
-* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-9311](https://github.com/hashicorp/vault/pull/9311)]
-* physical/mysql: Require TLS or plaintext flagging in MySQL configuration [[GH-9012](https://github.com/hashicorp/vault/pull/9012)]
-* ui: Link to the Vault Changelog in the UI footer [[GH-9216](https://github.com/hashicorp/vault/pull/9216)]
-
-BUG FIXES:
-
-* agent: Restart template server when it shuts down [[GH-9200](https://github.com/hashicorp/vault/pull/9200)]
-* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-9278](https://github.com/hashicorp/vault/pull/9278)]
-* replication: The issue causing cubbyholes in namespaces on performance secondaries to not work, which was fixed in 1.4.0, was still present when the primary was both a performance primary and a DR primary; this case is now also fixed.
-* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values
-* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9207](https://github.com/hashicorp/vault/pull/9207)]
-* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9208](https://github.com/hashicorp/vault/pull/9208)]
-* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-9277](https://github.com/hashicorp/vault/pull/9277)]
+* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise.
+* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary
+* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling
-
-## 1.4.2 (May 21st, 2020)
-
-SECURITY:
-* core: Proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4.0 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)]
-* secrets/gcp: Fix a regression in 1.4.0 where the system TTLs were being used instead of the configured backend TTLs for dynamic service accounts. This vulnerability is CVE-2020-12757. [[GH-85](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/85)]
-
-IMPROVEMENTS:
-
-* storage/raft: The storage stanza now accepts `leader_ca_cert_file`, `leader_client_cert_file`, and `leader_client_key_file` parameters to read and parse TLS certificate information from paths on disk. Existing non-path based parameters will continue to work, but their values will need to be provided as a single-line string with newlines delimited by `\n`. A sketch of the file-based form appears below. [[GH-8894](https://github.com/hashicorp/vault/pull/8894)]
-* storage/raft: The `vault status` CLI command and the `sys/leader` API now contain the committed and applied raft indexes. [[GH-9011](https://github.com/hashicorp/vault/pull/9011)]
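As an illustration of the file-based TLS parameters described in the `storage/raft` entry above, here is a minimal sketch. It assumes, per the Raft storage docs, that the parameters are set inside a `retry_join` block; all hostnames and paths are placeholders.

```
# Sketch only: a raft storage stanza using the new file-based TLS parameters.
cat > /etc/vault.d/vault.hcl <<'EOF'
storage "raft" {
  path    = "/opt/vault/data"
  node_id = "node-a"

  retry_join {
    leader_api_addr         = "https://vault-0.example.internal:8200"
    # File-based alternatives to passing certs as single-line `\n` strings:
    leader_ca_cert_file     = "/etc/vault.d/tls/ca.pem"
    leader_client_cert_file = "/etc/vault.d/tls/client.pem"
    leader_client_key_file  = "/etc/vault.d/tls/client-key.pem"
  }
}
EOF
```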
-
-BUG FIXES:
-* auth/aws: Fix token renewal issues caused by the metadata changes in 1.4.1 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)]
-* auth/ldap: Fix 1.4.0 regression that could result in auth failures when LDAP auth config includes upndomain. [[GH-9041](https://github.com/hashicorp/vault/pull/9041)]
-* secrets/ad: Forward rotation requests from standbys to active clusters [[GH-66](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/66)]
-* secrets/database: Prevent generation of usernames that are not allowed by the MongoDB Atlas API [[GH-9](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/9)]
-* secrets/database: Return an error if a manual rotation of static account credentials fails [[GH-9035](https://github.com/hashicorp/vault/pull/9035)]
-* secrets/openldap: Forward all rotation requests from standbys to active clusters [[GH-9028](https://github.com/hashicorp/vault/pull/9028)]
-* secrets/transform (enterprise): Fix panic that could occur when accessing cached template entries, such as requests that accessed templates directly or indirectly from a performance standby node.
-* serviceregistration: Fix a regression for Consul service registration that ignored using the listener address as the redirect address unless api_addr was provided. It now properly uses the same redirect address as the one used by Vault's Core object. [[GH-8976](https://github.com/hashicorp/vault/pull/8976)]
-* storage/raft: Advertise the configured cluster address to the rest of the nodes in the raft cluster. This fixes an issue where a node advertising 0.0.0.0 is not using a unique hostname. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
-* storage/raft: Fix panic when multiple nodes attempt to join the cluster at once. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
-* sys: The path provided in `sys/internal/ui/mounts/:path` is now namespace-aware. This fixes an issue with `vault kv` subcommands that had namespaces provided in the path returning permission denied all the time. [[GH-8962](https://github.com/hashicorp/vault/pull/8962)]
-* ui: Fix snowman that appears when namespaces have more than one period [[GH-8910](https://github.com/hashicorp/vault/pull/8910)]
-
-## 1.4.1 (April 30th, 2020)
-
-CHANGES:
-
-* auth/aws: The default set of metadata fields added in 1.4.0 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
-* storage/raft: Disallow `ha_storage` to be specified if `raft` is set as the `storage` type.
[[GH-8707](https://github.com/hashicorp/vault/pull/8707)]
-
-IMPROVEMENTS:
-
-* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
-* auth/aws: Improve region selection to avoid errors seen if the account hasn't enabled some newer AWS regions [[GH-8679](https://github.com/hashicorp/vault/pull/8679)]
-* auth/azure: Enable login from Azure VMs with user-assigned identities [[GH-33](https://github.com/hashicorp/vault-plugin-auth-azure/pull/33)]
-* auth/gcp: The set of metadata stored during login is now configurable [[GH-92](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/92)]
-* auth/gcp: The type of alias name used during login is now configurable [[GH-95](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/95)]
-* auth/ldap: Improve error messages during LDAP operation failures [[GH-8740](https://github.com/hashicorp/vault/pull/8740)]
-* identity: Add a batch delete API for identity entities [[GH-8785](https://github.com/hashicorp/vault/pull/8785)]
-* identity: Improve performance of logins when no group updates are needed [[GH-8795](https://github.com/hashicorp/vault/pull/8795)]
-* metrics: Add `vault.identity.num_entities` metric [[GH-8816](https://github.com/hashicorp/vault/pull/8816)]
-* secrets/kv: Allow `delete-version-after` to be reset to 0 via the CLI [[GH-8635](https://github.com/hashicorp/vault/pull/8635)]
-* secrets/rabbitmq: Improve error handling and reporting [[GH-8619](https://github.com/hashicorp/vault/pull/8619)]
-* ui: Provide One Time Password during Operation Token generation process [[GH-8630](https://github.com/hashicorp/vault/pull/8630)]
+* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)]
+* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)]
+* replication (enterprise): update primary cluster address after DR failover
+* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21633](https://github.com/hashicorp/vault/pull/21633)]
+* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)]
+* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)]
+* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present
+* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required
+* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)]
+* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged.
[[GH-21739](https://github.com/hashicorp/vault/pull/21739)]
-
-BUG FIXES:
+
+## 1.12.8
+### June 21, 2023
+
+BREAKING CHANGES:
-
-* auth/okta: Fix MFA regression (introduced in [GH-8143](https://github.com/hashicorp/vault/pull/8143)) from 1.4.0 [[GH-8807](https://github.com/hashicorp/vault/pull/8807)]
-* auth/userpass: Fix upgrade value for `token_bound_cidrs` being ignored due to incorrect key provided [[GH-8826](https://github.com/hashicorp/vault/pull/8826/files)]
-* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
-* core: Fix an issue where users attempting to build Vault could receive Go module checksum errors [[GH-8770](https://github.com/hashicorp/vault/pull/8770)]
-* core: Fix blocked requests if a SIGHUP is issued while a long-running request holds the state lock. Also fixes a deadlock that can happen if `vault debug` with the config target is run during this time. [[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
-* core: Always rewrite the .vault-token file as part of a `vault login` to ensure permissions and ownership are set correctly [[GH-8867](https://github.com/hashicorp/vault/pull/8867)]
-* database/mongodb: Fix context deadline error that may result from retry attempts on failed commands [[GH-8863](https://github.com/hashicorp/vault/pull/8863)]
-* http: Fix superfluous call messages from the http package on logs caused by missing returns after `respondError` calls [[GH-8796](https://github.com/hashicorp/vault/pull/8796)]
-* namespace (enterprise): Fix namespace listing to return `key_info` when a scoping namespace is also provided.
-* seal/gcpkms: Fix panic that could occur if all seal parameters were provided via environment variables [[GH-8840](https://github.com/hashicorp/vault/pull/8840)]
-* storage/raft: Fix memory allocation and incorrect metadata tracking issues with snapshots [[GH-8793](https://github.com/hashicorp/vault/pull/8793)]
-* storage/raft: Fix panic that could occur if `disable_clustering` was set to true on Raft storage cluster [[GH-8784](https://github.com/hashicorp/vault/pull/8784)]
-* storage/raft: Handle errors returned from the API during snapshot operations [[GH-8861](https://github.com/hashicorp/vault/pull/8861)]
-* sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)]
-
-## 1.4.0 (April 7th, 2020)
+* secrets/pki: Maintaining a running count of certificates will be turned off by default. To re-enable keeping these metrics available on the tidy status endpoint, enable `maintain_stored_certificate_counts` on tidy-config; to also publish them to the metrics consumer, enable `publish_stored_certificate_count_metrics`. [[GH-18186](https://github.com/hashicorp/vault/pull/18186)]

CHANGES:

-* cli: The raft configuration command has been renamed to list-peers to avoid confusion.
+* core: Bump Go version to 1.19.10.

FEATURES:

-* **Kerberos Authentication**: Vault now supports Kerberos authentication using a SPNEGO token. Login can be performed using the Vault CLI, API, or agent.
-* **Kubernetes Service Discovery**: A new Kubernetes service discovery feature where, if configured, Vault will tag Vault pods with their current health status. For more, see [#8249](https://github.com/hashicorp/vault/pull/8249).
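For illustration, the Kubernetes service discovery feature above is opted into through the `service_registration` stanza. A minimal sketch follows; the `namespace` and `pod_name` values are assumptions here, and in a real deployment they are typically injected via the Kubernetes downward API rather than hardcoded.

```
# Sketch only: opt in to Kubernetes service discovery.
cat >> /etc/vault.d/vault.hcl <<'EOF'
service_registration "kubernetes" {
  namespace = "vault"
  pod_name  = "vault-0"
}
EOF
```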
-* **MongoDB Atlas Secrets**: Vault can now generate dynamic credentials for both MongoDB Atlas databases as well as the [Atlas programmatic interface](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/).
-* **OpenLDAP Secrets Engine**: We now support password management of existing OpenLDAP user entries. For more, see [#8360](https://github.com/hashicorp/vault/pull/8360/).
-* **Redshift Database Secrets Engine**: The database secrets engine now supports static and dynamic secrets for the Amazon Web Services (AWS) Redshift service.
-* **Service Registration Config**: A newly introduced `service_registration` configuration stanza that allows service registration to be configured separately from the storage backend. For more, see [#7887](https://github.com/hashicorp/vault/pull/7887/).
-* **Transform Secrets Engine (Enterprise)**: A new secrets engine that handles secure data transformations against provided input values.
-* **Integrated Storage**: Promoted out of beta and into general availability for both open-source and enterprise workloads.
+* **Automated License Utilization Reporting**: Added automated license utilization reporting, which sends minimal product-license [metering data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) to HashiCorp without requiring you to manually collect and report it.
+* core (enterprise): Add background worker for automatic reporting of billing information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)]

IMPROVEMENTS:

-* agent: add option to force the use of the auto-auth token, and ignore the Vault token in the request [[GH-8101](https://github.com/hashicorp/vault/pull/8101)]
-* api: Restore and fix DNS SRV Lookup [[GH-8520](https://github.com/hashicorp/vault/pull/8520)]
-* audit: HMAC http_raw_body in audit log; this ensures that large authenticated Prometheus metrics responses get replaced with short HMAC values [[GH-8130](https://github.com/hashicorp/vault/pull/8130)]
-* audit: Generate-root, generate-recovery-token, and generate-dr-operation-token requests and responses are now audited.
[[GH-8301](https://github.com/hashicorp/vault/pull/8301)] -* auth/aws: Reduce the number of simultaneous STS client credentials needed [[GH-8161](https://github.com/hashicorp/vault/pull/8161)] -* auth/azure: subscription ID, resource group, vm and vmss names are now stored in alias metadata [[GH-30](https://github.com/hashicorp/vault-plugin-auth-azure/pull/30)] -* auth/jwt: Additional OIDC callback parameters available for CLI logins [[GH-80](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/80) & [GH-86](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/86)] -* auth/jwt: Bound claims may be optionally configured using globs [[GH-89](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/89)] -* auth/jwt: Timeout during OIDC CLI login if process doesn't complete within 2 minutes [[GH-97](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/97)] -* auth/jwt: Add support for the `form_post` response mode [[GH-98](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/98)] -* auth/jwt: add optional client_nonce to authorization flow [[GH-104](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/104)] -* auth/okta: Upgrade okta sdk lib, which should improve handling of groups [[GH-8143](https://github.com/hashicorp/vault/pull/8143)] -* aws: Add support for v2 of the instance metadata service (see [issue 7924](https://github.com/hashicorp/vault/issues/7924) for all linked PRs) -* core: Separate out service discovery interface from storage interface to allow - new types of service discovery not coupled to storage [[GH-7887](https://github.com/hashicorp/vault/pull/7887)] -* core: Add support for telemetry option `metrics_prefix` [[GH-8340](https://github.com/hashicorp/vault/pull/8340)] -* core: Entropy Augmentation can now be used with AWS KMS and Vault Transit seals -* core: Allow tls_min_version to be set to TLS 1.3 [[GH-8305](https://github.com/hashicorp/vault/pull/8305)] -* cli: Incorrect TLS configuration will now correctly fail [[GH-8025](https://github.com/hashicorp/vault/pull/8025)] -* identity: Allow specifying a custom `client_id` for identity tokens [[GH-8165](https://github.com/hashicorp/vault/pull/8165)] -* metrics/prometheus: improve performance with high volume of metrics updates [[GH-8507](https://github.com/hashicorp/vault/pull/8507)] -* replication (enterprise): Fix race condition causing clusters with high throughput writes to sometimes - fail to enter streaming-wal mode -* replication (enterprise): Secondary clusters can now perform an extra gRPC call to all nodes in a primary - cluster in an attempt to resolve the active node's address -* replication (enterprise): The replication status API now outputs `last_performance_wal`, `last_dr_wal`, - and `connection_state` values -* replication (enterprise): DR secondary clusters can now be recovered by the `replication/dr/secondary/recover` - API -* replication (enterprise): We now allow for an alternate means to create a Disaster Recovery token, by using a batch - token that is created with an ACL that allows for access to one or more of the DR endpoints. 
-
-* secrets/database/mongodb: Switched internal MongoDB driver to mongo-driver [[GH-8140](https://github.com/hashicorp/vault/pull/8140)]
-* secrets/database/mongodb: Add support for x509 client authorization to MongoDB [[GH-8329](https://github.com/hashicorp/vault/pull/8329)]
-* secrets/database/oracle: Add support for static credential rotation [[GH-26](https://github.com/hashicorp/vault-plugin-database-oracle/pull/26)]
-* secrets/consul: Add support to specify TLS options per Consul backend [[GH-4800](https://github.com/hashicorp/vault/pull/4800)]
-* secrets/gcp: Allow specifying the TTL for a service key [[GH-54](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/54)]
-* secrets/gcp: Add support for rotating root keys [[GH-53](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/53)]
-* secrets/gcp: Handle version 3 policies for Resource Manager IAM requests [[GH-77](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/77)]
-* secrets/nomad: Add support to specify TLS options per Nomad backend [[GH-8083](https://github.com/hashicorp/vault/pull/8083)]
-* secrets/ssh: Allowed users can now be templated with identity information [[GH-7548](https://github.com/hashicorp/vault/pull/7548)]
-* secrets/transit: Add RSA3072 key support [[GH-8151](https://github.com/hashicorp/vault/pull/8151)] (see the sketch after this list)
-* storage/consul: Vault now returns a more descriptive error message when only a client cert or a client key has been provided [[GH-8084](https://github.com/hashicorp/vault/pull/8084)]
-* storage/raft: Nodes in the raft cluster can all be given possible leader addresses for them to continuously try to join one of them, further automating the join process [[GH-7856](https://github.com/hashicorp/vault/pull/7856)]
-* storage/raft: Fix a potential deadlock that could occur on leadership transition [[GH-8547](https://github.com/hashicorp/vault/pull/8547)]
-* storage/raft: Refresh TLS keyring on snapshot restore [[GH-8546](https://github.com/hashicorp/vault/pull/8546)]
-* storage/etcd: Bumped etcd client API SDK [[GH-7931](https://github.com/hashicorp/vault/pull/7931) & [GH-4961](https://github.com/hashicorp/vault/pull/4961) & [GH-4349](https://github.com/hashicorp/vault/pull/4349) & [GH-7582](https://github.com/hashicorp/vault/pull/7582)]
-* ui: Make Transit Key actions more prominent [[GH-8304](https://github.com/hashicorp/vault/pull/8304)]
-* ui: Add Core Usage Metrics [[GH-8347](https://github.com/hashicorp/vault/pull/8347)]
-* ui: Add refresh Namespace list on the Namespace dropdown, and redesign of Namespace dropdown menu [[GH-8442](https://github.com/hashicorp/vault/pull/8442)]
-* ui: Update transit actions to codeblocks & automatically encode plaintext unless indicated [[GH-8462](https://github.com/hashicorp/vault/pull/8462)]
-* ui: Display the results of transit key actions in a modal window [[GH-8575](https://github.com/hashicorp/vault/pull/8575)]
-* ui: Transit key version styling updates & ability to copy key from dropdown [[GH-8480](https://github.com/hashicorp/vault/pull/8480)]
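A minimal sketch of the RSA3072 transit support referenced above; the mount path and key name are arbitrary placeholders, and `rsa-3072` is the key type this entry adds:

```
# Sketch only: create a 3072-bit RSA transit key and sign a payload with it.
vault secrets enable transit
vault write -f transit/keys/app-signing type=rsa-3072
vault write transit/sign/app-signing input="$(printf 'hello world' | base64)"
```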
-
-BUG FIXES:
-
-* agent: Fix issue where TLS options are ignored for agent template feature [[GH-7889](https://github.com/hashicorp/vault/pull/7889)]
-* auth/jwt: Use lower case role names for `default_role` to match the `role` case convention [[GH-100](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/100)]
-* auth/ldap: Fix a bug where the UPNDOMAIN parameter was wrongly used to look up the group membership of the given user [[GH-6325](https://github.com/hashicorp/vault/pull/8333)]
-* cli: Support autocompletion for nested mounts [[GH-8303](https://github.com/hashicorp/vault/pull/8303)]
-* cli: Fix CLI namespace autocompletion [[GH-8315](https://github.com/hashicorp/vault/pull/8315)]
-* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
-* metrics/stackdriver: Prevent the stackdriver metrics library from creating unnecessary stackdriver descriptors [[GH-8073](https://github.com/hashicorp/vault/pull/8073)]
-* replication (enterprise): Fix issue causing cubbyholes in namespaces on performance secondaries to not work.
-* replication (enterprise): Unmounting a dynamic secrets backend could sometimes lead to replication errors. Change the order of operations to prevent that.
-* seal (enterprise): Fix seal migration when transactional seal wrap backend is in use.
-* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
-* secrets/database/mysql: Ensure default static credential rotation statements are used [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
-* secrets/database/mysql: Fix inconsistent query parameter names: {{name}} or {{username}} for different queries. Either is now allowed for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
-* secrets/database/postgres: Fix inconsistent query parameter names: {{name}} or {{username}} for different queries. Either is now allowed for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
-* secrets/pki: Support FQDNs in DNS Name [[GH-8288](https://github.com/hashicorp/vault/pull/8288)]
-* storage/raft: Allow seal migration to be performed on Vault clusters using raft storage [[GH-8103](https://github.com/hashicorp/vault/pull/8103)]
-* telemetry: Prometheus requests on standby nodes will now return an error instead of forwarding the request to the active node [[GH-8280](https://github.com/hashicorp/vault/pull/8280)]
-* ui: Fix broken popup menu on the transit secrets list page [[GH-8348](https://github.com/hashicorp/vault/pull/8348)]
-* ui: Update headless Chrome flag to fix `yarn run test:oss` [[GH-8035](https://github.com/hashicorp/vault/pull/8035)]
-* ui: Update CLI to accept empty strings as param value to reset previously-set values
-* ui: Fix bug where error states don't clear when moving between action tabs on Transit [[GH-8354](https://github.com/hashicorp/vault/pull/8354)]
-
-## 1.3.10
-### August 27th, 2020
-
-NOTE:
-
-All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
-
-BUG FIXES:
-
-* auth/aws: Made header handling for IAM authentication more robust
-
-## 1.3.9.1
-### August 21st, 2020
-### Enterprise Only
-
-NOTE:
-
-Includes correct license in the HSM binary.
-
-## 1.3.9
-### August 20th, 2020
-
-NOTE:
-
-OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
-
-KNOWN ISSUES:
-
-* AWS IAM logins may return an error depending on the headers sent with the request.
- For more details and a workaround, see the [1.3.9 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.9) -* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise - customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. - -## 1.3.8 -### August 20th, 2020 - -SECURITY: - -* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) -* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) - -KNOWN ISSUES: - -* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.3.8 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.8) - -## 1.3.7 -### July 2nd, 2020 - -BUG FIXES: - -* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values -* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9363](https://github.com/hashicorp/vault/pull/9363)] - -## 1.3.6 (May 21st, 2020) +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] + +BUG FIXES: + +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] + +## 1.12.7 +### June 08, 2023 SECURITY: -* core: proxy environment variables are now redacted before being logged, in case the URLs include a username:password. 
This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)] - -BUG FIXES: -* auth/aws: Fix token renewal issues caused by the metadata changes in 1.3.5 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)] -* replication: Fix mount filter bug that allowed replication filters to hide local mounts on a performance secondary +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] -## 1.3.5 (April 28th, 2020) - -CHANGES: +CHANGES: -* auth/aws: The default set of metadata fields added in 1.3.2 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] +* core: Bump Go version to 1.19.9. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] IMPROVEMENTS: -* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] - -## 1.3.4 (March 19th, 2020) - -SECURITY: - -* A vulnerability was identified in Vault and Vault Enterprise such that, under certain circumstances, an Entity's Group membership may inadvertently include Groups the Entity no longer has permissions to. This vulnerability, CVE-2020-10660, affects Vault and Vault Enterprise versions 0.9.0 and newer, and is fixed in 1.3.4. [[GH-8606](https://github.com/hashicorp/vault/pull/8606)] -* A vulnerability was identified in Vault Enterprise such that, under certain circumstances, existing nested-path policies may give access to Namespaces created after-the-fact. This vulnerability, CVE-2020-10661, affects Vault Enterprise versions 0.11 and newer, and is fixed in 1.3.4. 
-
-## 1.3.3 (March 5th, 2020)
-
-BUG FIXES:
-
-* approle: Fix excessive locking during tidy, which could potentially block new approle logins for long enough to cause an outage [[GH-8418](https://github.com/hashicorp/vault/pull/8418)]
-* cli: Fix issue where Raft snapshots from standby nodes created an empty backup file [[GH-8097](https://github.com/hashicorp/vault/pull/8097)]
-* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
-* kmip: role read now returns tls_client_ttl
-* kmip: fix panic when templateattr not provided in rekey request
-* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
-* storage/mysql: Fix potential crash when using MySQL as coordination for high availability [[GH-8300](https://github.com/hashicorp/vault/pull/8300)]
-* storage/raft: Fix potential crash when using Raft as coordination for high availability [[GH-8356](https://github.com/hashicorp/vault/pull/8356)]
-* ui: Fix missing License menu item [[GH-8230](https://github.com/hashicorp/vault/pull/8230)]
-* ui: Fix bug where the default auth method on login defaulted to an auth method that is listing-visibility=unauth instead of “other” [[GH-8218](https://github.com/hashicorp/vault/pull/8218)]
-* ui: Fix bug where KMIP details were not shown in the UI Wizard [[GH-8255](https://github.com/hashicorp/vault/pull/8255)]
-* ui: Show error messages on Auth Configuration page when you hit permission errors [[GH-8500](https://github.com/hashicorp/vault/pull/8500)]
-* ui: Remove duplicate form inputs for the GitHub config [[GH-8519](https://github.com/hashicorp/vault/pull/8519)]
-* ui: Correct HMAC capitalization [[GH-8528](https://github.com/hashicorp/vault/pull/8528)]
-* ui: Fix danger message in DR [[GH-8555](https://github.com/hashicorp/vault/pull/8555)]
-* ui: Fix certificate field for LDAP config [[GH-8573](https://github.com/hashicorp/vault/pull/8573)]
-
-## 1.3.2 (January 22nd, 2020)
-
-SECURITY:
-
-  * When deleting a namespace on Vault Enterprise, in certain circumstances, the deletion process will fail to revoke dynamic secrets for a mount in that namespace. This will leave any dynamic secrets in remote systems alive and will fail to clean them up. This vulnerability, CVE-2020-7220, affects Vault Enterprise 0.11.0 and newer.
-
-IMPROVEMENTS:
-
-  * auth/aws: Add aws metadata to identity alias [[GH-7985](https://github.com/hashicorp/vault/pull/7985)]
-  * auth/kubernetes: Allow both names and namespaces to be set to "*" [[GH-78](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/78)]
+* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)]
+* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when `VAULT_PPROF_WRITE_TO_FILE=true` is set on the server.
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* core: include namespace path in granting_policies block of audit log +* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] BUG FIXES: -* auth/azure: Fix Azure compute client to use correct base URL [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] -* auth/ldap: Fix renewal of tokens without configured policies that are - generated by an LDAP login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] -* auth/okta: Fix renewal of tokens without configured policies that are - generated by an Okta login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] -* core: Fix seal migration error when attempting to migrate from auto unseal to shamir [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] -* core: Fix seal migration config issue when migrating from auto unseal to auto unseal [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] -* plugin: Fix issue where a plugin unwrap request potentially used an expired token [[GH-8058](https://github.com/hashicorp/vault/pull/8058)] -* replication: Fix issue where a forwarded request from a performance/standby node could run into - a timeout -* secrets/database: Fix issue where a manual static role rotation could potentially panic [[GH-8098](https://github.com/hashicorp/vault/pull/8098)] -* secrets/database: Fix issue where a manual root credential rotation request is not forwarded - to the primary node [[GH-8125](https://github.com/hashicorp/vault/pull/8125)] -* secrets/database: Fix issue where a manual static role rotation request is not forwarded - to the primary node [[GH-8126](https://github.com/hashicorp/vault/pull/8126)] -* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [[GH-8040](https://github.com/hashicorp/vault/pull/8040)] -* ui: Fix deleting namespaces [[GH-8132](https://github.com/hashicorp/vault/pull/8132)] -* ui: Fix Error handler on kv-secret edit and kv-secret view pages [[GH-8133](https://github.com/hashicorp/vault/pull/8133)] -* ui: Fix OIDC callback to check storage [[GH-7929](https://github.com/hashicorp/vault/pull/7929)]. -* ui: Change `.box-radio` height to min-height to prevent overflow issues [[GH-8065](https://github.com/hashicorp/vault/pull/8065)] - -## 1.3.1 (December 18th, 2019) - -IMPROVEMENTS: - -* agent: Add ability to set `exit-after-auth` via the CLI [[GH-7920](https://github.com/hashicorp/vault/pull/7920)] -* auth/ldap: Add a `request_timeout` configuration option to prevent connection - requests from hanging [[GH-7909](https://github.com/hashicorp/vault/pull/7909)] -* auth/kubernetes: Add audience to tokenreview API request for Kube deployments where issuer - is not Kube. 
[[GH-74](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/74)]
-* secrets/ad: Add a `request_timeout` configuration option to prevent connection requests from hanging [[GH-59](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/59)]
-* storage/postgresql: Add support for setting `connection_url` from environment variable `VAULT_PG_CONNECTION_URL` [[GH-7937](https://github.com/hashicorp/vault/pull/7937)]
-* telemetry: Add `enable_hostname_label` option to telemetry stanza [[GH-7902](https://github.com/hashicorp/vault/pull/7902)]
-* telemetry: Add accept header check for prometheus mime type [[GH-7958](https://github.com/hashicorp/vault/pull/7958)]
-
-BUG FIXES:
+* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)]
+* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)]
+* cli: CLI should take days as a unit of time for ttl-like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)]
+* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)]
+* core (enterprise): Fix log shipper buffer size overflow issue for 32-bit architectures.
+* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero.
+* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace
+* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)]
+* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary.
+* replication (enterprise): fix bug where secondary grpc connections would time out when connecting to a primary host that no longer exists.
+* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation
+* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions.
+secrets/transit: Fix bug related to shorter dedicated HMAC key sizing.
+sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import.
[[GH-20864](https://github.com/hashicorp/vault/pull/20864)]
+* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)]
-
-* agent: Fix issue where Agent exits before all templates are rendered when using `exit_after_auth` [[GH-7899](https://github.com/hashicorp/vault/pull/7899)]
-* auth/aws: Fixes region-related issues when using a custom `sts_endpoint` by adding a `sts_region` parameter [[GH-7922](https://github.com/hashicorp/vault/pull/7922)]
-* auth/token: Fix panic when getting batch tokens on a performance standby from a role that does not exist [[GH-8027](https://github.com/hashicorp/vault/pull/8027)]
-* core: Improve warning message for lease TTLs [[GH-7901](https://github.com/hashicorp/vault/pull/7901)]
-* identity: Fix identity token panic during invalidation [[GH-8043](https://github.com/hashicorp/vault/pull/8043)]
-* plugin: Fix a panic that could occur if a mount/auth entry was unable to mount the plugin backend and a request that required the system view to be retrieved was made [[GH-7991](https://github.com/hashicorp/vault/pull/7991)]
-* replication: Add `generate-public-key` endpoint to list of allowed endpoints for existing DR secondaries
-* secrets/gcp: Fix panic if bindings aren't provided in roleset create/update. [[GH-56](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/56)]
-* secrets/pki: Prevent generating certificate on performance standby when storing [[GH-7904](https://github.com/hashicorp/vault/pull/7904)]
-* secrets/transit: Prevent restoring keys to new names that are sub paths [[GH-7998](https://github.com/hashicorp/vault/pull/7998)]
-* storage/s3: Fix a bug in configurable S3 paths that was preventing use of S3 as a source during `operator migrate` operations [[GH-7966](https://github.com/hashicorp/vault/pull/7966)]
-* ui: Ensure secrets with a period in their key can be viewed and copied [[GH-7926](https://github.com/hashicorp/vault/pull/7926)]
-* ui: Fix status menu after demotion [[GH-7997](https://github.com/hashicorp/vault/pull/7997)]
-* ui: Fix select dropdowns in Safari when running Mojave [[GH-8023](https://github.com/hashicorp/vault/pull/8023)]
-
-## 1.3 (November 14th, 2019)
+## 1.12.6
+### April 26, 2023

CHANGES:

-  * Secondary cluster activation: There has been a change to the way that activating performance and DR secondary clusters works when using public keys for encryption of the parameters rather than a wrapping token. This flow was experimental and never documented. It is now officially supported and documented but is not backwards compatible with older Vault releases.
-  * Cluster cipher suites: On its cluster port, Vault will no longer advertise the full TLS 1.2 cipher suite list by default. Although this port is only used for Vault-to-Vault communication and would always pick a strong cipher, it could cause false flags on port scanners and other security utilities that assumed insecure ciphers were being used. The previous behavior can be achieved by setting the value of the (undocumented) `cluster_cipher_suites` config flag to `tls12`.
+* core: Bump Go version to 1.19.8.
-  * API/Agent Renewal behavior: The API now allows multiple options for how it deals with renewals. The legacy behavior in the Agent/API is for the renewer (now called the lifetime watcher) to exit on a renew error, leading to a reauthentication.
The new default behavior is for the lifetime watcher to ignore 5XX errors and simply retry as scheduled, using the existing lease duration. It is also possible, within custom code, to disable renewals entirely, which allows the lifetime watcher to simply return when it believes it is time for your code to renew or reauthenticate.
-
-FEATURES:
-
-  * **Vault Debug**: A new top-level subcommand, `debug`, is added that allows operators to retrieve debugging information related to a particular Vault node. Operators can use this simple workflow to capture triaging information, which can then be consumed programmatically or by support and engineering teams. It has the ability to probe for config, host, metrics, pprof, server status, and replication status.
-  * **Recovery Mode**: Vault server can be brought up in recovery mode to resolve outages caused by the data store being in a bad state. This is a privileged mode that allows `sys/raw` API calls to perform surgical corrections to the data store. Bad storage state can be caused by bugs. However, this is usually observed when known (and fixed) bugs are hit by older versions of Vault.
-  * **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from an external source for critical security parameters. Currently an HSM that supports PKCS#11 is the only supported source.
-  * **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets engine, users or applications can check out a service account for use, and its password will be rotated when it's checked back in.
-  * **Vault Agent Template**: Vault Agent now supports rendering templates containing Vault secrets to disk, similar to Consul Template [[GH-7652](https://github.com/hashicorp/vault/pull/7652)]
-  * **Transit Key Type Support**: Signing and verification are now supported with the P-384 (secp384r1) and P-521 (secp521r1) ECDSA curves [[GH-7551](https://github.com/hashicorp/vault/pull/7551)], and encryption and decryption are now supported via AES128-GCM96 [[GH-7555](https://github.com/hashicorp/vault/pull/7555)]
-  * **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to require a specific header before allowing requests [[GH-7627](https://github.com/hashicorp/vault/pull/7627)]
-  * **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can now be rotated, to ensure that only Vault knows the credentials it is using [[GH-7131](https://github.com/hashicorp/vault/pull/7131)]
-  * **New UI Features**: The UI now supports managing users and groups for the Userpass, Cert, Okta, and Radius auth methods.
-  * **Shamir with Stored Master Key**: The on disk format for Shamir seals has changed, allowing for a secondary cluster using Shamir downstream from a primary cluster using Auto Unseal. [[GH-7694](https://github.com/hashicorp/vault/pull/7694)]
-  * **Stackdriver Metrics Sink**: Vault can now send metrics to [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration documentation](https://www.vaultproject.io/docs/config/index.html) for details, and the example stanza sketched below. [[GH-6957](https://github.com/hashicorp/vault/pull/6957)]
-  * **Filtered Paths Replication (Enterprise)**: Based on the predecessor Filtered Mount Replication, Filtered Paths Replication now allows filtering of namespaces in addition to mounts. With this feature, Filtered Mount Replication should be considered deprecated.
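A minimal sketch of the Stackdriver sink mentioned above; the project ID is a placeholder, and the telemetry configuration docs list the full set of `stackdriver_*` options:

```
# Sketch only: minimal telemetry stanza for the Stackdriver sink.
cat >> /etc/vault.d/vault.hcl <<'EOF'
telemetry {
  stackdriver_project_id = "my-gcp-project"
}
EOF
```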
- * **Token Renewal via Accessor**: Tokens can now be renewed via the accessor value through - the new `auth/token/renew-accessor` endpoint if the caller's token has - permission to access that endpoint. - * **Improved Integrated Storage (Beta)**: Improved raft write performance, added support for - non-voter nodes, along with UI support for: using raft storage, joining a raft cluster, - and downloading and restoring a snapshot. +* core: Bump Go version to 1.19.8. IMPROVEMENTS: - * agent: Add ability to set the TLS SNI name used by Agent [[GH-7519](https://github.com/hashicorp/vault/pull/7519)] - * agent & api: Change default renewer behavior to ignore 5XX errors [[GH-7733](https://github.com/hashicorp/vault/pull/7733)] - * auth/jwt: The redirect callback host may now be specified for CLI logins - [[GH-71](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/71)] - * auth/jwt: Bound claims may now contain boolean values [[GH-73](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/73)] - * auth/jwt: CLI logins can now open the browser when running in WSL [[GH-77](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/77)] - * core: Exit ScanView if context has been cancelled [[GH-7419](https://github.com/hashicorp/vault/pull/7419)] - * core: re-encrypt barrier and recovery keys if the unseal key is updated - [[GH-7493](https://github.com/hashicorp/vault/pull/7493)] - * core: Don't advertise the full set of TLS 1.2 cipher suites on the cluster - port, even though only strong ciphers were used [[GH-7487](https://github.com/hashicorp/vault/pull/7487)] - * core (enterprise): Add background seal re-wrap - * core/metrics: Add config parameter to allow unauthenticated sys/metrics - access. [[GH-7550](https://github.com/hashicorp/vault/pull/7550)] - * metrics: Upgrade DataDog library to improve performance [[GH-7794](https://github.com/hashicorp/vault/pull/7794)] - * replication (enterprise): Write-Ahead-Log entries will not duplicate the - data belonging to the encompassing physical entries of the transaction, - thereby improving the performance and storage capacity. - * replication (enterprise): Added more replication metrics - * replication (enterprise): Reindex process now compares subpages for a more - accurate indexing process. - * replication (enterprise): Reindex API now accepts a new `skip_flush` - parameter indicating all the changes should not be flushed while the tree is - locked. 
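A minimal sketch of the `skip_flush` parameter described in the entry above, assuming the Enterprise reindex endpoint at `sys/replication/reindex` (the endpoint path is an assumption based on the documented replication API):

```
# Sketch only (Vault Enterprise): start a reindex without flushing changes
# while the merkle tree is locked.
vault write -f sys/replication/reindex skip_flush=true
```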
- * secrets/aws: The root config can now be read [[GH-7245](https://github.com/hashicorp/vault/pull/7245)] - * secrets/aws: Role paths may now contain the '@' character [[GH-7553](https://github.com/hashicorp/vault/pull/7553)] - * secrets/database/cassandra: Add ability to skip verification of connection - [[GH-7614](https://github.com/hashicorp/vault/pull/7614)] - * secrets/gcp: Fix panic during rollback if the roleset has been deleted - [[GH-52](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/52)] - * storage/azure: Add config parameter to Azure storage backend to allow - specifying the ARM endpoint [[GH-7567](https://github.com/hashicorp/vault/pull/7567)] - * storage/cassandra: Improve storage efficiency by eliminating unnecessary - copies of value data [[GH-7199](https://github.com/hashicorp/vault/pull/7199)] - * storage/raft: Improve raft write performance by utilizing FSM Batching - [[GH-7527](https://github.com/hashicorp/vault/pull/7527)] - * storage/raft: Add support for non-voter nodes [[GH-7634](https://github.com/hashicorp/vault/pull/7634)] - * sys: Add a new `sys/host-info` endpoint for querying information about - the host [[GH-7330](https://github.com/hashicorp/vault/pull/7330)] - * sys: Add a new set of endpoints under `sys/pprof/` that allows profiling - information to be extracted [[GH-7473](https://github.com/hashicorp/vault/pull/7473)] - * sys: Add endpoint that counts the total number of active identity entities - [[GH-7541](https://github.com/hashicorp/vault/pull/7541)] - * sys: `sys/seal-status` now has a `storage_type` field denoting what type of - storage - the cluster is configured to use - * sys: Add a new `sys/internal/counters/tokens` endpoint that counts the - total number of active service token accessors in the shared token storage. - [[GH-7541](https://github.com/hashicorp/vault/pull/7541)] - * sys/config: Add a new endpoint under `sys/config/state/sanitized` that - returns the configuration state of the server. It excludes config values - from `storage`, `ha_storage`, and `seal` stanzas and some values - from `telemetry` due to potential sensitive entries in those fields. - * ui: when using raft storage, you can now join a raft cluster, download a - snapshot, and restore a snapshot from the UI [[GH-7410](https://github.com/hashicorp/vault/pull/7410)] - * ui: clarify when secret version is deleted in the secret version history - dropdown [[GH-7714](https://github.com/hashicorp/vault/pull/7714)] - -BUG FIXES: - - * agent: Fix a data race on the token value for inmemsink [[GH-7707](https://github.com/hashicorp/vault/pull/7707)] - * api: Fix Go API using lease revocation via URL instead of body [[GH-7777](https://github.com/hashicorp/vault/pull/7777)] - * api: Allow setting a function to control retry behavior [[GH-7331](https://github.com/hashicorp/vault/pull/7331)] - * auth/gcp: Fix a bug where region information in instance group names could - cause an authorization attempt to fail [[GH-74](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/74)] - * cli: Fix a bug where a token of an unknown format (e.g. in ~/.vault-token) - could cause confusing error messages during `vault login` [[GH-7508](https://github.com/hashicorp/vault/pull/7508)] - * cli: Fix a bug where the `namespace list` command with JSON formatting - always returned an empty object [[GH-7705](https://github.com/hashicorp/vault/pull/7705)] - * cli: Command timeouts are now always specified solely by the - `VAULT_CLIENT_TIMEOUT` value.
[[GH-7469](https://github.com/hashicorp/vault/pull/7469)] - * core: Don't allow registering a non-root zero TTL token lease. This is purely - defense in depth as the lease would be revoked immediately anyway, but - there's no real reason to allow registration. [[GH-7524](https://github.com/hashicorp/vault/pull/7524)] - * core: Correctly revoke the token that's present in the response auth from an - auth/token/ request if there's partial failure during the process. [[GH-7835](https://github.com/hashicorp/vault/pull/7835)] - * identity (enterprise): Fixed identity case sensitive loading in secondary - cluster [[GH-7327](https://github.com/hashicorp/vault/pull/7327)] - * identity: Ensure only replication primary stores the identity case sensitivity state [[GH-7820](https://github.com/hashicorp/vault/pull/7820)] - * raft: Fixed VAULT_CLUSTER_ADDR env being ignored at startup [[GH-7619](https://github.com/hashicorp/vault/pull/7619)] - * secrets/pki: Don't allow duplicate SAN names in issued certs [[GH-7605](https://github.com/hashicorp/vault/pull/7605)] - * sys/health: Pay attention to the values provided for `standbyok` and - `perfstandbyok` rather than simply using their presence as a key to flip on - that behavior [[GH-7323](https://github.com/hashicorp/vault/pull/7323)] - * ui: using the `wrapped_token` query param will work with `redirect_to` and - will automatically log in as intended [[GH-7398](https://github.com/hashicorp/vault/pull/7398)] - * ui: fix an error when initializing from the UI using PGP keys [[GH-7542](https://github.com/hashicorp/vault/pull/7542)] - * ui: show all active kv v2 secret versions even when `delete_version_after` is configured [[GH-7685](https://github.com/hashicorp/vault/pull/7685)] - * ui: Ensure that items in the top navigation link to pages that users have access to [[GH-7590](https://github.com/hashicorp/vault/pull/7590)] - -## 1.2.7 -### August 27th, 2020 - -NOTE: - -All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users. - -BUG FIXES: - -* auth/aws: Made header handling for IAM authentication more robust - -## 1.2.6.1 -### August 21st, 2020 -### Enterprise Only - -NOTE: - -Includes correct license in the HSM binary. - -## 1.2.6 -### August 20th, 2020 - -NOTE: - -OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. - -KNOWN ISSUES: - -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.2.6 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.6) -* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise - customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. - -## 1.2.5 -### August 20th, 2020 - -SECURITY: - - * When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) - * When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed.
This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) - -KNOWN ISSUES: - -* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.2.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.5) - -BUG FIXES: -* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken for seal-wrapped values - -## 1.2.4 (November 7th, 2019) +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] +* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing the issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and an "invalid value" error being returned from the API.
+* replication (enterprise): Fix replication status for Primary clusters showing their primary cluster's information (in the case of DR) in the secondaries field when the known_secondaries field is nil +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] + +## 1.12.5 +### March 29, 2023 SECURITY: - * In a non-root namespace, revocation of a token scoped to a non-root - namespace did not trigger the expected revocation of dynamic secret leases - associated with that token. As a result, dynamic secret leases in non-root - namespaces may outlive the token that created them. This vulnerability, - CVE-2019-18616, affects Vault Enterprise 0.11.0 and newer. - * Disaster Recovery secondary clusters did not delete already-replicated data - after a mount filter has been created on an upstream Performance secondary - cluster. As a result, encrypted secrets may remain replicated on a Disaster - Recovery secondary cluster after application of a mount filter excluding - those secrets from replication. This vulnerability, CVE-2019-18617, affects - Vault Enterprise 0.8 and newer. - * Update version of Go to 1.12.12 to fix Go bug golang.org/issue/34960 which - corresponds to CVE-2019-17596. - -CHANGES: - - * auth/aws: If a custom `sts_endpoint` is configured, Vault Agent and the CLI - should provide the corresponding region via the `region` parameter (which - already existed as a CLI parameter, and has now been added to Agent). The - automatic region detection added to the CLI and Agent in 1.2 has been removed. +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks.
An attacker with access to, and the ability to observe, a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] IMPROVEMENTS: - * cli: Ignore existing token during CLI login [[GH-7508](https://github.com/hashicorp/vault/pull/7508)] - * core: Log proxy settings from environment on startup [[GH-7528](https://github.com/hashicorp/vault/pull/7528)] - * core: Cache whether we've been initialized to reduce load on storage [[GH-7549](https://github.com/hashicorp/vault/pull/7549)] +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] BUG FIXES: - * agent: Fix handling of gzipped responses [[GH-7470](https://github.com/hashicorp/vault/pull/7470)] - * cli: Fix panic when pgp keys list is empty [[GH-7546](https://github.com/hashicorp/vault/pull/7546)] - * cli: Command timeouts are now always specified solely by the - `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)] - * core: add hook for initializing seals for migration [[GH-7666](https://github.com/hashicorp/vault/pull/7666)] - * core (enterprise): Migrating from one auto unseal method to another never - worked on enterprise, now it does. - * identity: Add required field `response_types_supported` to identity token - `.well-known/openid-configuration` response [[GH-7533](https://github.com/hashicorp/vault/pull/7533)] - * identity: Fixed nil pointer panic when merging entities [[GH-7712](https://github.com/hashicorp/vault/pull/7712)] - * replication (Enterprise): Fix issue causing performance standby nodes - disconnecting when under high loads. - * secrets/azure: Fix panic that could occur if client retries timeout [[GH-7793](https://github.com/hashicorp/vault/pull/7793)] - * secrets/database: Fix bug in combined DB secrets engine that can result in - writes to static-roles endpoints timing out [[GH-7518](https://github.com/hashicorp/vault/pull/7518)] - * secrets/pki: Improve tidy to continue when value is nil [[GH-7589](https://github.com/hashicorp/vault/pull/7589)] - * ui (Enterprise): Allow kv v2 secrets that are gated by Control Groups to be - viewed in the UI [[GH-7504](https://github.com/hashicorp/vault/pull/7504)] - -## 1.2.3 (September 12, 2019) - -FEATURES: - -* **Oracle Cloud (OCI) Integration**: Vault now supports using Oracle Cloud for - storage, auto unseal, and authentication.
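For context on the Shamir cache-timing issue noted above (CVE-2023-25000): the class of weakness comes from table-driven GF(2^8) arithmetic whose memory accesses depend on secret values. Below is a minimal, illustrative Go sketch of a constant-time multiply, assuming the AES reduction polynomial; it is not Vault's actual implementation.

```go
package main

import "fmt"

// gfMul multiplies a and b in GF(2^8) using only bitwise operations, so the
// memory access pattern never depends on the secret operands. Assumes the
// AES reduction polynomial x^8 + x^4 + x^3 + x + 1 (0x1B); illustrative only.
func gfMul(a, b byte) byte {
	var p byte
	for i := 0; i < 8; i++ {
		p ^= a & -(b & 1)             // conditionally XOR in a, branch-free
		carry := -(a >> 7)            // 0xFF if a's high bit is set, else 0x00
		a = (a << 1) ^ (carry & 0x1B) // shift and reduce by the polynomial
		b >>= 1
	}
	return p
}

func main() {
	fmt.Printf("%#x\n", gfMul(0x53, 0xCA)) // 0x1: 0x53 and 0xCA are inverses in GF(2^8)
}
```

Every iteration performs the same operations regardless of the operands, which is the property a cache-timing observer cannot exploit, unlike a secret-indexed log/exp table lookup.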
- -IMPROVEMENTS: - - * auth/jwt: Groups claim matching now treats a string response as a single - element list [[GH-63](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/63)] - * auth/kubernetes: enable better support for projected tokens API by allowing - the user to specify the issuer [[GH-65](https://github.com/hashicorp/vault/pull/65)] - * auth/pcf: The PCF auth plugin was renamed to the CF auth plugin, maintaining - full backwards compatibility [[GH-7346](https://github.com/hashicorp/vault/pull/7346)] - * replication: Premium packages now come with unlimited performance standby - nodes +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we receive a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. +* kmip (enterprise): Fix a problem forwarding some requests to the active node. +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19641](https://github.com/hashicorp/vault/pull/19641)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (e.g.
ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] -BUG FIXES: +## 1.12.4 +### March 01, 2023 - * agent: Allow batch tokens and other non-renewable tokens to be used for - agent operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)] - * auth/jwt: Fix an error where newer (v1.2) token_* configuration parameters - were not being applied to tokens generated using the OIDC login flow - [[GH-67](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/67)] - * raft: Fix an incorrect JSON tag on `leader_ca_cert` in the join request [[GH-7393](https://github.com/hashicorp/vault/pull/7393)] - * seal/transit: Allow using Vault Agent for transit seal operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)] - * storage/couchdb: Fix a file descriptor leak [[GH-7345](https://github.com/hashicorp/vault/pull/7345)] - * ui: Fix a bug where the status menu would disappear when trying to revoke a - token [[GH-7337](https://github.com/hashicorp/vault/pull/7337)] - * ui: Fix a regression that prevented input of custom items in search-select - [[GH-7338](https://github.com/hashicorp/vault/pull/7338)] - * ui: Fix an issue with the namespace picker being unable to render nested - namespaces named with numbers and sorting of namespaces in the picker - [[GH-7333](https://github.com/hashicorp/vault/pull/7333)] - -## 1.2.2 (August 15, 2019) +SECURITY: +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] CHANGES: - * auth/pcf: The signature format has been updated to use the standard Base64 - encoding instead of the URL-safe variant. Signatures created using the - previous format will continue to be accepted [PCF-27] - * core: The http response code returned when an identity token key is not found - has been changed from 400 to 404 +* core: Bump Go version to 1.19.6. IMPROVEMENTS: - * identity: Remove 512 entity limit for groups [[GH-7317](https://github.com/hashicorp/vault/pull/7317)] - -BUG FIXES: - - * auth/approle: Fix an error where an empty `token_type` string was not being - correctly handled as `TokenTypeDefault` [[GH-7273](https://github.com/hashicorp/vault/pull/7273)] - * auth/radius: Fix panic when logging in [[GH-7286](https://github.com/hashicorp/vault/pull/7286)] - * ui: the string-list widget will now honor multiline input [[GH-7254](https://github.com/hashicorp/vault/pull/7254)] - * ui: various visual bugs in the KV interface were addressed [[GH-7307](https://github.com/hashicorp/vault/pull/7307)] - * ui: fixed incorrect URL to access help in LDAP auth [[GH-7299](https://github.com/hashicorp/vault/pull/7299)] - -## 1.2.1 (August 6th, 2019) +* secrets/database: Adds error message requiring password on root credential rotation.
[[GH-19103](https://github.com/hashicorp/vault/pull/19103)] +* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] BUG FIXES: - * agent: Fix a panic on creds pulling in some error conditions in `aws` and - `alicloud` auth methods [[GH-7238](https://github.com/hashicorp/vault/pull/7238)] - * auth/approle: Fix error reading role-id on a role created pre-1.2 [[GH-7231](https://github.com/hashicorp/vault/pull/7231)] - * auth/token: Fix sudo check in non-root namespaces on create [[GH-7224](https://github.com/hashicorp/vault/pull/7224)] - * core: Fix health checks with perfstandbyok=true returning the wrong status - code [[GH-7240](https://github.com/hashicorp/vault/pull/7240)] - * ui: The web CLI will now parse input as a shell string, with special - characters escaped [[GH-7206](https://github.com/hashicorp/vault/pull/7206)] - * ui: The UI will now redirect to a page after authentication [[GH-7088](https://github.com/hashicorp/vault/pull/7088)] - * ui (Enterprise): The list of namespaces is now cleared when logging - out [[GH-7186](https://github.com/hashicorp/vault/pull/7186)] +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if the provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* replication (enterprise): Fix bug where reloading an external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18207](https://github.com/hashicorp/vault/pull/18207)] +* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] +* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] -## 1.2.0 (July 30th, 2019) +## 1.12.3 +### February 6, 2023 CHANGES: - * Token store roles use new, common token fields for the values - that overlap with other auth backends. `period`, `explicit_max_ttl`, and - `bound_cidrs` will continue to work, with priority being given to the - `token_` prefixed versions of those parameters. They will also be returned - when doing a read on the role if they were used to provide values initially; - however, in Vault 1.4 if `period` or `explicit_max_ttl` is zero they will no
(`explicit_max_ttl` was already not returned if empty.) - * Due to underlying changes in Go version 1.12 and Go > 1.11.5, Vault is now - stricter about what characters it will accept in path names. Whereas before - it would filter out unprintable characters (and this could be turned off), - control characters and other invalid characters are now rejected within Go's - HTTP library before the request is passed to Vault, and this cannot be - disabled. To continue using these (e.g. for already-written paths), they - must be properly percent-encoded (e.g. `\r` becomes `%0D`, `\x00` becomes - `%00`, and so on). - * The user-configured regions on the AWSKMS seal stanza will now be preferred - over regions set in the enclosing environment. This is a _breaking_ change. - * All values in audit logs now are omitted if they are empty. This helps - reduce the size of audit log entries by not reproducing keys in each entry - that commonly don't contain any value, which can help in cases where audit - log entries are above the maximum UDP packet size and others. - * Both PeriodicFunc and WALRollback functions will be called if both are - provided. Previously WALRollback would only be called if PeriodicFunc was - not set. See [[GH-6717](https://github.com/hashicorp/vault/pull/6717)] for - details. - * Vault now uses Go's official dependency management system, Go Modules, to - manage dependencies. As a result to both reduce transitive dependencies for - API library users and plugin authors, and to work around various conflicts, - we have moved various helpers around, mostly under an `sdk/` submodule. A - couple of functions have also moved from plugin helper code to the `api/` - submodule. If you are a plugin author, take a look at some of our official - plugins and the paths they are importing for guidance. - * AppRole uses new, common token fields for values that overlap - with other auth backends. `period` and `policies` will continue to work, - with priority being given to the `token_` prefixed versions of those - parameters. They will also be returned when doing a read on the role if they - were used to provide values initially. - * In AppRole, `"default"` is no longer automatically added to the `policies` - parameter. This was a no-op since it would always be added anyways by - Vault's core; however, this can now be explicitly disabled with the new - `token_no_default_policy` field. - * In AppRole, `bound_cidr_list` is no longer returned when reading a role - * rollback: Rollback will no longer display log messages when it runs; it will - only display messages on error. - * Database plugins will now default to 4 `max_open_connections` - rather than 2. - -FEATURES: - - * **Integrated Storage**: Vault 1.2 includes a _tech preview_ of a new way to - manage storage directly within a Vault cluster. This new integrated storage - solution is based on the Raft protocol which is also used to back HashiCorp - Consul and HashiCorp Nomad. - * **Combined DB credential rotation**: Alternative mode for the Combined DB - Secret Engine to automatically rotate existing database account credentials - and set Vault as the source of truth for credentials. - * **Identity Tokens**: Vault's Identity system can now generate OIDC-compliant - ID tokens. These customizable tokens allow encapsulating a signed, verifiable - snapshot of identity information and metadata. They can be use by other - applications—even those without Vault authorization—as a way of establishing - identity based on a Vault entity. 
- * **Pivotal Cloud Foundry plugin**: New auth method using Pivotal Cloud - Foundry certificates for Vault authentication. - * **ElasticSearch database plugin**: New ElasticSearch database plugin issues - unique, short-lived ElasticSearch credentials. - * **New UI Features**: An HTTP Request Volume Page and new UI for editing LDAP - Users and Groups have been added. - * **HA support for Postgres**: PostgreSQL versions >= 9.5 may now be used as - an HA storage backend. - * **KMIP secrets engine (Enterprise)**: Allows Vault to operate as a KMIP - Server, seamlessly brokering cryptographic operations for traditional - infrastructure. - * Common Token Fields: Auth methods now use common fields for controlling - token behavior, making it easier to understand configuration across methods. - * **Vault API explorer**: The Vault UI now includes an embedded API explorer - where you can browse the endpoints available to you and make requests. To try - it out, open the Web CLI and type `api`. - -IMPROVEMENTS: - - * agent: Allow EC2 nonce to be passed in [[GH-6953](https://github.com/hashicorp/vault/pull/6953)] - * agent: Add optional `namespace` parameter, which sets the default namespace - for the auto-auth functionality [[GH-6988](https://github.com/hashicorp/vault/pull/6988)] - * agent: Add cert auto-auth method [[GH-6652](https://github.com/hashicorp/vault/pull/6652)] - * api: Add support for passing data to delete operations via `DeleteWithData` - [[GH-7139](https://github.com/hashicorp/vault/pull/7139)] - * audit/file: Dramatically speed up file operations by changing - locking/marshaling order [[GH-7024](https://github.com/hashicorp/vault/pull/7024)] - * auth/jwt: A JWKS endpoint may now be configured for signature verification [[GH-43](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/43)] - * auth/jwt: A new `verbose_oidc_logging` role parameter has been added to help - troubleshoot OIDC configuration [[GH-57](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/57)] - * auth/jwt: `bound_claims` will now match received claims that are lists if any element - of the list is one of the expected values [[GH-50](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/50)] - * auth/jwt: Leeways for `nbf` and `exp` are now configurable, as is clock skew - leeway [[GH-53](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/53)] - * auth/kubernetes: Allow service names/namespaces to be configured as globs - [[GH-58](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/58)] - * auth/token: Allow the support of the identity system for the token backend - via token roles [[GH-6267](https://github.com/hashicorp/vault/pull/6267)] - * auth/token: Add a large set of token configuration options to token store - roles [[GH-6662](https://github.com/hashicorp/vault/pull/6662)] - * cli: `path-help` now allows `-format=json` to be specified, which will - output OpenAPI [[GH-7006](https://github.com/hashicorp/vault/pull/7006)] - * cli: Add support for passing parameters to `vault delete` operations - [[GH-7139](https://github.com/hashicorp/vault/pull/7139)] - * cli: Add a log-format CLI flag that can specify either "standard" or "json" - for the log format for the `vault server` command.
[[GH-6840](https://github.com/hashicorp/vault/pull/6840)] - * cli: Add `-dev-no-store-token` to allow dev servers to not store the - generated token at the tokenhelper location [[GH-7104](https://github.com/hashicorp/vault/pull/7104)] - * identity: Allow a group alias' canonical ID to be modified - * namespaces: Namespaces can now be created and deleted from performance - replication secondaries - * plugins: Change the default for `max_open_connections` for DB plugins to 4 - [[GH-7093](https://github.com/hashicorp/vault/pull/7093)] - * replication: Client TLS authentication is now supported when enabling or - updating a replication secondary - * secrets/database: Cassandra operations will now cancel on client timeout - [[GH-6954](https://github.com/hashicorp/vault/pull/6954)] - * secrets/kv: Add optional `delete_version_after` parameter, which takes a - duration and can be set on the mount and/or the metadata for a specific key - [[GH-7005](https://github.com/hashicorp/vault/pull/7005)] - * storage/postgres: LIST now performs better on large datasets [[GH-6546](https://github.com/hashicorp/vault/pull/6546)] - * storage/s3: A new `path` parameter allows selecting the path within a bucket - for Vault data [[GH-7157](https://github.com/hashicorp/vault/pull/7157)] - * ui: KV v1 and v2 will now gracefully degrade allowing a write without read - workflow in the UI [[GH-6570](https://github.com/hashicorp/vault/pull/6570)] - * ui: Many visual improvements with the addition of Toolbars [[GH-6626](https://github.com/hashicorp/vault/pull/6626)], the restyling - of the Confirm Action component [[GH-6741](https://github.com/hashicorp/vault/pull/6741)], and using a new set of glyphs for our - Icon component [[GH-6736](https://github.com/hashicorp/vault/pull/6736)] - * ui: Lazy loading parts of the application so that the total initial payload is - smaller [[GH-6718](https://github.com/hashicorp/vault/pull/6718)] - * ui: Tabbing to auto-complete in filters will first complete a common prefix if there - is one [[GH-6759](https://github.com/hashicorp/vault/pull/6759)] - * ui: Removing jQuery from the application makes the initial JS payload smaller [[GH-6768](https://github.com/hashicorp/vault/pull/6768)] - -BUG FIXES: - - * audit: Log requests and responses due to invalid wrapping token provided - [[GH-6541](https://github.com/hashicorp/vault/pull/6541)] - * audit: Fix bug preventing request counter queries from working with auditing - enabled [[GH-6767](https://github.com/hashicorp/vault/pull/6767)] - * auth/aws: AWS Roles are now upgraded and saved to the latest version just - after the AWS credential plugin is mounted.
[[GH-7025](https://github.com/hashicorp/vault/pull/7025)] - * auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN - when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)] - * auth/aws: Fix an error complaining about a read-only view that could occur - during updating of a role when on a performance replication secondary - [[GH-6926](https://github.com/hashicorp/vault/pull/6926)] - * auth/jwt: Fix a regression introduced in 1.1.1 that disabled checking of client_id - for OIDC logins [[GH-54](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/54)] - * auth/jwt: Fix a panic during OIDC CLI logins that could occur if the Vault server - response is empty [[GH-55](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/55)] - * auth/jwt: Fix issue where OIDC logins might intermittently fail when using - performance standbys [[GH-61](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/61)] - * identity: Fix a case where modifying aliases of an entity could end up - moving the entity into the wrong namespace - * namespaces: Fix a behavior (currently only known to be benign) where we - wouldn't delete policies through the official functions before wiping the - namespaces on deletion - * secrets/database: Escape username/password before using in connection URL - [[GH-7089](https://github.com/hashicorp/vault/pull/7089)] - * secrets/pki: Forward revocation requests to active node when on a - performance standby [[GH-7173](https://github.com/hashicorp/vault/pull/7173)] - * ui: Fix timestamp on some transit keys [[GH-6827](https://github.com/hashicorp/vault/pull/6827)] - * ui: Show Entities and Groups in Side Navigation [[GH-7138](https://github.com/hashicorp/vault/pull/7138)] - * ui: Ensure dropdown updates selected item on HTTP Request Metrics page - -## 1.1.4/1.1.5 (July 25th/30th, 2019) - -NOTE: - -Although 1.1.4 was tagged, we realized very soon after the tag was publicly -pushed that an intended fix was accidentally left out. As a result, 1.1.4 was -not officially announced and 1.1.5 should be used as the release after 1.1.3. 
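As an aside on the connection-URL escaping fix above ([[GH-7089](https://github.com/hashicorp/vault/pull/7089)]): the safe pattern is to percent-encode credentials rather than concatenate them into the URL. A minimal Go sketch using the standard library's net/url; the `buildDSN` helper and the `postgres` scheme are illustrative assumptions, not Vault's code.

```go
package main

import (
	"fmt"
	"net/url"
)

// buildDSN assembles a database connection URL with the username and
// password percent-encoded, so characters such as '@', '/' or '?' in
// credentials cannot alter the structure of the URL. Hypothetical helper
// for illustration only.
func buildDSN(user, pass, host, dbName string) string {
	u := url.URL{
		Scheme: "postgres",
		User:   url.UserPassword(user, pass), // escapes both fields
		Host:   host,
		Path:   dbName,
	}
	return u.String()
}

func main() {
	// The '@' and '/' in the password are escaped rather than parsed as URL syntax.
	fmt.Println(buildDSN("vault", "p@ss/word", "db.internal:5432", "app"))
	// postgres://vault:p%40ss%2Fword@db.internal:5432/app
}
```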
- -IMPROVEMENTS: - - * identity: Allow a group alias' canonical ID to be modified - * namespaces: Improve namespace deletion performance [[GH-6939](https://github.com/hashicorp/vault/pull/6939)] - * namespaces: Namespaces can now be created and deleted from performance - replication secondaries - -BUG FIXES: - - * api: Add backwards compat support for API env vars [[GH-7135](https://github.com/hashicorp/vault/pull/7135)] - * auth/aws: Fix a case where a panic could stem from a malformed assumed-role - ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)] - * auth/ldap: Add `use_pre111_group_cn_behavior` flag to allow recovering from - a regression caused by a bug fix starting in 1.1.1 [[GH-7208](https://github.com/hashicorp/vault/pull/7208)] - * auth/aws: Use a role cache to avoid separate locking paths [[GH-6926](https://github.com/hashicorp/vault/pull/6926)] - * core: Fix a deadlock if a panic happens during request handling [[GH-6920](https://github.com/hashicorp/vault/pull/6920)] - * core: Fix an issue that may cause key upgrades to not be cleaned up properly - [[GH-6949](https://github.com/hashicorp/vault/pull/6949)] - * core: Don't shutdown if key upgrades fail due to canceled context [[GH-7070](https://github.com/hashicorp/vault/pull/7070)] - * core: Fix panic caused by handling requests while vault is inactive - * identity: Fix reading entity and groups that have spaces in their names - [[GH-7055](https://github.com/hashicorp/vault/pull/7055)] - * identity: Ensure entity alias operations properly verify namespace [[GH-6886](https://github.com/hashicorp/vault/pull/6886)] - * mfa: Fix a nil pointer panic that could occur if invalid Duo credentials - were supplied - * replication: Forward step-down on perf standbys to match HA behavior - * replication: Fix various read only storage errors on performance standbys - * replication: Stop forwarding before stopping replication to eliminate some - possible bad states - * secrets/database: Allow cassandra queries to be canceled [[GH-6954](https://github.com/hashicorp/vault/pull/6954)] - * storage/consul: Fix a regression causing vault to not connect to consul over - unix sockets [[GH-6859](https://github.com/hashicorp/vault/pull/6859)] - * ui: Fix saving of TTL and string array fields generated by Open API [[GH-7094](https://github.com/hashicorp/vault/pull/7094)] - -## 1.1.3 (June 5th, 2019) +* core: Bump Go version to 1.19.4.
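The Cassandra cancellation fixes above ([[GH-6954](https://github.com/hashicorp/vault/pull/6954)]) follow a common Go idiom: run the blocking call in a goroutine and race it against the context's cancellation. A generic sketch of that pattern under those assumptions, not Vault's actual storage code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runCancelable executes fn but stops waiting as soon as ctx is canceled,
// for example when the client times out. Illustrative only.
func runCancelable(ctx context.Context, fn func() error) error {
	done := make(chan error, 1) // buffered so the goroutine never leaks blocked
	go func() { done <- fn() }()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := runCancelable(ctx, func() error {
		time.Sleep(time.Second) // stands in for a slow query
		return nil
	})
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```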
IMPROVEMENTS: - * agent: Now supports proxying request query parameters [[GH-6772](https://github.com/hashicorp/vault/pull/6772)] - * core: Mount table output now includes a UUID indicating the storage path [[GH-6633](https://github.com/hashicorp/vault/pull/6633)] - * core: HTTP server timeout values are now configurable [[GH-6666](https://github.com/hashicorp/vault/pull/6666)] - * replication: Improve performance of the reindex operation on secondary clusters - when mount filters are in use - * replication: Replication status API now returns the state and progress of a reindex - -BUG FIXES: - - * api: Return the Entity ID in the secret output [[GH-6819](https://github.com/hashicorp/vault/pull/6819)] - * auth/jwt: Consider bound claims when considering if there is at least one - bound constraint [[GH-49](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/49)] - * auth/okta: Fix handling of group names containing slashes [[GH-6665](https://github.com/hashicorp/vault/pull/6665)] - * cli: Add deprecated stored-shares flag back to the init command [[GH-6677](https://github.com/hashicorp/vault/pull/6677)] - * cli: Fix a panic when the KV command would return no data [[GH-6675](https://github.com/hashicorp/vault/pull/6675)] - * cli: Fix issue causing CLI list operations to not return proper format when - there is an empty response [[GH-6776](https://github.com/hashicorp/vault/pull/6776)] - * core: Correctly honor non-HMAC request keys when auditing requests [[GH-6653](https://github.com/hashicorp/vault/pull/6653)] - * core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of - endpoints [[GH-6654](https://github.com/hashicorp/vault/pull/6654)] - * core: Fix issue where some OpenAPI parameters were incorrectly listed as - being sent as a header [[GH-6679](https://github.com/hashicorp/vault/pull/6679)] - * core: Fix issue that would allow duplicate mount names to be used [[GH-6771](https://github.com/hashicorp/vault/pull/6771)] - * namespaces: Fix behavior when using `root` instead of `root/` as the - namespace header value - * pki: fix a panic when a client submits a null value [[GH-5679](https://github.com/hashicorp/vault/pull/5679)] - * replication: Properly update mount entry cache on a secondary to apply all - new values after a tune - * replication: Properly close connection on bootstrap error - * replication: Fix an issue causing startup problems if a namespace policy - wasn't replicated properly - * replication: Fix longer than necessary WAL replay during an initial reindex - * replication: Fix error during mount filter invalidation on DR secondary clusters - * secrets/ad: Make time buffer configurable [AD-35] - * secrets/gcp: Check for nil config when getting credentials [[GH-35](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/35)] - * secrets/gcp: Fix error checking in some cases where the returned value could - be 403 instead of 404 [[GH-37](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/37)] - * secrets/gcpkms: Disable key rotation when deleting a key [[GH-10](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/10)] - * storage/consul: recognize `https://` address even if scheme not specified - [[GH-6602](https://github.com/hashicorp/vault/pull/6602)] - * storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA) - could cause constant switching of the active node [[GH-6637](https://github.com/hashicorp/vault/pull/6637)] - * storage/dynamodb: Eliminate a high-CPU condition that could occur if an - error was received
from the DynamoDB API [[GH-6640](https://github.com/hashicorp/vault/pull/6640)] - * storage/gcs: Correctly use configured chunk size values [[GH-6655](https://github.com/hashicorp/vault/pull/6655)] - * storage/mssql: Use the correct database when pre-created schemas exist - [[GH-6356](https://github.com/hashicorp/vault/pull/6356)] - * ui: Fix issue with select arrows on drop down menus [[GH-6627](https://github.com/hashicorp/vault/pull/6627)] - * ui: Fix an issue where sensitive input values weren't being saved to the - server [[GH-6586](https://github.com/hashicorp/vault/pull/6586)] - * ui: Fix web cli parsing when using quoted values [[GH-6755](https://github.com/hashicorp/vault/pull/6755)] - * ui: Fix a namespace workflow mapping identities from external namespaces by - allowing arbitrary input in search-select component [[GH-6728](https://github.com/hashicorp/vault/pull/6728)] - -## 1.1.2 (April 18th, 2019) - -This is a bug fix release containing the two items below. It is otherwise -unchanged from 1.1.1. +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. 
[[GH-18787](https://github.com/hashicorp/vault/pull/18787)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] BUG FIXES: - * auth/okta: Fix a potential dropped error [[GH-6592](https://github.com/hashicorp/vault/pull/6592)] - * secrets/kv: Fix a regression on upgrade where a KVv2 mount could fail to be - mounted on unseal if it had previously been mounted but not written to - [[GH-31](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/31)] - -## 1.1.1 (April 11th, 2019) - -SECURITY: - - * Given: (a) performance replication is enabled; (b) performance standbys are - in use on the performance replication secondary cluster; and (c) mount - filters are in use, if a mount that was previously available to a secondary - is updated to be filtered out, although the data would be removed from the - secondary cluster, the in-memory cache of the data would not be purged on - the performance standby nodes. As a result, the previously-available data - could still be read from memory if it was ever read from disk, and if this - included mount configuration data this could result in token or lease - issuance. The issue is fixed in this release; in prior releases either an - active node changeover (such as a step-down) or a restart of the standby - nodes is sufficient to cause the performance standby nodes to clear their - cache. A CVE is in the process of being issued; the number is - CVE-2019-11075. - * Roles in the JWT Auth backend using the OIDC login flow (i.e. role_type of - “oidc”) were not enforcing bound_cidrs restrictions, if any were configured - for the role. This issue did not affect roles of type “jwt”. +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. 
+* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] +* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] +* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] +## 1.12.2 +### November 30, 2022 CHANGES: - * auth/jwt: Disallow logins of role_type "oidc" via the `/login` path [[GH-38](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/38)] - * core/acl: New ordering defines which policy wins when there are multiple - inexact matches and at least one path contains `+`. `+*` is now illegal in - policy paths. The previous behavior simply selected any matching - segment-wildcard path that matched.
[[GH-6532](https://github.com/hashicorp/vault/pull/6532)] - * replication: Due to technical limitations, mounting and unmounting was not - previously possible from a performance secondary. These have been resolved, - and these operations may now be run from a performance secondary. +* core: Bump Go version to 1.19.3. +* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] IMPROVEMENTS: - * agent: Allow AppRole auto-auth without a secret-id [[GH-6324](https://github.com/hashicorp/vault/pull/6324)] - * auth/gcp: Cache clients to improve performance and reduce open file usage - * auth/jwt: Bound claims validation will now allow matching the received - claims against a list of expected values [[GH-41](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/41)] - * secret/gcp: Cache clients to improve performance and reduce open file usage - * replication: Mounting/unmounting/remounting/mount-tuning is now supported - from a performance secondary cluster - * ui: Support for authentication via the RADIUS auth method [[GH-6488](https://github.com/hashicorp/vault/pull/6488)] - * ui: Navigating away from secret list view will clear any page-specific - filter that was applied [[GH-6511](https://github.com/hashicorp/vault/pull/6511)] - * ui: Improved the display when OIDC auth errors [[GH-6553](https://github.com/hashicorp/vault/pull/6553)] +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* storage/raft: Add `retry_join_as_non_voter` config option.
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] BUG FIXES: - * agent: Allow auto-auth to be used with caching without having to define any - sinks [[GH-6468](https://github.com/hashicorp/vault/pull/6468)] - * agent: Disallow some nonsensical config file combinations [[GH-6471](https://github.com/hashicorp/vault/pull/6471)] - * auth/ldap: Fix CN check not working if CN was not all in uppercase [[GH-6518](https://github.com/hashicorp/vault/pull/6518)] - * auth/jwt: The CLI helper for OIDC logins will now open the browser to the correct - URL when running on Windows [[GH-37](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/37)] - * auth/jwt: Fix OIDC login issue where configured TLS certs weren't being used [[GH-40](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/40)] - * auth/jwt: Fix an issue where the `oidc_scopes` parameter was not being included in - the response to a role read request [[GH-35](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/35)] - * core: Fix seal migration case when migrating to Shamir and a seal block - wasn't explicitly specified [[GH-6455](https://github.com/hashicorp/vault/pull/6455)] - * core: Fix unwrapping when using namespaced wrapping tokens [[GH-6536](https://github.com/hashicorp/vault/pull/6536)] - * core: Fix incorrect representation of required properties in OpenAPI output - [[GH-6490](https://github.com/hashicorp/vault/pull/6490)] - * core: Fix deadlock that could happen when using the UI [[GH-6560](https://github.com/hashicorp/vault/pull/6560)] - * identity: Fix updating groups removing existing members [[GH-6527](https://github.com/hashicorp/vault/pull/6527)] - * identity: Properly invalidate group alias in performance secondary [[GH-6564](https://github.com/hashicorp/vault/pull/6564)] - * identity: Use namespace context when loading entities and groups to ensure - merging of duplicate entries works properly [[GH-6563](https://github.com/hashicorp/vault/pull/6563)] - * replication: Fix performance standby election failure [[GH-6561](https://github.com/hashicorp/vault/pull/6561)] - * replication: Fix mount filter invalidation on performance standby nodes - * replication: Fix license reloading on performance standby nodes - * replication: Fix handling of control groups on performance standby nodes - * replication: Fix some forwarding scenarios with request bodies using - performance standby nodes [[GH-6538](https://github.com/hashicorp/vault/pull/6538)] - * secret/gcp: Fix roleset binding when using JSON [[GH-27](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/27)] - * secret/pki: Use `uri_sans` param when not using CSR parameters [[GH-6505](https://github.com/hashicorp/vault/pull/6505)] - * storage/dynamodb: Fix a race condition possible in HA configurations that could - leave the cluster without a leader [[GH-6512](https://github.com/hashicorp/vault/pull/6512)] - * ui: Fix an issue where in production builds OpenAPI model generation was - failing, causing any form using it to render labels with missing fields [[GH-6474](https://github.com/hashicorp/vault/pull/6474)] - * ui: Fix issue nav-hiding when moving between namespaces [[GH-6473](https://github.com/hashicorp/vault/pull/6473)] - * ui: Secrets will always show in the nav regardless of access to cubbyhole [[GH-6477](https://github.com/hashicorp/vault/pull/6477)] - * ui: fix SSH OTP generation [[GH-6540](https://github.com/hashicorp/vault/pull/6540)] - * ui: add polyfill to load UI in IE11 [[GH-6567](https://github.com/hashicorp/vault/pull/6567)] - * 
- * ui: Fix issue where some elements would fail to work properly if using ACLs
-   with segment-wildcard paths (`/+/` segments) [[GH-6525](https://github.com/hashicorp/vault/pull/6525)]
-
-## 1.1.0 (March 18th, 2019)
-
-CHANGES:
-
- * auth/jwt: The `groups_claim_delimiter_pattern` field has been removed. If the
-   groups claim is not at the top level, it can now be specified as a
-   [JSONPointer](https://tools.ietf.org/html/rfc6901).
- * auth/jwt: Roles now have a "role type" parameter with a default type of
-   "oidc". To configure new JWT roles, a role type of "jwt" must be explicitly
-   specified.
- * cli: CLI commands deprecated in 0.9.2 are now removed. Please see the CLI
-   help/warning output in previous versions of Vault for updated commands.
- * core: Vault no longer automatically mounts a K/V backend at the "secret/"
-   path when initializing Vault
- * core: Vault's cluster port will now be open at all times on HA standby nodes
- * plugins: Vault no longer supports running netRPC plugins. These were
-   deprecated in favor of gRPC based plugins and any plugin built since 0.9.4
-   defaults to gRPC. Older plugins may need to be recompiled against the latest
-   Vault dependencies.
-
-FEATURES:
+* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)]
+* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
+* cli: Fix issue preventing kv commands from executing properly when the mount path provided by the `-mount` flag and the secret key path are the same (see the sketch below). [[GH-17679](https://github.com/hashicorp/vault/pull/17679)]
+* core (enterprise): The supported storage check in the `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use a storage backend other than `raft` or `consul`.
+* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas.
+* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)]
+* core: fix a startup race condition where performance standbys could go into a
+  mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)]
+* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)]
+* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)]
+* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)]
+* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)]
+* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)]
+* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)]
+* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)]
+* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)]
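A minimal sketch of the `-mount` usage the kv fix above addresses, assuming a KV v2 engine mounted at `secret` with a secret whose key path matches the mount name; all names are illustrative:

```shell
# KV v2 engine mounted at "secret"; the secret's path inside the mount is
# also "secret", which previously confused path parsing.
vault kv put -mount=secret secret foo=bar
vault kv get -mount=secret secret
```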
- * **Vault Agent Caching**: Vault Agent can now be configured to act as a
-   caching proxy to Vault. Clients can send requests to Vault Agent and the
-   request will be proxied to the Vault server and cached locally in Agent.
-   Currently Agent will cache generated leases and tokens and keep them
-   renewed. The proxy can also use the Auto Auth feature so clients do not need
-   to authenticate to Vault, but rather can make requests to Agent and have
-   Agent fully manage token lifecycle.
- * **OIDC Redirect Flow Support**: The JWT auth backend now supports OIDC
-   roles. These allow authentication via an OIDC-compliant provider via the
-   user's browser. The login may be initiated from the Vault UI or through
-   the `vault login` command.
- * **ACL Path Wildcard**: ACL paths can now use the `+` character to enable
-   wild card matching for a single directory in the path definition.
- * **Transit Auto Unseal**: Vault can now be configured to use the Transit
-   Secret Engine in another Vault cluster as an auto unseal provider.
+
+## 1.12.1
+### November 2, 2022

IMPROVEMENTS:

- * auth/jwt: A default role can be set. It will be used during JWT/OIDC logins if
-   a role is not specified.
- * auth/jwt: Arbitrary claims data can now be copied into token & alias metadata.
- * auth/jwt: An arbitrary set of bound claims can now be configured for a role.
- * auth/jwt: The name "oidc" has been added as an alias for the jwt backend. Either
-   name may be specified in the `auth enable` command.
- * command/server: A warning will be printed when 'tls_cipher_suites' includes a
-   blacklisted cipher suite or all cipher suites are blacklisted by the HTTP/2
-   specification [[GH-6300](https://github.com/hashicorp/vault/pull/6300)]
- * core/metrics: Prometheus pull support using a new sys/metrics endpoint. [[GH-5308](https://github.com/hashicorp/vault/pull/5308)]
- * core: On non-windows platforms a SIGUSR2 will make the server log a dump of
-   all running goroutines' stack traces for debugging purposes [[GH-6240](https://github.com/hashicorp/vault/pull/6240)]
- * replication: The initial replication indexing process on newly initialized or upgraded
-   clusters now runs asynchronously
- * sentinel: Add token namespace id and path, available in rules as
-   token.namespace.id and token.namespace.path
- * ui: The UI is now leveraging OpenAPI definitions to pull in fields for various forms.
-   This means it will not be necessary to add fields on the Go and JS sides in the future.
-   [[GH-6209](https://github.com/hashicorp/vault/pull/6209)]
+* api: Support the VAULT_DISABLE_REDIRECTS environment variable (and the --disable-redirects flag) to disable the default client behavior and prevent the client from following any redirection responses (see the sketch below). [[GH-17352](https://github.com/hashicorp/vault/pull/17352)]
+* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)]
+* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)]
+* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)]
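A quick sketch of the new redirect control, assuming a 1.12.1+ CLI; the request path is illustrative and the exact flag spelling should be checked against `vault read -help` for your version:

```shell
# Environment-variable form: applies to every client call in this shell.
export VAULT_DISABLE_REDIRECTS=true
vault read sys/health

# Flag form on a single invocation (spelling assumed from the entry above).
vault read -disable-redirects sys/health
```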

BUG FIXES:

- * auth/jwt: Apply `bound_claims` validation across all login paths
- * auth/jwt: Update `bound_audiences` validation during non-OIDC logins to accept
-   any matched audience, as documented and handled in OIDC logins [[GH-30](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/30)]
- * auth/token: Fix issue where empty values for token role update call were
-   ignored [[GH-6314](https://github.com/hashicorp/vault/pull/6314)]
- * core: The `operator migrate` command will no longer hang on empty key names
-   [[GH-6371](https://github.com/hashicorp/vault/pull/6371)]
- * identity: Fix a panic at login when external group has a nil alias [[GH-6230](https://github.com/hashicorp/vault/pull/6230)]
- * namespaces: Clear out identity store items upon namespace deletion
- * replication/perfstandby: Fixed a bug causing performance standbys to wait
-   longer than necessary after forwarding a write to the active node
- * replication/mountfilter: Fix a deadlock that could occur when mount filters
-   were updated [[GH-6426](https://github.com/hashicorp/vault/pull/6426)]
- * secret/kv: Fix issue where a v1→v2 upgrade could run on a performance
-   standby when using a local mount
- * secret/ssh: Fix for a bug where attempting to delete the last ssh role
-   in the zeroaddress configuration could fail [[GH-6390](https://github.com/hashicorp/vault/pull/6390)]
- * secret/totp: Uppercase provided keys so they don't fail base32 validation
-   [[GH-6400](https://github.com/hashicorp/vault/pull/6400)]
- * secret/transit: Multiple HMAC, Sign or Verify operations can now be
-   performed with one API call using the new `batch_input` parameter [[GH-5875](https://github.com/hashicorp/vault/pull/5875)]
- * sys: `sys/internal/ui/mounts` will no longer return secret or auth mounts
-   that have been filtered. Similarly, `sys/internal/ui/mount/:path` will
-   return an error response if a filtered mount path is requested. [[GH-6412](https://github.com/hashicorp/vault/pull/6412)]
- * ui: Fix for a bug where you couldn't access the data tab after clicking on
-   wrap details on the unwrap page [[GH-6404](https://github.com/hashicorp/vault/pull/6404)]
- * ui: Fix an issue where the policies tab was erroneously hidden [[GH-6301](https://github.com/hashicorp/vault/pull/6301)]
- * ui: Fix encoding issues with kv interfaces [[GH-6294](https://github.com/hashicorp/vault/pull/6294)]
-
-## 1.0.3.1 (March 14th, 2019) (Enterprise Only)
+* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)]
+* core/managed-keys (enterprise): Return better error messages when encountering key creation failures
+* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility
+* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)]
+* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)]
+* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)]
+* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)]
+* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail.
+* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations.
+* login: Store token in tokenhelper for interactive login MFA (see the sketch below) [[GH-17040](https://github.com/hashicorp/vault/pull/17040)]
+* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
+* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
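A sketch of the interactive-MFA login flow the tokenhelper fix above affects; the auth method and username are illustrative, and the default token helper location (`~/.vault-token`) is an assumption about typical setups:

```shell
# Interactive login that triggers an MFA prompt; after GH-17040 the resulting
# token is persisted by the token helper like any other login.
vault login -method=userpass username=alice
# Later CLI calls pick up the stored token automatically.
vault token lookup
```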
+
+## 1.12.0
+### October 13, 2022

SECURITY:

- * A regression was fixed in replication mount filter code introduced in Vault
-   1.0 that caused the underlying filtered data to be replicated to
-   secondaries. This data was not accessible to users via Vault's API but via a
-   combination of privileged configuration file changes/Vault commands it could
-   be read. Upgrading to this version or 1.1 will fix this issue and cause the
-   replicated data to be deleted from filtered secondaries. More information
-   was sent to customer contacts on file.
-
-## 1.0.3 (February 12th, 2019)
+* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)]

CHANGES:

- * New AWS authentication plugin mounts will default to using the generated
-   role ID as the Identity alias name. This applies to both EC2 and IAM auth.
-   Existing mounts that explicitly set this value will not be affected but
-   mounts that specified no preference will switch over on upgrade.
- * The default policy now allows a token to look up its associated identity
-   entity either by name or by id [[GH-6105](https://github.com/hashicorp/vault/pull/6105)]
- * The Vault UI's navigation and onboarding wizard now only displays items that
-   are permitted in a user's policy [[GH-5980](https://github.com/hashicorp/vault/pull/5980), [GH-6094](https://github.com/hashicorp/vault/pull/6094)]
- * An issue was fixed that caused recovery keys to not work on secondary
-   clusters when using a different unseal mechanism/key than the primary. This
-   would be hit if the cluster was rekeyed or initialized after 1.0. We recommend
-   rekeying the recovery keys on the primary cluster if you meet the above
-   requirements.
+* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)]
+* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins (see the sketch below). [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)]
+* auth: `auth enable` returns an error and the `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
+* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)]
+* core: Bump Go version to 1.19.2.
+* core: Validate input parameters for the `vault operator init` command. The Vault 1.12 CLI version is needed to run `operator init` now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)]
+* identity: a request to `/identity/group` that includes `member_group_ids` containing a cycle will now be responded to with a 400 rather than a 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)]
+* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary.
+* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)]
+* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
+* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
+* plugins: `plugin info` displays deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* plugins: `plugin list` now accepts a `-detailed` flag, which displays deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)]
+* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)]
+* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)]
+* secrets: `secrets enable` returns an error and the `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
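A minimal sketch of reading the new deprecation metadata, assuming a running server, `jq`, an enabled approle auth mount, and that the field appears at the top level of `data` (placement inferred from the entries above):

```shell
# Read a builtin auth method's mount info and pull out the new field.
curl --silent --header "X-Vault-Token: $VAULT_TOKEN" \
     "$VAULT_ADDR/v1/sys/auth/approle" | jq '.data.deprecation_status'
```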

FEATURES:

- * **cURL Command Output**: CLI commands can now use the `-output-curl-string`
-   flag to print out an equivalent cURL command.
- * **Response Headers From Plugins**: Plugins can now send back headers that
-   will be included in the response to a client. The set of allowed headers can
-   be managed by the operator.
+* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys
+* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)]
+* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)]
+* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a standalone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)]
+* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. [[GH-17075](https://github.com/hashicorp/vault/pull/17075)]
+* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)]
+* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations
+* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature
+* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)]
+* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified (see the sketch below).
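A short sketch of registering and inspecting a versioned plugin; the binary name, version, and mount type are illustrative, and flag names should be confirmed with `vault plugin register -help` on 1.12:

```shell
# Register an external secrets plugin with a semantic version attached.
SHA256="$(sha256sum /etc/vault/plugins/my-secrets-plugin | cut -d' ' -f1)"
vault plugin register -sha256="$SHA256" -version=v1.0.0 secret my-secrets-plugin

# Version (and deprecation status for builtins) shows up in detailed listings.
vault plugin list -detailed secret
```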

IMPROVEMENTS:

- * auth/aws: AWS EC2 authentication can optionally create entity aliases by
-   role ID [[GH-6133](https://github.com/hashicorp/vault/pull/6133)]
- * auth/jwt: The supported set of signing algorithms is now configurable [JWT
-   plugin [GH-16](https://github.com/hashicorp/vault/pull/16)]
- * core: When starting from an uninitialized state, HA nodes will now attempt
-   to auto-unseal using a configured auto-unseal mechanism after the active
-   node initializes Vault [[GH-6039](https://github.com/hashicorp/vault/pull/6039)]
- * secret/database: Add socket keepalive option for Cassandra [[GH-6201](https://github.com/hashicorp/vault/pull/6201)]
- * secret/ssh: Add signed key constraints, allowing enforcement of key types
-   and minimum key sizes [[GH-6030](https://github.com/hashicorp/vault/pull/6030)]
- * secret/transit: ECDSA signatures can now be marshaled in JWS-compatible
-   fashion [[GH-6077](https://github.com/hashicorp/vault/pull/6077)]
- * storage/etcd: Support SRV service names [[GH-6087](https://github.com/hashicorp/vault/pull/6087)]
- * storage/aws: Support specifying a KMS key ID for server-side encryption
-   [[GH-5996](https://github.com/hashicorp/vault/pull/5996)]
+* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api
+* activity (enterprise): Added new clients unit tests to test accuracy of estimates
+* agent/auto-auth: Add `exit_on_err` which, when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)]
+* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)]
+* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)]
+* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)]
+* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)]
+* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)]
+* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)]
+* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)]
+* auth/approle: SecretIDs can now be generated with a per-request specified TTL and num_uses.
+  When the ttl and num_uses fields are not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)]
+* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)]
+* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. [[GH-17194](https://github.com/hashicorp/vault/pull/17194)]
+* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)]
+* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)]
+* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)]
+* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)]
+* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)]
+* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)]
+* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)]
+* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)]
+* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)]
+* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
+* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)]
+* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)]
+* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)]
+* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)]
+* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)]
+* command/server: add `-dev-tls` and `-dev-tls-cert-dir` flags to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)]
+* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)]
+* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command
+* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported.
+
+* core (enterprise): Add custom metadata support for namespaces
+* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)]
+* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)]
+* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)]
+* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas
+* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role
+* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)]
+* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)]
+* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)]
+* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)]
+* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
+* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)]
+* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)]
+* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)]
+* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)]
+* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)]
+* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)]
+* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)]
+* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)]
+* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)]
+* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)]
+* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)]
+* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)]
+* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)]
+* plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
+* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer.
+* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)]
+* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)]
+* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)]
+* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)]
+* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)]
+* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)]
+* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)]
+* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)]
+* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)]
+* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)]
+* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)]
+* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. [[GH-17199](https://github.com/hashicorp/vault/pull/17199)]
+* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)]
+* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)]
+* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)]
+* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)]
+* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)]
+* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)]
+* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)]
+* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)]
+* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)]
+* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)]
+* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). [[GH-16564](https://github.com/hashicorp/vault/pull/16564)]
+* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)]
+* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)]
+* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point (see the sketch after this list). [[GH-16249](https://github.com/hashicorp/vault/pull/16249)]
+* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)]
+* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)]
+* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)]
+* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)]
+* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)]
+* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)]
+* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)]
+* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. [[GH-16549](https://github.com/hashicorp/vault/pull/16549)]
+* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)]
+* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)]
+* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)]
+* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)]
+* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)]
+* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)]
+* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)]
+* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)]
+* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)]
+* website/docs: API generate-recovery-token documentation. [[GH-16213](https://github.com/hashicorp/vault/pull/16213)]
+* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)]
+* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)]
+* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)]
+* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)]
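A short sketch of the If-Modified-Since behavior referenced in the list above, assuming a PKI engine mounted at `pki`; the tune flag is the generic mount-tuning option and should be verified for your version:

```shell
# Allow the mount to see the If-Modified-Since header, then make a
# conditional CRL fetch (expect 304 when the CRL is unchanged).
vault secrets tune -passthrough-request-headers="If-Modified-Since" pki
curl --silent --output /dev/null --write-out '%{http_code}\n' \
     --header "If-Modified-Since: Tue, 01 Nov 2022 00:00:00 GMT" \
     "$VAULT_ADDR/v1/pki/crl"
```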

BUG FIXES:

- * core: Fix a rare case where a standby whose connection is entirely torn down
-   to the active node, then reconnects to the same active node, may not
-   successfully resume operation [[GH-6167](https://github.com/hashicorp/vault/pull/6167)]
- * cors: Don't duplicate headers when they're written [[GH-6207](https://github.com/hashicorp/vault/pull/6207)]
- * identity: Persist merged entities only on the primary [[GH-6075](https://github.com/hashicorp/vault/pull/6075)]
- * replication: Fix a potential race when a token is created and then used with
-   a performance standby very quickly, before an associated entity has been
-   replicated. If the entity is not found in this scenario, the request will
-   forward to the active node.
- * replication: Fix issue where recovery keys would not work on secondary
-   clusters if using a different unseal mechanism than the primary.
- * replication: Fix a "failed to register lease" error when using performance
-   standbys
- * storage/postgresql: The `Get` method will now return an Entry object with
-   the `Key` member correctly populated with the full path that was requested
-   instead of just the last path element [[GH-6044](https://github.com/hashicorp/vault/pull/6044)]
-
-## 1.0.2 (January 15th, 2019)
-
-SECURITY:
+* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)]
+* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)]
+* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)]
+* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)]
+* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)]
+* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
+* api: properly handle switching to/from unix domain socket when changing client address (see the sketch after this list) [[GH-11904](https://github.com/hashicorp/vault/pull/11904)]
+* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)]
+* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)]
+* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
+* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)]
+* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)]
+* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)]
+* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts.
+* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
+* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)]
+* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails
+* core/managed-keys (enterprise): fix panic when having `cache_disable` true
+* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases
+* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)]
+* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)]
+* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty
+* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)]
+* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)]
+* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)]
+* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
+* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)]
+* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)]
+* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)]
+* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)]
+* identity (enterprise): Fix a data race when creating an entity for a local alias.
+* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)]
+* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)]
+* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)]
+* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)]
+* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
+* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)]
+* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)]
+* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)]
+* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)]
+* replication (enterprise): Fix data race in SaveCheckpoint()
+* replication (enterprise): Fix data race in saveCheckpoint.
+* replication (enterprise): Fix possible data race during merkle diff/sync
+* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)]
+* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)]
+* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)]
+* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)]
+* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)]
+* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)]
+* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)]
+* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)]
+* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)]
+* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)]
+* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations.
+* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials.
+* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state.
+* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)]
+* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)]
+* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. [[GH-17019](https://github.com/hashicorp/vault/pull/17019)]
+* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
+* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)]
+* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)]
+* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)]
+* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)]
+* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)]
+* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)]
+* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)]
+* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)]
+* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)]
- * When creating a child token from a parent with `bound_cidrs`, the list of
-   CIDRs would not be propagated to the child token, allowing the child token
-   to be used from any address.
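A sketch of the client-address switching covered by the unix socket fix noted in the list above (GH-11904); the socket path is illustrative and assumes a Vault listener configured on it:

```shell
# Point the CLI at a unix domain socket listener, then back at TCP.
VAULT_ADDR='unix:///run/vault/vault.sock' vault status
VAULT_ADDR='https://127.0.0.1:8200' vault status
```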
+
+## 1.11.12
+### June 21, 2023

CHANGES:

- * secret/aws: Role now returns `credential_type` instead of `credential_types`
-   to match role input. If a legacy role can supply more than one credential
-   type, they will be concatenated with a `,`.
- * physical/dynamodb, autoseal/aws: Instead of Vault performing environment
-   variable handling, and overriding static (config file) values if found, we
-   use the default AWS SDK env handling behavior, which also looks for
-   deprecated values. If you were previously providing both config values and
-   environment values, please ensure the config values are unset if you want to
-   use environment values.
- * Namespaces (Enterprise): Providing "root" as the header value for
-   `X-Vault-Namespace` will perform the request on the root namespace. This is
-   equivalent to providing an empty value. Creating a namespace called "root" in
-   the root namespace is disallowed.
+* core: Bump Go version to 1.19.10.
+* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades
+  will not be allowed if the license termination time is before the build date of the binary.

FEATURES:

- * **InfluxDB Database Plugin**: Use Vault to dynamically create and manage InfluxDB
-   users
+* **Automated License Utilization Reporting**: Added automated license
+  utilization reporting, which sends minimal product-license [metering
+  data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting)
+  to HashiCorp without requiring you to manually collect and report them.
+* core (enterprise): Add background worker for automatic reporting of billing
+  information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)]

IMPROVEMENTS:

- * auth/aws: AWS EC2 authentication can optionally create entity aliases by
-   image ID [[GH-5846](https://github.com/hashicorp/vault/pull/5846)]
- * autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal
-   [[GH-5999](https://github.com/hashicorp/vault/pull/5999)]
- * physical/foundationdb: TLS support added. [[GH-5800](https://github.com/hashicorp/vault/pull/5800)]
+* api: GET ... /sys/internal/counters/activity?current_billing_period=true now
+  results in a response which contains the full billing period (see the sketch after this list) [[GH-20694](https://github.com/hashicorp/vault/pull/20694)]
+* api: `/sys/internal/counters/config` endpoint now contains read-only
+  `minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)]
+* api: `/sys/internal/counters/config` endpoint now contains read-only
+  `reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)]
+* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)]
+* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)]
+* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)]
+* core (enterprise): vault server command now allows for opt-out of automated
+  reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)]
+* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)]
+* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)]
+* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)]
+* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)]
+* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)]
+* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)]
+* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)]
+* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)]
+* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)]
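A minimal sketch of the billing-period query noted in the list above, assuming a running enterprise server and a token with access to the internal counters endpoints:

```shell
# Ask the activity endpoint for counts over the full billing period.
curl --silent --header "X-Vault-Token: $VAULT_TOKEN" \
     "$VAULT_ADDR/v1/sys/internal/counters/activity?current_billing_period=true" \
     | jq '.data.start_time, .data.end_time'
```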

BUG FIXES:

- * api: Fix a couple of places where we were using the `LIST` HTTP verb
-   (necessary to get the right method into the wrapping lookup function) and
-   not then modifying it to a `GET`; although this is officially the verb Vault
-   uses for listing and it's fully legal to use custom verbs, since many WAFs
-   and API gateways choke on anything outside of RFC-standardized verbs we fall
-   back to `GET` [[GH-6026](https://github.com/hashicorp/vault/pull/6026)]
- * autoseal/aws: Fix reading session tokens when AWS access key/secret key are
-   also provided [[GH-5965](https://github.com/hashicorp/vault/pull/5965)]
- * command/operator/rekey: Fix help output showing `-delete-backup` when it
-   should show `-backup-delete` [[GH-5981](https://github.com/hashicorp/vault/pull/5981)]
- * core: Fix bound_cidrs not being propagated to child tokens
- * replication: Correctly forward identity entity creation that originates from
-   performance standby nodes (Enterprise)
- * secret/aws: Make input `credential_type` match the output type (string, not
-   array) [[GH-5972](https://github.com/hashicorp/vault/pull/5972)]
- * secret/cubbyhole: Properly cleanup cubbyhole after token revocation [[GH-6006](https://github.com/hashicorp/vault/pull/6006)]
- * secret/pki: Fix reading certificates on windows with the file storage backend [[GH-6013](https://github.com/hashicorp/vault/pull/6013)]
- * ui (enterprise): properly display perf-standby count on the license page [[GH-5971](https://github.com/hashicorp/vault/pull/5971)]
- * ui: fix disappearing nested secrets and go to the nearest parent when deleting
-   a secret [[GH-5976](https://github.com/hashicorp/vault/pull/5976)]
- * ui: fix error where deleting an item via the context menu would fail if the
-   item name contained dots [[GH-6018](https://github.com/hashicorp/vault/pull/6018)]
- * ui: allow saving of kv secret after an errored save attempt [[GH-6022](https://github.com/hashicorp/vault/pull/6022)]
- * ui: fix display of kv-v1 secret containing a key named "keys" [[GH-6023](https://github.com/hashicorp/vault/pull/6023)]
-
-## 1.0.1 (December 14th, 2018)
+* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)]
+* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)]
+* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)]
+* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)]
+* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)]
+* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)]
+* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will
+  have its own changelog entry. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)]
+* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)]
+* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)]
+* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)]
+* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)]
+* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover
+* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs
+
+## 1.11.11
+### June 08, 2023

SECURITY:

- * Update version of Go to 1.11.3 to fix Go bug
-   https://github.com/golang/go/issues/29233 which corresponds to
-   CVE-2018-16875
- * Database user revocation: If a client has configured custom revocation
-   statements for a role with a value of `""`, that statement would be executed
-   verbatim, resulting in a lack of actual revocation but success for the
-   operation. Vault will now strip empty statements from any provided; as a
-   result if an empty statement is provided, it will behave as if no statement
-   is provided, falling back to the default revocation statement.
+* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)]

CHANGES:

- * secret/database: On role read, empty statements will be returned as empty
-   slices instead of potentially being returned as JSON null values. This makes
-   it more in line with other parts of Vault and makes it easier for statically
-   typed languages to interpret the values.
+* core: Bump Go version to 1.19.9.
+* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)]
[[GH-20826](https://github.com/hashicorp/vault/pull/20826)] IMPROVEMENTS: - * cli: Strip iTerm extra characters from password manager input [[GH-5837](https://github.com/hashicorp/vault/pull/5837)] - * command/server: Setting default kv engine to v1 in -dev mode can now be - specified via -dev-kv-v1 [[GH-5919](https://github.com/hashicorp/vault/pull/5919)] - * core: Add operationId field to OpenAPI output [[GH-5876](https://github.com/hashicorp/vault/pull/5876)] - * ui: Added ability to search for Group and Policy IDs when creating Groups - and Entities instead of typing them in manually +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] BUG FIXES: - * auth/azure: Cache azure authorizer [15] - * auth/gcp: Remove explicit project for service account in GCE authorizer [[GH-58](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/58)] - * cli: Show correct stored keys/threshold for autoseals [[GH-5910](https://github.com/hashicorp/vault/pull/5910)] - * cli: Fix backwards compatibility fallback when listing plugins [[GH-5913](https://github.com/hashicorp/vault/pull/5913)] - * core: Fix upgrades when the seal config had been created on early versions - of vault [[GH-5956](https://github.com/hashicorp/vault/pull/5956)] - * namespaces: Correctly reload the proper mount when tuning or reloading the - mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] - * secret/azure: Cache azure authorizer [19] - * secret/database: Strip empty statements on user input [[GH-5955](https://github.com/hashicorp/vault/pull/5955)] - * secret/gcpkms: Add path for retrieving the public key [[GH-5](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/5)] - * secret/pki: Fix panic that could occur during tidy operation when malformed - data was found [[GH-5931](https://github.com/hashicorp/vault/pull/5931)] - * secret/pki: Strip empty line in ca_chain output [[GH-5779](https://github.com/hashicorp/vault/pull/5779)] - * ui: Fixed a bug where the web CLI was not usable via the `fullscreen` - command - [[GH-5909](https://github.com/hashicorp/vault/pull/5909)] - * ui: Fix a bug where you couldn't write a jwt auth method config [[GH-5936](https://github.com/hashicorp/vault/pull/5936)] - -## 0.11.6 (December 14th, 2018) - -This release contains the three security fixes from 1.0.0 and 1.0.1 and the -following bug fixes from 1.0.0/1.0.1: - - * namespaces: Correctly reload the proper mount when tuning or reloading the - mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] - * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] - * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] - * secrets/kv: Fix issue where storage version would get incorrectly downgraded - [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] - -It is otherwise identical to 0.11.5. 
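As context for the `VAULT_PPROF_WRITE_TO_FILE`/SIGUSR2 improvement in the 1.11.11 notes above: the general mechanism is a signal handler that writes runtime profiles to disk on demand. A minimal Go sketch of that pattern follows; it is illustrative only (not Vault's actual implementation) and Unix-only because of SIGUSR2.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"runtime/pprof"
	"syscall"
)

func main() {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGUSR2)

	go func() {
		for range sigCh {
			// On each SIGUSR2, dump a heap profile to a fresh temp dir.
			dir, err := os.MkdirTemp("", "pprof-")
			if err != nil {
				continue
			}
			f, err := os.Create(filepath.Join(dir, "heap.prof"))
			if err != nil {
				continue
			}
			_ = pprof.Lookup("heap").WriteTo(f, 0)
			f.Close()
			fmt.Println("wrote heap profile to", dir)
		}
	}()

	// Trigger with: kill -USR2 <pid>
	fmt.Println("pid:", os.Getpid(), "- send SIGUSR2 to dump a profile")
	select {} // block forever
}
```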
- -## 1.0.0 (December 3rd, 2018) - -SECURITY: +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): fix bug where secondary grpc connections would time out when connecting to a primary host that no longer exists. +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation - * When debugging a customer incident we discovered that in the case of - malformed data from an autoseal mechanism, Vault's master key could be - logged in Vault's server log. For this to happen, the data would need to be - modified by the autoseal mechanism after being submitted to it by Vault but - prior to encryption, or after decryption, prior to it being returned to - Vault. To put it another way, it requires the data that Vault submits for - encryption to not match the data returned after decryption. It is not - sufficient for the autoseal mechanism to return an error, and it cannot be - triggered by an outside attacker changing the on-disk ciphertext as all - autoseal mechanisms use authenticated encryption. We do not believe that - this is generally a cause for concern; since it involves the autoseal - mechanism returning bad data to Vault but with no error, in a working Vault - configuration this code path should never be hit, and if hitting this issue - Vault will not be unsealing properly anyways so it will be obvious what is - happening and an immediate rekey of the master key can be performed after - service is restored. We have filed for a CVE (CVE-2018-19786) and a CVSS V3 - score of 5.2 has been assigned. +## 1.11.10 +### April 26, 2023 CHANGES: - * Tokens are now prefixed by a designation to indicate what type of token they - are. Service tokens start with `s.` and batch tokens start with `b.`. - Existing tokens will still work (they are all of service type and will be - considered as such). Prefixing allows us to be more efficient when consuming - a token, which keeps the critical path of requests faster. - * Paths within `auth/token` that allow specifying a token or accessor in the - URL have been removed. These have been deprecated since March 2016 and - undocumented, but were retained for backwards compatibility. They shouldn't - be used due to the possibility of those paths being logged, so at this point - they are simply being removed.
- * Vault will no longer accept updates when the storage key has invalid UTF-8 - character encoding [[GH-5819](https://github.com/hashicorp/vault/pull/5819)] - * Mount/Auth tuning the `options` map on backends will now upsert any provided - values, and keep any of the existing values in place if not provided. The - options map itself cannot be unset once it's set, but the keypairs within the - map can be unset if an empty value is provided, with the exception of the - `version` keypair which is handled differently for KVv2 purposes. - * Agent no longer automatically reauthenticates when new credentials are - detected. It's not strictly necessary and in some cases was causing - reauthentication much more often than intended. - * HSM Regenerate Key Support Removed: Vault no longer supports destroying and - regenerating encryption keys on an HSM; it only supports creating them. - Although this has never been a source of a customer incident, it is simply a - code path that is far too easy to trigger by mistake, especially by mistyping - `regenerate_key` instead of `generate_key`. - * Barrier Config Upgrade (Enterprise): When upgrading from Vault 0.8.x, the - seal type in the barrier config storage entry will be upgraded from - "hsm-auto" to "awskms" or "pkcs11" upon unseal if using AWSKMS or HSM seals. - If performing seal migration, the barrier config should first be upgraded - prior to starting migration. - * Go API client uses pooled HTTP client: The Go API client now uses a - connection-pooling HTTP client by default. For CLI operations this makes no - difference but it should provide significant performance benefits for those - writing custom clients using the Go API library. As before, this can be - changed to any custom HTTP client by the caller (see the sketch below). - * Builtin Secret Engines and Auth Methods are integrated deeper into the - plugin system. The plugin catalog can now override builtin plugins with - custom versions of the same name. Additionally the plugin system now - requires a plugin `type` field when configuring plugins; this can be "auth", - "database", or "secret". - -FEATURES: - - * **Auto-Unseal in Open Source**: Cloud-based auto-unseal has been migrated - from Enterprise to Open Source. We've created a migrator to allow migrating - between Shamir seals and auto unseal methods. - * **Batch Tokens**: Batch tokens trade off some features of service tokens for no - storage overhead, and in most cases can be used across performance - replication clusters. - * **Replication Speed Improvements**: We've worked hard to speed up a lot of - operations when using Vault Enterprise Replication. - * **GCP KMS Secrets Engine**: This new secrets engine provides a Transit-like - pattern to keys stored within GCP Cloud KMS. - * **AppRole support in Vault Agent Auto-Auth**: You can now use AppRole - credentials when having Agent automatically authenticate to Vault - * **OpenAPI Support**: Descriptions of mounted backends can be served directly - from Vault - * **Kubernetes Projected Service Account Tokens**: Projected Service Account - Tokens are now supported in Kubernetes auth - * **Response Wrapping in UI**: Added ability to wrap secrets and easily copy - the wrap token or secret JSON in the UI
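The "Go API client uses pooled HTTP client" change above notes that callers can still substitute any custom HTTP client. A minimal sketch with the `github.com/hashicorp/vault/api` package; the address and token below are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	cfg := api.DefaultConfig() // connection-pooling HTTP client by default

	// As before, a caller may swap in a custom client,
	// e.g. to cap total per-request time.
	cfg.HttpClient = &http.Client{Timeout: 10 * time.Second}
	cfg.Address = "https://vault.example.com:8200" // placeholder address

	client, err := api.NewClient(cfg)
	if err != nil {
		panic(err)
	}
	client.SetToken("s.example-token") // placeholder service token

	fmt.Println("configured client for", client.Address())
}
```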
IMPROVEMENTS: - - * agent: Support for configuring the location of the kubernetes service account - [[GH-5725](https://github.com/hashicorp/vault/pull/5725)] - * auth/token: New tokens are indexed in storage HMAC-SHA256 instead of SHA1 - * secret/totp: Allow @ character to be part of key name [[GH-5652](https://github.com/hashicorp/vault/pull/5652)] - * secret/consul: Add support for new policy based tokens added in Consul 1.4 - [[GH-5586](https://github.com/hashicorp/vault/pull/5586)] - * ui: Improve the token auto-renew warning, and automatically begin renewal - when a user becomes active again [[GH-5662](https://github.com/hashicorp/vault/pull/5662)] - * ui: The unbundled UI page now has some styling [[GH-5665](https://github.com/hashicorp/vault/pull/5665)] - * ui: Improved banner and popup design [[GH-5672](https://github.com/hashicorp/vault/pull/5672)] - * ui: Added token type to auth method mount config [[GH-5723](https://github.com/hashicorp/vault/pull/5723)] - * ui: Display additional wrap info when unwrapping. [[GH-5664](https://github.com/hashicorp/vault/pull/5664)] - * ui: Empty states have updated styling and link to relevant actions and - documentation [[GH-5758](https://github.com/hashicorp/vault/pull/5758)] - * ui: Allow editing of KV V2 data when a token doesn't have capabilities to - read secret metadata [[GH-5879](https://github.com/hashicorp/vault/pull/5879)] - -BUG FIXES: - - * agent: Fix auth when multiple redirects [[GH-5814](https://github.com/hashicorp/vault/pull/5814)] - * cli: Restore the `-policy-override` flag [[GH-5826](https://github.com/hashicorp/vault/pull/5826)] - * core: Fix rekey progress reset which did not happen under certain - circumstances. [[GH-5743](https://github.com/hashicorp/vault/pull/5743)] - * core: Migration from autounseal to shamir will clean up old keys [[GH-5671](https://github.com/hashicorp/vault/pull/5671)] - * identity: Update group memberships when entity is deleted [[GH-5786](https://github.com/hashicorp/vault/pull/5786)] - * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] - * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] - * secrets/azure: Fix valid roles being rejected for duplicate ids despite - having distinct scopes - [[GH-16](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/16)] - * storage/gcs: Send md5 of values to GCS to avoid potential corruption - [[GH-5804](https://github.com/hashicorp/vault/pull/5804)] - * secrets/kv: Fix issue where storage version would get incorrectly downgraded - [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] - * secrets/kv: Disallow empty paths on a `kv put` while accepting empty paths - for all other operations for backwards compatibility - [[GH-19](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/19)] - * ui: Allow for secret creation in kv v2 when cas_required=true [[GH-5823](https://github.com/hashicorp/vault/pull/5823)] - * ui: Fix dr secondary operation token generation via the ui [[GH-5818](https://github.com/hashicorp/vault/pull/5818)] - * ui: Fix the PKI context menu so that items load [[GH-5824](https://github.com/hashicorp/vault/pull/5824)] - * ui: Update DR Secondary Token generation command [[GH-5857](https://github.com/hashicorp/vault/pull/5857)] - * ui: Fix pagination bug where controls would be rendered once for each - item when viewing policies [[GH-5866](https://github.com/hashicorp/vault/pull/5866)] - * ui: Fix bug
where `sys/leases/revoke` required 'sudo' capability to show - the revoke button in the UI [[GH-5647](https://github.com/hashicorp/vault/pull/5647)] - * ui: Fix issue where certain pages wouldn't render in a namespace [[GH-5692](https://github.com/hashicorp/vault/pull/5692)] - -## 0.11.5 (November 13th, 2018) +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] BUG FIXES: - * agent: Fix issue when specifying two file sinks [[GH-5610](https://github.com/hashicorp/vault/pull/5610)] - * auth/userpass: Fix minor timing issue that could leak the presence of a - username [[GH-5614](https://github.com/hashicorp/vault/pull/5614)] - * autounseal/alicloud: Fix issue interacting with the API (Enterprise) - * autounseal/azure: Fix key version tracking (Enterprise) - * cli: Fix panic that could occur if parameters were not provided [[GH-5603](https://github.com/hashicorp/vault/pull/5603)] - * core: Fix buggy behavior if trying to remount into a namespace - * identity: Fix duplication of entity alias entity during alias transfer - between entities [[GH-5733](https://github.com/hashicorp/vault/pull/5733)] - * namespaces: Fix tuning of auth mounts in a namespace - * ui: Fix bug where editing secrets as JSON doesn't save properly [[GH-5660](https://github.com/hashicorp/vault/pull/5660)] - * ui: Fix issue where IE 11 didn't render the UI and also had a broken form - when trying to use tool/hash [[GH-5714](https://github.com/hashicorp/vault/pull/5714)] - -## 0.11.4 (October 23rd, 2018) - -CHANGES: - - * core: HA lock file is no longer copied during `operator migrate` [[GH-5503](https://github.com/hashicorp/vault/pull/5503)]. - We've categorized this as a change, but generally this can be considered - just a bug fix, and no action is needed. - -FEATURES: - - * **Transit Key Trimming**: Keys in transit secret engine can now be trimmed to - remove older unused key versions - * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy - individual secret versions in the UI - * **Azure Existing Service Principal Support**: Credentials can now be generated - against an existing service principal - -IMPROVEMENTS: - - * core: Add last WAL in leader/health output for easier debugging [[GH-5523](https://github.com/hashicorp/vault/pull/5523)] - * identity: Identity names will now be handled case insensitively by default. 
- This includes names of entities, aliases and groups [[GH-5404](https://github.com/hashicorp/vault/pull/5404)] - * secrets/aws: Added role-option max_sts_ttl to cap TTL for AWS STS - credentials [[GH-5500](https://github.com/hashicorp/vault/pull/5500)] - * secret/database: Allow Cassandra user to be non-superuser so long as it has - role creation permissions [[GH-5402](https://github.com/hashicorp/vault/pull/5402)] - * secret/radius: Allow setting the NAS Identifier value in the generated - packet [[GH-5465](https://github.com/hashicorp/vault/pull/5465)] - * secret/ssh: Allow usage of JSON arrays when setting zero addresses [[GH-5528](https://github.com/hashicorp/vault/pull/5528)] - * secret/transit: Allow trimming unused keys [[GH-5388](https://github.com/hashicorp/vault/pull/5388)] - * ui: Support KVv2 [[GH-5547](https://github.com/hashicorp/vault/pull/5547)], [[GH-5563](https://github.com/hashicorp/vault/pull/5563)] - * ui: Allow viewing and updating Vault license via the UI - * ui: Onboarding will now display your progress through the chosen tutorials - * ui: Dynamic secret backends obfuscate sensitive data by default and - visibility is toggleable - -BUG FIXES: +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] - * agent: Fix potential hang during agent shutdown [[GH-5026](https://github.com/hashicorp/vault/pull/5026)] - * auth/ldap: Fix listing of users/groups that contain slashes [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] - * core: Fix memory leak during some expiration calls [[GH-5505](https://github.com/hashicorp/vault/pull/5505)] - * core: Fix generate-root operations requiring empty `otp` to be provided - instead of an empty body [[GH-5495](https://github.com/hashicorp/vault/pull/5495)] - * identity: Remove lookup check during alias removal from entity [[GH-5524](https://github.com/hashicorp/vault/pull/5524)] - * secret/pki: Fix TTL/MaxTTL check when using `sign-verbatim` [[GH-5549](https://github.com/hashicorp/vault/pull/5549)] - * secret/pki: Fix regression in 0.11.2+ causing the NotBefore value of - generated certificates to be set to the Unix epoch if the role value was not - set, instead of using the default of 30 seconds [[GH-5481](https://github.com/hashicorp/vault/pull/5481)] - * storage/mysql: Use `varbinary` instead of `varchar` when creating HA tables - [[GH-5529](https://github.com/hashicorp/vault/pull/5529)] - -## 0.11.3 (October 8th, 2018) +## 1.11.9 +### March 29, 2023 SECURITY: - * Revocation: A regression in 0.11.2 (OSS) and 0.11.0 (Enterprise) caused - lease IDs containing periods (`.`) to not be revoked properly. Upon startup - when revocation is tried again these should now revoke successfully. - -IMPROVEMENTS: - - * auth/ldap: Listing of users and groups return absolute paths [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] - * secret/pki: OID SANs can now specify `*` to allow any value [[GH-5459](https://github.com/hashicorp/vault/pull/5459)] - -BUG FIXES: - - * auth/ldap: Fix panic if specific values were given to be escaped [[GH-5471](https://github.com/hashicorp/vault/pull/5471)] - * cli/auth: Fix panic if `vault auth` was given no parameters [[GH-5473](https://github.com/hashicorp/vault/pull/5473)] - * secret/database/mongodb: Fix panic that could occur at high load [[GH-5463](https://github.com/hashicorp/vault/pull/5463)] - * secret/pki: Fix CA generation not allowing OID SANs [[GH-5459](https://github.com/hashicorp/vault/pull/5459)] - -## 0.11.2 (October 2nd, 2018) - -CHANGES: - - * `sys/seal-status` now includes an `initialized` boolean in the output. If - Vault is not initialized, it will return a `200` with this value set `false` - instead of a `400`. - * `passthrough_request_headers` will now deny certain headers from being - provided to backends based on a global denylist. - * Token Format: Tokens are now represented as a base62 value; tokens in - namespaces will have the namespace identifier appended. (This appeared in - Enterprise in 0.11.0, but is only in OSS in 0.11.2.) 
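The removed 0.11.2 notes above mention that `sys/seal-status` gained an `initialized` boolean in its response. A small sketch of checking it through the Go client; `VAULT_ADDR` is assumed to be set in the environment:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// seal-status is an unauthenticated endpoint; no token required.
	status, err := client.Sys().SealStatus()
	if err != nil {
		panic(err)
	}
	fmt.Printf("initialized=%v sealed=%v\n", status.Initialized, status.Sealed)
}
```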
- -FEATURES: - - * **AWS Secret Engine Root Credential Rotation**: The credential used by the AWS - secret engine can now be rotated, to ensure that only Vault knows the - credentials it is using [[GH-5140](https://github.com/hashicorp/vault/pull/5140)] - * **Storage Backend Migrator**: A new `operator migrate` command allows offline - migration of data between two storage backends - * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS can now be used as a seal for - Auto Unseal and Seal Wrapping - -BUG FIXES: - - * auth/okta: Fix reading deprecated `token` parameter if a token was - previously set in the configuration [[GH-5409](https://github.com/hashicorp/vault/pull/5409)] - * core: Re-add deprecated capabilities information for now [[GH-5360](https://github.com/hashicorp/vault/pull/5360)] - * core: Fix handling of cyclic token relationships [[GH-4803](https://github.com/hashicorp/vault/pull/4803)] - * storage/mysql: Fix locking on MariaDB [[GH-5343](https://github.com/hashicorp/vault/pull/5343)] - * replication: Fix DR API when using a token [[GH-5398](https://github.com/hashicorp/vault/pull/5398)] - * identity: Ensure old group alias is removed when a new one is written [[GH-5350](https://github.com/hashicorp/vault/pull/5350)] - * storage/alicloud: Don't call uname on package init [[GH-5358](https://github.com/hashicorp/vault/pull/5358)] - * secrets/jwt: Fix issue where request context would be canceled too early - * ui: fix need to have update for aws iam creds generation [[GH-5294](https://github.com/hashicorp/vault/pull/5294)] - * ui: fix calculation of token expiry [[GH-5435](https://github.com/hashicorp/vault/pull/5435)] +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9.
[[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] IMPROVEMENTS: - - * auth/aws: The identity alias name can now be configured to be either IAM unique - ID of the IAM Principal, or ARN of the caller identity [[GH-5247](https://github.com/hashicorp/vault/pull/5247)] - * auth/cert: Add allowed_organizational_units support [[GH-5252](https://github.com/hashicorp/vault/pull/5252)] - * cli: Format TTLs for non-secret responses [[GH-5367](https://github.com/hashicorp/vault/pull/5367)] - * identity: Support operating on entities and groups by their names [[GH-5355](https://github.com/hashicorp/vault/pull/5355)] - * plugins: Add `env` parameter when registering plugins to the catalog to allow - operators to include environment variables during plugin execution. [[GH-5359](https://github.com/hashicorp/vault/pull/5359)] - * secrets/aws: WAL Rollback improvements [[GH-5202](https://github.com/hashicorp/vault/pull/5202)] - * secrets/aws: Allow specifying STS role-default TTLs [[GH-5138](https://github.com/hashicorp/vault/pull/5138)] - * secrets/pki: Add configuration support for setting NotBefore [[GH-5325](https://github.com/hashicorp/vault/pull/5325)] - * core: Support for passing the Vault token via an Authorization Bearer header [[GH-5397](https://github.com/hashicorp/vault/pull/5397)] - * replication: Reindex process now runs in the background and does not block other - vault operations - * storage/zookeeper: Enable TLS based communication with Zookeeper [[GH-4856](https://github.com/hashicorp/vault/pull/4856)] - * ui: you can now init a cluster with a seal config [[GH-5428](https://github.com/hashicorp/vault/pull/5428)] - * ui: added the option to force promote replication clusters [[GH-5438](https://github.com/hashicorp/vault/pull/5438)] - * replication: Allow promotion of a secondary when data is syncing with a "force" flag - -## 0.11.1.1 (September 17th, 2018) (Enterprise Only) +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced.
[[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior to use (see the sketch below) [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] BUG FIXES: - * agent: Fix auth handler-based wrapping of output tokens [[GH-5316](https://github.com/hashicorp/vault/pull/5316)] - * core: Properly store the replication checkpoint file if it's larger than the - storage engine's per-item limit - * core: Improve WAL deletion rate - * core: Fix token creation on performance standby nodes - * core: Fix unwrapping inside a namespace - * core: Always forward tidy operations from performance standby nodes - -IMPROVEMENTS: - - * auth/aws: add support for key/value pairs or JSON values for - `iam_request_headers` with IAM auth method [[GH-5320](https://github.com/hashicorp/vault/pull/5320)] - * auth/aws, secret/aws: Throttling errors from the AWS API will now be - reported as 502 errors by Vault, along with the original error [[GH-5270](https://github.com/hashicorp/vault/pull/5270)] - * replication: Start fetching during a sync from where it previously errored +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#190](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/190)] [[GH-19720](https://github.com/hashicorp/vault/pull/19720)] +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we receive a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] -## 0.11.1 (September 6th, 2018) +## 1.11.8 +### March 01, 2023 SECURITY: - * Random Byte Reading in Barrier: Prior to this release, Vault was not - properly checking the error code when reading random bytes for the IV for - AES operations in its cryptographic barrier. Specifically, this means that - such an IV could potentially be zero multiple times, causing nonce re-use - and weakening the security of the key. On most platforms this should never - happen because reading from kernel random sources is non-blocking and always - successful, but there may be platform-specific behavior that has not been - accounted for. (Vault has tests to check exactly this, and the tests have - never seen nonce re-use.) - -FEATURES: - - * AliCloud Agent Support: Vault Agent can now authenticate against the - AliCloud auth method. - * UI: Enable AliCloud auth method and Azure secrets engine via the UI.
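The mssql entry above (validating name identifiers prior to use) refers to allow-listing identifiers before they are interpolated into SQL, since identifiers, unlike values, cannot be bound as query parameters. The following Go sketch shows that general defense under stated assumptions; it is not Vault's actual code, and the pattern and bracket quoting are illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

// identifierRe is an illustrative allow-list: identifiers must start
// with a letter and contain only letters, digits, and underscores.
var identifierRe = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)

// quoteIdentifier rejects anything outside the allow-list before the
// name is spliced into a statement such as CREATE TABLE.
func quoteIdentifier(name string) (string, error) {
	if !identifierRe.MatchString(name) {
		return "", fmt.Errorf("invalid identifier: %q", name)
	}
	return "[" + name + "]", nil // MSSQL-style bracket quoting
}

func main() {
	for _, n := range []string{"vault_data", "x]; DROP TABLE t;--"} {
		q, err := quoteIdentifier(n)
		fmt.Printf("%q -> %q err=%v\n", n, q, err)
	}
}
```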
- -IMPROVEMENTS: - - * core: Logging level for most logs (not including secrets/auth plugins) can - now be changed on-the-fly via `SIGHUP`, reading the desired value from - Vault's config file [[GH-5280](https://github.com/hashicorp/vault/pull/5280)] - -BUG FIXES: - - * core: Ensure we use a background context when stepping down [[GH-5290](https://github.com/hashicorp/vault/pull/5290)] - * core: Properly check error return from random byte reading [[GH-5277](https://github.com/hashicorp/vault/pull/5277)] - * core: Re-add `sys/` top-route injection for now [[GH-5241](https://github.com/hashicorp/vault/pull/5241)] - * core: Policies stored in minified JSON would return an error [[GH-5229](https://github.com/hashicorp/vault/pull/5229)] - * core: Evaluate templated policies in capabilities check [[GH-5250](https://github.com/hashicorp/vault/pull/5250)] - * identity: Update MemDB with identity group alias while loading groups [[GH-5289](https://github.com/hashicorp/vault/pull/5289)] - * secrets/database: Fix nil pointer when revoking some leases [[GH-5262](https://github.com/hashicorp/vault/pull/5262)] - * secrets/pki: Fix sign-verbatim losing extra Subject attributes [[GH-5245](https://github.com/hashicorp/vault/pull/5245)] - * secrets/pki: Remove certificates from store when tidying revoked - certificates and simplify API [[GH-5231](https://github.com/hashicorp/vault/pull/5231)] - * ui: JSON editor will not coerce input to an object, and will now show an - error about Vault expecting an object [[GH-5271](https://github.com/hashicorp/vault/pull/5271)] - * ui: authentication form will now default to any methods that have been tuned - to show up for unauthenticated users [[GH-5281](https://github.com/hashicorp/vault/pull/5281)] - - -## 0.11.0 (August 28th, 2018) - -DEPRECATIONS/CHANGES: - - * Request Timeouts: A default request timeout of 90s is now enforced. This - setting can be overwritten in the config file. If you anticipate requests - taking longer than 90s this setting should be updated before upgrading. - * (NOTE: will be re-added into 0.11.1 as it broke more than anticipated. There - will be some further guidelines around when this will be removed again.) - * `sys/` Top Level Injection: For the last two years for backwards - compatibility data for various `sys/` routes has been injected into both the - Secret's Data map and into the top level of the JSON response object. - However, this has some subtle issues that pop up from time to time and is - becoming increasingly complicated to maintain, so it's finally being - removed. - * Path Fallback for List Operations: For a very long time Vault has - automatically adjusted `list` operations to always end in a `/`, as list - operations operate on prefixes, so all list operations by definition end - with `/`. This was done server-side so it affects all clients. However, this - has also led to a lot of confusion for users writing policies that assume - that the path that they use in the CLI is the path used internally. Starting - in 0.11, ACL policies gain a new fallback rule for listing: they will use a - matching path ending in `/` if available, but if not found, they will look - for the same path without a trailing `/`. This allows putting `list` - capabilities in the same path block as most other capabilities for that - path, while not providing any extra access if `list` wasn't actually - provided there. - * Performance Standbys On By Default: If your flavor/license of Vault - Enterprise supports Performance Standbys, they are on by default.
You can - disable this behavior per-node with the `disable_performance_standby` - configuration flag. - * AWS Secret Engine Roles: The AWS Secret Engine roles are now explicit about - the type of AWS credential they are generating; this reduces - ambiguity that existed previously as well as enables new features for - specific credential types. Writing role data and generating credentials - remain backwards compatible; however, the data returned when reading a - role's configuration has changed in backwards-incompatible ways. Anything - that depended on reading role data from the AWS secret engine will break - until it is updated to work with the new format. - * Token Format (Enterprise): Tokens are now represented as a base62 value; - tokens in namespaces will have the namespace identifier appended. +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above (the endpoint involved is sketched below). [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] -FEATURES: CHANGES: - - * **Namespaces (Enterprise)**: A set of features within Vault Enterprise - that allows Vault environments to support *Secure Multi-tenancy* within a - single Vault Enterprise infrastructure. Through namespaces, Vault - administrators can support tenant isolation for teams and individuals as - well as empower those individuals to self-manage their own tenant - environment. - * **Performance Standbys (Enterprise)**: Standby nodes can now service - requests that do not modify storage. This provides near-horizontal scaling - of a cluster in some workloads, and is the intra-cluster analogue of - the existing Performance Replication feature, which replicates to distinct - clusters in other datacenters, geos, etc. - * **AliCloud OSS Storage**: AliCloud OSS can now be used for Vault storage. - * **AliCloud Auth Plugin**: AliCloud's identity services can now be used to - grant access to Vault. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-auth-alicloud) for - more information. - * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that - allows generating credentials to allow access to Azure. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-secrets-azure) for - more information. - * **HA Support for MySQL Storage**: MySQL storage now supports HA. - * **ACL Templating**: ACL policies can now be templated using identity Entity, - Groups, and Metadata. - * **UI Onboarding wizards**: The Vault UI can provide contextual help and - guidance, linking out to relevant links or guides on vaultproject.io for - various workflows in Vault. +* core: Bump Go version to 1.19.6.
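For reference on the AppRole advisory above, the endpoint in question is typically invoked as below via the Go client; the role name and accessor are placeholders, and patched versions verify that the accessor actually belongs to the named role before destroying it. A minimal sketch:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// NewClient picks up VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "my-role" and the accessor value are placeholders.
	_, err = client.Logical().Write(
		"auth/approle/role/my-role/secret-id-accessor/destroy",
		map[string]interface{}{
			"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("secret ID destroyed")
}
```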
IMPROVEMENTS: - - * agent: Add `exit_after_auth` to be able to use the Agent for a single - authentication [[GH-5013](https://github.com/hashicorp/vault/pull/5013)] - * auth/approle: Add ability to set token bound CIDRs on individual Secret IDs - [[GH-5034](https://github.com/hashicorp/vault/pull/5034)] - * cli: Add support for passing parameters to `vault read` operations [[GH-5093](https://github.com/hashicorp/vault/pull/5093)] - * secrets/aws: Make credential types more explicit [[GH-4360](https://github.com/hashicorp/vault/pull/4360)] - * secrets/nomad: Support for longer token names [[GH-5117](https://github.com/hashicorp/vault/pull/5117)] - * secrets/pki: Allow disabling CRL generation [[GH-5134](https://github.com/hashicorp/vault/pull/5134)] - * storage/azure: Add support for different Azure environments [[GH-4997](https://github.com/hashicorp/vault/pull/4997)] - * storage/file: Sort keys in list responses [[GH-5141](https://github.com/hashicorp/vault/pull/5141)] - * storage/mysql: Support special characters in database and table names. +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] BUG FIXES: - * auth/jwt: Always validate `aud` claim even if `bound_audiences` isn't set - (IOW, error in this case) - * core: Prevent Go's HTTP library from interspersing logs in a different - format and/or interleaved [[GH-5135](https://github.com/hashicorp/vault/pull/5135)] - * identity: Properly populate `mount_path` and `mount_type` on group lookup - [[GH-5074](https://github.com/hashicorp/vault/pull/5074)] - * identity: Fix persisting alias metadata [[GH-5188](https://github.com/hashicorp/vault/pull/5188)] - * identity: Fix carryover issue from previously fixed race condition that - could cause Vault not to start up due to two entities referencing the same - alias. These entities are now merged. [[GH-5000](https://github.com/hashicorp/vault/pull/5000)] - * replication: Fix issue causing some pages not to flush to storage - * secrets/database: Fix inability to update custom SQL statements on - database roles. [[GH-5080](https://github.com/hashicorp/vault/pull/5080)] - * secrets/pki: Disallow putting the CA's serial on its CRL. While technically - legal, doing so inherently means the CRL can't be trusted anyways, so it's - not useful and easy to footgun. [[GH-5134](https://github.com/hashicorp/vault/pull/5134)] - * storage/gcp,spanner: Fix data races [[GH-5081](https://github.com/hashicorp/vault/pull/5081)] - -## 0.10.4 (July 25th, 2018) - -SECURITY: +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update.
[[GH-18208](https://github.com/hashicorp/vault/pull/18208)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - * Control Groups: The associated Identity entity with a request was not being - properly persisted. As a result, the same authorizer could provide more than - one authorization. - -DEPRECATIONS/CHANGES: - - * Revocations of dynamic secrets leases are now queued/asynchronous rather - than synchronous. This allows Vault to take responsibility for revocation - even if the initial attempt fails. The previous synchronous behavior can be - attained via the `-sync` CLI flag or `sync` API parameter. When in - synchronous mode, if the operation results in failure it is up to the user - to retry. - * CLI Retries: The CLI will no longer retry commands on 5xx errors. This was a - source of confusion to users as to why Vault would "hang" before returning a - 5xx error. The Go API client still defaults to two retries. - * Identity Entity Alias metadata: You can no longer manually set metadata on - entity aliases. All alias data (except the canonical entity ID it refers to) - is intended to be managed by the plugin providing the alias information, so - allowing it to be set manually didn't make sense. +## 1.11.7 +### February 6, 2023 -FEATURES: +CHANGES: - * **JWT/OIDC Auth Method**: The new `jwt` auth method accepts JWTs and either - validates signatures locally or uses OIDC Discovery to fetch the current set - of keys for signature validation. Various claims can be specified for - validation (in addition to the cryptographic signature) and a user and - optional groups claim can be used to provide Identity information. - * **FoundationDB Storage**: You can now use FoundationDB for storing Vault - data. - * **UI Control Group Workflow (enterprise)**: The UI will now detect control - group responses and provides a workflow to view the status of the request - and to authorize requests. - * **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically - authenticate for you across a variety of authentication methods, provide - tokens to clients, and keep the tokens renewed, reauthenticating as - necessary. +* core: Bump Go version to 1.19.4. IMPROVEMENTS: - * auth/azure: Add support for virtual machine scale sets - * auth/gcp: Support multiple bindings for region, zone, and instance group - * cli: Add subcommands for interacting with the plugin catalog [[GH-4911](https://github.com/hashicorp/vault/pull/4911)] - * cli: Add a `-description` flag to secrets and auth tune subcommands to allow - updating an existing secret engine's or auth method's description. This - change also allows the description to be unset by providing an empty string. 
- * core: Add config flag to disable non-printable character check [[GH-4917](https://github.com/hashicorp/vault/pull/4917)] - * core: A `max_request_size` parameter can now be set per-listener to adjust - the maximum allowed size per request [[GH-4824](https://github.com/hashicorp/vault/pull/4824)] - * core: Add control group request endpoint to default policy [[GH-4904](https://github.com/hashicorp/vault/pull/4904)] - * identity: Identity metadata is now passed through to plugins [[GH-4967](https://github.com/hashicorp/vault/pull/4967)] - * replication: Add additional safety checks and logging when replication is - in a bad state - * secrets/kv: Add support for using `-field=data` to KVv2 when using `vault - kv` [[GH-4895](https://github.com/hashicorp/vault/pull/4895)] - * secrets/pki: Add the ability to tidy revoked but unexpired certificates - [[GH-4916](https://github.com/hashicorp/vault/pull/4916)] - * secrets/ssh: Allow Vault to work with single-argument SSH flags [[GH-4825](https://github.com/hashicorp/vault/pull/4825)] - * secrets/ssh: SSH executable path can now be configured in the CLI [[GH-4937](https://github.com/hashicorp/vault/pull/4937)] - * storage/swift: Add additional configuration options [[GH-4901](https://github.com/hashicorp/vault/pull/4901)] - * ui: Choose which auth methods to show to unauthenticated users via - `listing_visibility` in the auth method edit forms [[GH-4854](https://github.com/hashicorp/vault/pull/4854)] - * ui: Authenticate users automatically by passing a wrapped token to the UI via - the new `wrapped_token` query parameter [[GH-4854](https://github.com/hashicorp/vault/pull/4854)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs.
+* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] BUG FIXES: - * api: Fix response body being cleared too early [[GH-4987](https://github.com/hashicorp/vault/pull/4987)] - * auth/approle: Fix issue with tidy endpoint that would unnecessarily remove - secret accessors [[GH-4981](https://github.com/hashicorp/vault/pull/4981)] - * auth/aws: Fix updating `max_retries` [[GH-4980](https://github.com/hashicorp/vault/pull/4980)] - * auth/kubernetes: Trim trailing whitespace when sending JWT - * cli: Fix parsing of environment variables for integer flags [[GH-4925](https://github.com/hashicorp/vault/pull/4925)] - * core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is - sealed [[GH-4874](https://github.com/hashicorp/vault/pull/4874)] - * core: Fix issue releasing the leader lock in some circumstances [[GH-4915](https://github.com/hashicorp/vault/pull/4915)] - * core: Fix a panic that could happen if the server was shut down while still - starting up - * core: Fix deadlock that would occur if a leadership loss occurs at the same - time as a seal operation [[GH-4932](https://github.com/hashicorp/vault/pull/4932)] - * core: Fix issue with auth mounts failing to renew tokens due to policies - changing [[GH-4960](https://github.com/hashicorp/vault/pull/4960)] - * auth/radius: Fix issue where some radius logins were being canceled too early - [[GH-4941](https://github.com/hashicorp/vault/pull/4941)] - * core: Fix accidental seal of vault if we lose leadership during startup - [[GH-4924](https://github.com/hashicorp/vault/pull/4924)] - * core: Fix standby not being able to forward requests larger than 4MB - [[GH-4844](https://github.com/hashicorp/vault/pull/4844)] - * core: Avoid panic while processing group memberships [[GH-4841](https://github.com/hashicorp/vault/pull/4841)] - * identity: Fix a race condition creating aliases [[GH-4965](https://github.com/hashicorp/vault/pull/4965)] - * plugins: Fix being unable to send very large payloads to or from plugins - [[GH-4958](https://github.com/hashicorp/vault/pull/4958)] - * physical/azure: Long list responses would sometimes be truncated [[GH-4983](https://github.com/hashicorp/vault/pull/4983)] - * replication: Allow replication status requests to be processed while in - merkle sync - * replication: Ensure merkle reindex flushes all changes to storage immediately - * replication: Fix a case where a network interruption could cause a secondary - to be unable to reconnect to a primary - * secrets/pki: Fix permitted DNS domains performing improper validation - [[GH-4863](https://github.com/hashicorp/vault/pull/4863)] - * secrets/database: Fix panic during DB creds revocation [[GH-4846](https://github.com/hashicorp/vault/pull/4846)] - * ui: Fix usage of cubbyhole backend in the UI [[GH-4851](https://github.com/hashicorp/vault/pull/4851)] - * ui: Fix toggle state when a secret is JSON-formatted [[GH-4913](https://github.com/hashicorp/vault/pull/4913)] - * ui: Fix coercion of falsey values to empty string when editing secrets as - JSON [[GH-4977](https://github.com/hashicorp/vault/pull/4977)] - -## 0.10.3 (June 20th,
2018) - -DEPRECATIONS/CHANGES: - - * In the audit log and in client responses, policies are now split into three - parameters: policies that came only from tokens, policies that came only - from Identity, and the combined set. Any previous location of policies via - the API now contains the full, combined set. - * When a token is tied to an Identity entity and the entity is deleted, the - token will no longer be usable, regardless of the validity of the token - itself. - * When authentication succeeds but no policies were defined for that specific - user, most auth methods would allow a token to be generated but a few would - reject the authentication, namely `ldap`, `okta`, and `radius`. Since the - `default` policy is added by Vault's core, this would incorrectly reject - valid authentications before they would in fact be granted policies. This - inconsistency has been addressed; valid authentications for these methods - now succeed even if no policy was specifically defined in that method for - that user. - -FEATURES: +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state.
+* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - * Root Rotation for Active Directory: You can now command Vault to rotate the - configured root credentials used in the AD secrets engine, to ensure that - only Vault knows the credentials it's using. - * URI SANs in PKI: You can now configure URI Subject Alternate Names in the - `pki` backend. Roles can limit which SANs are allowed via globbing. - * `kv rollback` Command: You can now use `vault kv rollback` to roll a KVv2 - path back to a previous non-deleted/non-destroyed version. The previous - version becomes the next/newest version for the path. - * Token Bound CIDRs in AppRole: You can now add CIDRs to which a token - generated from AppRole will be bound. +## 1.11.6 +### November 30, 2022 IMPROVEMENTS: - * approle: Return 404 instead of 202 on invalid role names during POST - operations [[GH-4778](https://github.com/hashicorp/vault/pull/4778)] - * core: Add idle and initial header read/TLS handshake timeouts to connections - to ensure server resources are cleaned up [[GH-4760](https://github.com/hashicorp/vault/pull/4760)] - * core: Report policies in token, identity, and full sets [[GH-4747](https://github.com/hashicorp/vault/pull/4747)] - * secrets/databases: Add `create`/`update` distinction for connection - configurations [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] - * secrets/databases: Add `create`/`update` distinction for role configurations - [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] - * secrets/databases: Add best-effort revocation logic for use when a role has - been deleted [[GH-4782](https://github.com/hashicorp/vault/pull/4782)] - * secrets/kv: Add `kv rollback` [[GH-4774](https://github.com/hashicorp/vault/pull/4774)] - * secrets/pki: Add URI SANs support [[GH-4675](https://github.com/hashicorp/vault/pull/4675)] - * secrets/ssh: Allow standard SSH command arguments to be used, without - requiring username@hostname syntax [[GH-4710](https://github.com/hashicorp/vault/pull/4710)] - * storage/consul: Add context support so that requests are cancelable - [[GH-4739](https://github.com/hashicorp/vault/pull/4739)] - * sys: Added `hidden` option to `listing_visibility` field on `sys/mounts` - API [[GH-4827](https://github.com/hashicorp/vault/pull/4827)] - * ui: Secret values are obfuscated by default and visibility is toggleable [[GH-4422](https://github.com/hashicorp/vault/pull/4422)] +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. 
[[GH-17824](https://github.com/hashicorp/vault/pull/17824)] BUG FIXES: - * auth/approle: Fix panic due to metadata being nil [[GH-4719](https://github.com/hashicorp/vault/pull/4719)] - * auth/aws: Fix delete path for tidy operations [[GH-4799](https://github.com/hashicorp/vault/pull/4799)] - * core: Optimizations to remove some speed regressions due to the - security-related changes in 0.10.2 - * storage/dynamodb: Fix errors seen when reading existing DynamoDB data [[GH-4721](https://github.com/hashicorp/vault/pull/4721)] - * secrets/database: Fix default MySQL root rotation statement [[GH-4748](https://github.com/hashicorp/vault/pull/4748)] - * secrets/gcp: Fix renewal for GCP account keys - * secrets/kv: Fix writing to the root of a KVv2 mount from `vault kv` commands - incorrectly operating on a root+mount path instead of being an error - [[GH-4726](https://github.com/hashicorp/vault/pull/4726)] - * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC - keys, fixing lookup on some Thales devices - * replication: Fix issue enabling replication when a non-auth mount and auth - mount have the same name - * auth/kubernetes: Fix issue verifying ECDSA signed JWTs - * ui: add missing edit mode for auth method configs [[GH-4770](https://github.com/hashicorp/vault/pull/4770)] - -## 0.10.2 (June 6th, 2018) - -SECURITY: - - * Tokens: A race condition was identified that could occur if a token's - lease expired while Vault was not running. In this case, when Vault came - back online, sometimes it would properly revoke the lease but other times it - would not, leading to a Vault token that no longer had an expiration and had - essentially unlimited lifetime. This race was per-token, not all-or-nothing - for all tokens that may have expired during Vault's downtime. We have fixed - the behavior and put extra checks in place to help prevent any similar - future issues. In addition, the logic we have put in place ensures that such - lease-less tokens can no longer be used (unless they are root tokens that - never had an expiration to begin with). - * Convergent Encryption: The version 2 algorithm used in `transit`'s - convergent encryption feature is susceptible to offline - plaintext-confirmation attacks. As a result, we are introducing a version 3 - algorithm that mitigates this. If you are currently using convergent - encryption, we recommend upgrading, rotating your encryption key (the new - key version will use the new algorithm), and rewrapping your data (the - `rewrap` endpoint can be used to allow a relatively non-privileged user to - perform the rewrapping while never divulging the plaintext). - * AppRole case-sensitive role name secret-id leaking: When using a mixed-case - role name via AppRole, deleting a secret-id via accessor or other operations - could end up leaving the secret-id behind and valid but without an accessor. - This has now been fixed, and we have put checks in place to prevent these - secret-ids from being used. - -DEPRECATIONS/CHANGES: - - * PKI duration return types: The PKI backend now returns durations (e.g. when - reading a role) as an integer number of seconds instead of a Go-style - string, in line with how the rest of Vault's API returns durations. 
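The "PKI duration return types" change in the removed 0.10.2 notes above means clients decode role durations as integer seconds rather than Go-style strings like `768h`. A small Go decoding sketch; the JSON payload is fabricated for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Illustrative response fragment: durations as integer seconds.
	raw := []byte(`{"ttl": 2764800, "max_ttl": 8640000}`)

	var role struct {
		TTL    json.Number `json:"ttl"`
		MaxTTL json.Number `json:"max_ttl"`
	}
	if err := json.Unmarshal(raw, &role); err != nil {
		panic(err)
	}

	secs, _ := role.TTL.Int64()
	fmt.Println("ttl:", time.Duration(secs)*time.Second) // 768h0m0s
}
```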
- -FEATURES: +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by the `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start-up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - * Active Directory Secrets Engine: A new `ad` secrets engine has been created - which allows Vault to rotate and provide credentials for configured AD - accounts. - * Rekey Verification: Rekey operations can now require verification. This - turns on a two-phase process where the existing key shares authorize - generating a new master key, and a threshold of the new, returned key shares - must be provided to verify that they have been successfully received in - order for the actual master key to be rotated. - * CIDR restrictions for `cert`, `userpass`, and `kubernetes` auth methods: - You can now limit authentication to specific CIDRs; these will also be - encoded in resultant tokens to limit their use (see the sketch below). - * Vault UI Browser CLI: The UI now supports usage of read/write/list/delete - commands in a CLI that can be accessed from the nav bar. Complex inputs such - as JSON files are not currently supported. This surfaces features otherwise - unsupported in Vault's UI. - * Azure Key Vault Auto Unseal/Seal Wrap Support (Enterprise): Azure Key Vault - can now be used as a supported seal for Auto Unseal and Seal Wrapping.
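For the CIDR restrictions above, a minimal sketch against the `userpass` method (the user name and CIDR values are hypothetical, and the parameter name `bound_cidrs` is an assumption based on the auth method parameters of this era):

```
# Only allow this user to authenticate from the given networks; the CIDRs are
# also encoded into the resulting token to restrict where it can be used.
vault write auth/userpass/users/deploy \
    password="example-password" \
    bound_cidrs="10.0.0.0/24,192.168.1.0/24"
```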
+## 1.11.5 +### November 2, 2022 IMPROVEMENTS: - * api: Close renewer's doneCh when the renewer is stopped, so that programs - expecting a final value through doneCh behave correctly [[GH-4472](https://github.com/hashicorp/vault/pull/4472)] - * auth/cert: Break out `allowed_names` into component parts and add - `allowed_uri_sans` [[GH-4231](https://github.com/hashicorp/vault/pull/4231)] - * auth/ldap: Obfuscate error messages pre-bind for greater security [[GH-4700](https://github.com/hashicorp/vault/pull/4700)] - * cli: `vault login` now supports a `-no-print` flag to suppress printing - token information but still allow storing into the token helper [[GH-4454](https://github.com/hashicorp/vault/pull/4454)] - * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and - CKM_RSA_PKCS_OAEP mechanisms - * core/pkcs11 (enterprise): HSM slots can now be selected by token label - instead of just slot number - * core/token: Optimize token revocation by removing unnecessary list call - against the storage backend when calling revoke-orphan on tokens [[GH-4465](https://github.com/hashicorp/vault/pull/4465)] - * core/token: Refactor token revocation logic to not block on the call when - underlying leases are pending revocation by moving the expiration logic to - the expiration manager [[GH-4512](https://github.com/hashicorp/vault/pull/4512)] - * expiration: Allow revoke-prefix and revoke-force to work on single leases as - well as prefixes [[GH-4450](https://github.com/hashicorp/vault/pull/4450)] - * identity: Return parent group info when reading a group [[GH-4648](https://github.com/hashicorp/vault/pull/4648)] - * identity: Provide more contextual key information when listing entities, - groups, and aliases - * identity: Passthrough EntityID to backends [[GH-4663](https://github.com/hashicorp/vault/pull/4663)] - * identity: Adds ability to request entity information through system view - [[GH-4681](https://github.com/hashicorp/vault/pull/4681)] - * secret/pki: Add custom extended key usages [[GH-4667](https://github.com/hashicorp/vault/pull/4667)] - * secret/pki: Add custom PKIX serial numbers [[GH-4694](https://github.com/hashicorp/vault/pull/4694)] - * secret/ssh: Use hostname instead of IP in OTP mode, similar to CA mode - [[GH-4673](https://github.com/hashicorp/vault/pull/4673)] - * storage/file: Attempt in some error conditions to do more cleanup [[GH-4684](https://github.com/hashicorp/vault/pull/4684)] - * ui: wrapping lookup now displays the path [[GH-4644](https://github.com/hashicorp/vault/pull/4644)] - * ui: Identity interface now has more inline actions to make editing and adding - aliases to an entity or group easier [[GH-4502](https://github.com/hashicorp/vault/pull/4502)] - * ui: Identity interface now lists groups by name [[GH-4655](https://github.com/hashicorp/vault/pull/4655)] - * ui: Permission denied errors still render the sidebar in the Access section - [[GH-4658](https://github.com/hashicorp/vault/pull/4658)] - * replication: Improve performance of index page flushes and WAL garbage - collection - -BUG FIXES: - - * auth/approle: Make invalid role_id a 400 error instead of 500 [[GH-4470](https://github.com/hashicorp/vault/pull/4470)] - * auth/cert: Fix Identity alias using serial number instead of common name - [[GH-4475](https://github.com/hashicorp/vault/pull/4475)] - * cli: Fix panic running `vault token capabilities` with multiple paths - [[GH-4552](https://github.com/hashicorp/vault/pull/4552)] - * core: When using the `use_always` option with PROXY protocol support, do not - require
`authorized_addrs` to be set [[GH-4065](https://github.com/hashicorp/vault/pull/4065)] - * core: Fix panic when certain combinations of policy paths and allowed/denied - parameters were used [[GH-4582](https://github.com/hashicorp/vault/pull/4582)] - * secret/gcp: Make `bound_region` able to use short names - * secret/kv: Fix response wrapping for KV v2 [[GH-4511](https://github.com/hashicorp/vault/pull/4511)] - * secret/kv: Fix address flag not being honored correctly [[GH-4617](https://github.com/hashicorp/vault/pull/4617)] - * secret/pki: Fix `safety_buffer` for tidy being allowed to be negative, - clearing all certs [[GH-4641](https://github.com/hashicorp/vault/pull/4641)] - * secret/pki: Fix `key_type` not being allowed to be set to `any` [[GH-4595](https://github.com/hashicorp/vault/pull/4595)] - * secret/pki: Fix path length parameter being ignored when using - `use_csr_values` and signing an intermediate CA cert [[GH-4459](https://github.com/hashicorp/vault/pull/4459)] - * secret/ssh: Only append UserKnownHostsFile to args when configured with a - value [[GH-4674](https://github.com/hashicorp/vault/pull/4674)] - * storage/dynamodb: Fix listing when one child is left within a nested path - [[GH-4570](https://github.com/hashicorp/vault/pull/4570)] - * storage/gcs: Fix swallowing an error on connection close [[GH-4691](https://github.com/hashicorp/vault/pull/4691)] - * ui: Fix HMAC algorithm in transit [[GH-4604](https://github.com/hashicorp/vault/pull/4604)] - * ui: Fix unwrap of auth responses via the UI's unwrap tool [[GH-4611](https://github.com/hashicorp/vault/pull/4611)] - * ui (enterprise): Fix parsing of version string that blocked some users from seeing - enterprise-specific pages in the UI [[GH-4547](https://github.com/hashicorp/vault/pull/4547)] - * ui: Fix incorrect capabilities path check when viewing policies [[GH-4566](https://github.com/hashicorp/vault/pull/4566)] - * replication: Fix error while running plugins on a newly created replication - secondary - * replication: Fix issue with token store lookups after a secondary's mount table - is invalidated. - * replication: Improve startup time when a large merkle index is in use. - * replication: Fix panic when storage becomes unreachable during unseal. - -## 0.10.1/0.9.7 (April 25th, 2018) - -The following two items are in both 0.9.7 and 0.10.1. They only affect -Enterprise, and as such 0.9.7 is an Enterprise-only release: - -SECURITY: - - * EGPs: A regression affecting 0.9.6 and 0.10.0 causes EGPs to not be applied - correctly if an EGP is updated in a running Vault after initial write or - after it is loaded on unseal. This has been fixed. +* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] BUG FIXES: - * Fixed an upgrade issue affecting performance secondaries when migrating from - a version that did not include Identity to one that did. - -All other content in this release is for 0.10.1 only. - -DEPRECATIONS/CHANGES: - - * `vault kv` and Vault versions: In 0.10.1 some issues with `vault kv` against - v1 K/V engine mounts are fixed. However, using 0.10.1 for both the server - and CLI versions is required. - * Mount information visibility: Users that have access to any path within a - mount can now see information about that mount, such as its type and - options, via some API calls. 
- * Identity and Local Mounts: Local mounts would allow creating Identity - entities but these could not be used successfully (even locally) - in replicated scenarios. We have now disallowed entities and groups from - being created for local mounts in the first place. - -FEATURES: - - * X-Forwarded-For support: `X-Forwarded-For` headers can now be used to set the - client IP seen by Vault. See the [TCP listener configuration - page](https://www.vaultproject.io/docs/configuration/listener/tcp.html) for - details. - * CIDR IP Binding for Tokens: Tokens now support being bound to specific - CIDR(s) for usage. Currently this is implemented in Token Roles; usage can be - expanded to other authentication backends over time. - * `vault kv patch` command: A new `kv patch` helper command allows - modifying only some values in existing data at a K/V path, using - check-and-set to ensure that the modification happens safely. - * AppRole Local Secret IDs: Roles can now be configured to generate secret IDs - local to the cluster. This enables performance secondaries to generate and - consume secret IDs without contacting the primary. - * AES-GCM Support for PKCS#11 [BETA] (Enterprise): For supporting HSMs, - AES-GCM can now be used in lieu of AES-CBC/HMAC-SHA256. This has currently - only been fully tested on AWS CloudHSM. - * Auto Unseal/Seal Wrap Key Rotation Support (Enterprise): Auto Unseal - mechanisms, including PKCS#11 HSMs, now support rotation of encryption keys, - and migration between key and encryption types, such as from AES-CBC to - AES-GCM, can be performed at the same time (where supported). - -IMPROVEMENTS: - - * auth/approle: Support for cluster local secret IDs. This enables secondaries - to generate secret IDs without contacting the primary [[GH-4427](https://github.com/hashicorp/vault/pull/4427)] - * auth/token: Add to the token lookup response the policies inherited due to - identity associations [[GH-4366](https://github.com/hashicorp/vault/pull/4366)] - * auth/token: Add CIDR binding to token roles [[GH-815](https://github.com/hashicorp/vault/pull/815)] - * cli: Add `vault kv patch` [[GH-4432](https://github.com/hashicorp/vault/pull/4432)] - * core: Add X-Forwarded-For support [[GH-4380](https://github.com/hashicorp/vault/pull/4380)] - * core: Add token CIDR-binding support [[GH-815](https://github.com/hashicorp/vault/pull/815)] - * identity: Add the ability to disable an entity. Disabling an entity does not - revoke associated tokens, but while the entity is disabled they cannot be - used.
[[GH-4353](https://github.com/hashicorp/vault/pull/4353)] - * physical/consul: Allow tuning of session TTL and lock wait time [[GH-4352](https://github.com/hashicorp/vault/pull/4352)] - * replication: Dynamically adjust WAL cleanup over a period of time based on - the rate of writes committed - * secret/ssh: Update dynamic key install script to use shell locking to avoid - concurrent modifications [[GH-4358](https://github.com/hashicorp/vault/pull/4358)] - * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of - engines will show you the ones you implicitly have access to (because you have - access to secrets in those engines) [[GH-4439](https://github.com/hashicorp/vault/pull/4439)] - -BUG FIXES: +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui/keymgmt: Sets the defaultValue for type when creating a key.
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - * cli: Fix `vault kv` backwards compatibility with KV v1 engine mounts - [[GH-4430](https://github.com/hashicorp/vault/pull/4430)] - * identity: Persist entity memberships in external identity groups across - mounts [[GH-4365](https://github.com/hashicorp/vault/pull/4365)] - * identity: Fix error preventing authentication using local mounts on - performance secondary replication clusters [[GH-4407](https://github.com/hashicorp/vault/pull/4407)] - * replication: Fix issue causing secondaries to not connect properly to a - pre-0.10 primary until the primary was upgraded - * secret/gcp: Fix panic on rollback when a roleset wasn't created properly - [[GH-4344](https://github.com/hashicorp/vault/pull/4344)] - * secret/gcp: Fix panic on renewal - * ui: Fix IE11 form submissions in a few parts of the application [[GH-4378](https://github.com/hashicorp/vault/pull/4378)] - * ui: Fix IE file saving on policy pages and init screens [[GH-4376](https://github.com/hashicorp/vault/pull/4376)] - * ui: Fixed an issue where the AWS secret backend would show the wrong menu - [[GH-4371](https://github.com/hashicorp/vault/pull/4371)] - * ui: Fixed an issue where policies with commas would not render in the - interface properly [[GH-4398](https://github.com/hashicorp/vault/pull/4398)] - * ui: Corrected the saving of mount tune TTLs for auth methods [[GH-4431](https://github.com/hashicorp/vault/pull/4431)] - * ui: Credentials generation no longer checks capabilities before making - API calls. This should fix needing "update" capabilities to read IAM - credentials in the AWS secrets engine [[GH-4446](https://github.com/hashicorp/vault/pull/4446)] - -## 0.10.0 (April 10th, 2018) +## 1.11.4 +### September 30, 2022 SECURITY: - * Log sanitization for Combined Database Secret Engine: In certain failure - scenarios with incorrectly formatted connection URLs, the raw connection - errors were being returned to the user with the configured database - credentials. Errors are now sanitized before being returned to the user. - -DEPRECATIONS/CHANGES: - - * Database plugin compatibility: The database plugin interface was enhanced to - support some additional functionality related to root credential rotation - and supporting templated URL strings. The changes were made in a - backwards-compatible way and all builtin plugins were updated with the new - features. Custom plugins not built into Vault will need to be upgraded to - support templated URL strings and root rotation. Additionally, the - Initialize method was deprecated in favor of a new Init method that supports - persisting configuration modifications that occur in the plugin back to the - primary data store. - * Removal of returned secret information: For a long time Vault has returned - configuration given to various secret engines and auth methods with secret - values (such as secret API keys or passwords) still intact, and with a - warning to the user on write that anyone with read access could see the - secret. This was mostly done to make it easy for tools like Terraform to - judge whether state had drifted. However, it also feels quite un-Vault-y to - do this and we've never felt very comfortable doing so.
In 0.10 we have gone - through and removed this behavior from the various backends; fields which - contained secret values are simply no longer returned on read. We are - working with the Terraform team to make changes to their provider to - accommodate this as best as possible, and users of other tools may have to - make adjustments, but in the end we felt that the ends did not justify the - means and we needed to prioritize security over operational convenience. - * LDAP auth method case sensitivity: We now treat usernames and groups - configured locally for policy assignment in a case insensitive fashion by - default. Existing configurations will continue to work as they do now; - however, the next time a configuration is written `case_sensitive_names` - will need to be explicitly set to `true`. - * TTL handling within core: All lease TTL handling has been centralized within - the core of Vault to ensure consistency across all backends. Since this was - previously delegated to individual backends, there may be some slight - differences in TTLs generated from some backends. - * Removal of default `secret/` mount: In 0.12 we will stop mounting `secret/` - by default at initialization time (it will still be available in `dev` - mode). - -FEATURES: - - * OSS UI: The Vault UI is now fully open-source. Similarly to the CLI, some - features are only available with a supporting version of Vault, but the code - base is entirely open. - * Versioned K/V: The `kv` backend has been completely revamped, featuring - flexible versioning of values, check-and-set protections, and more. A new - `vault kv` subcommand allows friendly interactions with it. Existing mounts - of the `kv` backend can be upgraded to the new versioned mode (downgrades - are not currently supported). The old "passthrough" mode is still the - default for new mounts; versioning can be turned on by setting the - `-version=2` flag for the `vault secrets enable` command. - * Database Root Credential Rotation: Database configurations can now rotate - their own configured admin/root credentials, allowing configured credentials - for a database connection to be rotated immediately after sending them into - Vault, invalidating the old credentials and ensuring only Vault knows the - actual valid values. - * Azure Authentication Plugin: There is now a plugin (pulled in to Vault) that - allows authenticating Azure machines to Vault using Azure's Managed Service - Identity credentials. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-auth-azure) for more - information. - * GCP Secrets Plugin: There is now a plugin (pulled in to Vault) that allows - generating secrets to allow access to GCP. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-secrets-gcp) for more - information. - * Selective Audit HMACing of Request and Response Data Keys: HMACing in audit - logs can be turned off for specific keys in the request input map and - response `data` map on a per-mount basis. - * Passthrough Request Headers: Request headers can now be selectively passed - through to backends on a per-mount basis. This is useful in various cases - when plugins are interacting with external services. - * HA for Google Cloud Storage: The GCS storage type now supports HA. - * UI support for identity: Add and edit entities, groups, and their associated - aliases. - * UI auth method support: Enable, disable, and configure all of the built-in - authentication methods. - * UI (Enterprise): View and edit Sentinel policies. 
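As a quick illustration of the versioned K/V workflow described above (the `kv/app` path and values are hypothetical):

```
# Enable a new mount in versioned mode, or upgrade an existing mount in place
# (downgrades are not currently supported).
vault secrets enable -version=2 kv
vault kv enable-versioning secret/

# Each write creates a new version; older versions remain readable.
vault kv put kv/app api_key="example"
vault kv put kv/app api_key="rotated"
vault kv get -version=1 kv/app
```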
- -IMPROVEMENTS: - - * core: Centralize TTL generation for leases in core [[GH-4230](https://github.com/hashicorp/vault/pull/4230)] - * identity: API to update group-alias by ID [[GH-4237](https://github.com/hashicorp/vault/pull/4237)] - * secret/cassandra: Update Cassandra storage delete function to not use batch - operations [[GH-4054](https://github.com/hashicorp/vault/pull/4054)] - * storage/mysql: Allow setting max idle connections and connection lifetime - [[GH-4211](https://github.com/hashicorp/vault/pull/4211)] - * storage/gcs: Add HA support [[GH-4226](https://github.com/hashicorp/vault/pull/4226)] - * ui: Add Nomad to the list of available secret engines - * ui: Adds ability to set static headers to be returned by the UI - -BUG FIXES: - - * api: Fix retries not working [[GH-4322](https://github.com/hashicorp/vault/pull/4322)] - * auth/gcp: Invalidate clients on config change - * auth/token: Revoke-orphan and tidy operations now correctly clean up the - parent prefix entry in the underlying storage backend. These operations also - mark corresponding child tokens as orphans by removing the parent/secondary - index from the entries. [[GH-4193](https://github.com/hashicorp/vault/pull/4193)] - * command: Re-add `-mfa` flag and migrate to OSS binary [[GH-4223](https://github.com/hashicorp/vault/pull/4223)] - * core: Fix issue occurring from mounting two auth backends with the same path - with one mount having `auth/` in front [[GH-4206](https://github.com/hashicorp/vault/pull/4206)] - * mfa: Invalidation of MFA configurations (Enterprise) - * replication: Fix a panic on some non-64-bit platforms - * replication: Fix invalidation of policies on performance secondaries - * secret/pki: When tidying, if a value is unexpectedly nil, delete it and move - on [[GH-4214](https://github.com/hashicorp/vault/pull/4214)] - * storage/s3: Fix panic if S3 returns no Content-Length header [[GH-4222](https://github.com/hashicorp/vault/pull/4222)] - * ui: Fixed an issue where the UI was checking incorrect paths when operating - on transit keys. Capabilities are now checked when attempting to encrypt / - decrypt, etc. - * ui: Fixed IE 11 layout issues and JS errors that would stop the application - from running. - * ui: Fixed the link that gets rendered when a user doesn't have permissions - to view the root of a secret engine. The link now sends them back to the list - of secret engines. - * replication: Fix issue with DR secondaries when using mount-specified local - paths. - * cli: Fix an issue where generating a DR operation token would not output the - token [[GH-4328](https://github.com/hashicorp/vault/pull/4328)] - -## 0.9.6 (March 20th, 2018) - -DEPRECATIONS/CHANGES: - - * The AWS authentication backend now allows binds for inputs as either a - comma-delimited string or a string array. However, to keep consistency with - input and output, when reading a role the binds will now be returned as - string arrays rather than strings. - * In order to prefix-match IAM role and instance profile ARNs in AWS auth - backend, you now must explicitly opt in by adding a `*` to the end of the - ARN. Existing configurations will be upgraded automatically, but when - writing a new role configuration the updated behavior will be used. - -FEATURES: - - * Replication Activation Enhancements: When activating a replication - secondary, a public key can now be fetched first from the target cluster. - This public key can be provided to the primary when requesting the - activation token.
If provided, the public key will be used to perform a - Diffie-Hellman key exchange resulting in a shared key that encrypts the - contents of the activation token. The purpose is to protect against - accidental disclosure of the contents of the token if unwrapped by the wrong - party, given that the contents of the token are highly sensitive. If - accidentally unwrapped, the contents of the token are not usable by the - unwrapping party. It is important to note that just as a malicious operator - could unwrap the contents of the token, a malicious operator can pretend to - be a secondary and complete the Diffie-Hellman exchange on their own; this - feature provides defense in depth but still requires due diligence around - replication activation, including multiple eyes on the commands/tokens and - proper auditing. - -IMPROVEMENTS: - - * api: Update renewer grace period logic. It is no longer static, but rather - dynamically calculated based on the current lease duration after each - renewal. [[GH-4090](https://github.com/hashicorp/vault/pull/4090)] - * auth/approle: Allow array input for bound_cidr_list [[GH-4078](https://github.com/hashicorp/vault/pull/4078)] - * auth/aws: Allow using lists in role bind parameters [[GH-3907](https://github.com/hashicorp/vault/pull/3907)] - * auth/aws: Allow binding by EC2 instance IDs [[GH-3816](https://github.com/hashicorp/vault/pull/3816)] - * auth/aws: Allow non-prefix-matched IAM role and instance profile ARNs - [[GH-4071](https://github.com/hashicorp/vault/pull/4071)] - * auth/ldap: Set a very large size limit on queries [[GH-4169](https://github.com/hashicorp/vault/pull/4169)] - * core: Log info notifications of revoked leases for all leases/reasons, not - just expirations [[GH-4164](https://github.com/hashicorp/vault/pull/4164)] - * physical/couchdb: Removed limit on the listing of items [[GH-4149](https://github.com/hashicorp/vault/pull/4149)] - * secret/pki: Support certificate policies [[GH-4125](https://github.com/hashicorp/vault/pull/4125)] - * secret/pki: Add ability to have CA:true encoded into intermediate CSRs, to - improve compatibility with some ADFS scenarios [[GH-3883](https://github.com/hashicorp/vault/pull/3883)] - * secret/transit: Allow selecting signature algorithm as well as hash - algorithm when signing/verifying [[GH-4018](https://github.com/hashicorp/vault/pull/4018)] - * server: Make sure `tls_disable_client_cert` is actually a true value rather - than just set [[GH-4049](https://github.com/hashicorp/vault/pull/4049)] - * storage/dynamodb: Allow specifying max retries for dynamo client [[GH-4115](https://github.com/hashicorp/vault/pull/4115)] - * storage/gcs: Allow specifying chunk size for transfers, which can reduce - memory utilization [[GH-4060](https://github.com/hashicorp/vault/pull/4060)] - * sys/capabilities: Add the ability to use multiple paths for capability - checking [[GH-3663](https://github.com/hashicorp/vault/pull/3663)] - -BUG FIXES: - - * auth/aws: Fix honoring `max_ttl` when a corresponding role `ttl` is not also - set [[GH-4107](https://github.com/hashicorp/vault/pull/4107)] - * auth/okta: Fix honoring configured `max_ttl` value [[GH-4110](https://github.com/hashicorp/vault/pull/4110)] - * auth/token: If a periodic token being issued has a period greater than the - max_lease_ttl configured on the token store mount, truncate it. This matches - renewal behavior; before it was inconsistent between issuance and renewal.
- [[GH-4112](https://github.com/hashicorp/vault/pull/4112)] - * cli: Improve error messages around `vault auth help` when there is no CLI - helper for a particular method [[GH-4056](https://github.com/hashicorp/vault/pull/4056)] - * cli: Fix autocomplete installation when using Fish as the shell [[GH-4094](https://github.com/hashicorp/vault/pull/4094)] - * secret/database: Properly honor mount-tuned max TTL [[GH-4051](https://github.com/hashicorp/vault/pull/4051)] - * secret/ssh: Return `key_bits` value when reading a role [[GH-4098](https://github.com/hashicorp/vault/pull/4098)] - * sys: When writing policies on a performance replication secondary, properly - forward requests to the primary [[GH-4129](https://github.com/hashicorp/vault/pull/4129)] - -## 0.9.5 (February 26th, 2018) +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] IMPROVEMENTS: - * auth: Allow sending default_lease_ttl and max_lease_ttl values when enabling - auth methods. [[GH-4019](https://github.com/hashicorp/vault/pull/4019)] - * secret/database: Add list functionality to `database/config` endpoint - [[GH-4026](https://github.com/hashicorp/vault/pull/4026)] - * physical/consul: Allow setting a specific service address [[GH-3971](https://github.com/hashicorp/vault/pull/3971)] - * replication: When bootstrapping a new secondary, if the initial cluster - connection fails, Vault will attempt to roll back state so that - bootstrapping can be tried again, rather than having to recreate the - downstream cluster. This will still require fetching a new secondary - activation token. +* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] +* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] BUG FIXES: - * auth/aws: Update libraries to fix regression verifying PKCS#7 identity - documents [[GH-4014](https://github.com/hashicorp/vault/pull/4014)] - * listener: Revert to Go 1.9 for now to allow certificates with non-DNS names - in their DNS SANs to be used for Vault's TLS connections [[GH-4028](https://github.com/hashicorp/vault/pull/4028)] - * replication: Fix issue with a performance secondary/DR primary node losing - its DR primary status when performing an update-primary operation - * replication: Fix issue where performance secondaries could be unable to - automatically connect to a performance primary after that performance - primary has been promoted to a DR primary from a DR secondary - * ui: Fix behavior when a value contains a `.` - -## 0.9.4 (February 20th, 2018) - -SECURITY: - - * Role Tags used with the EC2 style of AWS auth were being improperly parsed; - as a result they were not being used to properly restrict values. - Implementations following our suggestion of using these as defense-in-depth - rather than the only source of restriction should not have significant - impact. 
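The 0.9.5 improvement above that allows sending TTL values when enabling an auth method looks roughly like this with the post-0.9.2 CLI (the mount choice and TTL values are illustrative):

```
# Enable userpass with mount-level TTL overrides in one step, rather than
# enabling the method first and tuning the mount afterwards.
vault auth enable \
    -default-lease-ttl=1h \
    -max-lease-ttl=24h \
    userpass
```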
- -FEATURES: - - * **ChaCha20-Poly1305 support in `transit`**: You can now encrypt and decrypt - with ChaCha20-Poly1305 in `transit`. Key derivation and convergent - encryption are also supported. - * **Okta Push support in Okta Auth Backend**: If a user account has MFA - required within Okta, an Okta Push MFA flow can be used to successfully - finish authentication. - * **PKI Improvements**: Custom OID subject alternate names can now be set, - subject to allow restrictions that support globbing. Additionally, Country, - Locality, Province, Street Address, and Postal Code can now be set in - certificate subjects. - * **Manta Storage**: Joyent Triton Manta can now be used for Vault storage. - * **Google Cloud Spanner Storage**: Google Cloud Spanner can now be used for - Vault storage. - -IMPROVEMENTS: - - * auth/centrify: Add CLI helper - * audit: Always log failure metrics, even if zero, to ensure the values appear - on dashboards [[GH-3937](https://github.com/hashicorp/vault/pull/3937)] - * cli: Disable color when output is not a TTY [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] - * cli: Add `-format` flag to all subcommands [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] - * cli: Do not display deprecation warnings when the format is not table - [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] - * core: If over a predefined lease count (256k), log a warning not more than - once a minute. Too many leases can be problematic for many of the storage - backends and often this number of leases is indicative of a need for - workflow improvements. [[GH-3957](https://github.com/hashicorp/vault/pull/3957)] - * secret/nomad: Have generated ACL tokens cap out at 64 characters [[GH-4009](https://github.com/hashicorp/vault/pull/4009)] - * secret/pki: Country, Locality, Province, Street Address, and Postal Code can - now be set on certificates [[GH-3992](https://github.com/hashicorp/vault/pull/3992)] - * secret/pki: UTF-8 Other Names can now be set in Subject Alternate Names in - issued certs; allowed values can be set per role and support globbing - [[GH-3889](https://github.com/hashicorp/vault/pull/3889)] - * secret/pki: Add a flag to make the common name optional on certs [[GH-3940](https://github.com/hashicorp/vault/pull/3940)] - * secret/pki: Ensure only DNS-compatible names go into DNS SANs; additionally, - properly handle IDNA transformations for these DNS names [[GH-3953](https://github.com/hashicorp/vault/pull/3953)] - * secret/ssh: Add `valid-principals` flag to CLI for CA mode [[GH-3922](https://github.com/hashicorp/vault/pull/3922)] - * storage/manta: Add Manta storage [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] - * ui (Enterprise): Support for ChaCha20-Poly1305 keys in the transit engine. +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas.
[[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -BUG FIXES: - * api/renewer: Honor increment value in renew auth calls [[GH-3904](https://github.com/hashicorp/vault/pull/3904)] - * auth/approle: Fix inability to use limited-use-count secret IDs on - replication performance secondaries - * auth/approle: Cleanup of secret ID accessors during tidy and removal of - dangling accessor entries [[GH-3924](https://github.com/hashicorp/vault/pull/3924)] - * auth/aws-ec2: Avoid masking of role tag response [[GH-3941](https://github.com/hashicorp/vault/pull/3941)] - * auth/cert: Verify DNS SANs in the authenticating certificate [[GH-3982](https://github.com/hashicorp/vault/pull/3982)] - * auth/okta: Return configured durations as seconds, not nanoseconds [[GH-3871](https://github.com/hashicorp/vault/pull/3871)] - * auth/okta: Get all Okta groups for a user vs. default 200 limit [[GH-4034](https://github.com/hashicorp/vault/pull/4034)] - * auth/token: Token creation via the CLI no longer forces periodic token - creation. Passing an explicit zero value for the period no longer creates - periodic tokens.
[[GH-3880](https://github.com/hashicorp/vault/pull/3880)] - * command: Fix interpreted formatting directives when printing raw fields - [[GH-4005](https://github.com/hashicorp/vault/pull/4005)] - * command: Correctly format output when using -field and -format flags at the - same time [[GH-3987](https://github.com/hashicorp/vault/pull/3987)] - * command/rekey: Re-add lost `stored-shares` parameter [[GH-3974](https://github.com/hashicorp/vault/pull/3974)] - * command/ssh: Create and reuse the API client [[GH-3909](https://github.com/hashicorp/vault/pull/3909)] - * command/status: Fix panic when status returns 500 from leadership lookup - [[GH-3998](https://github.com/hashicorp/vault/pull/3998)] - * identity: Fix race when creating entities [[GH-3932](https://github.com/hashicorp/vault/pull/3932)] - * plugin/gRPC: Fixed an issue with list requests and raw responses coming from - plugins using gRPC transport [[GH-3881](https://github.com/hashicorp/vault/pull/3881)] - * plugin/gRPC: Fix panic when special paths are not set [[GH-3946](https://github.com/hashicorp/vault/pull/3946)] - * secret/pki: Verify a name is a valid hostname before adding to DNS SANs - [[GH-3918](https://github.com/hashicorp/vault/pull/3918)] - * secret/transit: Fix auditing when reading a key after it has been backed up - or restored [[GH-3919](https://github.com/hashicorp/vault/pull/3919)] - * secret/transit: Fix storage/memory consistency when persistence fails - [[GH-3959](https://github.com/hashicorp/vault/pull/3959)] - * storage/consul: Validate that service names are RFC 1123 compliant [[GH-3960](https://github.com/hashicorp/vault/pull/3960)] - * storage/etcd3: Fix memory ballooning with standby instances [[GH-3798](https://github.com/hashicorp/vault/pull/3798)] - * storage/etcd3: Fix large lists (like token loading at startup) not being - handled [[GH-3772](https://github.com/hashicorp/vault/pull/3772)] - * storage/postgresql: Fix compatibility with versions using custom string - version tags [[GH-3949](https://github.com/hashicorp/vault/pull/3949)] - * storage/zookeeper: Update vendoring to fix freezing issues [[GH-3896](https://github.com/hashicorp/vault/pull/3896)] - * ui (Enterprise): Decoding the replication token should no longer error and - prevent enabling of a secondary replication cluster via the UI. - * plugin/gRPC: Add connection info to the request object [[GH-3997](https://github.com/hashicorp/vault/pull/3997)] - -## 0.9.3 (January 28th, 2018) - -A regression from a feature merge disabled the Nomad secrets backend in 0.9.2. -This release re-enables the Nomad secrets backend; it is otherwise identical to -0.9.2. - -## 0.9.2 (January 26th, 2018) +## 1.11.3 +### August 31, 2022 SECURITY: - * Okta Auth Backend: While the Okta auth backend was successfully verifying - usernames and passwords, it was not checking the returned state of the - account, so accounts that had been marked as locked out could still be used to - log in. Only accounts in SUCCESS or PASSWORD_WARN states are now allowed. - * Periodic Tokens: A regression in 0.9.1 meant that periodic tokens created by - the AppRole, AWS, and Cert auth backends would expire when the max TTL for - the backend/mount/system was hit instead of their stated behavior of living - as long as they are renewed. This is now fixed; existing tokens do not have - to be reissued as this was purely a regression in the renewal logic. - * Seal Wrapping: During certain replication states, values written that were - marked for seal wrapping may not be wrapped on the secondaries.
This has been fixed, - and existing values will be wrapped on next read or write. This does not - affect the barrier keys. - -DEPRECATIONS/CHANGES: - - * `sys/health` DR Secondary Reporting: The `replication_dr_secondary` bool - returned by `sys/health` could be misleading since it would be `false` both - when a cluster was not a DR secondary but also when the node is a standby in - the cluster and has not yet fully received state from the active node. This - could cause health checks on LBs to decide that the node was acceptable for - traffic even though DR secondaries cannot handle normal Vault traffic. (In - other words, the bool could only convey "yes" or "no" but not "not sure - yet".) This has been replaced by `replication_dr_mode` and - `replication_perf_mode` which are string values that convey the current - state of the node; a value of `disabled` indicates that replication is - disabled or the state is still being discovered. As a result, an LB check - can positively verify that the node is both not `disabled` and is not a DR - secondary, and avoid sending traffic to it if either is true. - * PKI Secret Backend Roles parameter types: For `ou` and `organization` - in role definitions in the PKI secret backend, input can now be a - comma-separated string or an array of strings. Reading a role will - now return arrays for these parameters. - * Plugin API Changes: The plugin API has been updated to utilize golang's - context.Context package. Many function signatures now accept a context - object as the first parameter. Existing plugins will need to pull in the - latest Vault code and update their function signatures to begin using - context and the new gRPC transport. - -FEATURES: - - * **gRPC Backend Plugins**: Backend plugins now use gRPC for transport, - allowing them to be written in other languages. - * **Brand New CLI**: Vault has a brand new CLI interface that is significantly - streamlined, supports autocomplete, and is almost entirely backwards - compatible. - * **UI: PKI Secret Backend (Enterprise)**: Configure PKI secret backends, - create and browse roles and certificates, and issue and sign certificates via - the listed roles. - -IMPROVEMENTS: - - * auth/aws: Handle IAM headers produced by clients that formulate numbers as - ints rather than strings [[GH-3763](https://github.com/hashicorp/vault/pull/3763)] - * auth/okta: Support JSON lists when specifying groups and policies [[GH-3801](https://github.com/hashicorp/vault/pull/3801)] - * autoseal/hsm: Attempt reconnecting to the HSM on certain kinds of issues, - including HA scenarios for some Gemalto HSMs. 
- (Enterprise) - * cli: Output password prompts to stderr to make it easier to pipe an output - token to another command [[GH-3782](https://github.com/hashicorp/vault/pull/3782)] - * core: Report replication status in `sys/health` [[GH-3810](https://github.com/hashicorp/vault/pull/3810)] - * physical/s3: Allow using paths with S3 for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)] - * physical/s3: Add ability to disable SSL for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)] - * plugins: Args for plugins can now be specified separately from the command, - allowing the same output format and input format for plugin information - [[GH-3778](https://github.com/hashicorp/vault/pull/3778)] - * secret/pki: `ou` and `organization` can now be specified as a - comma-separated string or an array of strings [[GH-3804](https://github.com/hashicorp/vault/pull/3804)] - * plugins: Plugins will fall back to using netrpc as the communication protocol - on older versions of Vault [[GH-3833](https://github.com/hashicorp/vault/pull/3833)] - -BUG FIXES: - - * auth/(approle,aws,cert): Fix behavior where periodic tokens generated by - these backends could not have their TTL renewed beyond the system/mount max - TTL value [[GH-3803](https://github.com/hashicorp/vault/pull/3803)] - * auth/aws: Fix error returned if `bound_iam_principal_arn` was given to an - existing role update [[GH-3843](https://github.com/hashicorp/vault/pull/3843)] - * core/sealwrap: Speed improvements and bug fixes (Enterprise) - * identity: Delete group alias when an external group is deleted [[GH-3773](https://github.com/hashicorp/vault/pull/3773)] - * legacymfa/duo: Fix intermittent panic when Duo could not be reached - [[GH-2030](https://github.com/hashicorp/vault/pull/2030)] - * secret/database: Fix a location where a lock could potentially not be - released, leading to deadlock [[GH-3774](https://github.com/hashicorp/vault/pull/3774)] - * secret/(all databases) Fix behavior where if a max TTL was specified but no - default TTL was specified the system/mount default TTL would be used but not - be capped by the local max TTL [[GH-3814](https://github.com/hashicorp/vault/pull/3814)] - * secret/database: Fix an issue where plugins were not closed properly if they - failed to initialize [[GH-3768](https://github.com/hashicorp/vault/pull/3768)] - * ui: mounting a secret backend will now properly set `max_lease_ttl` and - `default_lease_ttl` when specified - previously both fields set - `default_lease_ttl`. - -## 0.9.1 (December 21st, 2017) - -DEPRECATIONS/CHANGES: - - * AppRole Case Sensitivity: In prior versions of Vault, `list` operations - against AppRole roles would require preserving case in the role name, even - though most other operations within AppRole are case-insensitive with - respect to the role name. This has been fixed; existing roles will behave as - they have in the past, but new roles will act case-insensitively in these - cases. - * Token Auth Backend Roles parameter types: For `allowed_policies` and - `disallowed_policies` in role definitions in the token auth backend, input - can now be a comma-separated string or an array of strings. Reading a role - will now return arrays for these parameters. - * Transit key exporting: You can now mark a key in the `transit` backend as - `exportable` at any time, rather than just at creation time; however, once - this value is set, it still cannot be unset. 
- * PKI Secret Backend Roles parameter types: For `allowed_domains` and - `key_usage` in role definitions in the PKI secret backend, input - can now be a comma-separated string or an array of strings. Reading a role - will now return arrays for these parameters. - * SSH Dynamic Keys Method Defaults to 2048-bit Keys: When using the dynamic - key method in the SSH backend, the default is now to use 2048-bit keys if no - specific key bit size is specified. - * Consul Secret Backend lease handling: The `consul` secret backend can now - accept both strings and integer numbers of seconds for its lease value. The - value returned on a role read will be an integer number of seconds instead - of a human-friendly string. - * Unprintable characters not allowed in API paths: Unprintable characters are - no longer allowed in names in the API (paths and path parameters), with an - extra restriction on whitespace characters. Allowed characters are those - that are considered printable by Unicode plus spaces. - -FEATURES: - - * **Transit Backup/Restore**: The `transit` backend now supports a backup - operation that can export a given key, including all key versions and - configuration, as well as a restore operation allowing import into another - Vault. - * **gRPC Database Plugins**: Database plugins now use gRPC for transport, - allowing them to be written in other languages. - * **Nomad Secret Backend**: Nomad ACL tokens can now be generated and revoked - using Vault. - * **TLS Cert Auth Backend Improvements**: The `cert` auth backend can now - match against custom certificate extensions via exact or glob matching, and - additionally supports max_ttl and periodic token toggles. +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] -IMPROVEMENTS: - - * auth/cert: Support custom certificate constraints [[GH-3634](https://github.com/hashicorp/vault/pull/3634)] - * auth/cert: Support setting `max_ttl` and `period` [[GH-3642](https://github.com/hashicorp/vault/pull/3642)] - * audit/file: Setting a file mode of `0000` will now disable Vault from - automatically `chmod`ing the log file [[GH-3649](https://github.com/hashicorp/vault/pull/3649)] - * auth/github: The legacy MFA system can now be used with the GitHub auth - backend [[GH-3696](https://github.com/hashicorp/vault/pull/3696)] - * auth/okta: The legacy MFA system can now be used with the Okta auth backend - [[GH-3653](https://github.com/hashicorp/vault/pull/3653)] - * auth/token: `allowed_policies` and `disallowed_policies` can now be specified - as a comma-separated string or an array of strings [[GH-3641](https://github.com/hashicorp/vault/pull/3641)] - * command/server: The log level can now be specified with `VAULT_LOG_LEVEL` - [[GH-3721](https://github.com/hashicorp/vault/pull/3721)] - * core: Period values from auth backends will now be checked and applied to the - TTL value directly by core on login and renewal requests [[GH-3677](https://github.com/hashicorp/vault/pull/3677)] - * database/mongodb: Add optional `write_concern` parameter, which can be set - during database configuration. 
This establishes a session-wide [write - concern](https://docs.mongodb.com/manual/reference/write-concern/) for the - lifecycle of the mount [[GH-3646](https://github.com/hashicorp/vault/pull/3646)] - * http: Request path containing non-printable characters will return 400 - Bad - Request [[GH-3697](https://github.com/hashicorp/vault/pull/3697)] - * mfa/okta: Filter a given email address as a login filter, allowing operation - when login email and account email are different - * plugins: Make Vault more resilient when unsealing when plugins are - unavailable [[GH-3686](https://github.com/hashicorp/vault/pull/3686)] - * secret/pki: `allowed_domains` and `key_usage` can now be specified - as a comma-separated string or an array of strings [[GH-3642](https://github.com/hashicorp/vault/pull/3642)] - * secret/ssh: Allow 4096-bit keys to be used in dynamic key method [[GH-3593](https://github.com/hashicorp/vault/pull/3593)] - * secret/consul: The Consul secret backend now uses the value of `lease` set - on the role, if set, when renewing a secret. [[GH-3796](https://github.com/hashicorp/vault/pull/3796)] - * storage/mysql: Don't attempt database creation if it exists, which can help - under certain permissions constraints [[GH-3716](https://github.com/hashicorp/vault/pull/3716)] - -BUG FIXES: - - * api/status (enterprise): Fix status reporting when using an auto seal - * auth/approle: Fix case-sensitive/insensitive comparison issue [[GH-3665](https://github.com/hashicorp/vault/pull/3665)] - * auth/cert: Return `allowed_names` on role read [[GH-3654](https://github.com/hashicorp/vault/pull/3654)] - * auth/ldap: Fix incorrect control information being sent [[GH-3402](https://github.com/hashicorp/vault/pull/3402)] [[GH-3496](https://github.com/hashicorp/vault/pull/3496)] - [[GH-3625](https://github.com/hashicorp/vault/pull/3625)] [[GH-3656](https://github.com/hashicorp/vault/pull/3656)] - * core: Fix seal status reporting when using an autoseal - * core: Add creation path to wrap info for a control group token - * core: Fix potential panic that could occur using plugins when a node - transitioned from active to standby [[GH-3638](https://github.com/hashicorp/vault/pull/3638)] - * core: Fix memory ballooning when a connection would connect to the cluster - port and then go away -- redux! 
[[GH-3680](https://github.com/hashicorp/vault/pull/3680)] - * core: Replace recursive token revocation logic with depth-first logic, which - can avoid hitting stack depth limits in extreme cases [[GH-2348](https://github.com/hashicorp/vault/pull/2348)] - * core: When doing a read on configured audited-headers, properly handle case - insensitivity [[GH-3701](https://github.com/hashicorp/vault/pull/3701)] - * core/pkcs11 (enterprise): Fix panic when PKCS#11 library is not readable - * database/mysql: Allow the creation statement to use commands that are not yet - supported by the prepare statement protocol [[GH-3619](https://github.com/hashicorp/vault/pull/3619)] - * plugin/auth-gcp: Fix IAM roles when using `allow_gce_inference` [VPAG-19] - -## 0.9.0.1 (November 21st, 2017) (Enterprise Only) - -IMPROVEMENTS: - - * auth/gcp: Support seal wrapping of configuration parameters - * auth/kubernetes: Support seal wrapping of configuration parameters - -BUG FIXES: - - * Fix an upgrade issue with some physical backends when migrating from legacy - HSM stored key support to the new Seal Wrap mechanism (Enterprise) - * mfa: Add the 'mfa' flag that was removed by mistake [[GH-4223](https://github.com/hashicorp/vault/pull/4223)] - -## 0.9.0 (November 14th, 2017) - -DEPRECATIONS/CHANGES: - - * HSM config parameter requirements: When using Vault with an HSM, a new - parameter is required: `hmac_key_label`. This performs a similar function to - `key_label` but for the HMAC key Vault will use. Vault will generate a - suitable key if this value is specified and `generate_key` is set true. - * API HTTP client behavior: When calling `NewClient` the API no longer - modifies the provided client/transport. In particular this means it will no - longer enable redirection limiting and HTTP/2 support on custom clients. It - is suggested that if you want to make changes to an HTTP client that you use - one created by `DefaultConfig` as a starting point. - * AWS EC2 client nonce behavior: The client nonce generated by the backend - that gets returned along with the authentication response will be audited in - plaintext. If this is undesired, the clients can choose to supply a custom - nonce to the login endpoint. The custom nonce set by the client will from - now on, not be returned back with the authentication response, and hence not - audit logged. - * AWS Auth role options: The API will now error when trying to create or - update a role with the mutually-exclusive options - `disallow_reauthentication` and `allow_instance_migration`. - * SSH CA role read changes: When reading back a role from the `ssh` backend, - the TTL/max TTL values will now be an integer number of seconds rather than - a string. This better matches the API elsewhere in Vault. - * SSH role list changes: When listing roles from the `ssh` backend via the API, - the response data will additionally return a `key_info` map that will contain - a map of each key with a corresponding object containing the `key_type`. - * More granularity in audit logs: Audit request and response entries are still - in RFC3339 format but now have a granularity of nanoseconds. - * High availability related values have been moved out of the `storage` and - `ha_storage` stanzas, and into the top-level configuration. `redirect_addr` - has been renamed to `api_addr`. The stanzas still support accepting - HA-related values to maintain backward compatibility, but top-level values - will take precedence. 
- * A new `seal` stanza has been added to the configuration file, which is - optional and enables configuration of the seal type to use for additional - data protection, such as using HSM or Cloud KMS solutions to encrypt and - decrypt data. - -FEATURES: +CHANGES: - * **RSA Support for Transit Backend**: Transit backend can now generate RSA - keys which can be used for encryption and signing. [[GH-3489](https://github.com/hashicorp/vault/pull/3489)] - * **Identity System**: Now in open source and with significant enhancements, - Identity is an integrated system for understanding users across tokens and - enabling easier management of users directly and via groups. - * **External Groups in Identity**: Vault can now automatically assign users - and systems to groups in Identity based on their membership in external - groups. - * **Seal Wrap / FIPS 140-2 Compatibility (Enterprise)**: Vault can now take - advantage of FIPS 140-2-certified HSMs to ensure that Critical Security - Parameters are protected in a compliant fashion. Vault's implementation has - received a statement of compliance from Leidos. - * **Control Groups (Enterprise)**: Require multiple members of an Identity - group to authorize a requested action before it is allowed to run. - * **Cloud Auto-Unseal (Enterprise)**: Automatically unseal Vault using AWS KMS - and GCP CKMS. - * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel - to create extremely flexible access control policies -- even on - unauthenticated endpoints. - * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using auto-unsealing - functionality, the `rekey` operation is now supported; it uses recovery keys - to authorize the master key rekey. - * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using - Disaster Recovery replication, a token can be created that can be used to - authorize actions such as promotion and updating primary information, rather - than using recovery keys. - * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using - auto-unsealing, a request to unseal Vault can be triggered by a threshold of - recovery keys, rather than requiring the Vault process to be restarted. - * **UI Redesign (Enterprise)**: All new experience for the Vault Enterprise - UI. The look and feel has been completely redesigned to give users a better - experience and make managing secrets fast and easy. - * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend, - create and browse roles. And use them to sign keys or generate one time - passwords. - * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS - backend via the Vault Enterprise UI. In addition you can create roles, - browse the roles and Generate IAM Credentials from them in the UI. +* core: Bump Go version to 1.17.13. IMPROVEMENTS: - * api: Add ability to set custom headers on each call [[GH-3394](https://github.com/hashicorp/vault/pull/3394)] - * command/server: Add config option to disable requesting client certificates - [[GH-3373](https://github.com/hashicorp/vault/pull/3373)] - * auth/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] - * core: Disallow mounting underneath an existing path, not just over [[GH-2919](https://github.com/hashicorp/vault/pull/2919)] - * physical/file: Use `700` as permissions when creating directories. The files - themselves were `600` and are all encrypted, but this doesn't hurt. 
- * secret/aws: Add ability to use custom IAM/STS endpoints [[GH-3416](https://github.com/hashicorp/vault/pull/3416)] - * secret/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] - * secret/cassandra: Work around Cassandra ignoring consistency levels for a - user listing query [[GH-3469](https://github.com/hashicorp/vault/pull/3469)] - * secret/pki: Private keys can now be marshalled as PKCS#8 [[GH-3518](https://github.com/hashicorp/vault/pull/3518)] - * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON - arrays [[GH-3409](https://github.com/hashicorp/vault/pull/3409)] - * secret/ssh: Role TTL/max TTL can now be specified as either a string or an - integer [[GH-3507](https://github.com/hashicorp/vault/pull/3507)] - * secret/transit: Sign and verify operations now support a `none` hash - algorithm to allow signing/verifying pre-hashed data [[GH-3448](https://github.com/hashicorp/vault/pull/3448)] - * secret/database: Add the ability to glob allowed roles in the Database Backend [[GH-3387](https://github.com/hashicorp/vault/pull/3387)] - * ui (enterprise): Support for RSA keys in the transit backend - * ui (enterprise): Support for DR Operation Token generation, promoting, and - updating primary on DR Secondary clusters +* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the +Kerberos config in Vault. This removes any instance names found in the keytab +service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. 
[[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] BUG FIXES: - * api: Fix panic when setting a custom HTTP client but with a nil transport - [[GH-3435](https://github.com/hashicorp/vault/pull/3435)] [[GH-3437](https://github.com/hashicorp/vault/pull/3437)] - * api: Fix authing to the `cert` backend when the CA for the client cert is - not known to the server's listener [[GH-2946](https://github.com/hashicorp/vault/pull/2946)] - * auth/approle: Create role ID index during read if a role is missing one [[GH-3561](https://github.com/hashicorp/vault/pull/3561)] - * auth/aws: Don't allow mutually exclusive options [[GH-3291](https://github.com/hashicorp/vault/pull/3291)] - * auth/radius: Fix logging in in some situations [[GH-3461](https://github.com/hashicorp/vault/pull/3461)] - * core: Fix memleak when a connection would connect to the cluster port and - then go away [[GH-3513](https://github.com/hashicorp/vault/pull/3513)] - * core: Fix panic if a single-use token is used to step-down or seal [[GH-3497](https://github.com/hashicorp/vault/pull/3497)] - * core: Set rather than add headers to prevent some duplicated headers in - responses when requests were forwarded to the active node [[GH-3485](https://github.com/hashicorp/vault/pull/3485)] - * physical/etcd3: Fix some listing issues due to how etcd3 does prefix - matching [[GH-3406](https://github.com/hashicorp/vault/pull/3406)] - * physical/etcd3: Fix case where standbys can lose their etcd client lease - [[GH-3031](https://github.com/hashicorp/vault/pull/3031)] - * physical/file: Fix listing when underscores are the first component of a - path [[GH-3476](https://github.com/hashicorp/vault/pull/3476)] - * plugins: Allow response errors to be returned from backend plugins [[GH-3412](https://github.com/hashicorp/vault/pull/3412)] - * secret/transit: Fix panic if the length of the input ciphertext was less - than the expected nonce length [[GH-3521](https://github.com/hashicorp/vault/pull/3521)] - * ui (enterprise): Reinstate support for generic secret backends - this was - erroneously removed in a previous release - -## 0.8.3 (September 19th, 2017) - -CHANGES: +* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] +* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails +* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. 
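For the storage/gcs entry above, a sketch of the environment-variable style of configuration (the bucket name and credentials path are illustrative; `GOOGLE_STORAGE_BUCKET` is the variable the gcs backend documentation describes):

```shell
# Configure the gcs backend entirely through the environment, leaving the
# stanza in the config file empty.
export GOOGLE_STORAGE_BUCKET="my-vault-data"
export GOOGLE_APPLICATION_CREDENTIALS="/etc/vault/gcs-sa.json"
cat > vault.hcl <<'EOF'
storage "gcs" {}
EOF
vault server -config=vault.hcl
```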
[[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] +* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] +* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - * Policy input/output standardization: For all built-in authentication - backends, policies can now be specified as a comma-delimited string or an - array if using JSON as API input; on read, policies will be returned as an - array; and the `default` policy will not be forcefully added to policies - saved in configurations. Please note that the `default` policy will continue - to be added to generated tokens, however, rather than backends adding - `default` to the given set of input policies (in some cases, and not in - others), the stored set will reflect the user-specified set. - * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the - endpoint would not modify the Issuer in the generated certificate, leaving - the output self-issued. Although theoretically valid, in practice crypto - stacks were unhappy validating paths containing such certs. As a result, - `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer - DN of the generated certificate. - * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely - useful in break-glass or support scenarios, it is also extremely dangerous. - As of now, a configuration file option `raw_storage_endpoint` must be set in - order to enable this API endpoint. Once set, the available functionality has - been enhanced slightly; it now supports listing and decrypting most of - Vault's core data structures, except for the encryption keyring itself. - * `generic` is now `kv`: To better reflect its actual use, the `generic` - backend is now `kv`. Using `generic` will still work for backwards - compatibility. +SECURITY: -FEATURES: +* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. 
This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)]
- * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault
-   using machine credentials.
- * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts
-   can now authenticate to Vault using JWT tokens.
+## 1.11.2
+### August 2, 2022

IMPROVEMENTS:

- * configuration: Provide a config option to store Vault server's process ID
-   (PID) in a file [[GH-3321](https://github.com/hashicorp/vault/pull/3321)]
- * mfa (Enterprise): Add the ability to use identity metadata in username format
- * mfa/okta (Enterprise): Add support for configuring base_url for API calls
- * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value
-   longer than the signing CA certificate's NotAfter value. [[GH-3325](https://github.com/hashicorp/vault/pull/3325)]
- * sys/raw: Raw storage access is now disabled by default [[GH-3329](https://github.com/hashicorp/vault/pull/3329)]
-
-BUG FIXES:
-
- * auth/okta: Fix regression that removed the ability to set base_url [[GH-3313](https://github.com/hashicorp/vault/pull/3313)]
- * core: Fix panic while loading leases at startup on ARM processors
-   [[GH-3314](https://github.com/hashicorp/vault/pull/3314)]
- * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key
-   [[GH-3325](https://github.com/hashicorp/vault/pull/3325)]
-
-## 0.8.2.1 (September 11th, 2017) (Enterprise Only)
+* agent: Added `disable_keep_alives` configuration to disable keep-alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)]

BUG FIXES:

- * Fix an issue upgrading to 0.8.2 for Enterprise customers.
+* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
+* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
+* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)]
+* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)]

-## 0.8.2 (September 5th, 2017)
+## 1.11.1
+### July 21, 2022

SECURITY:

-* In prior versions of Vault, if authenticating via AWS IAM and requesting a
-  periodic token, the period was not properly respected. This could lead to
-  tokens expiring unexpectedly, or a token lifetime being longer than expected.
-  Upon token renewal with Vault 0.8.2 the period will be properly enforced.
+* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)]

-DEPRECATIONS/CHANGES:
-
-* `vault ssh` users should supply `-mode` and `-role` to reduce the number of
-  API calls. A future version of Vault will mark these optional values as
-  required.
Failure to supply `-mode` or `-role` will result in a warning. -* Vault plugins will first briefly run a restricted version of the plugin to - fetch metadata, and then lazy-load the plugin on first request to prevent - crash/deadlock of Vault during the unseal process. Plugins will need to be - built with the latest changes in order for them to run properly. - -FEATURES: +CHANGES: -* **Lazy Lease Loading**: On startup, Vault will now load leases from storage - in a lazy fashion (token checks and revocation/renewal requests still force - an immediate load). For larger installations this can significantly reduce - downtime when switching active nodes or bringing Vault up from cold start. -* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA - backend for authenticating to machines. It also supports remote host key - verification through the SSH CA backend, if enabled. -* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports - signing self-issued CA certs. This is useful when switching root CAs. +* core: Bump Go version to 1.17.12. IMPROVEMENTS: - * audit/file: Allow specifying `stdout` as the `file_path` to log to standard - output [[GH-3235](https://github.com/hashicorp/vault/pull/3235)] - * auth/aws: Allow wildcards in `bound_iam_principal_arn` [[GH-3213](https://github.com/hashicorp/vault/pull/3213)] - * auth/okta: Compare groups case-insensitively since Okta is only - case-preserving [[GH-3240](https://github.com/hashicorp/vault/pull/3240)] - * auth/okta: Standardize Okta configuration APIs across backends [[GH-3245](https://github.com/hashicorp/vault/pull/3245)] - * cli: Add subcommand autocompletion that can be enabled with - `vault -autocomplete-install` [[GH-3223](https://github.com/hashicorp/vault/pull/3223)] - * cli: Add ability to handle wrapped responses when using `vault auth`. What - is output depends on the other given flags; see the help output for that - command for more information. [[GH-3263](https://github.com/hashicorp/vault/pull/3263)] - * core: TLS cipher suites used for cluster behavior can now be set via - `cluster_cipher_suites` in configuration [[GH-3228](https://github.com/hashicorp/vault/pull/3228)] - * core: The `plugin_name` can now either be specified directly as part of the - parameter or within the `config` object when mounting a secret or auth backend - via `sys/mounts/:path` or `sys/auth/:path` respectively [[GH-3202](https://github.com/hashicorp/vault/pull/3202)] - * core: It is now possible to update the `description` of a mount when - mount-tuning, although this must be done through the HTTP layer [[GH-3285](https://github.com/hashicorp/vault/pull/3285)] - * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and - retrying the operation [[GH-3269](https://github.com/hashicorp/vault/pull/3269)] - * secret/pki: TTLs can now be specified as a string or an integer number of - seconds [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] - * secret/pki: Self-issued certs can now be signed via - `pki/root/sign-self-issued` [[GH-3274](https://github.com/hashicorp/vault/pull/3274)] - * storage/gcp: Use application default credentials if they exist [[GH-3248](https://github.com/hashicorp/vault/pull/3248)] +* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. 
[[GH-15986](https://github.com/hashicorp/vault/pull/15986)] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] BUG FIXES: - * auth/aws: Properly use role-set period values for IAM-derived token renewals - [[GH-3220](https://github.com/hashicorp/vault/pull/3220)] - * auth/okta: Fix updating organization/ttl/max_ttl after initial setting - [[GH-3236](https://github.com/hashicorp/vault/pull/3236)] - * core: Fix PROXY when underlying connection is TLS [[GH-3195](https://github.com/hashicorp/vault/pull/3195)] - * core: Policy-related commands would sometimes fail to act case-insensitively - [[GH-3210](https://github.com/hashicorp/vault/pull/3210)] - * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address - [[GH-3268](https://github.com/hashicorp/vault/pull/3268)] - * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. - [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] - * plugins: Skip mounting plugin-based secret and credential mounts when setting - up mounts if the plugin is no longer present in the catalog. [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] - -## 0.8.1 (August 16th, 2017) - -DEPRECATIONS/CHANGES: - - * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already - exists will now return a `204` instead of overwriting an existing root. If - you want to recreate the root, first run a delete operation on `pki/root` - (requires `sudo` capability), then generate it again. - -FEATURES: - - * **Oracle Secret Backend**: There is now an external plugin to support leased - credentials for Oracle databases (distributed separately). - * **GCP IAM Auth Backend**: There is now an authentication backend that allows - using GCP IAM credentials to retrieve Vault tokens. This is available as - both a plugin and built-in to Vault. - * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can - now be used for MFA with the new path-based MFA introduced in Vault - Enterprise 0.8. - * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports - specifying permitted DNS domains for CA certificates, allowing you to - narrowly scope the set of domains for which a CA can issue or sign child - certificates. - * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to - reload using the `sys/plugins/reload/backend` endpoint and providing either - the plugin name or the mounts to reload. - * **Self-Reloading Plugins**: The plugin system will now attempt to reload a - crashed or stopped plugin, once per request. 
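A quick sketch of the reload endpoint named in the features above (the plugin and mount names are illustrative):

```shell
# Reload a plugin backend by its catalog name...
vault write sys/plugins/reload/backend plugin=my-secrets-plugin

# ...or reload whatever plugin backs one or more specific mounts:
vault write sys/plugins/reload/backend mounts=my-secrets/
```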
- -IMPROVEMENTS: - - * auth/approle: Allow array input for policies in addition to comma-delimited - strings [[GH-3163](https://github.com/hashicorp/vault/pull/3163)] - * plugins: Send logs through Vault's logger rather than stdout [[GH-3142](https://github.com/hashicorp/vault/pull/3142)] - * secret/pki: Add `pki/root` delete operation [[GH-3165](https://github.com/hashicorp/vault/pull/3165)] - * secret/pki: Don't overwrite an existing root cert/key when calling generate - [[GH-3165](https://github.com/hashicorp/vault/pull/3165)] +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* kmip (enterprise): Return SecretData as supported Object Type. +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] -BUG FIXES: +SECURITY: - * aws: Don't prefer a nil HTTP client over an existing one [[GH-3159](https://github.com/hashicorp/vault/pull/3159)] - * core: If there is an error when checking for create/update existence, return - 500 instead of 400 [[GH-3162](https://github.com/hashicorp/vault/pull/3162)] - * secret/database: Avoid creating usernames that are too long for legacy MySQL - [[GH-3138](https://github.com/hashicorp/vault/pull/3138)] +* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] -## 0.8.0 (August 9th, 2017) +## 1.11.0 +### June 20, 2022 -SECURITY: +CHANGES: - * We've added a note to the docs about the way the GitHub auth backend works - as it may not be readily apparent that GitHub personal access tokens, which - are used by the backend, can be used for unauthorized access if they are - stolen from third party services and access to Vault is public. - -DEPRECATIONS/CHANGES: - - * Database Plugin Backends: Passwords generated for these backends now - enforce stricter password requirements, as opposed to the previous behavior - of returning a randomized UUID. 
Passwords are of length 20, and have the characters
-   `A1a-` prepended to ensure stricter requirements. No regressions are
-   expected from this change. (For database backends that were previously
-   substituting underscores for hyphens in passwords, this will remain the
-   case.)
- * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`,
-   `sys/revoke-force` have been deprecated and relocated under `sys/leases`.
-   Additionally, the deprecated path `sys/revoke-force` now requires the `sudo`
-   capability.
- * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint
-   is now unauthenticated. This allows introspection of the wrapping info by
-   clients that only have the wrapping token without then invalidating the
-   token. Validation functions/checks are still performed on the token.
+* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)]
+* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)]
+* auth: Remove support for legacy MFA
+(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)]
+* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)]
+* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)]
+* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)]
+* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)]
+* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed`
+endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading).
+* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead.
+* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)]
+* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)]
+* secrets/pki: A new aliased API path (/pki/issuer/:issuer_ref/sign-self-issued)
+provides the same functionality as the existing API (/pki/root/sign-self-issued)
+but does not require sudo capabilities; the latter still requires them in an
+effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)]
+* secrets/pki: Err on unknown role during sign-verbatim.
[[GH-15543](https://github.com/hashicorp/vault/pull/15543)] +* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead +of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] +* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) +and signing APIs will now include the root CA certificate if the mount is +aware of it. [[GH-15155](https://github.com/hashicorp/vault/pull/15155)] +* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers +and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)] +* secrets/pki: existing Generate Root (pki/root/generate/:type), +Set Signed Intermediate (/pki/intermediate/set-signed) APIs will +add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] +* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain +response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] +* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] FEATURES: - * **Cassandra Storage**: Cassandra can now be used for Vault storage - * **CockroachDB Storage**: CockroachDB can now be used for Vault storage - * **CouchDB Storage**: CouchDB can now be used for Vault storage - * **SAP HANA Database Plugin**: The `databases` backend can now manage users - for SAP HANA databases - * **Plugin Backends**: Vault now supports running secret and auth backends as - plugins. Plugins can be mounted like normal backends and can be developed - independently from Vault. - * **PROXY Protocol Support** Vault listeners can now be configured to honor - PROXY protocol v1 information to allow passing real client IPs into Vault. A - list of authorized addresses (IPs or subnets) can be defined and - accept/reject behavior controlled. - * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI - now supports lookup and listing of leases and the associated actions from the - `sys/leases` endpoints in the API. These are located in the new top level - navigation item "Leases". - * **Filtered Mounts for Performance Mode Replication**: Whitelists or - blacklists of mounts can be defined per-secondary to control which mounts - are actually replicated to that secondary. This can allow targeted - replication of specific sets of data to specific geolocations/datacenters. - * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new - replication mode, Disaster Recovery (DR), that performs full real-time - replication (including tokens and leases) to DR secondaries. DR secondaries - cannot handle client requests, but can be promoted to primary as needed for - failover. - * **Manage New Replication Features in the Vault Enterprise UI**: Support for - Replication features in Vault Enterprise UI has expanded to include new DR - Replication mode and management of Filtered Mounts in Performance Replication - mode. - * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows - correlation of users across tokens. At present this is only used for MFA, - but will be the foundation of many other features going forward. - * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise - Only)**: A brand new MFA system built on top of Identity allows MFA - (currently Duo Push, Okta Push, and TOTP) for any authenticated path within - Vault. 
MFA methods can be configured centrally, and TOTP keys live within - the user's Identity information to allow using the same key across tokens. - Specific MFA method(s) required for any given path within Vault can be - specified in normal ACL path statements. +* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage. +* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)] +* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)] +* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows +import, generation and configuration of any number of keys and/or issuers +within a PKI mount, providing operators the ability to rotate certificates +in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)] +* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. [[GH-14899](https://github.com/hashicorp/vault/pull/14899)] +* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] +* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] +* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] +* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. 
[[GH-15054](https://github.com/hashicorp/vault/pull/15054)] IMPROVEMENTS: - * api: Add client method for a secret renewer background process [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] - * api: Add `RenewTokenAsSelf` [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] - * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env - var or with a new API function [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] - * api/cli: Client will now attempt to look up SRV records for the given Vault - hostname [[GH-3035](https://github.com/hashicorp/vault/pull/3035)] - * audit/socket: Enhance reconnection logic and don't require the connection to - be established at unseal time [[GH-2934](https://github.com/hashicorp/vault/pull/2934)] - * audit/file: Opportunistically try re-opening the file on error [[GH-2999](https://github.com/hashicorp/vault/pull/2999)] - * auth/approle: Add role name to token metadata [[GH-2985](https://github.com/hashicorp/vault/pull/2985)] - * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [[GH-2915](https://github.com/hashicorp/vault/pull/2915)] - * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env - var [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] - * command/auth: Add `-token-only` flag to `vault auth` that returns only the - token on stdout and does not store it via the token helper [[GH-2855](https://github.com/hashicorp/vault/pull/2855)] - * core: CORS allowed origins can now be configured [[GH-2021](https://github.com/hashicorp/vault/pull/2021)] - * core: Add metrics counters for audit log failures [[GH-2863](https://github.com/hashicorp/vault/pull/2863)] - * cors: Allow setting allowed headers via the API instead of always using - wildcard [[GH-3023](https://github.com/hashicorp/vault/pull/3023)] - * secret/ssh: Allow specifying the key ID format using template values for CA - type [[GH-2888](https://github.com/hashicorp/vault/pull/2888)] - * server: Add `tls_client_ca_file` option for specifying a CA file to use for - client certificate verification when `tls_require_and_verify_client_cert` is - enabled [[GH-3034](https://github.com/hashicorp/vault/pull/3034)] - * storage/cockroachdb: Add CockroachDB storage backend [[GH-2713](https://github.com/hashicorp/vault/pull/2713)] - * storage/couchdb: Add CouchDB storage backend [[GH-2880](https://github.com/hashicorp/vault/pull/2880)] - * storage/mssql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] - * storage/postgresql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] - * storage/postgresql: Improve listing speed [[GH-2945](https://github.com/hashicorp/vault/pull/2945)] - * storage/s3: More efficient paging when an object has a lot of subobjects - [[GH-2780](https://github.com/hashicorp/vault/pull/2780)] - * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [[GH-3084](https://github.com/hashicorp/vault/pull/3084)] - * sys/wrapping: Wrapped tokens now store the original request path of the data - [[GH-3100](https://github.com/hashicorp/vault/pull/3100)] - * telemetry: Add support for DogStatsD [[GH-2490](https://github.com/hashicorp/vault/pull/2490)] +* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] +* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. 
[[GH-15204](https://github.com/hashicorp/vault/pull/15204)] +* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] +* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] +* api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] +* api: Added MFALogin() for handling MFA flow when using login helpers. [[GH-14900](https://github.com/hashicorp/vault/pull/14900)] +* api: If the parameters supplied over the API payload are ignored due to not +being what the endpoints were expecting, or if the parameters supplied get +replaced by the values in the endpoint's path itself, warnings will be added to +the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] +* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] +* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] +* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] +* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] +* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] +* audit: Add a policy_results block into the audit log that contains the set of +policies that granted this request access. [[GH-15457](https://github.com/hashicorp/vault/pull/15457)] +* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] +* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] +* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] +* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. 
[[GH-15593](https://github.com/hashicorp/vault/pull/15593)] +* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] +* auth/okta: Add support for performing [the number +challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) +during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] +* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] +* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] +* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] +* core (enterprise): Include `termination_time` in `sys/license/status` response +* core (enterprise): Include termination time in `license inspect` command output +* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. [[GH-15213](https://github.com/hashicorp/vault/pull/15213)] +* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] +* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] +* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. +* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] +* core: Add new DB methods that do not prepare statements. [[GH-15166](https://github.com/hashicorp/vault/pull/15166)] +* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] +* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] +* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. 
[[GH-14957](https://github.com/hashicorp/vault/pull/14957)]
+* core: Upgrade golang.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)]
+* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes.
+* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)]
+* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)]
+* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)]
+* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)]
+* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)]
+* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. [[GH-14178](https://github.com/hashicorp/vault/pull/14178)]
+* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)]
+* secrets/pki: Allow operators to control the issuing certificate behavior when
+the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)]
+* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)]
+* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)]
+* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)]
+* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)]
+* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)]
+* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)]
+* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)]
+* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)]
+* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer
+* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections.
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] +* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)] +* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)] +* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)] +* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)] +* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)] +* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)] +* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] + +DEPRECATIONS: + +* docs: Document removal of X.509 certificates with signatures who use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)] +* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)] +* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)] BUG FIXES: - * api/health: Don't treat standby `429` codes as an error [[GH-2850](https://github.com/hashicorp/vault/pull/2850)] - * api/leases: Fix lease lookup returning lease properties at the top level - * audit: Fix panic when audit logging a read operation on an asymmetric - `transit` key [[GH-2958](https://github.com/hashicorp/vault/pull/2958)] - * auth/approle: Fix panic when secret and cidr list not provided in role - [[GH-3075](https://github.com/hashicorp/vault/pull/3075)] - * auth/aws: Look up proper account ID on token renew [[GH-3012](https://github.com/hashicorp/vault/pull/3012)] - * auth/aws: Store IAM header in all cases when it changes [[GH-3004](https://github.com/hashicorp/vault/pull/3004)] - * auth/ldap: Verify given certificate is PEM encoded instead of failing - silently [[GH-3016](https://github.com/hashicorp/vault/pull/3016)] - * auth/token: Don't allow using the same token ID twice when manually - specifying [[GH-2916](https://github.com/hashicorp/vault/pull/2916)] - * cli: Fix issue with parsing keys that start with special characters [[GH-2998](https://github.com/hashicorp/vault/pull/2998)] - * core: Relocated `sys/leases/renew` returns same payload as original - `sys/leases` endpoint [[GH-2891](https://github.com/hashicorp/vault/pull/2891)] - * secret/ssh: Fix panic when signing with incorrect key type [[GH-3072](https://github.com/hashicorp/vault/pull/3072)] - * secret/totp: Ensure codes can only be used once. This makes some automated - workflows harder but complies with the RFC. 
[[GH-2908](https://github.com/hashicorp/vault/pull/2908)]
- * secret/transit: Fix locking when creating a key with unsupported options
-   [[GH-2974](https://github.com/hashicorp/vault/pull/2974)]
-
-## 0.7.3 (June 7th, 2017)
+* Fixed panic when adding or modifying a Duo MFA Method in Enterprise
+* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)]
+* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)]
+* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)]
+* api: Fixes bug where OutputCurlString field was unintentionally being copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)]
+* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)]
+* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)]
+* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)]
+* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set
+has been fixed. The previous behavior would make a request to the LDAP server to
+get `user_attr` before discarding it and using the username instead. This would
+make it impossible for a user to connect if this attribute was missing or had
+multiple values, even though it would not be used anyway. The username is now
+used without making superfluous LDAP searches (see the configuration sketch
+below). [[GH-15525](https://github.com/hashicorp/vault/pull/15525)]
+* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)]
+* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)]
+* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)]
+* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)]
+* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)]
+* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)]
+* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)]
+* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)]
+* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)]
+* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)]
+* core (enterprise): Allow local alias create RPCs to persist alias metadata
+* core (enterprise): Fix overcounting of lease count quota usage at startup.
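To make the `username_as_alias` fix above concrete, a hedged sketch of an LDAP auth configuration where the login username becomes the entity alias directly (all server details are illustrative):

```shell
vault write auth/ldap/config \
    url="ldaps://ldap.example.com" \
    userdn="ou=Users,dc=example,dc=com" \
    binddn="cn=vault,ou=svc,dc=example,dc=com" \
    bindpass="example-password" \
    username_as_alias=true  # no user_attr lookup is needed for the alias
```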
+* core (enterprise): Fix some races in merkle index flushing code found in testing +* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] +* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. [[GH-15579](https://github.com/hashicorp/vault/pull/15579)] +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. 
[[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] +* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] +* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] +* secrets/pki: CRLs on performance secondary clusters are now automatically +rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] +* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] +* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] +* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] +* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] +* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Fix issue with KV not recomputing model when you changed versions. 
[[GH-14941](https://github.com/hashicorp/vault/pull/14941)] +* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] +* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] +* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] +* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] +* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.10.11 +### March 01, 2023 SECURITY: - * Cert auth backend now checks validity of individual certificates: In - previous versions of Vault, validity (e.g. expiration) of individual leaf - certificates added for authentication was not checked. This was done to make - it easier for administrators to control lifecycles of individual - certificates added to the backend, e.g. the authentication material being - checked was access to that specific certificate's private key rather than - all private keys signed by a CA. However, this behavior is often unexpected - and as a result can lead to insecure deployments, so we are now validating - these certificates as well. - * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2 - caused the HMACing of any App-ID information stored in paths (including - actual app-IDs and user-IDs) to be unsalted and written as-is from the API. - In 0.7.3 any such paths will be automatically changed to salted versions on - access (e.g. login or read); however, if you created new app-IDs or user-IDs - in 0.7.1/0.7.2, you may want to consider whether any users with access to - Vault's underlying data store may have intercepted these values, and - revoke/roll them. - -DEPRECATIONS/CHANGES: - - * Step-Down is Forwarded: When a step-down is issued against a non-active node - in an HA cluster, it will now forward the request to the active node. +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. 
[[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] -FEATURES: +CHANGES: - * **ed25519 Signing/Verification in Transit with Key Derivation**: The - `transit` backend now supports generating - [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification - functionality. These keys support derivation, allowing you to modify the - actual encryption key used by supplying a `context` value. - * **Key Version Specification for Encryption in Transit**: You can now specify - the version of a key you wish to use when generating a signature, ciphertext, or - HMAC. This can be controlled by the `min_encryption_version` key - configuration property. - * **Replication Primary Discovery (Enterprise)**: Replication primaries will - now advertise the addresses of their local HA cluster members to replication - secondaries. This helps recovery if the primary active node goes down and - neither service discovery nor load balancers are in use to steer clients. +* core: Bump Go version to 1.19.6. IMPROVEMENTS: - * api/health: Add Sys().Health() [[GH-2805](https://github.com/hashicorp/vault/pull/2805)] - * audit: Add auth information to requests that error out [[GH-2754](https://github.com/hashicorp/vault/pull/2754)] - * command/auth: Add `-no-store` option that prevents the auth command from - storing the returned token into the configured token helper [[GH-2809](https://github.com/hashicorp/vault/pull/2809)] - * core/forwarding: Request forwarding now heartbeats to prevent unused - connections from being terminated by firewalls or proxies - * plugins/databases: Add MongoDB as an internal database plugin [[GH-2698](https://github.com/hashicorp/vault/pull/2698)] - * storage/dynamodb: Add a method for checking the existence of children, - speeding up deletion operations in the DynamoDB storage backend [[GH-2722](https://github.com/hashicorp/vault/pull/2722)] - * storage/mysql: Add max_parallel parameter to MySQL backend [[GH-2760](https://github.com/hashicorp/vault/pull/2760)] - * secret/databases: Support listing connections [[GH-2823](https://github.com/hashicorp/vault/pull/2823)] - * secret/databases: Support custom renewal statements in Postgres database - plugin [[GH-2788](https://github.com/hashicorp/vault/pull/2788)] - * secret/databases: Use the role name as part of generated credentials - [[GH-2812](https://github.com/hashicorp/vault/pull/2812)] - * ui (Enterprise): Transit key and secret browsing UI handle large lists better - * ui (Enterprise): root tokens are no longer persisted - * ui (Enterprise): support for mounting Database and TOTP secret backends - -BUG FIXES: - - * auth/app-id: Fix regression causing loading of salts to be skipped - * auth/aws: Improve EC2 describe instances performance [[GH-2766](https://github.com/hashicorp/vault/pull/2766)] - * auth/aws: Fix lookup of some instance profile ARNs [[GH-2802](https://github.com/hashicorp/vault/pull/2802)] - * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various - points (e.g.
renewal time) more robust [[GH-2814](https://github.com/hashicorp/vault/pull/2814)] - * auth/aws: Properly honor configured period when using IAM authentication - [[GH-2825](https://github.com/hashicorp/vault/pull/2825)] - * auth/aws: Check that a bound IAM principal is not empty (in the current - state of the role) before requiring it match the previously authenticated - client [[GH-2781](https://github.com/hashicorp/vault/pull/2781)] - * auth/cert: Fix panic on renewal [[GH-2749](https://github.com/hashicorp/vault/pull/2749)] - * auth/cert: Certificate verification for non-CA certs [[GH-2761](https://github.com/hashicorp/vault/pull/2761)] - * core/acl: Prevent race condition when compiling ACLs in some scenarios - [[GH-2826](https://github.com/hashicorp/vault/pull/2826)] - * secret/database: Increase wrapping token TTL; in a loaded scenario it could - be too short - * secret/generic: Allow integers to be set as the value of `ttl` field as the - documentation claims is supported [[GH-2699](https://github.com/hashicorp/vault/pull/2699)] - * secret/ssh: Added host key callback to ssh client config [[GH-2752](https://github.com/hashicorp/vault/pull/2752)] - * storage/s3: Avoid a panic when some bad data is returned [[GH-2785](https://github.com/hashicorp/vault/pull/2785)] - * storage/dynamodb: Fix list functions working improperly on Windows [[GH-2789](https://github.com/hashicorp/vault/pull/2789)] - * storage/file: Don't leak file descriptors in some error cases - * storage/swift: Fix pre-v3 project/tenant name reading [[GH-2803](https://github.com/hashicorp/vault/pull/2803)] - -## 0.7.2 (May 8th, 2017) +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] BUG FIXES: - * audit: Fix auditing entries containing certain kinds of time values - [[GH-2689](https://github.com/hashicorp/vault/pull/2689)] - -## 0.7.1 (May 5th, 2017) - -DEPRECATIONS/CHANGES: +* auth/approle: Add nil check for the secret ID entry when deleting via secret ID accessor, preventing cross-role secret ID deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18209](https://github.com/hashicorp/vault/pull/18209)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in with OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] - * LDAP Auth Backend: Group membership queries will now run as the `binddn` - user when `binddn`/`bindpass` are configured, rather than as the - authenticating user as was the case previously. +## 1.10.10 +### February 6, 2023 -FEATURES: +CHANGES: - * **AWS IAM Authentication**: IAM principals can get Vault tokens - automatically, opening AWS-based authentication to users, ECS containers, - Lambda instances, and more.
Signed client identity information retrieved - using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS - service before issuing a Vault token. This backend is unified with the - `aws-ec2` authentication backend under the name `aws`, and allows additional - EC2-related restrictions to be applied during the IAM authentication; the - previous EC2 behavior is also still available. [[GH-2441](https://github.com/hashicorp/vault/pull/2441)] - * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your - Vault physical data store [[GH-2546](https://github.com/hashicorp/vault/pull/2546)] - * **Lease Listing and Lookup**: You can now introspect a lease to get its - creation and expiration properties via `sys/leases/lookup`; with `sudo` - capability you can also list leases for lookup, renewal, or revocation via - that endpoint. Various lease functions (renew, revoke, revoke-prefix, - revoke-force) have also been relocated to `sys/leases/`, but they also work - at the old paths for compatibility. Reading (but not listing) leases via - `sys/leases/lookup` is now a part of the current `default` policy. [[GH-2650](https://github.com/hashicorp/vault/pull/2650)] - * **TOTP Secret Backend**: You can now store multi-factor authentication keys - in Vault and use the API to retrieve time-based one-time use passwords on - demand. The backend can also be used to generate a new key and validate - passwords generated by that key. [[GH-2492](https://github.com/hashicorp/vault/pull/2492)] - * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend - combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra - backends. It also provides a plugin interface for extendability through - custom databases. [[GH-2200](https://github.com/hashicorp/vault/pull/2200)] +* core: Bump Go version to 1.19.4. IMPROVEMENTS: - * auth/cert: Support for constraints on subject Common Name and DNS/email - Subject Alternate Names in certificates [[GH-2595](https://github.com/hashicorp/vault/pull/2595)] - * auth/ldap: Use the binding credentials to search group membership rather - than the user credentials [[GH-2534](https://github.com/hashicorp/vault/pull/2534)] - * cli/revoke: Add `-self` option to allow revoking the currently active token - [[GH-2596](https://github.com/hashicorp/vault/pull/2596)] - * core: Randomize x coordinate in Shamir shares [[GH-2621](https://github.com/hashicorp/vault/pull/2621)] - * replication: Fix a bug when enabling `approle` on a primary before - secondaries were connected - * replication: Add heartbeating to ensure firewalls don't kill connections to - primaries - * secret/pki: Add `no_store` option that allows certificates to be issued - without being stored. This removes the ability to look up and/or add to a - CRL but helps with scaling to very large numbers of certificates. 
[[GH-2565](https://github.com/hashicorp/vault/pull/2565)] - * secret/pki: If used with a role parameter, the `sign-verbatim/` - endpoint honors the values of `generate_lease`, `no_store`, `ttl` and - `max_ttl` from the given role [[GH-2593](https://github.com/hashicorp/vault/pull/2593)] - * secret/pki: Add role parameter `allow_glob_domains` that enables defining - names in `allowed_domains` containing `*` glob patterns [[GH-2517](https://github.com/hashicorp/vault/pull/2517)] - * secret/pki: Update certificate storage to not use characters that are not - supported on some filesystems [[GH-2575](https://github.com/hashicorp/vault/pull/2575)] - * storage/etcd3: Add `discovery_srv` option to query for SRV records to find - servers [[GH-2521](https://github.com/hashicorp/vault/pull/2521)] - * storage/s3: Support `max_parallel` option to limit concurrent outstanding - requests [[GH-2466](https://github.com/hashicorp/vault/pull/2466)] - * storage/s3: Use pooled transport for http client [[GH-2481](https://github.com/hashicorp/vault/pull/2481)] - * storage/swift: Allow domain values for V3 authentication [[GH-2554](https://github.com/hashicorp/vault/pull/2554)] - * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more - cleanup cases [[GH-2452](https://github.com/hashicorp/vault/pull/2452)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] BUG FIXES: - * api: Respect a configured path in Vault's address [[GH-2588](https://github.com/hashicorp/vault/pull/2588)] - * auth/aws-ec2: New bounds added as criteria to allow role creation [[GH-2600](https://github.com/hashicorp/vault/pull/2600)] - * auth/ldap: Don't lowercase groups attached to users [[GH-2613](https://github.com/hashicorp/vault/pull/2613)] - * cli: Don't panic if `vault write` is used with the `force` flag but no path - [[GH-2674](https://github.com/hashicorp/vault/pull/2674)] - * core: Help operations should request forward since standbys may not have - appropriate info [[GH-2677](https://github.com/hashicorp/vault/pull/2677)] - * replication: Fix enabling secondaries when certain mounts already existed on - the primary - * secret/mssql: Update mssql driver to support queries with colons [[GH-2610](https://github.com/hashicorp/vault/pull/2610)] - * secret/pki: Don't lowercase O/OU values in certs [[GH-2555](https://github.com/hashicorp/vault/pull/2555)] - * secret/pki: Don't attempt to validate IP SANs if none are provided [[GH-2574](https://github.com/hashicorp/vault/pull/2574)] - * secret/ssh: Don't automatically lowercase principals in issued SSH certs - [[GH-2591](https://github.com/hashicorp/vault/pull/2591)] - * storage/consul: Properly handle state events rather than timing out - [[GH-2548](https://github.com/hashicorp/vault/pull/2548)] - * storage/etcd3: Ensure locks are released if client is improperly shut down -
[[GH-2526](https://github.com/hashicorp/vault/pull/2526)] - -## 0.7.0 (March 21st, 2017) - -SECURITY: - - * Common name not being validated when `exclude_cn_from_sans` option used in - `pki` backend: When using a role in the `pki` backend that specified the - `exclude_cn_from_sans` option, the common name would not then be properly - validated against the role's constraints. This has been fixed. We recommend - that any users of this feature upgrade to 0.7 as soon as feasible. - -DEPRECATIONS/CHANGES: - - * List Operations Always Use Trailing Slash: Any list operation, whether via - the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to - have a trailing slash. This makes policy writing more predictable, as it - means clients will no longer work or fail based on which client they're - using or which HTTP verb they're using. However, it also means that policies - allowing `list` capability must be carefully checked to ensure that they - contain a trailing slash; some policies may need to be split into multiple - stanzas to accommodate. - * PKI Defaults to Unleased Certificates: When issuing certificates from the - PKI backend, by default, no leases will be issued. If you want to manually - revoke a certificate, its serial number can be used with the `pki/revoke` - endpoint. Issuing leases is still possible by enabling the `generate_lease` - toggle in PKI role entries (this will default to `true` for upgrades, to - keep existing behavior), which will allow using lease IDs to revoke - certificates. For installations issuing large numbers of certificates (tens - to hundreds of thousands, or millions), this will significantly improve - Vault startup time since leases associated with these certificates will not - have to be loaded; however note that it also means that revocation of a - token used to issue certificates will no longer add these certificates to a - CRL. If this behavior is desired or needed, consider keeping leases enabled - and ensuring lifetimes are reasonable, and issue long-lived certificates via - a different role with leases disabled. - -FEATURES: - - * **Replication (Enterprise)**: Vault Enterprise now has support for creating - a multi-datacenter replication set between clusters. The current replication - offering is based on an asynchronous primary/secondary (1:N) model that - replicates static data while keeping dynamic data (leases, tokens) - cluster-local, focusing on horizontal scaling for high-throughput and - high-fanout deployments. - * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault - Enterprise UI now supports looking up and rotating response wrapping tokens, - as well as creating tokens with arbitrary values inside. It also now - supports replication functionality, enabling the configuration of a - replication set in the UI. - * **Expanded Access Control Policies**: Access control policies can now - specify allowed and denied parameters -- and, optionally, their values -- to - control what a client can and cannot submit during an API call. Policies can - also specify minimum/maximum response wrapping TTLs to both enforce the use - of response wrapping and control the duration of resultant wrapping tokens. - See the [policies concepts - page](https://www.vaultproject.io/docs/concepts/policies.html) for more - information. - * **SSH Backend As Certificate Authority**: The SSH backend can now be - configured to sign host and user certificates. Each mount of the backend - acts as an independent signing authority.
The CA key pair can be configured - for each mount and the public key is accessible via an unauthenticated API - call; additionally, the backend can generate a public/private key pair for - you. We recommend using separate mounts for signing host and user - certificates. - -IMPROVEMENTS: +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* licensing (enterprise): update autoloaded license cache after reload +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. 
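As a rough sketch of the SSH-CA workflow described in the 0.7.0 notes above (using today's `vault secrets enable` syntax rather than the `vault mount` syntax of that era; the mount paths are illustrative):

```
# Mount separate SSH backends for host and user signing.
$ vault secrets enable -path=ssh-host-signer ssh
$ vault secrets enable -path=ssh-user-signer ssh

# Have each mount generate its own CA key pair...
$ vault write ssh-host-signer/config/ca generate_signing_key=true

# ...and fetch the corresponding public key without authenticating.
$ curl $VAULT_ADDR/v1/ssh-host-signer/public_key
```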
+* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - * api/request: Passing username and password information in API request - [GH-2469] - * audit: Logging the token's use count with authentication response and - logging the remaining uses of the client token with request [GH-2437] - * auth/approle: Support for restricting the number of uses on the tokens - issued [GH-2435] - * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID, - Subnet ID and Region [GH-2407] - * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the - username if not explicitly set on the command line when authenticating - [GH-2154] - * audit: Support adding a configurable prefix (such as `@cee`) before each - line [GH-2359] - * core: Canonicalize list operations to use a trailing slash [GH-2390] - * core: Add option to disable caching on a per-mount level [GH-2455] - * core: Add ability to require valid client certs in listener config [GH-2457] - * physical/dynamodb: Implement a session timeout to avoid having to use - recovery mode in the case of an unclean shutdown, which makes HA much safer - [GH-2141] - * secret/pki: O (Organization) values can now be set to role-defined values - for issued/signed certificates [GH-2369] - * secret/pki: Certificates issued/signed from PKI backend do not generate - leases by default [GH-2403] - * secret/pki: When using DER format, still return the private key type - [GH-2405] - * secret/pki: Add an intermediate to the CA chain even if it lacks an - authority key ID [GH-2465] - * secret/pki: Add role option to use CSR SANs [GH-2489] - * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208] - * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint - and also return it when CA key pair is generated [GH-2483] +## 1.10.9 +### November 30, 2022 BUG FIXES: - * audit: When auditing headers use case-insensitive comparisons [GH-2362] - * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374] - * auth/okta: Fix panic if user had no local groups and/or policies set - [GH-2367] - * command/server: Fix parsing of redirect address when port is not mentioned - [GH-2354] - * physical/postgresql: Fix listing returning incorrect results if there were - multiple levels of children [GH-2393] - -## 0.6.5 (February 7th, 2017) - -FEATURES: - - * **Okta Authentication**: A new Okta authentication backend allows you to use - Okta usernames and passwords to authenticate to Vault. If provided with an - appropriate Okta API token, group membership can be queried to assign - policies; users and groups can be defined locally as well. - * **RADIUS Authentication**: A new RADIUS authentication backend allows using - a RADIUS server to authenticate to Vault. Policies can be configured for - specific users or for any authenticated user. - * **Exportable Transit Keys**: Keys in `transit` can now be marked as - `exportable` at creation time. This allows a properly ACL'd user to retrieve - the associated signing key, encryption key, or HMAC key. The `exportable` - value is returned on a key policy read and cannot be changed, so if a key is - marked `exportable` it will always be exportable, and if it is not it will - never be exportable. - * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations - in the transit backend now support processing multiple input items in one - call, returning the output of each item in the response. 
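A hedged sketch of the batch interface described in the entry above, assuming the current `batch_input` request field and a pre-existing transit key (the key name `orders` is a placeholder):

```
# Encrypt two base64-encoded plaintexts in a single round trip; the
# response carries one result per input item.
$ curl --header "X-Vault-Token: $VAULT_TOKEN" --request POST \
    --data '{"batch_input": [{"plaintext": "dGhlIHF1aWNr"}, {"plaintext": "YnJvd24gZm94"}]}' \
    $VAULT_ADDR/v1/transit/encrypt/orders
```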
- * **Configurable Audited HTTP Headers**: You can now specify headers that you - want to have included in each audit entry, along with whether each header - should be HMAC'd or kept plaintext. This can be useful for adding additional - client or network metadata to the audit logs. - * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit - backend, allowing creation, viewing and editing of named keys as well as using - those keys to perform supported transit operations directly in the UI. - * **Socket Audit Backend**: A new socket audit backend allows audit logs to be sent - through TCP, UDP, or UNIX Sockets. - -IMPROVEMENTS: +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148] - * auth/aws-ec2: Support issuing periodic tokens [GH-2324] - * auth/github: Support listing teams and users [GH-2261] - * auth/ldap: Support adding policies to local users directly, in addition to - local groups [GH-2152] - * command/server: Add ability to select and prefer server cipher suites - [GH-2293] - * core: Add a nonce to unseal operations as a check (useful mostly for - support, not as a security principle) [GH-2276] - * duo: Added ability to supply extra context to Duo pushes [GH-2118] - * physical/consul: Add option for setting consistency mode on Consul gets - [GH-2282] - * physical/etcd: Full v3 API support; code will autodetect which API version - to use. The v3 code path is significantly less complicated and may be much - more stable. [GH-2168] - * secret/pki: Allow specifying OU entries in generated certificate subjects - [GH-2251] - * secret mount ui (Enterprise): the secret mount list now shows all mounted - backends even if the UI cannot browse them. Additional backends can now be - mounted from the UI as well.
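A minimal sketch of the two audit features introduced above, using today's CLI verbs (the 0.6.5-era command was `vault audit-enable`); the header name and socket address are placeholders:

```
# Record a chosen request header in each audit entry, HMAC'd rather than plaintext.
$ vault write sys/config/auditing/request-headers/X-Forwarded-For hmac=true

# Stream audit entries to a TCP listener instead of a local file.
$ vault audit enable socket address=127.0.0.1:9090 socket_type=tcp
```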
+## 1.10.8 +### November 2, 2022 BUG FIXES: - * auth/token: Fix regression in 0.6.4 where using token store roles as a - blacklist (with only `disallowed_policies` set) would not work in most - circumstances [GH-2286] - * physical/s3: Page responses in client so list doesn't truncate [GH-2224] - * secret/cassandra: Stop a connection leak that could occur on active node - failover [GH-2313] - * secret/pki: When using `sign-verbatim`, don't require a role and use the - CSR's common name [GH-2243] +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when `cache_disable` is set to true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -## 0.6.4 (December 16, 2016) +## 1.10.7 +### September 30, 2022 SECURITY: -Further details about these security issues can be found in the 0.6.4 upgrade -guide. - - * `default` Policy Privilege Escalation: If a parent token did not have the - `default` policy attached to its token, it could still create children with - the `default` policy. This is no longer allowed (unless the parent has - `sudo` capability for the creation path). In most cases this is low severity - since the access grants in the `default` policy are meant to be access - grants that are acceptable for all tokens to have. - * Leases Not Expired When Limited Use Token Runs Out of Uses: When using - limited-use tokens to create leased secrets, if the limited-use token was - revoked due to running out of uses (rather than due to TTL expiration or - explicit revocation) it would fail to revoke the leased secrets. These - secrets would still be revoked when their TTL expired, limiting the severity - of this issue. An endpoint has been added (`auth/token/tidy`) that can - perform housekeeping tasks on the token store; one of its tasks can detect - this situation and revoke the associated leases. - -FEATURES: - - * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing, - creating, and editing policies. - -IMPROVEMENTS: - - * http: Vault now sets a `no-store` cache control header to make it more - secure in setups that are not end-to-end encrypted [GH-2183] - -BUG FIXES: - - * auth/ldap: Don't panic if dialing returns an error and starttls is enabled; - instead, return the error [GH-2188] - * ui (Enterprise): Submitting an unseal key now properly resets the - form so a browser refresh isn't required to continue.
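The `auth/token/tidy` endpoint mentioned in the 0.6.4 security notes above can also be invoked manually; a sketch with the current CLI:

```
# Trigger token-store housekeeping; -f sends an empty POST body. Among
# other tasks, the tidy run detects leases left behind by use-exhausted
# tokens and revokes them.
$ vault write -f auth/token/tidy
```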
- -## 0.6.3 (December 6, 2016) - -DEPRECATIONS/CHANGES: - - * Request size limitation: A maximum request size of 32MB is imposed to - prevent a denial of service attack with arbitrarily large requests [GH-2108] - * LDAP denies passwordless binds by default: In new LDAP mounts, or when - existing LDAP mounts are rewritten, passwordless binds will be denied by - default. The new `deny_null_bind` parameter can be set to `false` to allow - these. [GH-2103] - * Any audit backend activated satisfies conditions: Previously, when a new - Vault node was taking over service in an HA cluster, all audit backends were - required to be loaded successfully to take over active duty. This behavior - now matches the behavior of the audit logging system itself: at least one - audit backend must successfully be loaded. The server log contains an error - when this occurs. This helps keep a Vault HA cluster working when there is a - misconfiguration on a standby node. [GH-2083] - -FEATURES: - - * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI - that offers access to a number of features, including init/unsealing/sealing, - authentication via userpass or LDAP, and K/V reading/writing. The capability - set of the UI will be expanding rapidly in further releases. To enable it, - set `ui = true` in the top level of Vault's configuration file and point a - web browser at your Vault address. - * **Google Cloud Storage Physical Backend**: You can now use GCS for storing - Vault data [GH-2099] - -IMPROVEMENTS: - - * auth/github: Policies can now be assigned to users as well as to teams - [GH-2079] - * cli: Set the number of retries on 500 down to 0 by default (no retrying). It - can be very confusing to users when there is a pause while the retries - happen if they haven't explicitly set it. With request forwarding the need - for this is lessened anyways. [GH-2093] - * core: Response wrapping is now allowed to be specified by backend responses - (requires backends gaining support) [GH-2088] - * physical/consul: When announcing service, use the scheme of the Vault server - rather than the Consul client [GH-2146] - * secret/consul: Added listing functionality to roles [GH-2065] - * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to - enable customization of user revocation SQL statements [GH-2033] - * secret/transit: Add listing of keys [GH-1987] - -BUG FIXES: - - * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with - Vault 0.6.1 and older [GH-2014] - * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077] - * auth/approle: Creating the index for the role_id properly [GH-2004] - * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the - instance-profile ARN [GH-2035] - * auth/ldap: Avoid leaking connections on login [GH-2130] - * command/path-help: Use the actual error generated by Vault rather than - always using 500 when there is a path help error [GH-2153] - * command/ssh: Use temporary file for identity and ensure its deletion before - the command returns [GH-2016] - * cli: Fix error printing values with `-field` if the values contained - formatting directives [GH-2109] - * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120] - * core: Fix bug where a failure to come up as active node (e.g. 
if an audit - backend failed) could lead to deadlock [GH-2083] - * physical/mysql: Fix potential crash during setup due to a query failure - [GH-2105] - * secret/consul: Fix panic on user error [GH-2145] - -## 0.6.2 (October 5, 2016) - -DEPRECATIONS/CHANGES: - - * Convergent Encryption v2: New keys in `transit` using convergent mode will - use a new nonce derivation mechanism rather than require the user to supply - a nonce. While not explicitly increasing security, it minimizes the - likelihood that a user will use the mode improperly and impact the security - of their keys. Keys in convergent mode that were created in v0.6.1 will - continue to work with the same mechanism (user-supplied nonce). - * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the - `etcd` storage backend now requires that `ha_enabled` be explicitly - specified in the configuration file. The backend currently has known broken - HA behavior, so this flag discourages use by default without explicitly - enabling it. If you are using this functionality, when upgrading, you should - set `ha_enabled` to `"true"` *before* starting the new versions of Vault. - * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault - the default was 30 days, but moving it to 32 days allows some operations - (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron - job. - * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are - no longer part of request URLs. The GET and DELETE operations are now moved - to new endpoints (`/lookup` and `/destroy`) which consume the input from - the body and not the URL. - * AppRole requires at least one constraint: previously it was sufficient to - turn off all AppRole authentication constraints (secret ID, CIDR block) and - use the role ID only. It is now required that at least one additional - constraint is enabled. Existing roles are unaffected, but any new roles or - updated roles will require this. - * Reading wrapped responses from `cubbyhole/response` is deprecated. The - `sys/wrapping/unwrap` endpoint should be used instead as it provides - additional security, auditing, and other benefits. The ability to read - directly will be removed in a future release. - * Request Forwarding is now on by default: in 0.6.1 this required toggling on, - but is now enabled by default. This can be disabled via the - `"disable_clustering"` parameter in Vault's - [config](https://www.vaultproject.io/docs/config/index.html), or per-request - with the `X-Vault-No-Request-Forwarding` header. - * In prior versions a bug caused the `bound_iam_role_arn` value in the - `aws-ec2` authentication backend to actually use the instance profile ARN. - This has been corrected, but as a result there is a behavior change. To - match using the instance profile ARN, a new parameter - `bound_iam_instance_profile_arn` has been added. Existing roles will - automatically transfer the value over to the correct parameter, but the next - time the role is updated, the new meanings will take effect. - -FEATURES: - - * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an - approle can now specify a list of CIDR blocks from which the requests to - generate secret IDs must originate.
If an approle already has CIDR - restrictions specified, the CIDR restrictions on the secret ID should be a - subset of those specified on the role [GH-1910] - * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root - token created at initialization time can now be PGP encrypted [GH-1883] - * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows, - when a CA cert is being supplied as a signed root or intermediate, a trust - chain of arbitrary length. The chain is returned as a parameter at - certificate issue/sign time and is retrievable independently as well. - [GH-1694] - * **Response Wrapping Enhancements**: There are new endpoints to look up - response wrapped token parameters; wrap arbitrary values; rotate wrapping - tokens; and unwrap with enhanced validation. In addition, list operations - can now be response-wrapped. [GH-1927] - * **Transit Features**: The `transit` backend now supports generating random - bytes and SHA sums; HMACs; and signing and verification functionality using - EC keys (P-256 curve) - -IMPROVEMENTS: - - * api: Return error when an invalid (as opposed to incorrect) unseal key is - submitted, rather than ignoring it [GH-1782] - * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834] - * api: Rekey operation now redirects from standbys to master [GH-1862] - * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and - re-open the log file, making it easier to rotate audit logs [GH-1953] - * auth/aws-ec2: EC2 instances can get authenticated by presenting the identity - document and its SHA256 RSA digest [GH-1961] - * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a - prefix match instead of exact match [GH-1943] - * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to - refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn` - to refer to IAM role ARN instead of the instance profile ARN [GH-1913] - * auth/aws-ec2: Backend generates the nonce by default and clients can - explicitly disable reauthentication by setting empty nonce [GH-1889] - * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806] - * command/format: The `format` flag on select CLI commands takes `yml` as an - alias for `yaml` [GH-1899] - * core: Allow the size of the read cache to be set via the config file, and - change the default value to 1MB (from 32KB) [GH-1784] - * core: Allow single and two-character path parameters for most places - [GH-1811] - * core: Allow list operations to be response-wrapped [GH-1814] - * core: Provide better protection against timing attacks in Shamir code - [GH-1877] - * core: Unmounting/disabling backends no longer returns an error if the mount - didn't exist. This is in line with elsewhere in Vault's API where `DELETE` is - an idempotent operation.
[GH-1903] - * credential/approle: At least one constraint is required to be enabled while - creating and updating a role [GH-1882] - * secret/cassandra: Added consistency level for use with roles [GH-1931] - * secret/mysql: SQL for revoking user can be configured on the role [GH-1914] - * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new - keys [GH-1812] - * secret/transit: Empty plaintext values are now allowed [GH-1874] +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] BUG FIXES: - * audit: Fix panic being caused by some values logging as underlying Go types - instead of formatted strings [GH-1912] - * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920] - * auth/approle: Prevent secret IDs and secret ID accessors from being logged - in plaintext in audit logs [GH-1947] - * auth/aws-ec2: Allow authentication if the underlying host is in a bad state - but the instance is running [GH-1884] - * auth/token: Fixed metadata being omitted from the token lookup response by - gracefully handling token entry upgrade [GH-1924] - * cli: Don't error on newline in token file [GH-1774] - * core: Pass back content-type header for forwarded requests [GH-1791] - * core: Fix panic if the same key was given twice to `generate-root` [GH-1827] - * core: Fix potential deadlock on unmount/remount [GH-1793] - * physical/file: Remove empty directories from the `file` storage backend [GH-1821] - * physical/zookeeper: Remove empty directories from the `zookeeper` storage - backend and add a fix to the `file` storage backend's logic [GH-1964] - * secret/aws: Added update operation to `aws/sts` path to consider `ttl` - parameter [39b75c6] - * secret/aws: Mark STS secrets as non-renewable [GH-1804] - * secret/cassandra: Properly store session for re-use [GH-1802] - * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781] - -## 0.6.1 (August 22, 2016) - -DEPRECATIONS/CHANGES: - - * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to - connect to the HA cluster. We recommend following our [general upgrade - instructions](https://www.vaultproject.io/docs/install/upgrade.html) in - addition to 0.6.1-specific upgrade instructions to ensure that this is not - an issue. - * Status codes for sealed/uninitialized Vaults have changed to `503`/`501` - respectively. See the [version-specific upgrade - guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for - more details. - * Root tokens (tokens with the `root` policy) can no longer be created except - by another root token or the `generate-root` endpoint. - * Issued certificates from the `pki` backend against new roles created or - modified after upgrading will contain a set of default key usages. - * The `dynamodb` physical data store no longer supports HA by default. It has - some non-ideal behavior around failover that was causing confusion. See the - [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled) - for information on enabling HA mode. It is very important that this - configuration is added _before upgrading_.
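For the DynamoDB change above, opting back in looks roughly like the following server-config addition (a sketch: the stanza was named `backend` in that era and is `storage` today, and the config file path is a placeholder):

```
# Explicitly re-enable HA for the dynamodb data store before upgrading.
$ cat >> /etc/vault/vault.hcl <<'EOF'
backend "dynamodb" {
  ha_enabled = "true"
}
EOF
```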
- * The `ldap` backend no longer searches for `memberOf` groups as part of its - normal flow. Instead, the desired group filter must be specified. This fixes - some errors and increases speed for directories with different structures, - but if this behavior has been relied upon, ensure that you see the upgrade - notes _before upgrading_. - * `app-id` is now deprecated with the addition of the new AppRole backend. - There are no plans to remove it, but we encourage using AppRole whenever - possible, as it offers enhanced functionality and can accommodate many more - types of authentication paradigms. - -FEATURES: - - * **AppRole Authentication Backend**: The `approle` backend is a - machine-oriented authentication backend that provides a similar concept to - App-ID while adding many missing features, including a pull model that - allows for the backend to generate authentication credentials rather than - requiring operators or other systems to push credentials in. It should be - useful in many more situations than App-ID. The inclusion of this backend - deprecates App-ID. [GH-1426] - * **Request Forwarding**: Vault servers can now forward requests to each other - rather than redirecting clients. This feature is off by default in 0.6.1 but - will be on by default in the next release. See the [HA concepts - page](https://www.vaultproject.io/docs/concepts/ha.html) for information on - enabling and configuring it. [GH-443] - * **Convergent Encryption in `Transit`**: The `transit` backend now supports a - convergent encryption mode where the same plaintext will produce the same - ciphertext. Although very useful in some situations, this has potential - security implications, which are mostly mitigated by requiring the use of - key derivation when convergent encryption is enabled. See [the `transit` - backend - documentation](https://www.vaultproject.io/docs/secrets/transit/index.html) - for more details. [GH-1537] - * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates - to define group filters, providing the capability to support some - directories that could not easily be supported before (especially specific - Active Directory setups with nested groups). [GH-1388] - * **Key Usage Control in `PKI`**: Issued certificates from roles created or - modified after upgrading contain a set of default key usages for increased - compatibility with OpenVPN and some other software. This set can be changed - when writing a role definition. Existing roles are unaffected. [GH-1552] - * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx` - error code will now retry after a backoff. The maximum total number of - retries (including disabling this functionality) can be set with an - environment variable. See the [environment variable - documentation](https://www.vaultproject.io/docs/commands/environment.html) - for more details. [GH-1594] - * **Service Discovery in `vault init`**: The new `-auto` option on `vault init` - will perform service discovery using Consul. When only one node is discovered, - it will be initialized and when more than one node is discovered, they will - be output for easy selection. See `vault init --help` for more details. [GH-1642] - * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database - credentials based on configured roles. Sponsored by - [CommerceHub](http://www.commercehub.com/). [GH-1414] - * **Circonus Metrics Integration**: Vault can now send metrics to - [Circonus](http://www.circonus.com/). 
See the [configuration - documentation](https://www.vaultproject.io/docs/config/index.html) for - details. [GH-1646] - -IMPROVEMENTS: - - * audit: Added a unique identifier to each request which will also be found in - the request portion of the response. [GH-1650] - * auth/aws-ec2: Added a new constraint `bound_account_id` to the role - [GH-1523] - * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role - [GH-1522] - * auth/aws-ec2: Added `ttl` field for the role [GH-1703] - * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config` - have the minimum TLS version set to 1.2 by default. This is configurable. - * auth/token: Added endpoint to list accessors [GH-1676] - * auth/token: Added `disallowed_policies` option to token store roles [GH-1681] - * auth/token: `root` or `sudo` tokens can now create periodic tokens via - `auth/token/create`; additionally, the same token can now be periodic and - have an explicit max TTL [GH-1725] - * build: Add support for building on Solaris/Illumos [GH-1726] - * cli: Output formatting in the presence of warnings in the response object - [GH-1533] - * cli: `vault auth` command supports a `-path` option to take in the path at - which the auth backend is enabled, thereby allowing authenticating against - different paths using the command options [GH-1532] - * cli: `vault auth -methods` will now display the config settings of the mount - [GH-1531] - * cli: `vault read/write/unwrap -field` now allows selecting token response - fields [GH-1567] - * cli: `vault write -field` now allows selecting wrapped response fields - [GH-1567] - * command/status: Version information and cluster details added to the output - of `vault status` command [GH-1671] - * core: Response wrapping is now enabled for login endpoints [GH-1588] - * core: The duration of leadership is now exported via events through - telemetry [GH-1625] - * core: `sys/capabilities-self` is now accessible as part of the `default` - policy [GH-1695] - * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701] - * core: Unseal keys will now be returned in both hex and base64 forms, and - either can be used [GH-1734] - * core: Responses from most `/sys` endpoints now return normal `api.Secret` - structs in addition to the values they carried before. 
This means that - response wrapping can now be used with most authenticated `/sys` operations - [GH-1699] - * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576] - * physical/consul: Allowing additional tags to be added to Consul service - registration via `service_tags` option [GH-1643] - * secret/aws: Listing of roles is supported now [GH-1546] - * secret/cassandra: Add `connect_timeout` value for Cassandra connection - configuration [GH-1581] - * secret/mssql,mysql,postgresql: Reading of connection settings is supported - in all the sql backends [GH-1515] - * secret/mysql: Added optional maximum idle connections value to MySQL - connection configuration [GH-1635] - * secret/mysql: Use a combination of the role name and token display name in - generated user names and allow the length to be controlled [GH-1604] - * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed - in via one of four ways: a semicolon-delimited string, a base64-delimited - string, a serialized JSON string array, or a base64-encoded serialized JSON - string array [GH-1686] - * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and return - the role name as part of the response of the `verify` API - * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680] - * sys/health: Added version information to the response of health status - endpoint [GH-1647] - * sys/health: Cluster information is returned as part of health status when - Vault is unsealed [GH-1671] - * sys/mounts: MountTable data is compressed before serializing to accommodate - thousands of mounts [GH-1693] - * website: The [token - concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has - been completely rewritten [GH-1725] - -BUG FIXES: +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials.
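A quick sketch of the `sys/health` additions noted above (unauthenticated endpoint; assumes `VAULT_ADDR` is set):

```
# The health response now includes the server version (GH-1647) and,
# once unsealed, cluster details (GH-1671) alongside the seal flags.
$ curl $VAULT_ADDR/v1/sys/health
```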
+* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - * auth/aws-ec2: Added a nil check for stored whitelist identity object - during renewal [GH-1542] - * auth/cert: Fix panic if no client certificate is supplied [GH-1637] - * auth/token: Don't report that a non-expiring root token is renewable, as - attempting to renew it results in an error [GH-1692] - * cli: Don't retry a command when a redirection is received [GH-1724] - * core: Fix regression causing status codes to be `400` in most non-5xx error - cases [GH-1553] - * core: Fix panic that could occur during a leadership transition [GH-1627] - * physical/postgres: Remove use of prepared statements as this causes - connection multiplexing software to break [GH-1548] - * physical/consul: Multiple Vault nodes on the same machine leading to check ID - collisions were resulting in incorrect health check responses [GH-1628] - * physical/consul: Fix deregistration of health checks on exit [GH-1678] - * secret/postgresql: Check for existence of role before attempting deletion - [GH-1575] - * secret/postgresql: Handle revoking roles that have privileges on sequences - [GH-1573] - * secret/postgresql(,mysql,mssql): Fix incorrect use of database over - transaction object which could lead to connection exhaustion [GH-1572] - * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634] - * secret/pki: Fix adding email addresses as SANs [GH-1688] - * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727] - * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715] - -## 0.6.0 (June 14th, 2016) +## 1.10.6 +### August 31, 2022 SECURITY: - * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via - lease IDs, which incorporate path information) and - `auth/token/revoke-prefix` was intended to revoke prefixes of tokens (using - the tokens' paths and, since 0.5.2, role information), in implementation - they both behaved exactly the same way since a single component in Vault is - responsible for managing lifetimes of both, and the type of the tracked - lifetime was not being checked. The end result was that either endpoint - could revoke both secret leases and tokens. We consider this a very minor - security issue as there are a number of mitigating factors: both endpoints - require `sudo` capability in addition to write capability, preventing - blanket ACL path globs from providing access; both work by using the prefix - to revoke as a part of the endpoint path, allowing them to be properly - ACL'd; and both are intended for emergency scenarios and users should - already not generally have access to either one. In order to prevent - confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and - `sys/revoke-prefix` will be meant for both leases and tokens instead. - -DEPRECATIONS/CHANGES: - - * `auth/token/revoke-prefix` has been removed. See the security notice for - details. [GH-1280] - * Vault will now automatically register itself as the `vault` service when - using the `consul` backend and will perform its own health checks. See - the Consul backend documentation for information on how to disable - auto-registration and service checks. 
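Because the registration described above happens automatically, discovery reduces to a Consul DNS query; a sketch, assuming Consul's default DNS naming (the active./standby. subdomains are described in the 0.6.0 feature notes below):

```
# All Vault nodes register under the `vault` service; the active node
# is additionally published under the active. subdomain.
$ dig +short vault.service.consul
$ dig +short active.vault.service.consul
```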
- * List operations that do not find any keys now return a `404` status code - rather than an empty response object [GH-1365] - * CA certificates issued from the `pki` backend no longer have associated - leases, and any CA certs already issued will ignore revocation requests from - the lease manager. This is to prevent CA certificates from being revoked - when the token used to issue the certificate expires; it was not obvious - to users that they needed to ensure that the token lifetime was at - least as long as a potentially very long-lived CA cert. +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] -FEATURES: +CHANGES: - * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS - EC2 instances allowing automated retrieval of Vault tokens. Unlike most - Vault authentication backends, this backend does not require first deploying - or provisioning security-sensitive credentials (tokens, username/password, - client certificates, etc). Instead, it treats AWS as a Trusted Third Party - and uses the cryptographically signed dynamic metadata information that - uniquely represents each EC2 instance. [Vault - Enterprise](https://www.hashicorp.com/vault.html) customers have access to a - turnkey client that speaks the backend API and makes access to a Vault token - easy. - * **Response Wrapping**: Nearly any response within Vault can now be wrapped - inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole - Authentication - Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) - mechanism to its logical conclusion. Retrieving the original response is as - simple as a single API command or the new `vault unwrap` command. This makes - secret distribution easier and more secure, including secure introduction. - * **Azure Physical Backend**: You can now use Azure blob object storage as - your Vault physical data store [GH-1266] - * **Swift Physical Backend**: You can now use Swift blob object storage as - your Vault physical data store [GH-1425] - * **Consul Backend Health Checks**: The Consul backend will automatically - register a `vault` service and perform its own health checking. By default - the active node can be found at `active.vault.service.consul` and all - standby nodes at `standby.vault.service.consul`. Sealed vaults are marked - critical and are not listed by default in Consul's service discovery. See - the documentation for details. [GH-1349] - * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on - tokens that do not honor changes in the system- or mount-set values. This is - useful, for instance, when the max TTL of the system or the `auth/token` - mount must be set high to accommodate certain needs but you want more - granular restrictions on tokens being issued directly from the Token - authentication backend at `auth/token`.
[GH-1399] - * **Non-Renewable Tokens**: When creating tokens directly through the token - authentication backend, you can now specify in both token store roles and - the API whether or not a token should be renewable, defaulting to `true`. - * **RabbitMQ Secret Backend**: Vault can now generate credentials for - RabbitMQ. Vhosts and tags can be defined within roles. [GH-788] +* core: Bump Go version to 1.17.13. IMPROVEMENTS: - * audit: Add the DisplayName value to the copy of the Request object embedded - in the associated Response, to match the original Request object [GH-1387] - * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435] - * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms. - A particular exception are any current MFA paths. A few paths in `token` and - `sys` also require `root` or `sudo`. [GH-1478] - * command/auth: Restore the previous authenticated token if the `auth` command - fails to authenticate the provided token [GH-1233] - * command/write: `-format` and `-field` can now be used with the `write` - command [GH-1228] - * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297] - * core: Don't keep lease timers around when tokens are revoked [GH-1277] - * core: If using the `disable_cache` option, caches for the policy store and - the `transit` backend are now disabled as well [GH-1346] - * credential/cert: Renewal requests are rejected if the set of policies has - changed since the token was issued [GH-477] - * credential/cert: Check CRLs for specific non-CA certs configured in the - backend [GH-1404] - * credential/ldap: If `groupdn` is not configured, skip searching LDAP and - only return policies for local groups, plus a warning [GH-1283] - * credential/ldap: `vault list` support for users and groups [GH-1270] - * credential/ldap: Support for the `memberOf` attribute for group membership - searching [GH-1245] - * credential/userpass: Add list support for users [GH-911] - * credential/userpass: Remove user configuration paths from requiring sudo, in - favor of normal ACL mechanisms [GH-1312] - * credential/token: Sanitize policies and add `default` policies in appropriate - places [GH-1235] - * credential/token: Setting the renewable status of a token is now possible - via `vault token-create` and the API. The default is true, but tokens can be - specified as non-renewable. [GH-1499] - * secret/aws: Use chain credentials to allow environment/EC2 instance/shared - providers [GH-307] - * secret/aws: Support for STS AssumeRole functionality [GH-1318] - * secret/consul: Reading consul access configuration supported. The response - will contain non-sensitive information only [GH-1445] - * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to - DNS or Email Subject Alternate Names [GH-1220] - * secret/pki: Added list support for certificates [GH-1466] - * sys/capabilities: Enforce ACL checks for requests that query the capabilities - of a token on a given path [GH-1221] - * sys/health: Status information can now be retrieved with `HEAD` [GH-1509] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] BUG FIXES: - * command/read: Fix panic when using `-field` with a non-string value [GH-1308] - * command/token-lookup: Fix TTL showing as 0 depending on how a token was - created. This only affected the value shown at lookup, not the token - behavior itself. 
[GH-1306]
- * command/various: Tell the JSON decoder to not convert all numbers to floats;
- fixes various places where numbers were showing up in scientific
- notation
- * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags
- over their respective env vars [GH-1480]
- * command/ssh: Provided option to disable host key checking. The automated
- variant of `vault ssh` command uses `sshpass` which was failing to handle
- host key checking presented by the `ssh` binary. [GH-1473]
- * core: Properly persist mount-tuned TTLs for auth backends [GH-1371]
- * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372]
- * credential/github: Make organization comparison case-insensitive during
- login [GH-1359]
- * credential/github: Fix panic when renewing a token created with some earlier
- versions of Vault [GH-1510]
- * credential/github: The token used to log in via `vault auth` can now be
- specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511]
- * credential/ldap: Fix problem where certain error conditions when configuring
- or opening LDAP connections would cause a panic instead of returning a useful
- error message [GH-1262]
- * credential/token: Fall back to normal parent-token semantics if
- `allowed_policies` is empty for a role. Using `allowed_policies` of
- `default` resulted in the same behavior anyway. [GH-1276]
- * credential/token: Fix issues renewing tokens when using the "suffix"
- capability of token roles [GH-1331]
- * credential/token: Fix lookup via POST showing the request token instead of
- the desired token [GH-1354]
- * credential/various: Fix renewal conditions when `default` policy is not
- contained in the backend config [GH-1256]
- * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353]
- * secret/consul: Use non-pooled Consul API client to avoid leaving files open
- [GH-1428]
- * secret/pki: Don't check whether a certificate is destined to be a CA
- certificate if sign-verbatim endpoint is used [GH-1250]
-
-## 0.5.3 (May 27th, 2016)

+* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)]
+* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)]
+* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
+* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
+* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)]
+* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)]
+* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the
+Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)]
+* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)]
+* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)]
+* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters.
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] SECURITY: - * Consul ACL Token Revocation: An issue was reported to us indicating that - generated Consul ACL tokens were not being properly revoked. Upon - investigation, we found that this behavior was reproducible in a specific - scenario: when a generated lease for a Consul ACL token had been renewed - prior to revocation. In this case, the generated token was not being - properly persisted internally through the renewal function, leading to an - error during revocation due to the missing token. Unfortunately, this was - coded as a user error rather than an internal error, and the revocation - logic was expecting internal errors if revocation failed. As a result, the - revocation logic believed the revocation to have succeeded when it in fact - failed, causing the lease to be dropped while the token was still valid - within Consul. In this release, the Consul backend properly persists the - token through renewals, and the revocation logic has been changed to - consider any error type to have been a failure to revoke, causing the lease - to persist and attempt to be revoked later. - -We have written an example shell script that searches through Consul's ACL -tokens and looks for those generated by Vault, which can be used as a template -for a revocation script as deemed necessary for any particular security -response. The script is available at -https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0 - -Please note that any outstanding leases for Consul tokens produced prior to -0.5.3 that have been renewed will continue to exhibit this behavior. As a -result, we recommend either revoking all tokens produced by the backend and -issuing new ones, or if needed, a more advanced variant of the provided example -could use the timestamp embedded in each generated token's name to decide which -tokens are too old and should be deleted. This could then be run periodically -up until the maximum lease time for any outstanding pre-0.5.3 tokens has -expired. - -This is a security-only release. There are no other code changes since 0.5.2. -The binaries have one additional change: they are built against Go 1.6.1 rather -than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming -language itself. - -## 0.5.2 (March 16th, 2016) - -FEATURES: - - * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based - on configured roles [GH-998] - * **Token Accessors**: Vault now provides an accessor with each issued token. - This accessor is an identifier that can be used for a limited set of - actions, notably for token revocation. This value can be logged in - plaintext to audit logs, and in combination with the plaintext metadata - logged to audit logs, provides a searchable and straightforward way to - revoke particular users' or services' tokens in many cases. To enable - plaintext audit logging of these accessors, set `hmac_accessor=false` when - enabling an audit backend. 
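As a rough sketch of how accessors tie together (the log path and the accessor value below are placeholders, and the 0.5.2-era command names are assumed from this changelog): enable an audit backend with plaintext accessors, then revoke a token via the `auth/token/revoke-accessor` endpoint added in this release (see the improvements below) using only the accessor recovered from the log.

```
# Enable the file audit backend with token accessors logged in plaintext
vault audit-enable file file_path=/var/log/vault_audit.log hmac_accessor=false

# Later, revoke a token knowing only the accessor found in the audit log
# (the accessor value here is illustrative)
vault write auth/token/revoke-accessor accessor=2c84f488-2133-4ced-87b0-570f93a76830
```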
- * **Token Credential Backend Roles**: Roles can now be created in the `token` - credential backend that allow modifying token behavior in ways that are not - otherwise exposed or easily delegated. This allows creating tokens with a - fixed set (or subset) of policies (rather than a subset of the calling - token's), periodic tokens with a fixed TTL but no expiration, specified - prefixes, and orphans. - * **Listener Certificate Reloading**: Vault's configured listeners now reload - their TLS certificate and private key when the Vault process receives a - SIGHUP. - -IMPROVEMENTS: - - * auth/token: Endpoints optionally accept tokens from the HTTP body rather - than just from the URLs [GH-1211] - * auth/token,sys/capabilities: Added new endpoints - `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and - `sys/capabilities-accessor`, which enables performing the respective actions - with just the accessor of the tokens, without having access to the actual - token [GH-1188] - * core: Ignore leading `/` in policy paths [GH-1170] - * core: Ignore leading `/` in mount paths [GH-1172] - * command/policy-write: Provided HCL is now validated for format violations - and provides helpful information around where the violation occurred - [GH-1200] - * command/server: The initial root token ID when running in `-dev` mode can - now be specified via `-dev-root-token-id` or the environment variable - `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162] - * command/server: The listen address when running in `-dev` mode can now be - specified via `-dev-listen-address` or the environment variable - `VAULT_DEV_LISTEN_ADDRESS` [GH-1169] - * command/server: The configured listeners now reload their TLS - certificates/keys when Vault is SIGHUP'd [GH-1196] - * command/step-down: New `vault step-down` command and API endpoint to force - the targeted node to give up active status, but without sealing. The node - will wait ten seconds before attempting to grab the lock again. [GH-1146] - * command/token-renew: Allow no token to be passed in; use `renew-self` in - this case. Change the behavior for any token being passed in to use `renew`. - [GH-1150] - * credential/app-id: Allow `app-id` parameter to be given in the login path; - this causes the `app-id` to be part of the token path, making it easier to - use with `revoke-prefix` [GH-424] - * credential/cert: Non-CA certificates can be used for authentication. They - must be matched exactly (issuer and serial number) for authentication, and - the certificate must carry the client authentication or 'any' extended usage - attributes. [GH-1153] - * credential/cert: Subject and Authority key IDs are output in metadata; this - allows more flexible searching/revocation in the audit logs [GH-1183] - * credential/cert: Support listing configured certs [GH-1212] - * credential/userpass: Add support for `create`/`update` capability - distinction in user path, and add user-specific endpoints to allow changing - the password and policies [GH-1216] - * credential/token: Add roles [GH-1155] - * secret/mssql: Add MSSQL backend [GH-998] - * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL` - endpoint [GH-1180] - * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some - other formats [GH-1187] - * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint. 
- [GH-1154]
- * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to
- fetch the capabilities of a token on a given path [GH-1171]
- * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors
- when revoking a lease, necessary in some emergency/failure scenarios
- [GH-1168]
- * sys: The return codes from `sys/health` can now be user-specified via query
- parameters [GH-1199]
-
-BUG FIXES:
-
- * logical/cassandra: Apply hyphen/underscore replacement to the entire
- generated username, not just the UUID, in order to handle token display name
- hyphens [GH-1140]
- * physical/etcd: Output actual error when cluster sync fails [GH-1141]
- * vault/expiration: Do not skip error responses from the backends during
- renewals [GH-1176]
-
-## 0.5.1 (February 25th, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * RSA keys less than 2048 bits are no longer supported in the PKI backend.
- 1024-bit keys are considered unsafe and are disallowed in the Internet PKI.
- The `pki` backend has enforced SHA256 hashes in signatures from the
- beginning, and software that can handle these hashes should be able to
- handle larger key sizes. [GH-1095]
- * The PKI backend now does not automatically delete expired certificates,
- including from the CRL. Doing so could lead to a situation where a time
- mismatch between the Vault server and clients could result in a certificate
- that would not be considered expired by a client being removed from the CRL.
- The new `pki/tidy` endpoint can be used to clean up expired certificates. [GH-1129]
- * The `cert` backend now performs a variant of channel binding at renewal time
- for increased security. In order to not overly burden clients, a notion of
- identity is used. This functionality can be disabled. See the 0.5.1 upgrade
- guide for more specific information. [GH-1127]
-
-FEATURES:
-
- * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of
- the audit contract do not allow us to make the results public.) [GH-220]
-
-IMPROVEMENTS:
-
- * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control
- the SNI header during TLS connections [GH-1131]
- * api/health: Add the server's time in UTC to health responses [GH-1117]
- * command/rekey and command/generate-root: These now return the status at
- attempt initialization time, rather than requiring a separate fetch for the
- nonce [GH-1054]
- * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/`
- paths; use normal ACL behavior instead [GH-468]
- * credential/github: The validity of the token used for login will be checked
- at renewal time [GH-1047]
- * credential/github: The `config` endpoint no longer requires a root token;
- normal ACL path matching applies
- * deps: Use the standardized Go 1.6 vendoring system
- * secret/aws: Inform users of AWS-imposed policy restrictions around STS
- tokens if they attempt to use an invalid policy [GH-1113]
- * secret/mysql: The MySQL backend now allows disabling verification of the
- `connection_url` [GH-1096]
- * secret/pki: Submitted CSRs are now verified to have the correct key type and
- minimum number of bits according to the role. The exception is intermediate
- CA signing and the `sign-verbatim` path [GH-1104]
- * secret/pki: New `tidy` endpoint to allow expunging expired certificates.
- [GH-1129]
- * secret/postgresql: The PostgreSQL backend now allows disabling verification
- of the `connection_url` [GH-1096]
- * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of
- 204 [GH-1086]
- * credential/app-id: App ID backend will check the validity of app-id and user-id
- at renewal time [GH-1039]
- * credential/cert: TLS Certificates backend, during renewal, will now match the
- client identity with the client identity used during login [GH-1127]
-
-BUG FIXES:
-
- * credential/ldap: Properly escape values being provided to search filters
- [GH-1100]
- * secret/aws: Cap the length of usernames for both IAM and STS types
- [GH-1102]
- * secret/pki: If a cert is not found during lookup of a serial number,
- respond with a 400 rather than a 500 [GH-1085]
- * secret/postgresql: Add extra revocation statements to better handle more
- permission scenarios [GH-1053]
- * secret/postgresql: Make connection_url work properly [GH-1112]

+* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)]

-## 0.5.0 (February 10, 2016)

+## 1.10.5
+### July 21, 2022

SECURITY:

- * Previous versions of Vault could allow a malicious user to hijack the rekey
- operation by canceling an operation in progress and starting a new one. The
- practical application of this is very small. If the user was an unseal key
- owner, they could attempt to do this in order to either receive unencrypted
- reseal keys or to replace the PGP keys used for encryption with ones under
- their control. However, since this would invalidate any rekey progress, they
- would need other unseal key holders to resubmit, which would be rather
- suspicious during this manual operation if they were not also the original
- initiator of the rekey attempt. If the user was not an unseal key holder,
- there is no benefit to be gained; the only outcome that could be attempted
- would be a denial of service against a legitimate rekey operation by sending
- cancel requests over and over. Thanks to Josh Snyder for the report!
-
-DEPRECATIONS/CHANGES:
-
- * `s3` physical backend: Environment variables are now preferred over
- configuration values. This makes it behave similarly to the rest of Vault,
- which, in increasing order of preference, uses values from the configuration
- file, environment variables, and CLI flags. [GH-871]
- * `etcd` physical backend: `sync` functionality is now supported and turned on
- by default. This can be disabled. [GH-921]
- * `transit`: If a client attempts to encrypt a value with a key that does not
- yet exist, what happens now depends on the capabilities set in the client's
- ACL policies. If the client has `create` (or `create` and `update`)
- capability, the key will upsert as in the past. If the client has `update`
- capability, they will receive an error. [GH-1012]
- * `token-renew` CLI command: If the token given for renewal is the same as the
- client token, the `renew-self` endpoint will be used in the API.
Given that
- the `default` policy (by default) allows all clients access to the
- `renew-self` endpoint, this makes it much more likely that the intended
- operation will be successful. [GH-894]
- * Token `lookup`: the `ttl` value in the response now reflects the actual
- remaining TTL rather than the original TTL specified when the token was
- created; this value is now located in `creation_ttl` [GH-986]
- * Vault no longer uses grace periods on leases or token TTLs. Uncertainty
- about the length of the grace period for any given backend could cause
- confusion and uncertainty. [GH-1002]
- * `rekey`: Rekey now requires a nonce to be supplied with key shares. This
- nonce is generated at the start of a rekey attempt and is unique for that
- attempt.
- * `status`: The exit code for the `status` CLI command is now `2` for an
- uninitialized Vault instead of `1`. `1` is returned for errors. This better
- matches the rest of the CLI.

+* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)]

-FEATURES:

+CHANGES:

- * **Split Data/High Availability Physical Backends**: You can now configure
- two separate physical backends: one to be used for High Availability
- coordination and another to be used for encrypted data storage. See the
- [configuration
- documentation](https://vaultproject.io/docs/config/index.html) for details.
- [GH-395]
- * **Fine-Grained Access Control**: Policies can now use the `capabilities` set
- to specify fine-grained control over operations allowed on a path, including
- separation of `sudo` privileges from other privileges. These can be mixed
- and matched in any way desired. The `policy` value is kept for backwards
- compatibility. See the [updated policy
- documentation](https://vaultproject.io/docs/concepts/policies.html) for
- details. [GH-914]
- * **List Support**: Listing is now supported via the API and the new `vault
- list` command. This currently supports listing keys in the `generic` and
- `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS
- section below). Different parts of the API and backends will need to
- implement list capabilities in ways that make sense to particular endpoints,
- so further support will appear over time. [GH-617]
- * **Root Token Generation via Unseal Keys**: You can now use the
- `generate-root` CLI command to generate new orphaned, non-expiring root
- tokens in case the original is lost or revoked (accidentally or
- purposefully). This requires a quorum of unseal key holders. The output
- value is protected via any PGP key of the initiator's choosing or a one-time
- pad known only to the initiator (a suitable pad can be generated via the
- `-genotp` flag to the command). [GH-915]
- * **Unseal Key Archiving**: You can now optionally have Vault store your
- unseal keys in your chosen physical store for disaster recovery purposes.
- This option is only available when the keys are encrypted with PGP.
[GH-907] - * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase - users when passing in PGP keys to the `init`, `rekey`, and `generate-root` - CLI commands. Public keys for these users will be fetched automatically. - [GH-901] - * **DynamoDB HA Physical Backend**: There is now a new, community-supported - HA-enabled physical backend using Amazon DynamoDB. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-878] - * **PostgreSQL Physical Backend**: There is now a new, community-supported - physical backend using PostgreSQL. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-945] - * **STS Support in AWS Secret Backend**: You can now use the AWS secret - backend to fetch STS tokens rather than IAM users. [GH-927] - * **Speedups in the transit backend**: The `transit` backend has gained a - cache, and now loads only the working set of keys (e.g. from the - `min_decryption_version` to the current key version) into its working set. - This provides large speedups and potential memory savings when the `rotate` - feature of the backend is used heavily. +* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] +* core: Bump Go version to 1.17.12. IMPROVEMENTS: - * cli: Output secrets sorted by key name [GH-830] - * cli: Support YAML as an output format [GH-832] - * cli: Show an error if the output format is incorrect, rather than falling - back to an empty table [GH-849] - * cli: Allow setting the `advertise_addr` for HA via the - `VAULT_ADVERTISE_ADDR` environment variable [GH-581] - * cli/generate-root: Add generate-root and associated functionality [GH-915] - * cli/init: Add `-check` flag that returns whether Vault is initialized - [GH-949] - * cli/server: Use internal functions for the token-helper rather than shelling - out, which fixes some problems with using a static binary in Docker or paths - with multiple spaces when launching in `-dev` mode [GH-850] - * cli/token-lookup: Add token-lookup command [GH-892] - * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for - `-pgp-keys` [GH-940] - * conf: Use normal bool values rather than empty/non-empty for the - `tls_disable` option [GH-802] - * credential/ldap: Add support for binding, both anonymously (to discover a - user DN) and via a username and password [GH-975] - * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] - * credential/token: Change `ttl` to reflect the current remaining TTL; the - original value is in `creation_ttl` [GH-1007] - * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] - * logical/aws: You can now get STS tokens instead of IAM users [GH-927] - * logical/cassandra: Add `protocol_version` parameter to set the CQL proto - version [GH-1005] - * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] - * logical/mysql: Add list support for roles path [GH-984] - * logical/pki: Fix up key usages being specified for CAs [GH-989] - * logical/pki: Add list support for roles path [GH-985] - * logical/pki: Allow `pem_bundle` to be specified as the format, which - provides a concatenated PEM bundle of returned values [GH-1008] - * logical/pki: Add 30 seconds of slack to the validity start period to - accommodate some clock skew in machines [GH-1036] - * logical/postgres: Add `max_idle_connections` parameter [GH-950] - * logical/postgres: Add list 
support for roles path - * logical/ssh: Add list support for roles path [GH-983] - * logical/transit: Keys are archived and only keys between the latest version - and `min_decryption_version` are loaded into the working set. This can - provide a very large speed increase when rotating keys very often. [GH-977] - * logical/transit: Keys are now cached, which should provide a large speedup - in most cases [GH-979] - * physical/cache: Use 2Q cache instead of straight LRU [GH-908] - * physical/etcd: Support basic auth [GH-859] - * physical/etcd: Support sync functionality and enable by default [GH-921] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] BUG FIXES: - * api: Correct the HTTP verb used in the LookupSelf method [GH-887] - * api: Fix the output of `Sys().MountConfig(...)` to return proper values - [GH-1017] - * command/read: Fix panic when an empty argument was given [GH-923] - * command/ssh: Fix panic when username lookup fails [GH-886] - * core: When running in standalone mode, don't advertise that we are active - until post-unseal setup completes [GH-872] - * core: Update go-cleanhttp dependency to ensure idle connections aren't - leaked [GH-867] - * core: Don't allow tokens to have duplicate policies [GH-897] - * core: Fix regression in `sys/renew` that caused information stored in the - Secret part of the response to be lost [GH-912] - * physical: Use square brackets when setting an IPv6-based advertise address - as the auto-detected advertise address [GH-883] - * physical/s3: Use an initialized client when using IAM roles to fix a - regression introduced against newer versions of the AWS Go SDK [GH-836] - * secret/pki: Fix a condition where unmounting could fail if the CA - certificate was not properly loaded [GH-946] - * secret/ssh: Fix a problem where SSH connections were not always closed - properly [GH-942] - -MISC: - - * Clarified our stance on support for community-derived physical backends. - See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - * Add `vault-java` to libraries [GH-851] - * Various minor documentation fixes and improvements [GH-839] [GH-854] - [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] - [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] - -BUILD NOTE: - - * The HashiCorp-provided binary release of Vault 0.5.0 is built against a - patched version of Go 1.5.3 containing two specific bug fixes affecting TLS - certificate handling. These fixes are in the Go 1.6 tree and were - cherry-picked on top of stock Go 1.5.3. If you want to examine the way in - which the releases were built, please look at our [cross-compilation - Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). - -## 0.4.1 (January 13, 2016) - -SECURITY: +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. 
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - * Build against Go 1.5.3 to mitigate a security vulnerability introduced in - Go 1.5. For more information, please see - https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 - -This is a security-only release; other than the version number and building -against Go 1.5.3, there are no changes from 0.4.0. - -## 0.4.0 (December 10, 2015) - -DEPRECATIONS/CHANGES: - - * Policy Name Casing: Policy names are now normalized to lower-case on write, - helping prevent accidental case mismatches. For backwards compatibility, - policy names are not currently normalized when reading or deleting. [GH-676] - * Default etcd port number: the default connection string for the `etcd` - physical store uses port 2379 instead of port 4001, which is the port used - by the supported version 2.x of etcd. [GH-753] - * As noted below in the FEATURES section, if your Vault installation contains - a policy called `default`, new tokens created will inherit this policy - automatically. - * In the PKI backend there have been a few minor breaking changes: - * The token display name is no longer a valid option for providing a base - domain for issuance. Since this name is prepended with the name of the - authentication backend that issued it, it provided a faulty use-case at best - and a confusing experience at worst. We hope to figure out a better - per-token value in a future release. - * The `allowed_base_domain` parameter has been changed to `allowed_domains`, - which accepts a comma-separated list of domains. This allows issuing - certificates with DNS subjects across multiple domains. If you had a - configured `allowed_base_domain` parameter, it will be migrated - automatically when the role is read (either via a normal read, or via - issuing a certificate). +## 1.10.4 +### June 10, 2022 -FEATURES: +CHANGES: - * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate - and sign root CA certificates and intermediate CA CSRs. It can also now sign - submitted client CSRs, as well as a significant number of other - enhancements. See the updated documentation for the full API. [GH-666] - * **CRL Checking for Certificate Authentication**: The `cert` backend now - supports pushing CRLs into the mount and using the contained serial numbers - for revocation checking. See the documentation for the `cert` backend for - more info. 
[GH-330] - * **Default Policy**: Vault now ensures that a policy named `default` is added - to every token. This policy cannot be deleted, but it can be modified - (including to an empty policy). There are three endpoints allowed in the - default `default` policy, related to token self-management: `lookup-self`, - which allows a token to retrieve its own information, and `revoke-self` and - `renew-self`, which are self-explanatory. If your existing Vault - installation contains a policy called `default`, it will not be overridden, - but it will be added to each new token created. You can override this - behavior when using manual token creation (i.e. not via an authentication - backend) by setting the "no_default_policy" flag to true. [GH-732] +* core: Bump Go version to 1.17.11. [[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] IMPROVEMENTS: - * api: API client now uses a 60 second timeout instead of indefinite [GH-681] - * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth - tokens [GH-739] - * api: Standardize environment variable reading logic inside the API; the CLI - now uses this but can still override via command-line parameters [GH-618] - * audit: HMAC-SHA256'd client tokens are now stored with each request entry. - Previously they were only displayed at creation time; this allows much - better traceability of client actions. [GH-713] - * audit: There is now a `sys/audit-hash` endpoint that can be used to generate - an HMAC-SHA256'd value from provided data using the given audit backend's - salt [GH-784] - * core: The physical storage read cache can now be disabled via - "disable_cache" [GH-674] - * core: The unsealing process can now be reset midway through (this feature - was documented before, but not enabled) [GH-695] - * core: Tokens can now renew themselves [GH-455] - * core: Base64-encoded PGP keys can be used with the CLI for `init` and - `rekey` operations [GH-653] - * core: Print version on startup [GH-765] - * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system - instead of requiring a root token [GH-769] - * credential/token: Display whether or not a token is an orphan in the output - of a lookup call [GH-766] - * logical: Allow `.` in path-based variables in many more locations [GH-244] - * logical: Responses now contain a "warnings" key containing a list of - warnings returned from the server. These are conditions that did not require - failing an operation, but of which the client should be aware. [GH-676] - * physical/(consul,etcd): Consul and etcd now use a connection pool to limit - the number of outstanding operations, improving behavior when a lot of - operations must happen at once [GH-677] [GH-780] - * physical/consul: The `datacenter` parameter was removed; It could not be - effective unless the Vault node (or the Consul node it was connecting to) - was in the datacenter specified, in which case it wasn't needed [GH-816] - * physical/etcd: Support TLS-encrypted connections and use a connection pool - to limit the number of outstanding operations [GH-780] - * physical/s3: The S3 endpoint can now be configured, allowing using - S3-API-compatible storage solutions [GH-750] - * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET` - environment variable [GH-758] - * secret/consul: Management tokens can now be created [GH-714] - -BUG FIXES: - - * api: API client now checks for a 301 response for redirects. 
Vault doesn't - generate these, but in certain conditions Go's internal HTTP handler can - generate them, leading to client errors. - * cli: `token-create` now supports the `ttl` parameter in addition to the - deprecated `lease` parameter. [GH-688] - * core: Return data from `generic` backends on the last use of a limited-use - token [GH-615] - * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673] - * core: Stale leader entries will now be reaped [GH-679] - * core: Using `mount-tune` on the auth/token path did not take effect. - [GH-688] - * core: Fix a potential race condition when (un)sealing the vault with metrics - enabled [GH-694] - * core: Fix an error that could happen in some failure scenarios where Vault - could fail to revert to a clean state [GH-733] - * core: Ensure secondary indexes are removed when a lease is expired [GH-749] - * core: Ensure rollback manager uses an up-to-date mounts table [GH-771] - * everywhere: Don't use http.DefaultClient, as it shares state implicitly and - is a source of hard-to-track-down bugs [GH-700] - * credential/token: Allow creating orphan tokens via an API path [GH-748] - * secret/generic: Validate given duration at write time, not just read time; - if stored durations are not parseable, return a warning and the default - duration rather than an error [GH-718] - * secret/generic: Return 400 instead of 500 when `generic` backend is written - to with no data fields [GH-825] - * secret/postgresql: Revoke permissions before dropping a user or revocation - may fail [GH-699] - -MISC: - - * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697] - [GH-710] [GH-715] [GH-831] - -## 0.3.1 (October 6, 2015) - -SECURITY: - - * core: In certain failure scenarios, the full values of requests and - responses would be logged [GH-665] - -FEATURES: - - * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends - now allow setting the number of maximum open connections to the database, - which was previously capped to 2. [GH-661] - * **Renewable Tokens for GitHub**: The `github` backend now supports - specifying a TTL, enabling renewable tokens. [GH-664] +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] +* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] BUG FIXES: - * dist: linux-amd64 distribution was dynamically linked [GH-656] - * credential/github: Fix acceptance tests [GH-651] - -MISC: - - * Various minor documentation fixes and improvements [GH-649] [GH-650] - [GH-654] [GH-663] - -## 0.3.0 (September 28, 2015) - -DEPRECATIONS/CHANGES: - -Note: deprecations and breaking changes in upcoming releases are announced -ahead of time on the "vault-tool" mailing list. - - * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is - via the X-Vault-Token header. Cookie authentication was hard to properly - test, could result in browsers/tools/applications saving tokens in plaintext - on disk, and other issues. [GH-564] - * **Terminology/Field Names**: Vault is transitioning from overloading the - term "lease" to mean both "a set of metadata" and "the amount of time the - metadata is valid". The latter is now being referred to as TTL (or - "lease_duration" for backwards-compatibility); some parts of Vault have - already switched to using "ttl" and others will follow in upcoming releases. - In particular, the "token", "generic", and "pki" backends accept both "ttl" - and "lease" but in 0.4 only "ttl" will be accepted. [GH-528] - * **Downgrade Not Supported**: Due to enhancements in the storage subsystem, - values written by Vault 0.3+ will not be able to be read by prior versions - of Vault. There are no expected upgrade issues, however, as with all - critical infrastructure it is recommended to back up Vault's physical - storage before upgrading. - -FEATURES: - - * **SSH Backend**: Vault can now be used to delegate SSH access to machines, - via a (recommended) One-Time Password approach or by issuing dynamic keys. - [GH-385] - * **Cubbyhole Backend**: This backend works similarly to the "generic" backend - but provides a per-token workspace. This enables some additional - authentication workflows (especially for containers) and can be useful to - applications to e.g. store local credentials while being restarted or - upgraded, rather than persisting to disk. [GH-612] - * **Transit Backend Improvements**: The transit backend now allows key - rotation and datakey generation. For rotation, data encrypted with previous - versions of the keys can still be decrypted, down to a (configurable) - minimum previous version; there is a rewrap function for manual upgrades of - ciphertext to newer versions. Additionally, the backend now allows - generating and returning high-entropy keys of a configurable bitsize - suitable for AES and other functions; this is returned wrapped by a named - key, or optionally both wrapped and plaintext for immediate use. [GH-626] - * **Global and Per-Mount Default/Max TTL Support**: You can now set the - default and maximum Time To Live for leases both globally and per-mount. - Per-mount settings override global settings. Not all backends honor these - settings yet, but the maximum is a hard limit enforced outside the backend. - See the documentation for "/sys/mounts/" for details on configuring - per-mount TTLs. [GH-469] - * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's - master key, PGP/GPG public keys can now be provided. The output keys will be - encrypted with the given keys, in order. 
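For the `username_as_alias` fix above, a minimal LDAP auth configuration sketch follows (every value here is illustrative, not a recommended setup); with `username_as_alias=true`, the login name itself is now used for the entity alias, without the superfluous `user_attr` search described above.

```
# With username_as_alias=true, the supplied username becomes the entity
# alias directly; no extra LDAP lookup of the user attribute is performed.
vault write auth/ldap/config \
    url="ldap://ldap.example.com" \
    userdn="ou=Users,dc=example,dc=com" \
    userattr="uid" \
    username_as_alias=true
```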
[GH-570] - * **Duo Multifactor Authentication Support**: Backends that support MFA can - now use Duo as the mechanism. [GH-464] - * **Performance Improvements**: Users of the "generic" backend will see a - significant performance improvement as the backend no longer creates leases, - although it does return TTLs (global/mount default, or set per-item) as - before. [GH-631] - * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the - audit contract do not allow us to make the results public.) [GH-220] +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. 
+* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] +* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] +* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] -IMPROVEMENTS: +## 1.10.3 +### May 11, 2022 - * audit: Log entries now contain a time field [GH-495] - * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627] - * backends: Add ability for a cleanup function to be called on backend unmount - [GH-608] - * config: Allow specifying minimum acceptable TLS version [GH-447] - * core: If trying to mount in a location that is already mounted, be more - helpful about the error [GH-510] - * core: Be more explicit on failure if the issue is invalid JSON [GH-553] - * core: Tokens can now revoke themselves [GH-620] - * credential/app-id: Give a more specific error when sending a duplicate POST - to sys/auth/app-id [GH-392] - * credential/github: Support custom API endpoints (e.g. for Github Enterprise) - [GH-572] - * credential/ldap: Add per-user policies and option to login with - userPrincipalName [GH-420] - * credential/token: Allow root tokens to specify the ID of a token being - created from CLI [GH-502] - * credential/userpass: Enable renewals for login tokens [GH-623] - * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446] - * scripts: Use godep for build scripts to use same environment as tests - [GH-404] - * secret/mysql: Allow reading configuration data [GH-529] - * secret/pki: Split "allow_any_name" logic to that and "enforce_hostnames", to - allow for non-hostname values (e.g. for client certificates) [GH-555] - * storage/consul: Allow specifying certificates used to talk to Consul - [GH-384] - * storage/mysql: Allow SSL encrypted connections [GH-439] - * storage/s3: Allow using temporary security credentials [GH-433] - * telemetry: Put telemetry object in configuration to allow more flexibility - [GH-419] - * testing: Disable mlock for testing of logical backends so as not to require - root [GH-479] +SECURITY: +* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. 
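Because CVE-2022-30689 involved login MFA configuration not being reloaded across server restarts, one rough post-upgrade sanity check (a suggestion only, not an official remediation; the globally scoped method list endpoint is noted under 1.10.4 above) is to confirm that the configured Login MFA methods are still visible after a restart:

```
# List the configured Login MFA method IDs after a restart; an unexpectedly
# empty result would warrant investigation before relying on MFA enforcement.
vault list identity/mfa/method
```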
BUG FIXES: - * audit/file: Do not enable auditing if file permissions are invalid [GH-550] - * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] - * cli: Fixed missing setup of client TLS certificates if no custom CA was - provided - * cli/read: Do not include a carriage return when using raw field output - [GH-624] - * core: Bad input data could lead to a panic for that session, rather than - returning an error [GH-503] - * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] - * core: Do not return a Secret if there are no uses left on a token (since it - will be unable to be used) [GH-615] - * core: Code paths that called lookup-self would decrement num_uses and - potentially immediately revoke a token [GH-552] - * core: Some /sys/ paths would not properly redirect from a standby to the - leader [GH-499] [GH-551] - * credential/aws: Translate spaces in a token's display name to avoid making - IAM unhappy [GH-567] - * credential/github: Integration failed if more than ten organizations or - teams [GH-489] - * credential/token: Tokens with sudo access to "auth/token/create" can now use - root-only options [GH-629] - * secret/cassandra: Work around backwards-incompatible change made in - Cassandra 2.2 preventing Vault from properly setting/revoking leases - [GH-549] - * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues - [GH-522] - * secret/postgres: Explicitly set timezone in connections [GH-597] - * storage/etcd: Renew semaphore periodically to prevent leadership flapping - [GH-606] - * storage/zk: Fix collisions in storage that could lead to data unavailability - [GH-411] - -MISC: - - * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] - [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] - [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] - * Less "armon" in stack traces [GH-453] - * Sourcegraph integration [GH-456] - -## 0.2.0 (July 13, 2015) - -FEATURES: - - * **Key Rotation Support**: The `rotate` command can be used to rotate the - master encryption key used to write data to the storage (physical) backend. - [GH-277] - * **Rekey Support**: Rekey can be used to rotate the master key and change the - configuration of the unseal keys (number of shares, threshold required). - [GH-277] - * **New secret backend: `pki`**: Enable Vault to be a certificate authority - and generate signed TLS certificates. [GH-310] - * **New secret backend: `cassandra`**: Generate dynamic credentials for - Cassandra [GH-363] - * **New storage backend: `etcd`**: store physical data in etcd [GH-259] - [GH-297] - * **New storage backend: `s3`**: store physical data in S3. Does not support - HA. [GH-242] - * **New storage backend: `MySQL`**: store physical data in MySQL. Does not - support HA. [GH-324] - * `transit` secret backend supports derived keys for per-transaction unique - keys [GH-399] - -IMPROVEMENTS: +* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... 
}} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] - * cli/auth: Enable `cert` method [GH-380] - * cli/auth: read input from stdin [GH-250] - * cli/read: Ability to read a single field from a secret [GH-257] - * cli/write: Adding a force flag when no input required - * core: allow time duration format in place of seconds for some inputs - * core: audit log provides more useful information [GH-360] - * core: graceful shutdown for faster HA failover - * core: **change policy format** to use explicit globbing [GH-400] Any - existing policy in Vault is automatically upgraded to avoid issues. All - policy files must be updated for future writes. Adding the explicit glob - character `*` to the path specification is all that is required. - * core: policy merging to give deny highest precedence [GH-400] - * credential/app-id: Protect against timing attack on app-id - * credential/cert: Record the common name in the metadata [GH-342] - * credential/ldap: Allow TLS verification to be disabled [GH-372] - * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367] - * credential/userpass: Protect against timing attack on password - * credential/userpass: Use bcrypt for password matching - * http: response codes improved to reflect error [GH-366] - * http: the `sys/health` endpoint supports `?standbyok` to return 200 on - standby [GH-389] - * secret/app-id: Support deleting AppID and UserIDs [GH-200] - * secret/consul: Fine grained lease control [GH-261] - * secret/transit: Decouple raw key from key management endpoint [GH-355] - * secret/transit: Upsert named key when encrypt is used [GH-355] - * storage/zk: Support for HA configuration [GH-252] - * storage/zk: Changing node representation. **Backwards incompatible**. 
- [GH-416]

+## 1.10.2
+### April 29, 2022

BUG FIXES:

- * audit/file: file removing TLS connection state
- * audit/syslog: fix removing TLS connection state
- * command/*: commands accepting `k=v` allow blank values
- * core: Allow building on FreeBSD [GH-365]
- * core: Fixed various panics when audit logging enabled
- * core: Lease renewal does not create redundant lease
- * core: fixed leases with negative duration [GH-354]
- * core: token renewal does not create child token
- * core: fixing panic when lease increment is null [GH-408]
- * credential/app-id: Salt the paths in storage backend to avoid information
- leak
- * credential/cert: Fixing client certificate not being requested
- * credential/cert: Fixing panic when no certificate match found [GH-361]
- * http: Accept PUT as POST for sys/auth
- * http: Accept PUT as POST for sys/mounts [GH-349]
- * http: Return 503 when sealed [GH-225]
- * secret/postgres: Username length is capped to avoid exceeding the limit
- * server: Do not panic if backend not configured [GH-222]
- * server: Explicitly check value of tls_disable [GH-201]
- * storage/zk: Fixed issues with version conflicts [GH-190]
-
-MISC:
-
- * cli/path-help: renamed from `help` to avoid confusion
-
-## 0.1.2 (May 11, 2015)

+* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)]
+* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)]
+
+## 1.10.1
+### April 22, 2022

-FEATURES:

+CHANGES:

- * **New physical backend: `zookeeper`**: store physical data in Zookeeper.
- HA not supported yet.
- * **New credential backend: `ldap`**: authenticate using LDAP credentials.

+* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)]
+* core: Bump Go version to 1.17.9.
[[GH-15044](https://github.com/hashicorp/vault/pull/15044)]

IMPROVEMENTS:

- * core: Auth backends can store internal data about auth creds
- * audit: display name for auth is shown in logs [GH-176]
- * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130]
- * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162]
- * command/server: environment variables are copy-pastable
- * credential/app-id: hash of app and user ID are in metadata [GH-176]
- * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124]
- * logical/*: Generate help output even if no synopsis specified

+* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)]
+* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)]
+* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)]
+* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)]
+* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer

BUG FIXES:

- * core: login endpoints should never return secrets
- * core: Internal data should never be returned from core endpoints
- * core: defer barrier initialization to as late as possible to avoid error
- cases during init that corrupt data (no data loss)
- * core: guard against invalid init config earlier
- * audit/file: create file if it doesn't exist [GH-148]
- * command/*: ignore directories when traversing CA paths [GH-181]
- * credential/*: all policy mapping keys are case insensitive [GH-163]
- * physical/consul: Fixing path for locking so HA works in every case

+* Fixed panic when adding or modifying a Duo MFA Method in Enterprise
+* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)]
+* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)]
+* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)]
+* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)]
+* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)]
+* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)]
+* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)]
+* core (enterprise): Allow local alias create RPCs to persist alias metadata [[GH-changelog:_2747](https://github.com/hashicorp/vault/pull/changelog:_2747)]
+* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number
+* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)]
+* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)]
+* core: Fix panic
caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] +* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] +* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] -## 0.1.1 (May 2, 2015) +## 1.10.0 +### March 23, 2022 -SECURITY CHANGES: +CHANGES: - * physical/file: create the storage with 0600 permissions [GH-102] - * token/disk: write the token to disk with 0600 perms +* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x. +* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by +the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)] +* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)] +* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] +* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft +Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* storage/etcd: Remove support for v2.
[[GH-14193](https://github.com/hashicorp/vault/pull/14193)] +* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)] -IMPROVEMENTS: +FEATURES: - * core: Very verbose error if mlock fails [GH-59] - * command/*: On error with TLS oversized record, show more human-friendly - error message. [GH-123] - * command/read: `lease_renewable` is now outputted along with the secret to - show whether it is renewable or not - * command/server: Add configuration option to disable mlock - * command/server: Disable mlock for dev mode so it works on more systems +* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] +* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault. [[GH-14025](https://github.com/hashicorp/vault/pull/14025)] +* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. +* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] +* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)] +* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)] +* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)] +* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)] +* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. [[GH-client-counts](https://github.com/hashicorp/vault/pull/client-counts)] +* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint. -BUG FIXES: +IMPROVEMENTS: - * core: if token helper isn't absolute, prepend with path to Vault - executable, not "vault" (which requires PATH) [GH-60] - * core: Any "mapping" routes allow hyphens in keys [GH-119] - * core: Validate `advertise_addr` is a valid URL with scheme [GH-106] - * command/auth: Using an invalid token won't crash [GH-75] - * credential/app-id: app and user IDs can have hyphens in keys [GH-119] - * helper/password: import proper DLL for Windows to ask password [GH-83] +* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. [[GH-14214](https://github.com/hashicorp/vault/pull/14214)] +* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)] +* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. 
[[GH-13515](https://github.com/hashicorp/vault/pull/13515)] +* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)] +* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)] +* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)] +* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)] +* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)] +* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)] +* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)] +* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)] +* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)] +* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] +* auth/ldap: Add a response warning and server log whenever the config is accessed +if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] +* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will +not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] +* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] +* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] +* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status +* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] +* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] +* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] +* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] +* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] +* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] +* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] +* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] +* core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections +has also been added to the PROXY protocol v1. [[GH-13540](https://github.com/hashicorp/vault/pull/13540)] +* http (enterprise): Serve /sys/license/status endpoint within namespaces +* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)] +* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)] +* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] +* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)] +* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)] +* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)] +* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. 
[[GH-13414](https://github.com/hashicorp/vault/pull/13414)] +* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)] +* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)] +* secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)] +* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)] +* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)] +* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)] +* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)] +* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)] +* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)] +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] +* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)] +* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)] +* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] +* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)] +* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)] +* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)] +* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)] +* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] +* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)] +* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)] +* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json.
[[GH-13537](https://github.com/hashicorp/vault/pull/13537)] +* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)] +* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)] +* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)] +* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)] +* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)] +* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)] +* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)] +* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)] +* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases +for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)] +* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)] +* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)] +* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)] -## 0.1.0 (April 28, 2015) +BUG FIXES: - * Initial release +* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] +* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] +* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token +* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)] +* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] +* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)] +* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. [[GH-13332](https://github.com/hashicorp/vault/pull/13332)] +* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] +* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] +* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] +* core (enterprise): Fix a data race in logshipper. 
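
As context for the `client.SetReadYourWrites(true)` fix above: a minimal sketch of how a caller opts in to read-your-writes consistency with the Go client, assuming the `github.com/hashicorp/vault/api` package and a server address supplied via `VAULT_ADDR` (the repeated call mirrors the consecutive-call scenario the fix addresses):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR and related settings from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Opt in to read-your-writes consistency. Before the fix referenced above,
	// a second consecutive call could nil out the client's internal
	// replication state store.
	client.SetReadYourWrites(true)
	client.SetReadYourWrites(true)

	fmt.Println("read-your-writes consistency enabled")
}
```
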
+* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* core/api: Fix overwriting of request headers when using JSONMergePatch. [[GH-14222](https://github.com/hashicorp/vault/pull/14222)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] +* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)] +* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)] +* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] +* core: `-output-curl-string` now properly sets cURL options for client and CA +certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)] +* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] +* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)] +* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] +* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] +* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] +* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] +* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)] +* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] +* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] +* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)] +* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)] +* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] +* kmip (enterprise): Fix locate by name operations failing to find key after a rekey operation. +* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build.
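
For the `JSONMergePatch` header fix near the top of this list: a sketch of issuing an HTTP PATCH from the Go client, assuming the `Logical().JSONMergePatch` helper and a hypothetical KV v2 secret at `secret/data/my-app` (path and field names are illustrative only):

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Merge-patch a single field of a KV v2 secret instead of rewriting the
	// whole document. The request is sent as an HTTP PATCH with a JSON
	// merge-patch body; the fix above ensures caller-supplied request
	// headers on this code path are no longer overwritten.
	_, err = client.Logical().JSONMergePatch(context.Background(),
		"secret/data/my-app", // hypothetical mount and path
		map[string]interface{}{
			"data": map[string]interface{}{
				"api_key": "rotated-value",
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```
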
+* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)] +* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] +* replication (enterprise): When using encrypted secondary tokens, only clear the +private key after a successful connection to the primary cluster +* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)] +* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)] +* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] +* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)] +* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] +* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] +* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* secrets/database/cassandra: change connect_timeout to 5s, as the documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)] +* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] +* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0.
[[GH-12872](https://github.com/hashicorp/vault/pull/12872)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] +* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] +* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] +* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue 
with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] +* ui: Fixes node-forge error when parsing EC (elliptic curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: trigger token renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] diff --git a/CODEOWNERS b/CODEOWNERS index a95f8a096ba0..d4282db1147e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,25 +22,29 @@ /builtin/logical/postgresql/ @hashicorp/vault-ecosystem /builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem +# Identity Integrations (OIDC, tokens) +/vault/identity_store_oidc* @hashicorp/vault-ecosystem + /plugins/ @hashicorp/vault-ecosystem /vault/plugin_catalog.go @hashicorp/vault-ecosystem -/website/content/ @yhyakuna -/website/content/docs/plugin-portal.mdx @acahn @yhyakuna +/website/content/ @hashicorp/vault-education-approvers +/website/content/docs/plugin-portal.mdx @acahn @hashicorp/vault-education-approvers # Plugin docs -/website/content/docs/plugins/ @fairclothjm @yhyakuna -/website/content/docs/upgrading/plugins.mdx @fairclothjm @yhyakuna +/website/content/docs/plugins/ @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers +/website/content/docs/upgrading/plugins.mdx @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers +/ui/ @hashicorp/vault-ui # UI code related to Vault's JWT/OIDC auth method and OIDC provider. # Changes to these files often require coordination with backend code, # so stewards of the backend code are added below for notification. -/ui/app/components/auth-jwt.js @austingebauer -/ui/app/routes/vault/cluster/oidc-*.js @austingebauer +/ui/app/components/auth-jwt.js @hashicorp/vault-ecosystem +/ui/app/routes/vault/cluster/oidc-*.js @hashicorp/vault-ecosystem # Release config; service account is required for automation tooling.
-/.release/ @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team -/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team +/.release/ @hashicorp/github-secure-vault-core @hashicorp/quality-team +/.github/workflows/build.yml @hashicorp/github-secure-vault-core @hashicorp/quality-team # Quality engineering /.github/ @hashicorp/quality-team @@ -48,6 +52,7 @@ # Cryptosec /builtin/logical/pki/ @hashicorp/vault-crypto +/builtin/logical/pkiext/ @hashicorp/vault-crypto /website/content/docs/secrets/pki/ @hashicorp/vault-crypto /website/content/api-docs/secret/pki.mdx @hashicorp/vault-crypto /builtin/credential/cert/ @hashicorp/vault-crypto @@ -81,3 +86,4 @@ /website/content/docs/secrets/kmip.mdx @hashicorp/vault-crypto /website/content/api-docs/secret/kmip.mdx @hashicorp/vault-crypto /website/content/docs/enterprise/fips/ @hashicorp/vault-crypto +/website/content/docs/platform/k8s @hashicorp/vault-ecosystem diff --git a/Dockerfile b/Dockerfile index 77a25184f242..4bdf1b0ce5b8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 ## DOCKERHUB DOCKERFILE ## -FROM alpine:3.15 as default +FROM alpine:3 AS default ARG BIN_NAME # NAME and PRODUCT_VERSION are the name of the software in releases.hashicorp.com @@ -24,7 +24,8 @@ LABEL name="Vault" \ summary="Vault is a tool for securely accessing secrets." \ description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log." -COPY LICENSE /licenses/mozilla.txt +# Copy the license file as per Legal requirement +COPY LICENSE /usr/share/doc/$NAME/LICENSE.txt # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD ENV NAME=$NAME @@ -33,7 +34,11 @@ ENV VERSION=$VERSION # Create a non-root user to run the software. RUN addgroup ${NAME} && adduser -S -G ${NAME} ${NAME} -RUN apk add --no-cache libcap su-exec dumb-init tzdata +RUN apk add --no-cache libcap su-exec dumb-init tzdata curl && \ + mkdir -p /usr/share/doc/vault && \ + curl -o /usr/share/doc/vault/EULA.txt https://eula.hashicorp.com/EULA.txt && \ + curl -o /usr/share/doc/vault/TermsOfEvaluation.txt https://eula.hashicorp.com/TermsOfEvaluation.txt && \ + apk del curl COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/ @@ -62,7 +67,7 @@ EXPOSE 8200 # The entry point script uses dumb-init as the top-level process to reap any # zombie processes created by Vault sub-processes. # -# For production derivatives of this container, you shoud add the IPC_LOCK +# For production derivatives of this container, you should add the IPC_LOCK # capability so that Vault can mlock memory. COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh ENTRYPOINT ["docker-entrypoint.sh"] @@ -74,11 +79,12 @@ CMD ["server", "-dev"] ## UBI DOCKERFILE ## -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7 as ubi +FROM registry.access.redhat.com/ubi8/ubi-minimal AS ubi ARG BIN_NAME -# PRODUCT_VERSION is the version built dist/$TARGETOS/$TARGETARCH/$BIN_NAME, -# which we COPY in later. Example: PRODUCT_VERSION=1.2.3. +# NAME and PRODUCT_VERSION are the name of the software in releases.hashicorp.com +# and the version to download. Example: NAME=vault PRODUCT_VERSION=1.2.3. 
+ARG NAME=vault ARG PRODUCT_VERSION ARG PRODUCT_REVISION # TARGETARCH and TARGETOS are set automatically when --platform is provided. @@ -95,17 +101,21 @@ LABEL name="Vault" \ summary="Vault is a tool for securely accessing secrets." \ description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log." -COPY LICENSE /licenses/mozilla.txt - # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD ENV NAME=$NAME ENV VERSION=$VERSION +# Copy the license file as per Legal requirement +COPY LICENSE /usr/share/doc/$NAME/LICENSE.txt + +# We must have a copy of the license in this directory to comply with the HasLicense Redhat requirement +COPY LICENSE /licenses/LICENSE.txt + # Set up certificates, our base tools, and Vault. Unlike the other version of # this (https://github.com/hashicorp/docker-vault/blob/master/ubi/Dockerfile), # we copy in the Vault binary from CRT. RUN set -eux; \ - microdnf install -y ca-certificates gnupg openssl libcap tzdata procps shadow-utils util-linux + microdnf install -y ca-certificates gnupg openssl libcap tzdata procps shadow-utils util-linux tar # Create a non-root user to run the software. RUN groupadd --gid 1000 vault && \ @@ -121,7 +131,7 @@ COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/ # storage backend, if desired; the server will be started with /vault/config as # the configuration directory so you can add additional config files in that # location. -ENV HOME /home/vault +ENV HOME=/home/vault RUN mkdir -p /vault/logs && \ mkdir -p /vault/file && \ mkdir -p /vault/config && \ @@ -130,6 +140,11 @@ RUN mkdir -p /vault/logs && \ chgrp -R 0 $HOME && chmod -R g+rwX $HOME && \ chgrp -R 0 /vault && chmod -R g+rwX /vault +# Include EULA and Terms of Eval +RUN mkdir -p /usr/share/doc/vault && \ + curl -o /usr/share/doc/vault/EULA.txt https://eula.hashicorp.com/EULA.txt && \ + curl -o /usr/share/doc/vault/TermsOfEvaluation.txt https://eula.hashicorp.com/TermsOfEvaluation.txt + # Expose the logs directory as a volume since there's potentially long-running # state in there VOLUME /vault/logs @@ -145,7 +160,7 @@ EXPOSE 8200 # The entry point script uses dumb-init as the top-level process to reap any # zombie processes created by Vault sub-processes. # -# For production derivatives of this container, you shoud add the IPC_LOCK +# For production derivatives of this container, you should add the IPC_LOCK # capability so that Vault can mlock memory. COPY .release/docker/ubi-docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh ENTRYPOINT ["docker-entrypoint.sh"] @@ -156,3 +171,9 @@ USER vault # # By default you'll get a single-node development server that stores everything # # in RAM and bootstraps itself. Don't use this configuration for production. CMD ["server", "-dev"] + +FROM ubi AS ubi-fips + +FROM ubi AS ubi-hsm + +FROM ubi AS ubi-hsm-fips diff --git a/LICENSE b/LICENSE index f4f97ee5853a..fbeca00ad74c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,365 +1,92 @@ -Copyright (c) 2015 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
"Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - +License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +Parameters + +Licensor: HashiCorp, Inc. +Licensed Work: Vault Version 1.15.0 or later. The Licensed Work is (c) 2024 + HashiCorp, Inc. +Additional Use Grant: You may make production use of the Licensed Work, provided + Your use does not include offering the Licensed Work to third + parties on a hosted or embedded basis in order to compete with + HashiCorp's paid version(s) of the Licensed Work. For purposes + of this license: + + A "competitive offering" is a Product that is offered to third + parties on a paid basis, including through paid support + arrangements, that significantly overlaps with the capabilities + of HashiCorp's paid version(s) of the Licensed Work. If Your + Product is not a competitive offering when You first make it + generally available, it will not become a competitive offering + later due to HashiCorp releasing a new version of the Licensed + Work with additional capabilities. In addition, Products that + are not provided on a paid basis are not competitive. + + "Product" means software that is offered to end users to manage + in their own environments or offered as a service on a hosted + basis. + + "Embedded" means including the source code or executable code + from the Licensed Work in a competitive offering. "Embedded" + also means packaging the competitive offering in such a way + that the Licensed Work must be accessed or downloaded for the + competitive offering to operate. + + Hosting or using the Licensed Work(s) for internal purposes + within an organization is not considered a competitive + offering. HashiCorp considers your organization to include all + of your affiliates under common control. + + For binding interpretive guidance on using HashiCorp products + under the Business Source License, please visit our FAQ. + (https://www.hashicorp.com/license-faq) +Change Date: Four years from the date the Licensed Work is published. 
+Change License: MPL 2.0 + +For information about alternative licensing arrangements for the Licensed Work, +please contact licensing@hashicorp.com. + +Notice + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. diff --git a/META.d/_summary.yaml b/META.d/_summary.yaml new file mode 100644 index 000000000000..c1da2a666c77 --- /dev/null +++ b/META.d/_summary.yaml @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +schema: 1.1 +partition: secure +category: product +summary: + owner: team-vault + description: The repositories holding the Vault OSS & ENT codebase + visibility: internal diff --git a/Makefile b/Makefile index 2d2f742644b1..ba499ad7407f 100644 --- a/Makefile +++ b/Makefile @@ -2,21 +2,19 @@ # Be sure to place this BEFORE `include` directives, if any. THIS_FILE := $(lastword $(MAKEFILE_LIST)) -TEST?=$$($(GO_CMD) list ./... | grep -v /vendor/ | grep -v /integ) +MAIN_PACKAGES=$$($(GO_CMD) list ./... | grep -v vendor/ ) +SDK_PACKAGES=$$(cd $(CURDIR)/sdk && $(GO_CMD) list ./... | grep -v vendor/ ) +API_PACKAGES=$$(cd $(CURDIR)/api && $(GO_CMD) list ./... 
| grep -v vendor/ ) +ALL_PACKAGES=$(MAIN_PACKAGES) $(SDK_PACKAGES) $(API_PACKAGES) +TEST=$$(echo $(ALL_PACKAGES) | grep -v integ/ ) TEST_TIMEOUT?=45m EXTENDED_TEST_TIMEOUT=60m INTEG_TEST_TIMEOUT=120m VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr -EXTERNAL_TOOLS_CI=\ - golang.org/x/tools/cmd/goimports \ - github.com/golangci/revgrep/cmd/revgrep -EXTERNAL_TOOLS=\ - github.com/client9/misspell/cmd/misspell GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor) SED?=$(shell command -v gsed || command -v sed) GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version) -PROTOC_VERSION_MIN=3.21.12 GO_CMD?=go CGO_ENABLED?=0 ifneq ($(FDB_ENABLED), ) @@ -24,6 +22,12 @@ ifneq ($(FDB_ENABLED), ) BUILD_TAGS+=foundationdb endif +# Set BUILD_MINIMAL to a non-empty value to build a minimal version of Vault with only core features. +BUILD_MINIMAL ?= +ifneq ($(strip $(BUILD_MINIMAL)),) + BUILD_TAGS+=minimal +endif + default: dev # bin generates the releasable binaries for Vault @@ -32,10 +36,13 @@ bin: prep # dev creates binaries for testing Vault locally. These are put # into ./bin/ as well as $GOPATH/bin +dev: BUILD_TAGS+=testonly dev: prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" +dev-ui: BUILD_TAGS+=testonly dev-ui: assetcheck prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" +dev-dynamic: BUILD_TAGS+=testonly dev-dynamic: prep @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" @@ -51,13 +58,16 @@ dev-dynamic-mem: dev-dynamic # Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin. # The resulting image is tagged "vault:dev". +docker-dev: BUILD_TAGS+=testonly docker-dev: prep docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile -t vault:dev . +docker-dev-ui: BUILD_TAGS+=testonly docker-dev-ui: prep docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile.ui -t vault:dev-ui . # test runs the unit tests and vets the code +test: BUILD_TAGS+=testonly test: prep @CGO_ENABLED=$(CGO_ENABLED) \ VAULT_ADDR= \ @@ -66,12 +76,14 @@ test: prep VAULT_ACC= \ $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20 +testcompile: BUILD_TAGS+=testonly testcompile: prep @for pkg in $(TEST) ; do \ $(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \ done # testacc runs acceptance tests +testacc: BUILD_TAGS+=testonly testacc: prep @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package"; \ @@ -80,6 +92,7 @@ testacc: prep VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) # testrace runs the race checker +testrace: BUILD_TAGS+=testonly testrace: prep @CGO_ENABLED=1 \ VAULT_ADDR= \ @@ -102,49 +115,72 @@ vet: echo "and fix them if necessary before submitting the code for reviewal."; \ fi -# tools/godoctests/.bin/godoctests builds the custom analyzer to check for godocs for tests -tools/godoctests/.bin/godoctests: - @cd tools/godoctests && $(GO_CMD) build -o .bin/godoctests . - -# vet-godoctests runs godoctests on the test functions. 
All output gets piped to revgrep
-# which will only return an error if a new function is missing a godoc
-vet-godoctests: bootstrap tools/godoctests/.bin/godoctests
-	@$(GO_CMD) vet -vettool=./tools/godoctests/.bin/godoctests $(TEST) 2>&1 | revgrep
-
-# ci-vet-godoctests runs godoctests on the test functions. All output gets piped to revgrep
-# which will only return an error if a new function that is not on main is missing a godoc
-ci-vet-godoctests: ci-bootstrap tools/godoctests/.bin/godoctests
-	@$(GO_CMD) vet -vettool=./tools/godoctests/.bin/godoctests $(TEST) 2>&1 | revgrep origin/main
+# deprecations runs the staticcheck tool to look for deprecations. Checks the entire codebase
+# for use of deprecated functions, variables, constants, or fields
+deprecations: bootstrap prep
+	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh ""
+
+# ci-deprecations runs the staticcheck tool to look for deprecations. All output gets piped to
+# revgrep, which will only return an error if a change that is not on main uses a deprecated
+# function, variable, constant, or field
+ci-deprecations: prep check-tools-external
+	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh main
+
+# vet-codechecker runs our custom linters on the test functions. All output gets
+# piped to revgrep, which will only return an error if a new piece of code
+# violates the check
+vet-codechecker: check-tools-internal
+	@echo "==> Running go vet with ./tools/codechecker..."
+	@$(GO_CMD) vet -vettool=$$(which codechecker) -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep
+
+# ci-vet-codechecker runs our custom linters on the test functions. All output gets
+# piped to revgrep, which will only return an error if a new piece of code that is
+# not on main violates the check
+ci-vet-codechecker: tools-internal check-tools-external
+	@echo "==> Running go vet with ./tools/codechecker..."
+	@$(GO_CMD) vet -vettool=$$(which codechecker) -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep origin/main

# lint runs vet plus a number of other checkers, it is more comprehensive, but louder
-lint:
+lint: check-tools-external
	@$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \
		| xargs golangci-lint run; if [ $$? -eq 1 ]; then \
			echo ""; \
			echo "Lint found suspicious constructs. Please check the reported constructs"; \
			echo "and fix them if necessary before submitting the code for reviewal."; \
		fi
+
# for ci jobs, runs lint against the changed packages in the commit
-ci-lint:
+ci-lint: check-tools-external
	@golangci-lint run --deadline 10m --new-from-rev=HEAD~

+# Lint protobuf files
+protolint: prep check-tools-external
+	@echo "==> Linting protobufs..."
+	@buf lint
+
# prep runs `go generate` to build the dynamically generated
# source files.
-prep: fmtcheck
-	@sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'"
-	@$(GO_CMD) generate $($(GO_CMD) list ./... | grep -v /vendor/)
+#
+# n.b.: prep used to depend on fmtcheck, but since fmtcheck is
+# now run as a pre-commit hook (and there's little value in
+# making every build run the formatter), we've removed that
+# dependency.
+prep: check-go-version clean
+	@echo "==> Running go generate..."
+	@GOARCH= GOOS= $(GO_CMD) generate $(MAIN_PACKAGES)
+	@GOARCH= GOOS= cd api && $(GO_CMD) generate $(API_PACKAGES)
+	@GOARCH= GOOS= cd sdk && $(GO_CMD) generate $(SDK_PACKAGES)
+
+# Git doesn't allow us to store shared hooks in .git. Instead, we make sure they're up-to-date
+# whenever a make target is invoked.
+.PHONY: hooks +hooks: @if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi -# bootstrap the build by downloading additional tools needed to build -ci-bootstrap: - @for tool in $(EXTERNAL_TOOLS_CI) ; do \ - echo "Installing/Updating $$tool" ; \ - GO111MODULE=off $(GO_CMD) get -u $$tool; \ - done +-include hooks # Make sure they're always up-to-date -# bootstrap the build by downloading additional tools that may be used by devs -bootstrap: ci-bootstrap - go generate -tags tools tools/tools.go +# bootstrap the build by generating any necessary code and downloading additional tools that may +# be used by devs. +bootstrap: tools prep # Note: if you have plugins in GOPATH you can update all of them via something like: # for i in $(ls | grep vault-plugin-); do cd $i; git remote update; git reset --hard origin/master; dep ensure -update; git add .; git commit; git push; cd ..; done @@ -155,52 +191,34 @@ static-assets-dir: @mkdir -p ./http/web_ui install-ui-dependencies: - @echo "--> Installing JavaScript assets" - @cd ui && yarn --ignore-optional + @echo "==> Installing JavaScript assets" + @cd ui && yarn test-ember: install-ui-dependencies - @echo "--> Running ember tests" + @echo "==> Running ember tests" @cd ui && yarn run test:oss test-ember-enos: install-ui-dependencies - @echo "--> Running ember tests with a real backend" + @echo "==> Running ember tests with a real backend" @cd ui && yarn run test:enos -check-vault-in-path: - @VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \ - [ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \ - printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)" - ember-dist: install-ui-dependencies @cd ui && npm rebuild node-sass - @echo "--> Building Ember application" + @echo "==> Building Ember application" @cd ui && yarn run build @rm -rf ui/if-you-need-to-delete-this-open-an-issue-async-disk-cache ember-dist-dev: install-ui-dependencies @cd ui && npm rebuild node-sass - @echo "--> Building Ember application" + @echo "==> Building Ember application" @cd ui && yarn run build:dev static-dist: ember-dist static-dist-dev: ember-dist-dev -proto: bootstrap - @sh -c "'$(CURDIR)/scripts/protocversioncheck.sh' '$(PROTOC_VERSION_MIN)'" - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/*.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/activity_log.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/storagepacker/types.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/forwarding/types.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/logical/*.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative physical/raft/types.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/mfa/types.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/types.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/*.proto - protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative sdk/database/dbplugin/v5/proto/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/plugin/pb/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/tokens/token.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/helper/pluginutil/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/proto/*/*.proto
+proto: check-tools-external
+	@echo "==> Generating Go code from protobufs..."
+	buf generate

# No additional sed expressions should be added to this list. Going forward
# we should just use the variable names chosen by protobuf. These are left
@@ -212,19 +230,28 @@ proto: bootstrap
	protoc-go-inject-tag -input=./helper/identity/types.pb.go
	protoc-go-inject-tag -input=./helper/identity/mfa/types.pb.go

-fmtcheck:
-	@true
-#@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
+importfmt: check-tools-external
+	find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gosimports -w

-fmt:
-	find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs go run mvdan.cc/gofumpt -w
+fmt: importfmt
+	find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -w
+
+fmtcheck: check-go-fmt
+
+.PHONY: go-mod-download
+go-mod-download:
+	@$(CURDIR)/scripts/go-helper.sh mod-download
+
+.PHONY: go-mod-tidy
+go-mod-tidy:
+	@$(CURDIR)/scripts/go-helper.sh mod-tidy
+
+protofmt:
+	buf format -w

semgrep:
	semgrep --include '*.go' --exclude 'vendor' -a -f tools/semgrep .

-semgrep-ci:
-	semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .
-
assetcheck:
	@echo "==> Checking compiled UI assets..."
	@sh -c "'$(CURDIR)/scripts/assetcheck.sh'"
@@ -233,6 +260,60 @@ spellcheck:
	@echo "==> Spell checking website..."
	@misspell -error -source=text website/source

+.PHONY: check-go-fmt
+check-go-fmt:
+	@$(CURDIR)/scripts/go-helper.sh check-fmt
+
+.PHONY: check-go-version
+check-go-version:
+	@$(CURDIR)/scripts/go-helper.sh check-version $(GO_VERSION_MIN)
+
+.PHONY: check-proto-fmt
+check-proto-fmt:
+	buf format -d --error-format github-actions --exit-code
+
+.PHONY: check-proto-delta
+check-proto-delta: prep
+	@echo "==> Checking for a delta in proto generated Go files..."
+	@echo "==> Deleting all *.pb.go files..."
+	find . -type f -name '*.pb.go' -delete -print0
+	@$(MAKE) -f $(THIS_FILE) proto
+	@if ! git diff --exit-code; then echo "Go protobuf bindings need to be regenerated. Run 'make proto' to fix them." && exit 1; fi
+
+.PHONY: check-semgrep
+check-semgrep: check-tools-external
+	@echo "==> Checking semgrep..."
+	@semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .
+
+.PHONY: check-tools
+check-tools:
+	@$(CURDIR)/tools/tools.sh check
+
+.PHONY: check-tools-external
+check-tools-external:
+	@$(CURDIR)/tools/tools.sh check-external
+
+.PHONY: check-tools-internal
+check-tools-internal:
+	@$(CURDIR)/tools/tools.sh check-internal
+
+check-vault-in-path:
+	@VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \
+	[ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \
+	printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)"
+
+.PHONY: tools
+tools:
+	@$(CURDIR)/tools/tools.sh install
+
+.PHONY: tools-external
+tools-external:
+	@$(CURDIR)/tools/tools.sh install-external
+
+.PHONY: tools-internal
+tools-internal:
+	@$(CURDIR)/tools/tools.sh install-internal
+
mysql-database-plugin:
	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin
@@ -257,17 +338,6 @@ hana-database-plugin:
mongodb-database-plugin:
	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin
-.PHONY: ci-config
-ci-config:
-	@$(MAKE) -C .circleci ci-config
-.PHONY: ci-verify
-ci-verify:
-	@$(MAKE) -C .circleci ci-verify
-
-.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-godoctests ci-vet-godoctests
-
-.NOTPARALLEL: ember-dist ember-dist-dev
-
# These ci targets are used for building and testing in GitHub Actions
# workflows and for Enos scenarios.
.PHONY: ci-build @@ -282,10 +352,6 @@ ci-build-ui: ci-bundle: @$(CURDIR)/scripts/ci-helper.sh bundle -.PHONY: ci-filter-matrix -ci-filter-matrix: - @$(CURDIR)/scripts/ci-helper.sh matrix-filter-file - .PHONY: ci-get-artifact-basename ci-get-artifact-basename: @$(CURDIR)/scripts/ci-helper.sh artifact-basename @@ -294,46 +360,46 @@ ci-get-artifact-basename: ci-get-date: @$(CURDIR)/scripts/ci-helper.sh date -.PHONY: ci-get-matrix-group-id -ci-get-matrix-group-id: - @$(CURDIR)/scripts/ci-helper.sh matrix-group-id - .PHONY: ci-get-revision ci-get-revision: @$(CURDIR)/scripts/ci-helper.sh revision -.PHONY: ci-get-version -ci-get-version: - @$(CURDIR)/scripts/ci-helper.sh version +.PHONY: ci-get-version-package +ci-get-version-package: + @$(CURDIR)/scripts/ci-helper.sh version-package -.PHONY: ci-get-version-base -ci-get-version-base: - @$(CURDIR)/scripts/ci-helper.sh version-base +.PHONY: ci-install-external-tools +ci-install-external-tools: + @$(CURDIR)/scripts/ci-helper.sh install-external-tools -.PHONY: ci-get-version-major -ci-get-version-major: - @$(CURDIR)/scripts/ci-helper.sh version-major +.PHONY: ci-prepare-ent-legal +ci-prepare-ent-legal: + @$(CURDIR)/scripts/ci-helper.sh prepare-ent-legal -.PHONY: ci-get-version-meta -ci-get-version-meta: - @$(CURDIR)/scripts/ci-helper.sh version-meta +.PHONY: ci-prepare-ce-legal +ci-prepare-ce-legal: + @$(CURDIR)/scripts/ci-helper.sh prepare-ce-legal -.PHONY: ci-get-version-minor -ci-get-version-minor: - @$(CURDIR)/scripts/ci-helper.sh version-minor +.PHONY: ci-update-external-tool-modules +ci-update-external-tool-modules: + @$(CURDIR)/scripts/ci-helper.sh update-external-tool-modules -.PHONY: ci-get-version-package -ci-get-version-package: - @$(CURDIR)/scripts/ci-helper.sh version-package +.PHONY: ci-copywriteheaders +ci-copywriteheaders: + copywrite headers --plan + # Special case for MPL headers in /api, /sdk, and /shamir + cd api && $(CURDIR)/scripts/copywrite-exceptions.sh + cd sdk && $(CURDIR)/scripts/copywrite-exceptions.sh + cd shamir && $(CURDIR)/scripts/copywrite-exceptions.sh -.PHONY: ci-get-version-patch -ci-get-version-patch: - @$(CURDIR)/scripts/ci-helper.sh version-patch +.PHONY: all bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-codechecker ci-vet-codechecker clean dev + +.NOTPARALLEL: ember-dist ember-dist-dev -.PHONY: ci-get-version-pre -ci-get-version-pre: - @$(CURDIR)/scripts/ci-helper.sh version-pre +.PHONY: all-packages +all-packages: + @echo $(ALL_PACKAGES) | tr ' ' '\n' -.PHONY: ci-prepare-legal -ci-prepare-legal: - @$(CURDIR)/scripts/ci-helper.sh prepare-legal +.PHONY: clean +clean: + @echo "==> Cleaning..." 
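For reference, the reworked Makefile above can be exercised as follows. This is a minimal sketch using only targets and variables defined in the Makefile diff; it assumes the commands are run from the repository root, with the helper tools that the `tools` target installs available on `$PATH`:

```sh
# Dev build: the 'dev' target appends the 'testonly' build tag via a
# target-specific variable, so release builds ('make bin') never include it.
make dev

# Any non-empty BUILD_MINIMAL additionally sets the 'minimal' build tag,
# producing a build with only core features.
BUILD_MINIMAL=1 make dev

# Print the package list that MAIN_PACKAGES, SDK_PACKAGES, and API_PACKAGES
# resolve to across the root, sdk, and api modules.
make all-packages

# Install the external and internal helper tools that the check-tools*
# targets later verify.
make tools
```
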
diff --git a/README.md b/README.md index 7de5fb620cd3..8139fa6d751a 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Vault [![CircleCI](https://circleci.com/gh/hashicorp/vault.svg?style=svg)](https://circleci.com/gh/hashicorp/vault) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise) +# Vault [![build](https://github.com/hashicorp/vault/actions/workflows/build.yml/badge.svg)](https://github.com/hashicorp/vault/actions/workflows/build.yml) [![ci](https://github.com/hashicorp/vault/actions/workflows/ci.yml/badge.svg)](https://github.com/hashicorp/vault/actions/workflows/ci.yml) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise) ---- @@ -9,9 +9,9 @@ - Website: https://www.vaultproject.io - Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce) - Discussion forum: [Discuss](https://discuss.hashicorp.com/c/vault) -- Documentation: [https://www.vaultproject.io/docs/](https://www.vaultproject.io/docs/) -- Tutorials: [HashiCorp's Learn Platform](https://learn.hashicorp.com/vault) -- Certification Exam: [Vault Associate](https://www.hashicorp.com/certification/#hashicorp-certified-vault-associate) +- Documentation: [https://developer.hashicorp.com/vault/docs](https://developer.hashicorp.com/vault/docs) +- Tutorials: [https://developer.hashicorp.com/vault/tutorials](https://developer.hashicorp.com/vault/tutorials) +- Certification Exam: [https://developer.hashicorp.com/certifications/security-automation](https://developer.hashicorp.com/certifications/security-automation) Vault Logo @@ -21,8 +21,7 @@ A modern system requires access to a multitude of secrets: database credentials, The key features of Vault are: -* **Secure Secret Storage**: Arbitrary key/value secrets can be stored - in Vault. Vault encrypts these secrets prior to writing them to persistent +* **Secure Secret Storage**: Vault can store arbitrary key/value pairs. Vault encrypts data before writing it to persistent storage, so gaining access to the raw storage isn't enough to access your secrets. Vault can write to disk, [Consul](https://www.consul.io), and more. @@ -39,8 +38,8 @@ The key features of Vault are: developers to store encrypted data in a location such as a SQL database without having to design their own encryption methods. -* **Leasing and Renewal**: All secrets in Vault have a _lease_ associated - with them. At the end of the lease, Vault will automatically revoke that +* **Leasing and Renewal**: Vault associates a **lease** with each secret. + At the end of the lease, Vault automatically revokes the secret. Clients are able to renew leases via built-in renew APIs. * **Revocation**: Vault has built-in support for secret revocation. Vault @@ -52,7 +51,7 @@ The key features of Vault are: Documentation, Getting Started, and Certification Exams ------------------------------- -Documentation is available on the [Vault website](https://www.vaultproject.io/docs/). +Documentation is available on the [Vault website](https://developer.hashicorp.com/vault/docs). 
If you're new to Vault and want to get started with security automation, please check out our [Getting Started guides](https://learn.hashicorp.com/collections/vault/getting-started) @@ -73,9 +72,12 @@ If you wish to work on Vault itself or any of its built-in systems, you'll first need [Go](https://www.golang.org) installed on your machine. For local dev first make sure Go is properly installed, including setting up a -[GOPATH](https://golang.org/doc/code.html#GOPATH). Ensure that `$GOPATH/bin` is in -your path as some distributions bundle the old version of build tools. Next, clone this -repository. Vault uses [Go Modules](https://github.com/golang/go/wiki/Modules), +[GOPATH](https://golang.org/doc/code.html#GOPATH), then setting the +[GOBIN](https://pkg.go.dev/cmd/go#hdr-Environment_variables) variable to `$GOPATH/bin`. +Ensure that `$GOPATH/bin` is in your path as some distributions bundle the old version +of build tools. + +Next, clone this repository. Vault uses [Go Modules](https://github.com/golang/go/wiki/Modules), so it is recommended that you clone the repository ***outside*** of the GOPATH. You can then download any required build tools by bootstrapping your environment: @@ -121,6 +123,15 @@ $ make test TEST=./vault ... ``` +### Troubleshooting + +If you encounter an error like `could not read Username for 'https://github.com'` you may need to adjust your git config like so: + +```sh +$ git config --global --add url."git@github.com:".insteadOf "https://github.com/" +``` + + ### Importing Vault This repository publishes two libraries that may be imported by other projects: @@ -136,6 +147,8 @@ is not, and has never been, a supported way to use the Vault project. We aren't likely to fix bugs relating to failure to import `github.com/hashicorp/vault` into your project. +See also the section "Docker-based tests" below. + ### Acceptance Tests Vault has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing) @@ -169,3 +182,118 @@ things such as access keys. The test itself should error early and tell you what to set, so it is not documented here. For more information on Vault Enterprise features, visit the [Vault Enterprise site](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=referral&utm_campaign=github-vault-enterprise). + +### Docker-based Tests + +We have created an experimental new testing mechanism inspired by NewTestCluster. +An example of how to use it: + +```go +import ( + "testing" + "github.com/hashicorp/vault/sdk/helper/testcluster/docker" +) + +func Test_Something_With_Docker(t *testing.T) { + opts := &docker.DockerClusterOptions{ + ImageRepo: "hashicorp/vault", // or "hashicorp/vault-enterprise" + ImageTag: "latest", + } + cluster := docker.NewTestDockerCluster(t, opts) + defer cluster.Cleanup() + + client := cluster.Nodes()[0].APIClient() + _, err := client.Logical().Read("sys/storage/raft/configuration") + if err != nil { + t.Fatal(err) + } +} +``` + +Or for Enterprise: + +```go +import ( + "testing" + "github.com/hashicorp/vault/sdk/helper/testcluster/docker" +) + +func Test_Something_With_Docker(t *testing.T) { + opts := &docker.DockerClusterOptions{ + ImageRepo: "hashicorp/vault-enterprise", + ImageTag: "latest", + VaultLicense: licenseString, // not a path, the actual license bytes + } + cluster := docker.NewTestDockerCluster(t, opts) + defer cluster.Cleanup() +} +``` + +Here is a more realistic example of how we use it in practice. 
DefaultOptions uses +`hashicorp/vault`:`latest` as the repo and tag, but it also looks at the environment +variable VAULT_BINARY. If populated, it will copy the local file referenced by +VAULT_BINARY into the container. This is useful when testing local changes. + +Instead of setting the VaultLicense option, you can set the VAULT_LICENSE_CI environment +variable, which is better than committing a license to version control. + +Optionally you can set COMMIT_SHA, which will be appended to the image name we +build as a debugging convenience. + +```go +func Test_Custom_Build_With_Docker(t *testing.T) { + opts := docker.DefaultOptions(t) + cluster := docker.NewTestDockerCluster(t, opts) + defer cluster.Cleanup() +} +``` + +There are a variety of helpers in the `github.com/hashicorp/vault/sdk/helper/testcluster` +package, e.g. these tests below will create a pair of 3-node clusters and link them using +PR or DR replication respectively, and fail if the replication state doesn't become healthy +before the passed context expires. + +Again, as written, these depend on having a Vault Enterprise binary locally and the env +var VAULT_BINARY set to point to it, as well as having VAULT_LICENSE_CI set. + +```go +func TestStandardPerfReplication_Docker(t *testing.T) { + opts := docker.DefaultOptions(t) + r, err := docker.NewReplicationSetDocker(t, opts) + if err != nil { + t.Fatal(err) + } + defer r.Cleanup() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err = r.StandardPerfReplication(ctx) + if err != nil { + t.Fatal(err) + } +} + +func TestStandardDRReplication_Docker(t *testing.T) { + opts := docker.DefaultOptions(t) + r, err := docker.NewReplicationSetDocker(t, opts) + if err != nil { + t.Fatal(err) + } + defer r.Cleanup() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err = r.StandardDRReplication(ctx) + if err != nil { + t.Fatal(err) + } +} +``` + +Finally, here's an example of running an existing OSS docker test with a custom binary: + +```bash +$ GOOS=linux make dev +$ VAULT_BINARY=$(pwd)/bin/vault go test -run 'TestRaft_Configuration_Docker' ./vault/external_tests/raft/raft_binary +ok github.com/hashicorp/vault/vault/external_tests/raft/raft_binary 20.960s +``` diff --git a/api/.copywrite.hcl b/api/.copywrite.hcl new file mode 100644 index 000000000000..c4b09f33640c --- /dev/null +++ b/api/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/api/LICENSE b/api/LICENSE new file mode 100644 index 000000000000..f4f97ee5853a --- /dev/null +++ b/api/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/api/README.md b/api/README.md index 7230ce779fe2..d21458c11449 100644 --- a/api/README.md +++ b/api/README.md @@ -4,6 +4,6 @@ Vault API This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server. For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo. -For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://www.vaultproject.io/docs/get-started/developer-qs). +For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://developer.hashicorp.com/vault/docs/get-started/developer-qs). [![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api) \ No newline at end of file diff --git a/api/auth/approle/go.mod b/api/auth/approle/go.mod index f321791dbdd5..6196e6942f93 100644 --- a/api/auth/approle/go.mod +++ b/api/auth/approle/go.mod @@ -1,5 +1,28 @@ module github.com/hashicorp/vault/api/auth/approle -go 1.16 +go 1.21 -require github.com/hashicorp/vault/api v1.9.0 +toolchain go1.22.2 + +require github.com/hashicorp/vault/api v1.15.0 + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect +) diff --git a/api/auth/approle/go.sum b/api/auth/approle/go.sum index 1ac812646c18..84fd13507a68 100644 --- a/api/auth/approle/go.sum +++ b/api/auth/approle/go.sum @@ -1,30 +1,31 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod 
h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -36,17 +37,14 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= -github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -61,51 +59,21 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time 
v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/aws/aws.go b/api/auth/aws/aws.go index f2aa9be1d00c..df873828e13c 100644 --- a/api/auth/aws/aws.go +++ b/api/auth/aws/aws.go @@ -56,7 +56,7 @@ const ( // passed as a parameter to the client.Auth().Login method. // // Supported options: WithRole, WithMountPath, WithIAMAuth, WithEC2Auth, -// WithPKCS7Signature, WithIdentitySignature, WithIAMServerIDHeader, WithNonce, WithRegion +// WithPKCS7Signature, WithIdentitySignature, WithRSA2048Signature, WithIAMServerIDHeader, WithNonce, WithRegion func NewAWSAuth(opts ...LoginOption) (*AWSAuth, error) { a := &AWSAuth{ mountPath: defaultMountPath, @@ -241,7 +241,7 @@ func WithIAMAuth() LoginOption { // If this option is not provided, will default to using the PKCS #7 signature. // The signature type used should match the type of the public AWS cert Vault // has been configured with to verify EC2 instance identity. -// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration +// https://developer.hashicorp.com/vault/api-docs/auth/aws#create-certificate-configuration func WithIdentitySignature() LoginOption { return func(a *AWSAuth) error { a.signatureType = identityType @@ -254,7 +254,7 @@ func WithIdentitySignature() LoginOption { // PKCS #7 is the default, but this method is provided for additional clarity. // The signature type used should match the type of the public AWS cert Vault // has been configured with to verify EC2 instance identity. -// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration +// https://developer.hashicorp.com/vault/api-docs/auth/aws#create-certificate-configuration func WithPKCS7Signature() LoginOption { return func(a *AWSAuth) error { a.signatureType = pkcs7Type @@ -262,6 +262,19 @@ func WithPKCS7Signature() LoginOption { } } +// WithRSA2048Signature will explicitly tell the client to send the RSA2048 +// signature to verify EC2 auth logins. Only used by EC2 auth type. +// If this option is not provided, will default to using the PKCS #7 signature. +// The signature type used should match the type of the public AWS cert Vault +// has been configured with to verify EC2 instance identity. 
+// https://developer.hashicorp.com/vault/api-docs/auth/aws#create-certificate-configuration +func WithRSA2048Signature() LoginOption { + return func(a *AWSAuth) error { + a.signatureType = rsa2048Type + return nil + } +} + func WithIAMServerIDHeader(headerValue string) LoginOption { return func(a *AWSAuth) error { a.iamServerIDHeaderValue = headerValue
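For reference, a minimal usage sketch of the new option (not part of the diff itself): it assumes a reachable Vault server configured through the standard `VAULT_ADDR` environment, the AWS auth method mounted at its default `aws` path, and a hypothetical role name `dev-role`.

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
	auth "github.com/hashicorp/vault/api/auth/aws"
)

func main() {
	// Client configuration (address, TLS settings, token) is read from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatalf("failed to create Vault client: %v", err)
	}

	// EC2 login that sends the RSA2048 identity signature instead of the
	// default PKCS #7 signature. WithRSA2048Signature only affects the EC2
	// auth type, so it is paired with WithEC2Auth here.
	awsAuth, err := auth.NewAWSAuth(
		auth.WithEC2Auth(),
		auth.WithRSA2048Signature(),
		auth.WithRole("dev-role"), // hypothetical role name
	)
	if err != nil {
		log.Fatalf("failed to configure AWS auth: %v", err)
	}

	secret, err := client.Auth().Login(context.Background(), awsAuth)
	if err != nil {
		log.Fatalf("login failed: %v", err)
	}
	fmt.Println("logged in; token accessor:", secret.Auth.Accessor)
}
```

As the doc comment notes, this only succeeds if Vault's AWS auth method has been configured with a matching RSA2048 public certificate via the create-certificate-configuration endpoint linked above.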
diff --git a/api/auth/aws/go.mod b/api/auth/aws/go.mod index 19e8f0175b7c..9f0a2efdc629 100644 --- a/api/auth/aws/go.mod +++ b/api/auth/aws/go.mod @@ -1,11 +1,40 @@ module github.com/hashicorp/vault/api/auth/aws -go 1.16 +go 1.21 + +toolchain go1.22.2 require ( - github.com/aws/aws-sdk-go v1.30.27 - github.com/hashicorp/go-hclog v0.16.2 + github.com/aws/aws-sdk-go v1.49.22 + github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/vault/api v1.9.0 + github.com/hashicorp/vault/api v1.15.0 +) + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect ) diff --git a/api/auth/aws/go.sum b/api/auth/aws/go.sum index 2a723f1eae18..c03cee1af401 100644 --- a/api/auth/aws/go.sum +++ b/api/auth/aws/go.sum @@ -1,34 +1,38 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.30.27 h1:9gPjZWVDSoQrBO2AvqrWObS6KAZByfEJxQoCYo4ZfK0= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.49.22 h1:r01+cQJ3cORQI1PJxG8af0jzrZpUOL9L+/3kU2x1geU= +github.com/aws/aws-sdk-go v1.49.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= @@ -44,10 +48,13 @@ github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2I github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= -github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -55,13 +62,19 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -81,22 +94,16 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -104,33 +111,24 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/azure/azure.go b/api/auth/azure/azure.go index b68219570115..ecca535b43f0 100644 --- a/api/auth/azure/azure.go +++ b/api/auth/azure/azure.go @@ -7,7 +7,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" 
"net/url" "time" @@ -175,7 +175,7 @@ func (a *AzureAuth) getJWT() (string, error) { } defer resp.Body.Close() - responseBytes, err := ioutil.ReadAll(resp.Body) + responseBytes, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("error reading response body from Azure token endpoint: %w", err) } @@ -222,7 +222,7 @@ func getMetadata() (metadataJSON, error) { } defer resp.Body.Close() - responseBytes, err := ioutil.ReadAll(resp.Body) + responseBytes, err := io.ReadAll(resp.Body) if err != nil { return metadataJSON{}, fmt.Errorf("error reading response body from metadata endpoint: %w", err) } diff --git a/api/auth/azure/go.mod b/api/auth/azure/go.mod index 4e5c95f5a7b4..5f45bcb710e1 100644 --- a/api/auth/azure/go.mod +++ b/api/auth/azure/go.mod @@ -1,5 +1,28 @@ module github.com/hashicorp/vault/api/auth/azure -go 1.16 +go 1.21 -require github.com/hashicorp/vault/api v1.9.0 +toolchain go1.22.2 + +require github.com/hashicorp/vault/api v1.15.0 + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect +) diff --git a/api/auth/azure/go.sum b/api/auth/azure/go.sum index 1ac812646c18..84fd13507a68 100644 --- a/api/auth/azure/go.sum +++ b/api/auth/azure/go.sum @@ -1,30 +1,31 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= 
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -36,17 +37,14 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= -github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -61,51 +59,21 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/gcp/gcp.go b/api/auth/gcp/gcp.go index 
2d6ef842a4b4..7f4f222c0c72 100644 --- a/api/auth/gcp/gcp.go +++ b/api/auth/gcp/gcp.go @@ -7,7 +7,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -181,7 +181,7 @@ func (a *GCPAuth) getJWTFromMetadataService(vaultAddress string) (string, error) defer resp.Body.Close() // get jwt from response - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) jwt := string(body) if err != nil { return "", fmt.Errorf("error reading response from metadata service: %w", err) diff --git a/api/auth/gcp/go.mod b/api/auth/gcp/go.mod index f2611e35de05..a1f61fa92415 100644 --- a/api/auth/gcp/go.mod +++ b/api/auth/gcp/go.mod @@ -1,10 +1,57 @@ module github.com/hashicorp/vault/api/auth/gcp -go 1.16 +go 1.21 + +toolchain go1.22.2 + +require ( + cloud.google.com/go/compute/metadata v0.3.0 + cloud.google.com/go/iam v1.1.8 + github.com/hashicorp/vault/api v1.15.0 + google.golang.org/genproto v0.0.0-20240604185151-ef581f913117 +) require ( - cloud.google.com/go v0.97.0 - github.com/hashicorp/vault/api v1.9.0 - google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 - google.golang.org/grpc v1.41.0 // indirect + cloud.google.com/go/auth v0.3.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/api v0.177.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/api/auth/gcp/go.sum b/api/auth/gcp/go.sum index 8584be4a8d21..59fc4eb1228c 
100644 --- a/api/auth/gcp/go.sum +++ b/api/auth/gcp/go.sum @@ -1,177 +1,84 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage 
v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs= +cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -181,29 +88,16 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= 
-github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -215,395 +109,93 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace 
v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint 
v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= 
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0 h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= +google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= 
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 h1:5Tbluzus3QxoAJx4IefGt1W0HQZW4nuMrVk684jI74Q= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20240604185151-ef581f913117 h1:HCZ6DlkKtCDAtD8ForECsY3tKuaR+p4R3grlK80uCCc= +google.golang.org/genproto v0.0.0-20240604185151-ef581f913117/go.mod h1:lesfX/+9iA+3OdqeCpoDddJaNxVB1AB6tD7EfqMmprc= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -612,28 +204,12 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/api/auth/kubernetes/go.mod b/api/auth/kubernetes/go.mod index f7e418df1b03..f06c5c611ffb 100644 --- a/api/auth/kubernetes/go.mod +++ b/api/auth/kubernetes/go.mod @@ -1,5 +1,28 @@ module github.com/hashicorp/vault/api/auth/kubernetes -go 1.16 +go 1.21 -require github.com/hashicorp/vault/api v1.9.0 +toolchain go1.22.2 + +require github.com/hashicorp/vault/api v1.15.0 + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect +) diff --git a/api/auth/kubernetes/go.sum b/api/auth/kubernetes/go.sum index 1ac812646c18..84fd13507a68 100644 --- a/api/auth/kubernetes/go.sum +++ b/api/auth/kubernetes/go.sum @@ -1,30 +1,31 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod 
h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -36,17 +37,14 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= -github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable 
v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -61,51 +59,21 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= 
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/ldap/go.mod b/api/auth/ldap/go.mod index f7413c4d4ce1..ba1fd3f02fb9 100644 --- a/api/auth/ldap/go.mod +++ b/api/auth/ldap/go.mod @@ -1,5 +1,28 @@ module github.com/hashicorp/vault/api/auth/ldap -go 1.16 +go 1.21 -require github.com/hashicorp/vault/api v1.9.0 +toolchain go1.22.2 + +require github.com/hashicorp/vault/api v1.15.0 + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect +) diff --git a/api/auth/ldap/go.sum b/api/auth/ldap/go.sum index 1ac812646c18..84fd13507a68 100644 --- a/api/auth/ldap/go.sum +++ b/api/auth/ldap/go.sum @@ -1,30 +1,31 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -36,17 +37,14 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= -github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= 
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -61,51 +59,21 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/userpass/go.mod b/api/auth/userpass/go.mod index fa4712b05473..83d87488e41c 100644 --- a/api/auth/userpass/go.mod +++ b/api/auth/userpass/go.mod @@ -1,5 +1,28 @@ module github.com/hashicorp/vault/api/auth/userpass -go 1.16 +go 1.21 -require github.com/hashicorp/vault/api v1.9.0 +toolchain go1.22.2 + +require github.com/hashicorp/vault/api v1.15.0 + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 
// indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect +) diff --git a/api/auth/userpass/go.sum b/api/auth/userpass/go.sum index 1ac812646c18..84fd13507a68 100644 --- a/api/auth/userpass/go.sum +++ b/api/auth/userpass/go.sum @@ -1,30 +1,31 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= 
-github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -36,17 +37,14 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.9.0 h1:ab7dI6W8DuCY7yCU8blo0UCYl2oHre/dloCmzMWg9w8= -github.com/hashicorp/vault/api v1.9.0/go.mod h1:lloELQP4EyhjnCQhF8agKvWIVTmxbpEJj70b98959sM= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -61,51 +59,21 @@ github.com/ryanuber/columnize 
v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/api/cliconfig/config.go b/api/cliconfig/config.go
new file mode 100644
index 000000000000..f918f24395f8
--- /dev/null
+++ b/api/cliconfig/config.go
@@ -0,0 +1,99 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cliconfig
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcl"
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/mitchellh/go-homedir"
+)
+
+const (
+	// defaultConfigPath is the default path to the configuration file
+	defaultConfigPath = "~/.vault"
+
+	// configPathEnv is the environment variable that can be used to
+	// override where the Vault configuration is.
+	configPathEnv = "VAULT_CONFIG_PATH"
+)
+
+// defaultConfig is the CLI configuration for Vault that can be specified via
+// a `$HOME/.vault` file which is HCL-formatted (therefore HCL or JSON).
+type defaultConfig struct {
+	// TokenHelper is the executable/command that is executed for storing
+	// and retrieving the authentication token for the Vault CLI. If this
+	// is not specified, then vault's internal token store will be used, which
+	// stores the token on disk unencrypted.
+	TokenHelper string `hcl:"token_helper"`
+}
+
+// loadConfig reads the configuration from the given path. If path is
+// empty, the default path is used; in either case, a path set via the
+// VAULT_CONFIG_PATH environment variable takes precedence.
+func loadConfig(path string) (*defaultConfig, error) {
+	if path == "" {
+		path = defaultConfigPath
+	}
+	if v := os.Getenv(configPathEnv); v != "" {
+		path = v
+	}
+
+	// NOTE: requires HOME env var to be set
+	path, err := homedir.Expand(path)
+	if err != nil {
+		return nil, fmt.Errorf("error expanding config path %q: %w", path, err)
+	}
+
+	contents, err := os.ReadFile(path)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+
+	conf, err := parseConfig(string(contents))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing config file at %q: %w; ensure that the file is valid; Ansible Vault is known to conflict with it", path, err)
+	}
+
+	return conf, nil
+}
+
+// parseConfig parses the given configuration as a string.
+func parseConfig(contents string) (*defaultConfig, error) {
+	root, err := hcl.Parse(contents)
+	if err != nil {
+		return nil, err
+	}
+
+	// Top-level item should be the object list
+	list, ok := root.Node.(*ast.ObjectList)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse config; does not contain a root object")
+	}
+
+	valid := map[string]struct{}{
+		"token_helper": {},
+	}
+
+	var validationErrors error
+	for _, item := range list.Items {
+		key := item.Keys[0].Token.Value().(string)
+		if _, ok := valid[key]; !ok {
+			validationErrors = multierror.Append(validationErrors, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
+		}
+	}
+
+	if validationErrors != nil {
+		return nil, validationErrors
+	}
+
+	var c defaultConfig
+	if err := hcl.DecodeObject(&c, list); err != nil {
+		return nil, err
+	}
+	return &c, nil
+}
diff --git a/api/cliconfig/config_test.go b/api/cliconfig/config_test.go
new file mode 100644
index 000000000000..5e2dedeaeb0a
--- /dev/null
+++ b/api/cliconfig/config_test.go
@@ -0,0 +1,50 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cliconfig
+
+import (
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestLoadConfig(t *testing.T) {
+	config, err := loadConfig(filepath.Join("testdata", "config.hcl"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	expected := &defaultConfig{
+		TokenHelper: "foo",
+	}
+	if !reflect.DeepEqual(expected, config) {
+		t.Fatalf("bad: %#v", config)
+	}
+}
+
+func TestLoadConfig_noExist(t *testing.T) {
+	config, err := loadConfig("nope/not-once/.never")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if config.TokenHelper != "" {
+		t.Errorf("expected %q to be %q", config.TokenHelper, "")
+	}
+}
+
+func TestParseConfig_badKeys(t *testing.T) {
+	_, err := parseConfig(`
+token_helper = "/token"
+nope = "true"
+`)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+
+	if !strings.Contains(err.Error(), `invalid key "nope" on line 3`) {
+		t.Errorf("bad error: %s", err.Error())
+	}
+}
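For a sense of how this package is consumed, here is a minimal, hypothetical usage sketch. It assumes the DefaultTokenHelper entry point added in util.go further below, and a `~/.vault` file shaped like the testdata fixture that follows; nothing in it is asserted by the diff itself.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/api/cliconfig"
)

func main() {
	// With no ~/.vault file, or one without a token_helper entry, this
	// falls back to the internal on-disk token helper; otherwise it wraps
	// the external binary named by token_helper.
	helper, err := cliconfig.DefaultTokenHelper()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("token helper in use: %T\n", helper)
}

diff --git a/api/cliconfig/testdata/config.hcl b/api/cliconfig/testdata/config.hcl
new file mode 100644
index 000000000000..164acd29cc80
--- /dev/null
+++ b/api/cliconfig/testdata/config.hcl
@@ -0,0 +1,4 @@
+# Copyright (c) HashiCorp, Inc.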
+# SPDX-License-Identifier: MPL-2.0 + +token_helper = "foo" diff --git a/api/cliconfig/util.go b/api/cliconfig/util.go new file mode 100644 index 000000000000..e492ccb03857 --- /dev/null +++ b/api/cliconfig/util.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cliconfig + +import ( + "github.com/hashicorp/vault/api/tokenhelper" +) + +// DefaultTokenHelper returns the token helper that is configured for Vault. +// This helper should only be used for non-server CLI commands. +func DefaultTokenHelper() (tokenhelper.TokenHelper, error) { + config, err := loadConfig("") + if err != nil { + return nil, err + } + + path := config.TokenHelper + if path == "" { + return tokenhelper.NewInternalTokenHelper() + } + + path, err = tokenhelper.ExternalTokenHelperPath(path) + if err != nil { + return nil, err + } + return &tokenhelper.ExternalTokenHelper{BinaryPath: path}, nil +} diff --git a/api/client.go b/api/client.go index 7ca12d560bd0..0090321caa7f 100644 --- a/api/client.go +++ b/api/client.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "encoding/base64" "encoding/hex" + "encoding/json" "fmt" "net" "net/http" @@ -41,6 +42,7 @@ const ( EnvVaultClientCert = "VAULT_CLIENT_CERT" EnvVaultClientKey = "VAULT_CLIENT_KEY" EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultHeaders = "VAULT_HEADERS" EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" EnvVaultNamespace = "VAULT_NAMESPACE" @@ -82,6 +84,8 @@ const ( const ( EnvVaultAgentAddress = "VAULT_AGENT_ADDR" EnvVaultInsecure = "VAULT_SKIP_VERIFY" + + DefaultAddress = "https://127.0.0.1:8200" ) // WrappingLookupFunc is a function that, given an HTTP verb and a path, @@ -185,6 +189,9 @@ type Config struct { // CloneToken from parent. CloneToken bool + // CloneTLSConfig from parent (tls.Config). + CloneTLSConfig bool + // ReadYourWrites ensures isolated read-after-write semantics by // providing discovered cluster replication states in each request. // The shared state is automatically propagated to all Client clones. @@ -203,6 +210,7 @@ type Config struct { // commands such as 'vault operator raft snapshot' as this redirects to the // primary node. DisableRedirects bool + clientTLSConfig *tls.Config } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -244,7 +252,7 @@ type TLSConfig struct { // If an error is encountered, the Error field on the returned *Config will be populated with the specific error. 
func DefaultConfig() *Config { config := &Config{ - Address: "https://127.0.0.1:8200", + Address: DefaultAddress, HttpClient: cleanhttp.DefaultPooledClient(), Timeout: time.Second * 60, MinRetryWait: time.Millisecond * 1000, @@ -289,7 +297,14 @@ func (c *Config) configureTLS(t *TLSConfig) error { if c.HttpClient == nil { c.HttpClient = DefaultConfig().HttpClient } - clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + transport, ok := c.HttpClient.Transport.(*http.Transport) + if !ok { + return fmt.Errorf( + "unsupported HTTPClient transport type %T", c.HttpClient.Transport) + } + + clientTLSConfig := transport.TLSClientConfig var clientCert tls.Certificate foundClientCert := false @@ -337,10 +352,17 @@ func (c *Config) configureTLS(t *TLSConfig) error { if t.TLSServerName != "" { clientTLSConfig.ServerName = t.TLSServerName } + c.clientTLSConfig = clientTLSConfig return nil } +func (c *Config) TLSConfig() *tls.Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.clientTLSConfig.Clone() +} + // ConfigureTLS takes a set of TLS configurations and applies those to the // HTTP client. func (c *Config) ConfigureTLS(t *TLSConfig) error { @@ -510,6 +532,7 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) { return nil, err } + previousAddress := c.Address c.Address = address if strings.HasPrefix(address, "unix://") { @@ -527,12 +550,12 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) { // be pointing to the protocol used in the application layer and not to // the transport layer. Hence, setting the fields accordingly. u.Scheme = "http" - u.Host = socket + u.Host = "localhost" u.Path = "" } else { return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") } - } else if strings.HasPrefix(c.Address, "unix://") { + } else if strings.HasPrefix(previousAddress, "unix://") { // When the address being set does not begin with unix:// but the previous // address in the Config did, change the transport's DialContext back to // use the default configuration that cleanhttp uses. @@ -571,6 +594,7 @@ type Client struct { requestCallbacks []RequestCallback responseCallbacks []ResponseCallback replicationStateStore *replicationStateStore + hcpCookie *http.Cookie } // NewClient returns a new client for the given configuration. 
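The first hunk below teaches NewClient to honor a VAULT_HEADERS environment variable: a JSON object whose string values become headers on every request, with keys in the reserved X-Vault- space rejected. A minimal sketch of the intended usage, where the header name and value are illustrative and not part of this change:

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Values must be strings and keys must not start with "X-Vault-",
	// otherwise NewClient returns an error.
	os.Setenv("VAULT_HEADERS", `{"X-Custom-Trace": "abc123"}`)

	client, err := api.NewClient(nil) // a nil config falls back to DefaultConfig
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(client.Headers().Get("X-Custom-Trace")) // prints "abc123"
}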
@@ -643,6 +667,30 @@ func NewClient(c *Config) (*Client, error) {
 		client.setNamespace(namespace)
 	}
 
+	if envHeaders := os.Getenv(EnvVaultHeaders); envHeaders != "" {
+		var result map[string]any
+		err := json.Unmarshal([]byte(envHeaders), &result)
+		if err != nil {
+			return nil, fmt.Errorf("could not unmarshal environment-supplied headers")
+		}
+		var forbiddenHeaders []string
+		for key, value := range result {
+			if strings.HasPrefix(key, "X-Vault-") {
+				forbiddenHeaders = append(forbiddenHeaders, key)
+				continue
+			}
+
+			value, ok := value.(string)
+			if !ok {
+				return nil, fmt.Errorf("environment-supplied headers include non-string values")
+			}
+			client.AddHeader(key, value)
+		}
+		if len(forbiddenHeaders) > 0 {
+			return nil, fmt.Errorf("failed to set up Headers[%s]: headers starting with 'X-Vault-' are for internal use only", strings.Join(forbiddenHeaders, ", "))
+		}
+	}
+
 	return client, nil
 }
 
@@ -665,6 +713,7 @@ func (c *Client) CloneConfig() *Config {
 	newConfig.CloneHeaders = c.config.CloneHeaders
 	newConfig.CloneToken = c.config.CloneToken
 	newConfig.ReadYourWrites = c.config.ReadYourWrites
+	newConfig.clientTLSConfig = c.config.clientTLSConfig
 
 	// we specifically want a _copy_ of the client here, not a pointer to the original one
 	newClient := *c.config.HttpClient
@@ -682,7 +731,7 @@ func (c *Client) SetAddress(addr string) error {
 	parsedAddr, err := c.config.ParseAddress(addr)
 	if err != nil {
-		return errwrap.Wrapf("failed to set address: {{err}}", err)
+		return fmt.Errorf("failed to set address: %w", err)
 	}
 
 	c.addr = parsedAddr
@@ -979,7 +1028,9 @@ func (c *Client) Namespace() string {
 func (c *Client) WithNamespace(namespace string) *Client {
 	c2 := *c
 	c2.modifyLock = sync.RWMutex{}
-	c2.headers = c.Headers()
+	c.modifyLock.RLock()
+	c2.headers = c.headersInternal()
+	c.modifyLock.RUnlock()
 	if namespace == "" {
 		c2.ClearNamespace()
 	} else {
@@ -1004,6 +1055,33 @@ func (c *Client) SetToken(v string) {
 	c.token = v
 }
 
+// HCPCookie returns the HCP cookie being used by this client. It will
+// return an empty string when no cookie is set.
+func (c *Client) HCPCookie() string {
+	c.modifyLock.RLock()
+	defer c.modifyLock.RUnlock()
+
+	if c.hcpCookie == nil {
+		return ""
+	}
+	return c.hcpCookie.String()
+}
+
+// SetHCPCookie sets the HCP cookie directly. This won't perform any auth
+// verification, it simply sets the cookie for future requests.
+func (c *Client) SetHCPCookie(v *http.Cookie) error {
+	c.modifyLock.Lock()
+	defer c.modifyLock.Unlock()
+
+	if err := v.Valid(); err != nil {
+		return err
+	}
+
+	c.hcpCookie = v
+
+	return nil
+}
+
 // ClearToken deletes the token if it is set or does nothing otherwise.
 func (c *Client) ClearToken() {
 	c.modifyLock.Lock()
@@ -1016,7 +1094,12 @@ func (c *Client) ClearToken() {
 func (c *Client) Headers() http.Header {
 	c.modifyLock.RLock()
 	defer c.modifyLock.RUnlock()
+	return c.headersInternal()
+}
 
+// headersInternal gets the current set of headers used for requests. Must be called
+// with the read modifyLock held.
+func (c *Client) headersInternal() http.Header {
 	if c.headers == nil {
 		return nil
 	}
@@ -1134,6 +1217,26 @@ func (c *Client) ReadYourWrites() bool {
 	return c.config.ReadYourWrites
 }
 
+// SetCloneTLSConfig from parent.
+func (c *Client) SetCloneTLSConfig(clone bool) {
+	c.modifyLock.Lock()
+	defer c.modifyLock.Unlock()
+	c.config.modifyLock.Lock()
+	defer c.config.modifyLock.Unlock()
+
+	c.config.CloneTLSConfig = clone
+}
+
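Taken together with the SetCloneTLSConfig setter above, a hedged sketch of the opt-in flow might look like the following. TLSServerName is used only so the sketch needs no certificate files, and "vault.internal" is a placeholder; this is not asserted by the diff itself.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	config := api.DefaultConfig()
	// Populate the config's parsed tls.Config via the public ConfigureTLS
	// entry point; the diff stores the result on the Config for reuse.
	if err := config.ConfigureTLS(&api.TLSConfig{TLSServerName: "vault.internal"}); err != nil {
		fmt.Println(err)
		return
	}

	parent, err := api.NewClient(config)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Opt in so Clone carries the parent's parsed tls.Config across
	// instead of rebuilding it from scratch.
	parent.SetCloneTLSConfig(true)
	clone, err := parent.Clone()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(clone.CloneConfig().TLSConfig().ServerName) // "vault.internal"
}

+// CloneTLSConfig gets the configured CloneTLSConfig value.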
+func (c *Client) CloneTLSConfig() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneTLSConfig +} + // Clone creates a new client with the same configuration. Note that the same // underlying http.Client is used; modifying the client from more than one // goroutine at once may not be safe, so modify the client as needed and then @@ -1144,24 +1247,28 @@ func (c *Client) ReadYourWrites() bool { // the api.Config struct, such as policy override and wrapping function // behavior, must currently then be set as desired on the new client. func (c *Client) Clone() (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() return c.clone(c.config.CloneHeaders) } // CloneWithHeaders creates a new client similar to Clone, with the difference -// being that the headers are always cloned +// being that the headers are always cloned func (c *Client) CloneWithHeaders() (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() return c.clone(true) } // clone creates a new client, with the headers being cloned based on the -// passed in cloneheaders boolean +// passed in cloneheaders boolean. +// Must be called with the read lock and config read lock held. func (c *Client) clone(cloneHeaders bool) (*Client, error) { - c.modifyLock.RLock() - defer c.modifyLock.RUnlock() - config := c.config - config.modifyLock.RLock() - defer config.modifyLock.RUnlock() newConfig := &Config{ Address: config.Address, @@ -1180,13 +1287,18 @@ func (c *Client) clone(cloneHeaders bool) (*Client, error) { CloneToken: config.CloneToken, ReadYourWrites: config.ReadYourWrites, } + + if config.CloneTLSConfig { + newConfig.clientTLSConfig = config.clientTLSConfig + } + client, err := NewClient(newConfig) if err != nil { return nil, err } if cloneHeaders { - client.SetHeaders(c.Headers().Clone()) + client.SetHeaders(c.headersInternal().Clone()) } if config.CloneToken { @@ -1217,6 +1329,7 @@ func (c *Client) NewRequest(method, requestPath string) *Request { mfaCreds := c.mfaCreds wrappingLookupFunc := c.wrappingLookupFunc policyOverride := c.policyOverride + headers := c.headersInternal() c.modifyLock.RUnlock() host := addr.Host @@ -1243,6 +1356,8 @@ func (c *Client) NewRequest(method, requestPath string) *Request { Params: make(map[string][]string), } + req.HCPCookie = c.hcpCookie + var lookupPath string switch { case strings.HasPrefix(requestPath, "/v1/"): @@ -1261,7 +1376,7 @@ func (c *Client) NewRequest(method, requestPath string) *Request { req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) } - req.Headers = c.Headers() + req.Headers = headers req.PolicyOverride = policyOverride return req @@ -1271,8 +1386,9 @@ func (c *Client) NewRequest(method, requestPath string) *Request { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. // -// Deprecated: This method should not be used directly. Use higher level -// methods instead. +// Deprecated: RawRequest exists for historical compatibility and should not be +// used directly. Use client.Logical().ReadRaw(...) or higher level methods +// instead. 
func (c *Client) RawRequest(r *Request) (*Response, error) { return c.RawRequestWithContext(context.Background(), r) } @@ -1281,8 +1397,9 @@ func (c *Client) RawRequest(r *Request) (*Response, error) { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. // -// Deprecated: This method should not be used directly. Use higher level -// methods instead. +// Deprecated: RawRequestWithContext exists for historical compatibility and +// should not be used directly. Use client.Logical().ReadRawWithContext(...) +// or higher level methods instead. func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { // Note: we purposefully do not call cancel manually. The reason is // when canceled, the request.Body will EOF when reading due to the way diff --git a/api/client_test.go b/api/client_test.go index 4055da47fcde..1ed4dfd3d359 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -6,7 +6,7 @@ package api import ( "bytes" "context" - "crypto/x509" + "crypto/tls" "encoding/base64" "fmt" "io" @@ -85,7 +85,7 @@ func TestClientDefaultHttpClient_unixSocket(t *testing.T) { if client.addr.Scheme != "http" { t.Fatalf("bad: %s", client.addr.Scheme) } - if client.addr.Host != "/var/run/vault.sock" { + if client.addr.Host != "localhost" { t.Fatalf("bad: %s", client.addr.Host) } } @@ -103,14 +103,15 @@ func TestClientSetAddress(t *testing.T) { t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host) } // Test switching to Unix Socket address from TCP address + client.config.HttpClient.Transport.(*http.Transport).DialContext = nil if err := client.SetAddress("unix:///var/run/vault.sock"); err != nil { t.Fatal(err) } if client.addr.Scheme != "http" { t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) } - if client.addr.Host != "/var/run/vault.sock" { - t.Fatalf("bad: expected: '/var/run/vault.sock' actual: %q", client.addr.Host) + if client.addr.Host != "localhost" { + t.Fatalf("bad: expected: 'localhost' actual: %q", client.addr.Host) } if client.addr.Path != "" { t.Fatalf("bad: expected '' actual: %q", client.addr.Path) @@ -119,6 +120,7 @@ func TestClientSetAddress(t *testing.T) { t.Fatal("bad: expected DialContext to not be nil") } // Test switching to TCP address from Unix Socket address + client.config.HttpClient.Transport.(*http.Transport).DialContext = nil if err := client.SetAddress("http://172.168.2.1:8300"); err != nil { t.Fatal(err) } @@ -128,6 +130,9 @@ func TestClientSetAddress(t *testing.T) { if client.addr.Scheme != "http" { t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) } + if client.config.HttpClient.Transport.(*http.Transport).DialContext == nil { + t.Fatal("bad: expected DialContext to not be nil") + } } func TestClientToken(t *testing.T) { @@ -225,6 +230,7 @@ func TestClientDisableRedirects(t *testing.T) { for name, tc := range tests { test := tc + name := name t.Run(name, func(t *testing.T) { t.Parallel() numReqs := 0 @@ -323,7 +329,7 @@ func TestDefaulRetryPolicy(t *testing.T) { }, "don't retry connection failures": { err: &url.Error{ - Err: x509.UnknownAuthorityError{}, + Err: &tls.CertificateVerificationError{}, }, }, "don't retry on 200": { @@ -368,6 +374,61 @@ func TestDefaulRetryPolicy(t *testing.T) { } } +func TestClientEnvHeaders(t *testing.T) { + oldHeaders := os.Getenv(EnvVaultHeaders) + + defer func() { + os.Setenv(EnvVaultHeaders, oldHeaders) + }() + + cases := []struct { + Input string + Valid bool + }{ + { + 
"{}", + true, + }, + { + "{\"foo\": \"bar\"}", + true, + }, + { + "{\"foo\": 1}", // Values must be strings + false, + }, + { + "{\"X-Vault-Foo\": \"bar\"}", // X-Vault-* not allowed + false, + }, + } + + for _, tc := range cases { + os.Setenv(EnvVaultHeaders, tc.Input) + config := DefaultConfig() + config.ReadEnvironment() + _, err := NewClient(config) + if err != nil { + if tc.Valid { + t.Fatalf("unexpected error reading headers from environment: %v", err) + } + } else { + if !tc.Valid { + t.Fatal("no error reading headers from environment when error was expected") + } + } + } + + os.Setenv(EnvVaultHeaders, "{\"foo\": \"bar\"}") + config := DefaultConfig() + config.ReadEnvironment() + cli, _ := NewClient(config) + + if !reflect.DeepEqual(cli.Headers().Values("foo"), []string{"bar"}) { + t.Error("Environment-supplied headers not set in CLI client") + } +} + func TestClientEnvSettings(t *testing.T) { cwd, _ := os.Getwd() @@ -590,6 +651,24 @@ func TestClone(t *testing.T) { }, token: "cloneToken", }, + { + name: "cloneTLSConfig-enabled", + config: &Config{ + CloneTLSConfig: true, + clientTLSConfig: &tls.Config{ + ServerName: "foo.bar.baz", + }, + }, + }, + { + name: "cloneTLSConfig-disabled", + config: &Config{ + CloneTLSConfig: false, + clientTLSConfig: &tls.Config{ + ServerName: "foo.bar.baz", + }, + }, + }, } for _, tt := range tests { @@ -698,10 +777,81 @@ func TestClone(t *testing.T) { t.Fatalf("expected replicationStateStore %v, actual %v", parent.replicationStateStore, clone.replicationStateStore) } + if tt.config.CloneTLSConfig { + if !reflect.DeepEqual(parent.config.TLSConfig(), clone.config.TLSConfig()) { + t.Fatalf("config.clientTLSConfig doesn't match: %v vs %v", + parent.config.TLSConfig(), clone.config.TLSConfig()) + } + } else if tt.config.clientTLSConfig != nil { + if reflect.DeepEqual(parent.config.TLSConfig(), clone.config.TLSConfig()) { + t.Fatalf("config.clientTLSConfig should not match: %v vs %v", + parent.config.TLSConfig(), clone.config.TLSConfig()) + } + } else { + if !reflect.DeepEqual(parent.config.TLSConfig(), clone.config.TLSConfig()) { + t.Fatalf("config.clientTLSConfig doesn't match: %v vs %v", + parent.config.TLSConfig(), clone.config.TLSConfig()) + } + } }) } } +// TestCloneWithHeadersNoDeadlock confirms that the cloning of the client doesn't cause +// a deadlock. +// Raised in https://github.com/hashicorp/vault/issues/22393 -- there was a +// potential deadlock caused by running the problematicFunc() function in +// multiple goroutines. 
+func TestCloneWithHeadersNoDeadlock(t *testing.T) { + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + + wg := &sync.WaitGroup{} + + problematicFunc := func() { + client.SetCloneToken(true) + _, err := client.CloneWithHeaders() + if err != nil { + t.Fatal(err) + } + wg.Done() + } + + for i := 0; i < 1000; i++ { + wg.Add(1) + go problematicFunc() + } + wg.Wait() +} + +// TestCloneNoDeadlock is like TestCloneWithHeadersNoDeadlock but with +// Clone instead of CloneWithHeaders +func TestCloneNoDeadlock(t *testing.T) { + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + + wg := &sync.WaitGroup{} + + problematicFunc := func() { + client.SetCloneToken(true) + _, err := client.Clone() + if err != nil { + t.Fatal(err) + } + wg.Done() + } + + for i := 0; i < 1000; i++ { + wg.Add(1) + go problematicFunc() + } + wg.Wait() +} + func TestSetHeadersRaceSafe(t *testing.T) { client, err1 := NewClient(nil) if err1 != nil { @@ -1425,7 +1575,7 @@ func TestParseAddressWithUnixSocket(t *testing.T) { if u.Scheme != "http" { t.Fatal("Scheme not changed to http") } - if u.Host != "/var/run/vault.sock" { + if u.Host != "localhost" { t.Fatal("Host not changed to socket name") } if u.Path != "" { diff --git a/api/go.mod b/api/go.mod index 62288bd371f0..2b273d8a06be 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,36 +1,45 @@ module github.com/hashicorp/vault/api -go 1.19 +// The Go version directive for the api package should normally only be updated when +// code in the api package requires a newer Go version to build. It should not +// automatically track the Go version used to build Vault itself. Many projects import +// the api module and we don't want to impose a newer version on them any more than we +// have to. +go 1.21 -replace github.com/hashicorp/vault/sdk => ../sdk +toolchain go1.21.8 require ( - github.com/cenkalti/backoff/v3 v3.0.0 + github.com/cenkalti/backoff/v4 v4.3.0 + github.com/go-jose/go-jose/v4 v4.0.1 github.com/go-test/deep v1.0.2 github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-hclog v0.16.2 + github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-retryablehttp v0.6.6 + github.com/hashicorp/go-retryablehttp v0.7.7 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/hcl v1.0.0 + github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 - golang.org/x/net v0.7.0 + github.com/natefinch/atomic v1.0.1 + github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.25.0 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 - gopkg.in/square/go-jose.v2 v2.5.1 ) require ( - github.com/fatih/color v1.7.0 // indirect - github.com/google/go-cmp v0.5.7 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.16.0 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/sys 
v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/api/go.sum b/api/go.sum index 808c56f7fe08..03a463b4b439 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,30 +1,32 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= 
-github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= @@ -37,14 +39,16 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -52,6 +56,8 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -59,27 +65,30 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod 
h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/kv.go b/api/kv.go index 20862fbfdf1d..720393254690 100644 --- a/api/kv.go +++ b/api/kv.go @@ -38,7 +38,7 @@ type KVSecret struct { // by default when a server is started in -dev mode. See the kvv2 struct. // // Learn more about the KV secrets engine here: -// https://www.vaultproject.io/docs/secrets/kv +// https://developer.hashicorp.com/vault/docs/secrets/kv func (c *Client) KVv1(mountPath string) *KVv1 { return &KVv1{c: c, mountPath: mountPath} } @@ -53,7 +53,7 @@ func (c *Client) KVv1(mountPath string) *KVv1 { // as these are the default settings when a server is started in -dev mode. // // Learn more about the KV secrets engine here: -// https://www.vaultproject.io/docs/secrets/kv +// https://developer.hashicorp.com/vault/docs/secrets/kv func (c *Client) KVv2(mountPath string) *KVv2 { return &KVv2{c: c, mountPath: mountPath} } diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go index 5c060e5a1509..bdb8fb64b3c9 100644 --- a/api/lifetime_watcher.go +++ b/api/lifetime_watcher.go @@ -6,10 +6,11 @@ package api import ( "errors" "math/rand" + "strings" "sync" "time" - "github.com/cenkalti/backoff/v3" + "github.com/cenkalti/backoff/v4" ) var ( @@ -31,6 +32,7 @@ var ( DefaultRenewerRenewBuffer = 5 ) +//go:generate enumer -type=RenewBehavior -trimprefix=RenewBehavior type RenewBehavior uint const ( @@ -288,12 +290,18 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, switch { case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: // Can't or won't renew, just keep the same expiration so we exit - // when it's reauthentication time + // when it's re-authentication time remainingLeaseDuration = fallbackLeaseDuration default: // Renew the token renewal, err = renew(credString, r.increment) + if err != nil && strings.Contains(err.Error(), "permission denied") { + // We can't renew since the token doesn't have permission to. Fall back + // to the code path for non-renewable tokens. 
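+			// (Behavior sketch: once we fall back, the watcher stops calling
+			// renew, sleeps out the remaining lease duration exactly as it
+			// would for a secret that was never renewable, and then returns
+			// without surfacing the permission error.)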
+ nonRenewable = true + continue + } if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { if r.renewBehavior == RenewBehaviorErrorOnErrors { if err != nil { @@ -349,8 +357,11 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, if errorBackoff == nil { sleepDuration = r.calculateSleepDuration(remainingLeaseDuration, priorDuration) - } else if errorBackoff.NextBackOff() == backoff.Stop { - return err + } else { + sleepDuration = errorBackoff.NextBackOff() + if sleepDuration == backoff.Stop { + return err + } } // remainingLeaseDuration becomes the priorDuration for the next loop diff --git a/api/logical.go b/api/logical.go index 927dd168e440..068e9068f389 100644 --- a/api/logical.go +++ b/api/logical.go @@ -212,6 +212,17 @@ func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[st return c.write(ctx, path, r) } +func (c *Logical) WriteRaw(path string, data []byte) (*Response, error) { + return c.WriteRawWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteRawWithContext(ctx context.Context, path string, data []byte) (*Response, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + r.BodyBytes = data + + return c.writeRaw(ctx, r) +} + func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) r.Headers.Set("Content-Type", "application/merge-patch+json") @@ -261,6 +272,14 @@ func (c *Logical) write(ctx context.Context, path string, request *Request) (*Se return ParseSecret(resp.Body) } +func (c *Logical) writeRaw(ctx context.Context, request *Request) (*Response, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) + return resp, err +} + func (c *Logical) Delete(path string) (*Secret, error) { return c.DeleteWithContext(context.Background(), path) } diff --git a/api/plugin_helpers.go b/api/plugin_helpers.go index 2d6416d70a1c..d330478f5bfe 100644 --- a/api/plugin_helpers.go +++ b/api/plugin_helpers.go @@ -12,13 +12,23 @@ import ( "flag" "net/url" "os" - "regexp" - - squarejwt "gopkg.in/square/go-jose.v2/jwt" + jose "github.com/go-jose/go-jose/v4" + "github.com/go-jose/go-jose/v4/jwt" "github.com/hashicorp/errwrap" ) +// This file contains helper code used when writing Vault auth method or secrets engine plugins. +// +// As such, it would be better located in the sdk module with the rest of the code which is only to support plugins, +// rather than api, but is here for historical reasons. (The api module used to depend on the sdk module, this code +// calls NewClient within the api package, so placing it in the sdk would have created a dependency cycle. This reason +// is now historical, as the dependency between sdk and api has since been reversed in direction.) +// Moving this code to the sdk would be appropriate if an api v2.0.0 release is ever planned. +// +// This helper code is used when a plugin is hosted by Vault 1.11 and earlier. Vault 1.12 and sdk v0.6.0 introduced +// version 5 of the backend plugin interface, which uses go-plugin's AutoMTLS feature instead of this code. + const ( // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override // setting a TLSProviderFunc for a plugin. @@ -31,51 +41,12 @@ const ( // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the // plugin. 
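+	// (The unwrap token placed in this variable is consumed by
+	// VaultPluginTLSProviderContext below, which parses it as a JWT
+	// signed with CubbyHoleJWTSignatureAlgorithm.)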
PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" -) -// sudoPaths is a map containing the paths that require a token's policy -// to have the "sudo" capability. The keys are the paths as strings, in -// the same format as they are returned by the OpenAPI spec. The values -// are the regular expressions that can be used to test whether a given -// path matches that path or not (useful specifically for the paths that -// contain templated fields.) -var sudoPaths = map[string]*regexp.Regexp{ - "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/?$`), - "/pki/root": regexp.MustCompile(`^/pki/root$`), - "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), - "/sys/audit": regexp.MustCompile(`^/sys/audit$`), - "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), - "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), - "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), - "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), - "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), - "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), - "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/?$`), - "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), - "/sys/leases": regexp.MustCompile(`^/sys/leases$`), - "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/?$`), - "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), - "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), - "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), - "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), - "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), - "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), - "/sys/raw": regexp.MustCompile(`^/sys/raw$`), - "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), - "/sys/remount": regexp.MustCompile(`^/sys/remount$`), - "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), - "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), - "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), - "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), - - // enterprise-only paths - "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), - "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), - "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), - "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), - "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`), - "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), -} + // CubbyHoleJWTSignatureAlgorithm is the signature algorithm used for + // the unwrap token that Vault passes to a plugin when auto-mTLS is + // not enabled. + CubbyHoleJWTSignatureAlgorithm = jose.ES512 +) // PluginAPIClientMeta is a helper that plugins can use to configure TLS connections // back to Vault. 
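+// A minimal usage sketch from a plugin's main function (flag values and
+// error handling elided; names are illustrative):
+//
+//	meta := &api.PluginAPIClientMeta{}
+//	flags := meta.FlagSet()
+//	_ = flags.Parse(os.Args[1:])
+//	tlsProviderFunc := api.VaultPluginTLSProvider(meta.GetTLSConfig())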
@@ -85,6 +56,7 @@ type PluginAPIClientMeta struct { flagCAPath string flagClientCert string flagClientKey string + flagServerName string flagInsecure bool } @@ -96,6 +68,7 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { fs.StringVar(&f.flagCAPath, "ca-path", "", "") fs.StringVar(&f.flagClientCert, "client-cert", "", "") fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.StringVar(&f.flagServerName, "tls-server-name", "", "") fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") return fs @@ -104,13 +77,13 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { // GetTLSConfig will return a TLSConfig based off the values from the flags func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { // If we need custom TLS configuration, then set it - if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure || f.flagServerName != "" { t := &TLSConfig{ CACert: f.flagCACert, CAPath: f.flagCAPath, ClientCert: f.flagClientCert, ClientKey: f.flagClientKey, - TLSServerName: "", + TLSServerName: f.flagServerName, Insecure: f.flagInsecure, } @@ -135,7 +108,7 @@ func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) return func() (*tls.Config, error) { unwrapToken := os.Getenv(PluginUnwrapTokenEnv) - parsedJWT, err := squarejwt.ParseSigned(unwrapToken) + parsedJWT, err := jwt.ParseSigned(unwrapToken, []jose.SignatureAlgorithm{CubbyHoleJWTSignatureAlgorithm}) if err != nil { return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) } @@ -244,28 +217,3 @@ func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) return tlsConfig, nil } } - -func SudoPaths() map[string]*regexp.Regexp { - return sudoPaths -} - -// Determine whether the given path requires the sudo capability -func IsSudoPath(path string) bool { - // Return early if the path is any of the non-templated sudo paths. - if _, ok := sudoPaths[path]; ok { - return true - } - - // Some sudo paths have templated fields in them. - // (e.g. /sys/revoke-prefix/{prefix}) - // The values in the sudoPaths map are actually regular expressions, - // so we can check if our path matches against them. - for _, sudoPathRegexp := range sudoPaths { - match := sudoPathRegexp.MatchString(path) - if match { - return true - } - } - - return false -} diff --git a/api/plugin_helpers_test.go b/api/plugin_helpers_test.go deleted file mode 100644 index 7b3ddbf8154a..000000000000 --- a/api/plugin_helpers_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import "testing" - -func TestIsSudoPath(t *testing.T) { - t.Parallel() - - testCases := []struct { - path string - expected bool - }{ - { - "/not/in/sudo/paths/list", - false, - }, - { - "/sys/raw/single-node-path", - true, - }, - { - "/sys/raw/multiple/nodes/path", - true, - }, - { - "/sys/raw/WEIRD(but_still_valid!)p4Th?🗿笑", - true, - }, - { - "/sys/auth/path/in/middle/tune", - true, - }, - { - "/sys/plugins/catalog/some-type", - true, - }, - { - "/sys/plugins/catalog/some/type/or/name/with/slashes", - false, - }, - { - "/sys/plugins/catalog/some-type/some-name", - true, - }, - { - "/sys/plugins/catalog/some-type/some/name/with/slashes", - false, - }, - } - - for _, tc := range testCases { - result := IsSudoPath(tc.path) - if result != tc.expected { - t.Fatalf("expected api.IsSudoPath to return %v for path %s but it returned %v", tc.expected, tc.path, result) - } - } -} diff --git a/api/plugin_runtime_types.go b/api/plugin_runtime_types.go new file mode 100644 index 000000000000..2514f1279db1 --- /dev/null +++ b/api/plugin_runtime_types.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_runtime_types.go +// Any changes made should be made to both files at the same time. + +import "fmt" + +var PluginRuntimeTypes = _PluginRuntimeTypeValues + +//go:generate enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake +type PluginRuntimeType uint32 + +// This is a list of PluginRuntimeTypes used by Vault. +const ( + PluginRuntimeTypeUnsupported PluginRuntimeType = iota + PluginRuntimeTypeContainer +) + +// ParsePluginRuntimeType is a wrapper around PluginRuntimeTypeString kept for backwards compatibility. +func ParsePluginRuntimeType(PluginRuntimeType string) (PluginRuntimeType, error) { + t, err := PluginRuntimeTypeString(PluginRuntimeType) + if err != nil { + return PluginRuntimeTypeUnsupported, fmt.Errorf("%q is not a supported plugin runtime type", PluginRuntimeType) + } + return t, nil +} diff --git a/api/plugin_types.go b/api/plugin_types.go index 4c759a2decc5..c8f69ae404f2 100644 --- a/api/plugin_types.go +++ b/api/plugin_types.go @@ -7,7 +7,10 @@ package api // https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go // Any changes made should be made to both files at the same time. -import "fmt" +import ( + "encoding/json" + "fmt" +) var PluginTypes = []PluginType{ PluginTypeUnknown, @@ -64,3 +67,34 @@ func ParsePluginType(pluginType string) (PluginType, error) { return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) } } + +// UnmarshalJSON implements json.Unmarshaler. It supports unmarshaling either a +// string or a uint32. All new serialization will be as a string, but we +// previously serialized as a uint32 so we need to support that for backwards +// compatibility. 
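+// For example, both of the following decode to PluginTypeCredential:
+//
+//	{"plugin_type":"auth"}
+//	{"plugin_type":1}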
+func (p *PluginType) UnmarshalJSON(data []byte) error { + var asString string + err := json.Unmarshal(data, &asString) + if err == nil { + *p, err = ParsePluginType(asString) + return err + } + + var asUint32 uint32 + err = json.Unmarshal(data, &asUint32) + if err != nil { + return err + } + *p = PluginType(asUint32) + switch *p { + case PluginTypeUnknown, PluginTypeCredential, PluginTypeDatabase, PluginTypeSecrets: + return nil + default: + return fmt.Errorf("%d is not a supported plugin type", asUint32) + } +} + +// MarshalJSON implements json.Marshaler. +func (p PluginType) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} diff --git a/api/plugin_types_test.go b/api/plugin_types_test.go new file mode 100644 index 000000000000..0b6085379b43 --- /dev/null +++ b/api/plugin_types_test.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types_test.go +// Any changes made should be made to both files at the same time. + +import ( + "encoding/json" + "testing" +) + +type testType struct { + PluginType PluginType `json:"plugin_type"` +} + +func TestPluginTypeJSONRoundTrip(t *testing.T) { + for _, pluginType := range PluginTypes { + original := testType{ + PluginType: pluginType, + } + asBytes, err := json.Marshal(original) + if err != nil { + t.Fatal(err) + } + + var roundTripped testType + err = json.Unmarshal(asBytes, &roundTripped) + if err != nil { + t.Fatal(err) + } + + if original != roundTripped { + t.Fatalf("expected %v, got %v", original, roundTripped) + } + } +} + +func TestPluginTypeJSONUnmarshal(t *testing.T) { + // Failure/unsupported cases. + for name, tc := range map[string]string{ + "unsupported": `{"plugin_type":"unsupported"}`, + "random string": `{"plugin_type":"foo"}`, + "boolean": `{"plugin_type":true}`, + "empty": `{"plugin_type":""}`, + "negative": `{"plugin_type":-1}`, + "out of range": `{"plugin_type":10}`, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc), &result) + if err == nil { + t.Fatal("expected error") + } + }) + } + + // Valid cases. 
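+	// Note that the zero value round-trips cleanly: an absent field, the
+	// string "unknown", and the integer 0 all decode to PluginTypeUnknown.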
+ for name, tc := range map[string]struct { + json string + expected PluginType + }{ + "unknown": {`{"plugin_type":"unknown"}`, PluginTypeUnknown}, + "auth": {`{"plugin_type":"auth"}`, PluginTypeCredential}, + "secret": {`{"plugin_type":"secret"}`, PluginTypeSecrets}, + "database": {`{"plugin_type":"database"}`, PluginTypeDatabase}, + "absent": {`{}`, PluginTypeUnknown}, + "integer unknown": {`{"plugin_type":0}`, PluginTypeUnknown}, + "integer auth": {`{"plugin_type":1}`, PluginTypeCredential}, + "integer db": {`{"plugin_type":2}`, PluginTypeDatabase}, + "integer secret": {`{"plugin_type":3}`, PluginTypeSecrets}, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc.json), &result) + if err != nil { + t.Fatal(err) + } + if tc.expected != result.PluginType { + t.Fatalf("expected %v, got %v", tc.expected, result.PluginType) + } + }) + } +} + +func TestUnknownTypeExcludedWithOmitEmpty(t *testing.T) { + type testTypeOmitEmpty struct { + Type PluginType `json:"type,omitempty"` + } + bytes, err := json.Marshal(testTypeOmitEmpty{}) + if err != nil { + t.Fatal(err) + } + m := map[string]any{} + json.Unmarshal(bytes, &m) + if _, exists := m["type"]; exists { + t.Fatal("type should not be present") + } +} diff --git a/api/pluginruntimetype_enumer.go b/api/pluginruntimetype_enumer.go new file mode 100644 index 000000000000..663f440ff446 --- /dev/null +++ b/api/pluginruntimetype_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake"; DO NOT EDIT. + +package api + +import ( + "fmt" +) + +const _PluginRuntimeTypeName = "unsupportedcontainer" + +var _PluginRuntimeTypeIndex = [...]uint8{0, 11, 20} + +func (i PluginRuntimeType) String() string { + if i >= PluginRuntimeType(len(_PluginRuntimeTypeIndex)-1) { + return fmt.Sprintf("PluginRuntimeType(%d)", i) + } + return _PluginRuntimeTypeName[_PluginRuntimeTypeIndex[i]:_PluginRuntimeTypeIndex[i+1]] +} + +var _PluginRuntimeTypeValues = []PluginRuntimeType{0, 1} + +var _PluginRuntimeTypeNameToValueMap = map[string]PluginRuntimeType{ + _PluginRuntimeTypeName[0:11]: 0, + _PluginRuntimeTypeName[11:20]: 1, +} + +// PluginRuntimeTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func PluginRuntimeTypeString(s string) (PluginRuntimeType, error) { + if val, ok := _PluginRuntimeTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to PluginRuntimeType values", s) +} + +// PluginRuntimeTypeValues returns all values of the enum +func PluginRuntimeTypeValues() []PluginRuntimeType { + return _PluginRuntimeTypeValues +} + +// IsAPluginRuntimeType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i PluginRuntimeType) IsAPluginRuntimeType() bool { + for _, v := range _PluginRuntimeTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/api/renewbehavior_enumer.go b/api/renewbehavior_enumer.go new file mode 100644 index 000000000000..9b272e3e0cec --- /dev/null +++ b/api/renewbehavior_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=RenewBehavior -trimprefix=RenewBehavior"; DO NOT EDIT. 
+ +package api + +import ( + "fmt" +) + +const _RenewBehaviorName = "IgnoreErrorsRenewDisabledErrorOnErrors" + +var _RenewBehaviorIndex = [...]uint8{0, 12, 25, 38} + +func (i RenewBehavior) String() string { + if i >= RenewBehavior(len(_RenewBehaviorIndex)-1) { + return fmt.Sprintf("RenewBehavior(%d)", i) + } + return _RenewBehaviorName[_RenewBehaviorIndex[i]:_RenewBehaviorIndex[i+1]] +} + +var _RenewBehaviorValues = []RenewBehavior{0, 1, 2} + +var _RenewBehaviorNameToValueMap = map[string]RenewBehavior{ + _RenewBehaviorName[0:12]: 0, + _RenewBehaviorName[12:25]: 1, + _RenewBehaviorName[25:38]: 2, +} + +// RenewBehaviorString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func RenewBehaviorString(s string) (RenewBehavior, error) { + if val, ok := _RenewBehaviorNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to RenewBehavior values", s) +} + +// RenewBehaviorValues returns all values of the enum +func RenewBehaviorValues() []RenewBehavior { + return _RenewBehaviorValues +} + +// IsARenewBehavior returns "true" if the value is listed in the enum definition. "false" otherwise +func (i RenewBehavior) IsARenewBehavior() bool { + for _, v := range _RenewBehaviorValues { + if i == v { + return true + } + } + return false +} diff --git a/api/renewer_test.go b/api/renewer_test.go index 7ba16e66eca2..1c9a5d03e2d2 100644 --- a/api/renewer_test.go +++ b/api/renewer_test.go @@ -177,6 +177,20 @@ func TestLifetimeWatcher(t *testing.T) { expectError: nil, expectRenewal: true, }, + { + maxTestTime: time.Second, + name: "permission_denied_error", + leaseDurationSeconds: 60, + incrementSeconds: 10, + // This should cause the lifetime watcher to behave just + // like a non-renewable secret, i.e. wait until its lifetime + // then be done. + renew: func(_ string, _ int) (*Secret, error) { + return nil, fmt.Errorf("permission denied") + }, + expectError: nil, + expectRenewal: false, + }, } for _, tc := range cases { @@ -204,7 +218,9 @@ func TestLifetimeWatcher(t *testing.T) { for { select { case <-time.After(tc.maxTestTime): - t.Fatalf("renewal didn't happen") + if tc.expectRenewal || tc.expectError != nil { + t.Fatalf("expected error or renewal, and neither happened") + } case r := <-v.RenewCh(): if !tc.expectRenewal { t.Fatal("expected no renewals") diff --git a/api/replication_status.go b/api/replication_status.go new file mode 100644 index 000000000000..d7224ff11d1c --- /dev/null +++ b/api/replication_status.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +const ( + apiRepPerformanceStatusPath = "/v1/sys/replication/performance/status" + apiRepDRStatusPath = "/v1/sys/replication/dr/status" + apiRepStatusPath = "/v1/sys/replication/status" +) + +type ClusterInfo struct { + APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"` + ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"` + ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"` + LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"` + LastHeartBeatDurationMillis string `json:"last_heartbeat_duration_ms,omitempty" mapstructure:"last_heartbeat_duration_ms"` + ClockSkewMillis string `json:"clock_skew_ms,omitempty" mapstructure:"clock_skew_ms"` + NodeID string `json:"node_id,omitempty" mapstructure:"node_id"` + ReplicationPrimaryCanaryAgeMillis string `json:"replication_primary_canary_age_ms,omitempty" mapstructure:"replication_primary_canary_age_ms"` +} + +type ReplicationStatusGenericResponse struct { + LastDRWAL uint64 `json:"last_dr_wal,omitempty" mapstructure:"last_dr_wal"` + LastReindexEpoch string `json:"last_reindex_epoch,omitempty" mapstructure:"last_reindex_epoch"` + ClusterID string `json:"cluster_id,omitempty" mapstructure:"cluster_id"` + LastWAL uint64 `json:"last_wal,omitempty" mapstructure:"last_wal"` + MerkleRoot string `json:"merkle_root,omitempty" mapstructure:"merkle_root"` + Mode string `json:"mode,omitempty" mapstructure:"mode"` + PrimaryClusterAddr string `json:"primary_cluster_addr,omitempty" mapstructure:"primary_cluster_addr"` + LastPerformanceWAL uint64 `json:"last_performance_wal,omitempty" mapstructure:"last_performance_wal"` + State string `json:"state,omitempty" mapstructure:"state"` + LastRemoteWAL uint64 `json:"last_remote_wal,omitempty" mapstructure:"last_remote_wal"` + SecondaryID string `json:"secondary_id,omitempty" mapstructure:"secondary_id"` + SSCTGenerationCounter uint64 `json:"ssct_generation_counter,omitempty" mapstructure:"ssct_generation_counter"` + + KnownSecondaries []string `json:"known_secondaries,omitempty" mapstructure:"known_secondaries"` + KnownPrimaryClusterAddrs []string `json:"known_primary_cluster_addrs,omitempty" mapstructure:"known_primary_cluster_addrs"` + Primaries []ClusterInfo `json:"primaries,omitempty" mapstructure:"primaries"` + Secondaries []ClusterInfo `json:"secondaries,omitempty" mapstructure:"secondaries"` +} + +type ReplicationStatusResponse struct { + DR ReplicationStatusGenericResponse `json:"dr,omitempty" mapstructure:"dr"` + Performance ReplicationStatusGenericResponse `json:"performance,omitempty" mapstructure:"performance"` +} + +func (c *Sys) ReplicationStatus() (*ReplicationStatusResponse, error) { + return c.ReplicationStatusWithContext(context.Background(), apiRepStatusPath) +} + +func (c *Sys) ReplicationPerformanceStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { + s, err := c.ReplicationStatusWithContext(ctx, apiRepPerformanceStatusPath) + if err != nil { + return nil, err + } + + return &s.Performance, nil +} + +func (c *Sys) ReplicationDRStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { + s, err := c.ReplicationStatusWithContext(ctx, apiRepDRStatusPath) + if err != nil { + return nil, err + } + + return &s.DR, nil +} + +func (c *Sys) 
ReplicationStatusWithContext(ctx context.Context, path string) (*ReplicationStatusResponse, error) { + // default to replication/status + if path == "" { + path = apiRepStatusPath + } + + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + // First decode response into a map[string]interface{} + data := make(map[string]interface{}) + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return nil, err + } + + rawData, ok := data["data"] + if !ok { + return nil, fmt.Errorf("empty data in replication status response") + } + + s := &ReplicationStatusResponse{} + g := &ReplicationStatusGenericResponse{} + switch { + case path == apiRepPerformanceStatusPath: + err = mapstructure.Decode(rawData, g) + if err != nil { + return nil, err + } + s.Performance = *g + case path == apiRepDRStatusPath: + err = mapstructure.Decode(rawData, g) + if err != nil { + return nil, err + } + s.DR = *g + default: + err = mapstructure.Decode(rawData, s) + if err != nil { + return nil, err + } + return s, err + } + + return s, err +} diff --git a/api/request.go b/api/request.go index ecf783701ad4..c0c8dea73449 100644 --- a/api/request.go +++ b/api/request.go @@ -7,7 +7,6 @@ import ( "bytes" "encoding/json" "io" - "io/ioutil" "net/http" "net/url" @@ -39,6 +38,9 @@ type Request struct { // EGPs). If set, the override flag will take effect for all policies // evaluated during the request. PolicyOverride bool + + // HCPCookie is used to set a http cookie when client is connected to HCP + HCPCookie *http.Cookie } // SetJSONBody is used to set a request body that is a JSON-encoded value. @@ -74,13 +76,13 @@ func (r *Request) ToHTTP() (*http.Request, error) { // No body case r.BodyBytes != nil: - req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + req.Request.Body = io.NopCloser(bytes.NewReader(r.BodyBytes)) default: if c, ok := r.Body.(io.ReadCloser); ok { req.Request.Body = c } else { - req.Request.Body = ioutil.NopCloser(r.Body) + req.Request.Body = io.NopCloser(r.Body) } } @@ -145,5 +147,9 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { req.Header.Set("X-Vault-Policy-Override", "true") } + if r.HCPCookie != nil { + req.AddCookie(r.HCPCookie) + } + return req, nil } diff --git a/api/response.go b/api/response.go index 2842c125514a..23246bf7165e 100644 --- a/api/response.go +++ b/api/response.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" ) @@ -44,7 +43,7 @@ func (r *Response) Error() error { } r.Body.Close() - r.Body = ioutil.NopCloser(bodyBuf) + r.Body = io.NopCloser(bodyBuf) ns := r.Header.Get(NamespaceHeaderName) // Build up the error object diff --git a/api/secret.go b/api/secret.go index be159d706763..7df9f66a4da7 100644 --- a/api/secret.go +++ b/api/secret.go @@ -42,6 +42,10 @@ type Secret struct { // cubbyhole of the given token (which has a TTL of the given number of // seconds) WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` + + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. + MountType string `json:"mount_type,omitempty"` } // TokenID returns the standardized token ID (token) for the given secret. 
@@ -150,8 +154,12 @@ TOKEN_DONE: // Identity policies { - _, ok := s.Data["identity_policies"] - if !ok { + v, ok := s.Data["identity_policies"] + if !ok || v == nil { + goto DONE + } + + if s.Data["identity_policies"] == nil { goto DONE } diff --git a/api/secret_test.go b/api/secret_test.go new file mode 100644 index 000000000000..9fa20e1a9cf1 --- /dev/null +++ b/api/secret_test.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "testing" +) + +func TestTokenPolicies(t *testing.T) { + var s *Secret + + // Verify some of the short-circuit paths in the function + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s = &Secret{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth = &SecretAuth{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth.Policies = []string{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth.Policies = []string{"test"} + + if policies, err := s.TokenPolicies(); policies == nil { + t.Error("policies was nil") + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth = nil + s.Data = make(map[string]interface{}) + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + // Verify that s.Data["policies"] are properly processed + { + policyList := make([]string, 0) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + policyList = append(policyList, "policy1", "policy2") + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policyList) != 2 { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + } + + // Do it again but with an interface{} slice + { + s.Auth = nil + policyList := make([]interface{}, 0) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + policyItems := make([]interface{}, 2) + policyItems[0] = "policy1" + policyItems[1] = "policy2" + + policyList = append(policyList, policyItems...) 
+ s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != 2 { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + s.Auth = nil + s.Data["policies"] = 7.0 + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Auth = nil + s.Data["policies"] = []int{2, 3, 5, 8, 13} + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + } + + s.Auth = nil + s.Data["policies"] = nil + + if policies, err := s.TokenPolicies(); err != nil { + t.Errorf("err was not nil, got %v", err) + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + // Verify that logic that merges s.Data["policies"] and s.Data["identity_policies"] works + { + policyList := []string{"policy1", "policy2", "policy3"} + s.Data["policies"] = policyList[:1] + s.Data["identity_policies"] = "not_a_slice" + s.Auth = nil + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Data["identity_policies"] = policyList[1:] + + if policies, err := s.TokenPolicies(); len(policyList) != len(policies) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + } + + // Do it again but with an interface{} slice + { + policyList := []interface{}{"policy1", "policy2", "policy3"} + s.Data["policies"] = policyList[:1] + s.Data["identity_policies"] = "not_a_slice" + s.Auth = nil + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Data["identity_policies"] = policyList[1:] + + if policies, err := s.TokenPolicies(); len(policyList) != len(policies) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + s.Auth = nil + s.Data["identity_policies"] = []int{2, 3, 5, 8, 13} + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + } + + s.Auth = nil + s.Data["policies"] = []string{"policy1"} + s.Data["identity_policies"] = nil + + if policies, err := s.TokenPolicies(); err != nil { + t.Errorf("err was not nil, got %v", err) + } else if len(policies) != 1 { + t.Errorf("expecting policies length %d, got %d", 1, len(policies)) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } +} diff --git a/api/sudo_paths.go b/api/sudo_paths.go new file mode 100644 index 000000000000..d458cbde0f45 --- /dev/null +++ b/api/sudo_paths.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "regexp" +) + +// sudoPaths is a map containing the paths that require a token's policy +// to have the "sudo" capability. 
The keys are the paths as strings, in +// the same format as they are returned by the OpenAPI spec. The values +// are the regular expressions that can be used to test whether a given +// path matches that path or not (useful specifically for the paths that +// contain templated fields.) +var sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors": regexp.MustCompile(`^/auth/token/accessors/?$`), + "/auth/token/revoke-orphan": regexp.MustCompile(`^/auth/token/revoke-orphan$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + "/sys/internal/counters/activity/export": regexp.MustCompile(`^/sys/internal/counters/activity/export$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + // This entry is a bit wrong... sys/leases/lookup does NOT require sudo. But sys/leases/lookup/ with a trailing + // slash DOES require sudo. But the part of the Vault CLI that uses this logic doesn't pass operation-appropriate + // trailing slashes, it always strips them off, so we end up giving the wrong answer for one of these. 
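+	// For example, both "/sys/leases/lookup" and "/sys/leases/lookup/foo"
+	// match the regexp below, so IsSudoPath returns true for both, even
+	// though only the latter actually requires sudo.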
+ "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup(?:/.+)?$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/plugins/runtimes/catalog": regexp.MustCompile(`^/sys/plugins/runtimes/catalog/?$`), + "/sys/plugins/runtimes/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/runtimes/catalog/[\w-]+/[^/]+$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw(?:/.+)?$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + "/sys/seal": regexp.MustCompile(`^/sys/seal$`), + "/sys/step-down": regexp.MustCompile(`^/sys/step-down$`), + + // enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`), + "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), +} + +func SudoPaths() map[string]*regexp.Regexp { + return sudoPaths +} + +// Determine whether the given path requires the sudo capability. +// Note that this uses hardcoded static path information, so will return incorrect results for paths in namespaces, +// or for secret engines mounted at non-default paths. +// Expects to receive a path with an initial slash, but no trailing slashes, as the Vault CLI (the only known and +// expected user of this function) sanitizes its paths that way. +func IsSudoPath(path string) bool { + // Return early if the path is any of the non-templated sudo paths. + if _, ok := sudoPaths[path]; ok { + return true + } + + // Some sudo paths have templated fields in them. + // (e.g. /sys/revoke-prefix/{prefix}) + // The values in the sudoPaths map are actually regular expressions, + // so we can check if our path matches against them. + for _, sudoPathRegexp := range sudoPaths { + match := sudoPathRegexp.MatchString(path) + if match { + return true + } + } + + return false +} diff --git a/api/sudo_paths_test.go b/api/sudo_paths_test.go new file mode 100644 index 000000000000..b23af7067fc9 --- /dev/null +++ b/api/sudo_paths_test.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import "testing" + +func TestIsSudoPath(t *testing.T) { + t.Parallel() + + testCases := []struct { + path string + expected bool + }{ + // Testing: Not a real endpoint + { + "/not/in/sudo/paths/list", + false, + }, + // Testing: sys/raw/{path} + { + "/sys/raw/single-node-path", + true, + }, + { + "/sys/raw/multiple/nodes/path", + true, + }, + { + "/sys/raw/WEIRD(but_still_valid!)p4Th?🗿笑", + true, + }, + // Testing: sys/auth/{path}/tune + { + "/sys/auth/path/in/middle/tune", + true, + }, + // Testing: sys/plugins/catalog/{type} and sys/plugins/catalog/{name} (regexes overlap) + { + "/sys/plugins/catalog/some-type", + true, + }, + // Testing: Not a real endpoint + { + "/sys/plugins/catalog/some/type/or/name/with/slashes", + false, + }, + // Testing: sys/plugins/catalog/{type}/{name} + { + "/sys/plugins/catalog/some-type/some-name", + true, + }, + // Testing: Not a real endpoint + { + "/sys/plugins/catalog/some-type/some/name/with/slashes", + false, + }, + // Testing: sys/plugins/runtimes/catalog/{type}/{name} + { + "/sys/plugins/runtimes/catalog/some-type/some-name", + true, + }, + // Testing: auth/token/accessors (an example of a sudo path that only accepts list operations) + // It is matched as sudo without the trailing slash... + { + "/auth/token/accessors", + true, + }, + // ...and also with it. + // (Although at the time of writing, the only caller of IsSudoPath always removes trailing slashes.) + { + "/auth/token/accessors/", + true, + }, + } + + for _, tc := range testCases { + result := IsSudoPath(tc.path) + if result != tc.expected { + t.Fatalf("expected api.IsSudoPath to return %v for path %s but it returned %v", tc.expected, tc.path, result) + } + } +} diff --git a/api/sys_auth.go b/api/sys_auth.go index e814412191f3..67beb63db21c 100644 --- a/api/sys_auth.go +++ b/api/sys_auth.go @@ -12,6 +12,41 @@ import ( "github.com/mitchellh/mapstructure" ) +func (c *Sys) GetAuth(path string) (*AuthMount, error) { + return c.GetAuthWithContext(context.Background(), path) +} + +func (c *Sys) GetAuthWithContext(ctx context.Context, path string) (*AuthMount, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + // use `sys/mounts/auth/:path` so we don't require sudo permissions + // historically, `sys/auth` doesn't require sudo, so we don't require it here either + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/auth/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mount := AuthMount{} + err = mapstructure.Decode(secret.Data, &mount) + if err != nil { + return nil, err + } + + return &mount, nil +} + func (c *Sys) ListAuth() (map[string]*AuthMount, error) { return c.ListAuthWithContext(context.Background()) } diff --git a/api/sys_capabilities.go b/api/sys_capabilities.go index 6310d42fcf46..d57b75711753 100644 --- a/api/sys_capabilities.go +++ b/api/sys_capabilities.go @@ -78,3 +78,56 @@ func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ( return res, nil } + +func (c *Sys) CapabilitiesAccessor(accessor, path string) ([]string, error) { + return c.CapabilitiesAccessorWithContext(context.Background(), accessor, path) +} + +func (c *Sys) CapabilitiesAccessorWithContext(ctx context.Context, 
accessor, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "accessor": accessor, + "path": path, + } + + reqPath := "/v1/sys/capabilities-accessor" + + r := c.c.NewRequest(http.MethodPost, reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/api/sys_hastatus.go b/api/sys_hastatus.go index 2b2aa7c3e980..2e54651e1758 100644 --- a/api/sys_hastatus.go +++ b/api/sys_hastatus.go @@ -35,12 +35,15 @@ type HAStatusResponse struct { } type HANode struct { - Hostname string `json:"hostname"` - APIAddress string `json:"api_address"` - ClusterAddress string `json:"cluster_address"` - ActiveNode bool `json:"active_node"` - LastEcho *time.Time `json:"last_echo"` - Version string `json:"version"` - UpgradeVersion string `json:"upgrade_version,omitempty"` - RedundancyZone string `json:"redundancy_zone,omitempty"` + Hostname string `json:"hostname"` + APIAddress string `json:"api_address"` + ClusterAddress string `json:"cluster_address"` + ActiveNode bool `json:"active_node"` + LastEcho *time.Time `json:"last_echo"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` + Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version,omitempty"` + RedundancyZone string `json:"redundancy_zone,omitempty"` + ReplicationPrimaryCanaryAgeMillis int64 `json:"replication_primary_canary_age_ms"` } diff --git a/api/sys_health.go b/api/sys_health.go index 13fd8d4d3743..6868b96d77a0 100644 --- a/api/sys_health.go +++ b/api/sys_health.go @@ -38,15 +38,19 @@ func (c *Sys) HealthWithContext(ctx context.Context) (*HealthResponse, error) { } type HealthResponse struct { - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - Standby bool `json:"standby"` - PerformanceStandby bool `json:"performance_standby"` - ReplicationPerformanceMode string `json:"replication_performance_mode"` - ReplicationDRMode string `json:"replication_dr_mode"` - ServerTimeUTC int64 `json:"server_time_utc"` - Version string `json:"version"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - LastWAL uint64 `json:"last_wal,omitempty"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` + Enterprise bool `json:"enterprise"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` + 
ReplicationPrimaryCanaryAgeMillis int64 `json:"replication_primary_canary_age_ms"` } diff --git a/api/sys_mounts.go b/api/sys_mounts.go index a6c2a0f5412e..64529986af6a 100644 --- a/api/sys_mounts.go +++ b/api/sys_mounts.go @@ -13,6 +13,39 @@ import ( "github.com/mitchellh/mapstructure" ) +func (c *Sys) GetMount(path string) (*MountOutput, error) { + return c.GetMountWithContext(context.Background(), path) +} + +func (c *Sys) GetMountWithContext(ctx context.Context, path string) (*MountOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mount := MountOutput{} + err = mapstructure.Decode(secret.Data, &mount) + if err != nil { + return nil, err + } + + return &mount, nil +} + func (c *Sys) ListMounts() (map[string]*MountOutput, error) { return c.ListMountsWithContext(context.Background()) } @@ -271,6 +304,9 @@ type MountConfigInput struct { AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` PluginVersion string `json:"plugin_version,omitempty"` UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` + DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"` + IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"` + // Deprecated: This field will always be blank for newer server responses. PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } @@ -303,6 +339,9 @@ type MountConfigOutput struct { TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` + DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"` + IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"` + // Deprecated: This field will always be blank for newer server responses. 
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } diff --git a/api/sys_plugins.go b/api/sys_plugins.go index 2ee024d9defc..9d424d009ec9 100644 --- a/api/sys_plugins.go +++ b/api/sys_plugins.go @@ -36,6 +36,8 @@ type ListPluginsResponse struct { type PluginDetails struct { Type string `json:"type"` Name string `json:"name"` + OCIImage string `json:"oci_image,omitempty" mapstructure:"oci_image"` + Runtime string `json:"runtime,omitempty"` Version string `json:"version,omitempty"` Builtin bool `json:"builtin"` DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` @@ -146,6 +148,8 @@ type GetPluginResponse struct { Command string `json:"command"` Name string `json:"name"` SHA256 string `json:"sha256"` + OCIImage string `json:"oci_image,omitempty"` + Runtime string `json:"runtime,omitempty"` DeprecationStatus string `json:"deprecation_status,omitempty"` Version string `json:"version,omitempty"` } @@ -201,6 +205,16 @@ type RegisterPluginInput struct { // Version is the optional version of the plugin being registered Version string `json:"version,omitempty"` + + // OCIImage specifies the container image to run as a plugin. + OCIImage string `json:"oci_image,omitempty"` + + // Runtime is the Vault plugin runtime to use when running the plugin. + Runtime string `json:"runtime,omitempty"` + + // Env specifies a list of key=value pairs to add to the plugin's environment + // variables. + Env []string `json:"env,omitempty"` } // RegisterPlugin wraps RegisterPluginWithContext using context.Background. @@ -260,6 +274,22 @@ func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPlug return err } +// RootReloadPluginInput is used as input to the RootReloadPlugin function. +type RootReloadPluginInput struct { + Plugin string `json:"-"` // Plugin name, as registered in the plugin catalog. + Type PluginType `json:"-"` // Plugin type: auth, secret, or database. + Scope string `json:"scope,omitempty"` // Empty to reload on current node, "global" for all nodes. +} + +// RootReloadPlugin reloads plugins, possibly returning reloadID for a global +// scoped reload. This is only available in the root namespace, and reloads +// plugins across all namespaces, whereas ReloadPlugin is available in all +// namespaces but only reloads plugins in use in the request's namespace. +func (c *Sys) RootReloadPlugin(ctx context.Context, i *RootReloadPluginInput) (string, error) { + path := fmt.Sprintf("/v1/sys/plugins/reload/%s/%s", i.Type.String(), i.Plugin) + return c.reloadPluginInternal(ctx, path, i, i.Scope == "global") +} + // ReloadPluginInput is used as input to the ReloadPlugin function. type ReloadPluginInput struct { // Plugin is the name of the plugin to reload, as registered in the plugin catalog @@ -278,15 +308,20 @@ func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { } // ReloadPluginWithContext reloads mounted plugin backends, possibly returning -// reloadId for a cluster scoped reload +// reloadID for a cluster scoped reload. It is limited to reloading plugins that +// are in use in the request's namespace. See RootReloadPlugin for an API that +// can reload plugins across all namespaces. 
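+// +// A brief, hedged usage sketch (the client value, ctx, and plugin name are illustrative assumptions, not part of this change): +// +// reloadID, err := client.Sys().ReloadPluginWithContext(ctx, &api.ReloadPluginInput{ +// Plugin: "my-secrets-plugin", // hypothetical plugin name +// Scope: "global", // cluster-wide reload; a reload ID is returned +// })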
func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + return c.reloadPluginInternal(ctx, "/v1/sys/plugins/reload/backend", i, i.Scope == "global") +} + +func (c *Sys) reloadPluginInternal(ctx context.Context, path string, body any, global bool) (string, error) { ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - path := "/v1/sys/plugins/reload/backend" req := c.c.NewRequest(http.MethodPut, path) - if err := req.SetJSONBody(i); err != nil { + if err := req.SetJSONBody(body); err != nil { return "", err } @@ -296,7 +331,7 @@ func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) } defer resp.Body.Close() - if i.Scope == "global" { + if global { // Get the reload id secret, parseErr := ParseSecret(resp.Body) if parseErr != nil { diff --git a/api/sys_plugins_runtimes.go b/api/sys_plugins_runtimes.go new file mode 100644 index 000000000000..b56a899f6507 --- /dev/null +++ b/api/sys_plugins_runtimes.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +// GetPluginRuntimeInput is used as input to the GetPluginRuntime function. +type GetPluginRuntimeInput struct { + Name string `json:"-"` + + // Type of the plugin runtime. Required. + Type PluginRuntimeType `json:"type"` +} + +// GetPluginRuntimeResponse is the response from the GetPluginRuntime call. +type GetPluginRuntimeResponse struct { + Type string `json:"type"` + Name string `json:"name"` + OCIRuntime string `json:"oci_runtime"` + CgroupParent string `json:"cgroup_parent"` + CPU int64 `json:"cpu_nanos"` + Memory int64 `json:"memory_bytes"` +} + +// GetPluginRuntime retrieves information about the plugin runtime. +func (c *Sys) GetPluginRuntime(ctx context.Context, i *GetPluginRuntimeInput) (*GetPluginRuntimeResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := pluginRuntimeCatalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginRuntimeResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err } + +// RegisterPluginRuntimeInput is used as input to the RegisterPluginRuntime function. +type RegisterPluginRuntimeInput struct { + // Name is the name of the plugin runtime. Required. + Name string `json:"-"` + + // Type of the plugin runtime. Required. + Type PluginRuntimeType `json:"type"` + + OCIRuntime string `json:"oci_runtime,omitempty"` + CgroupParent string `json:"cgroup_parent,omitempty"` + CPU int64 `json:"cpu_nanos,omitempty"` + Memory int64 `json:"memory_bytes,omitempty"` + Rootless bool `json:"rootless,omitempty"` +} + +// RegisterPluginRuntime registers the plugin runtime with the given information. +func (c *Sys) RegisterPluginRuntime(ctx context.Context, i *RegisterPluginRuntimeInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := pluginRuntimeCatalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// DeregisterPluginRuntimeInput is used as input to the DeregisterPluginRuntime function. 
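+// +// A brief, hedged usage sketch for deregistration (the client value, ctx, and the runtime name are illustrative assumptions, not part of this change): +// +// err := client.Sys().DeregisterPluginRuntime(ctx, &api.DeregisterPluginRuntimeInput{ +// Name: "gvisor", // hypothetical runtime name +// Type: api.PluginRuntimeTypeContainer, +// })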
+type DeregisterPluginRuntimeInput struct { + // Name is the name of the plugin runtime. Required. + Name string `json:"-"` + + // Type of the plugin runtime. Required. + Type PluginRuntimeType `json:"type"` +} + +// DeregisterPluginRuntime removes the plugin runtime with the given name from +// the plugin runtime catalog. +func (c *Sys) DeregisterPluginRuntime(ctx context.Context, i *DeregisterPluginRuntimeInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := pluginRuntimeCatalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodDelete, path) + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type PluginRuntimeDetails struct { + Type string `json:"type" mapstructure:"type"` + Name string `json:"name" mapstructure:"name"` + OCIRuntime string `json:"oci_runtime" mapstructure:"oci_runtime"` + CgroupParent string `json:"cgroup_parent" mapstructure:"cgroup_parent"` + CPU int64 `json:"cpu_nanos" mapstructure:"cpu_nanos"` + Memory int64 `json:"memory_bytes" mapstructure:"memory_bytes"` +} + +// ListPluginRuntimesInput is used as input to the ListPluginRuntimes function. +type ListPluginRuntimesInput struct { + // Type of the plugin runtime. Required. + Type PluginRuntimeType `json:"type"` +} + +// ListPluginRuntimesResponse is the response from the ListPluginRuntimes call. +type ListPluginRuntimesResponse struct { + // Runtimes is the list of plugin runtimes. + Runtimes []PluginRuntimeDetails `json:"runtimes"` +} + +// ListPluginRuntimes lists all plugin runtimes in the catalog and returns their +// details in a ListPluginRuntimesResponse. +func (c *Sys) ListPluginRuntimes(ctx context.Context, input *ListPluginRuntimesInput) (*ListPluginRuntimesResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if input != nil && input.Type == PluginRuntimeTypeUnsupported { + return nil, fmt.Errorf("%q is not a supported runtime type", input.Type.String()) + } + + resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/runtimes/catalog")) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + if _, ok := secret.Data["runtimes"]; !ok { + return nil, fmt.Errorf("data from server response does not contain runtimes") + } + + var runtimes []PluginRuntimeDetails + if err = mapstructure.Decode(secret.Data["runtimes"], &runtimes); err != nil { + return nil, err + } + + // return all runtimes in the catalog + if input == nil { + return &ListPluginRuntimesResponse{Runtimes: runtimes}, nil + } + + result := &ListPluginRuntimesResponse{ + Runtimes: []PluginRuntimeDetails{}, + } + for _, runtime := range runtimes { + if runtime.Type == input.Type.String() { + result.Runtimes = append(result.Runtimes, runtime) + } + } + return result, nil +} + +// pluginRuntimeCatalogPathByType is a helper to construct the proper API path by plugin type +func pluginRuntimeCatalogPathByType(runtimeType PluginRuntimeType, name string) string { + return fmt.Sprintf("/v1/sys/plugins/runtimes/catalog/%s/%s", runtimeType, name) +} diff --git a/api/sys_plugins_runtimes_test.go b/api/sys_plugins_runtimes_test.go new file mode 100644 index 000000000000..6c3486a31a00 --- /dev/null +++ b/api/sys_plugins_runtimes_test.go @@ 
-0,0 +1,268 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" + "net/http/httptest" + "reflect" + "testing" +) + +func TestRegisterPluginRuntime(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerRegister)) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + err = client.Sys().RegisterPluginRuntime(context.Background(), &RegisterPluginRuntimeInput{ + Name: "gvisor", + Type: PluginRuntimeTypeContainer, + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestGetPluginRuntime(t *testing.T) { + for name, tc := range map[string]struct { + body string + expected GetPluginRuntimeResponse + }{ + "gvisor": { + body: getPluginRuntimeResponse, + expected: GetPluginRuntimeResponse{ + Name: "gvisor", + Type: PluginRuntimeTypeContainer.String(), + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }, + }, + } { + t.Run(name, func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + input := GetPluginRuntimeInput{ + Name: "gvisor", + Type: PluginRuntimeTypeContainer, + } + + info, err := client.Sys().GetPluginRuntime(context.Background(), &input) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, *info) { + t.Errorf("expected: %#v\ngot: %#v", tc.expected, info) + } + }) + } +} + +func TestListPluginRuntimeTyped(t *testing.T) { + for _, tc := range []struct { + runtimeType PluginRuntimeType + body string + expectedResponse *ListPluginRuntimesResponse + expectedErrNil bool + }{ + { + runtimeType: PluginRuntimeTypeContainer, + body: listPluginRuntimeTypedResponse, + expectedResponse: &ListPluginRuntimesResponse{ + Runtimes: []PluginRuntimeDetails{ + { + Type: "container", + Name: "gvisor", + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }, + }, + }, + expectedErrNil: true, + }, + { + runtimeType: PluginRuntimeTypeUnsupported, + body: listPluginRuntimeTypedResponse, + expectedResponse: nil, + expectedErrNil: false, + }, + } { + t.Run(tc.runtimeType.String(), func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + input := ListPluginRuntimesInput{ + Type: tc.runtimeType, + } + + list, err := client.Sys().ListPluginRuntimes(context.Background(), &input) + if tc.expectedErrNil && err != nil { + t.Fatal(err) + } + + if (tc.expectedErrNil && !reflect.DeepEqual(tc.expectedResponse, list)) || (!tc.expectedErrNil && list != nil) { + t.Errorf("expected: %#v\ngot: %#v", tc.expectedResponse, list) + } + }) + } +} + +func TestListPluginRuntimeUntyped(t *testing.T) { + for _, tc := range []struct { + body string + expectedResponse *ListPluginRuntimesResponse + expectedErrNil bool + }{ + { + body: listPluginRuntimeUntypedResponse, + expectedResponse: &ListPluginRuntimesResponse{ + Runtimes: []PluginRuntimeDetails{ + { + Type: "container", + Name: "gvisor", + OCIRuntime: "runsc", + CgroupParent: 
"/cpulimit/", + CPU: 1, + Memory: 10000, + }, + { + Type: "container", + Name: "foo", + OCIRuntime: "otherociruntime", + CgroupParent: "/memorylimit/", + CPU: 2, + Memory: 20000, + }, + { + Type: "container", + Name: "bar", + OCIRuntime: "otherociruntime", + CgroupParent: "/cpulimit/", + CPU: 3, + Memory: 30000, + }, + }, + }, + expectedErrNil: true, + }, + } { + t.Run("", func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + info, err := client.Sys().ListPluginRuntimes(context.Background(), nil) + if tc.expectedErrNil && err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expectedResponse, info) { + t.Errorf("expected: %#v\ngot: %#v", tc.expectedResponse, info) + } + }) + } +} + +const getPluginRuntimeResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c123456", + "data": { + "name": "gvisor", + "type": "container", + "oci_runtime": "runsc", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 1, + "memory_bytes": 10000 + }, + "warnings": null, + "auth": null +}` + +const listPluginRuntimeTypedResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c123456", + "data": { + "runtimes": [ + { + "name": "gvisor", + "type": "container", + "oci_runtime": "runsc", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 1, + "memory_bytes": 10000 + } + ] + }, + "warnings": null, + "auth": null +} +` + +const listPluginRuntimeUntypedResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c123456", + "data": { + "runtimes": [ + { + "name": "gvisor", + "type": "container", + "oci_runtime": "runsc", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 1, + "memory_bytes": 10000 + }, + { + "name": "foo", + "type": "container", + "oci_runtime": "otherociruntime", + "cgroup_parent": "/memorylimit/", + "cpu_nanos": 2, + "memory_bytes": 20000 + }, + { + "name": "bar", + "type": "container", + "oci_runtime": "otherociruntime", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 3, + "memory_bytes": 30000 + } + ] + }, + "warnings": null, + "auth": null +}` diff --git a/api/sys_plugins_test.go b/api/sys_plugins_test.go index 3673181472a7..8ba8fc571410 100644 --- a/api/sys_plugins_test.go +++ b/api/sys_plugins_test.go @@ -161,6 +161,21 @@ func TestGetPlugin(t *testing.T) { Version: "", }, }, + "oci image": { + version: "v0.16.0", + body: getResponseOCIImageVersion, + expected: GetPluginResponse{ + Args: []string{}, + Builtin: false, + Command: "", + Name: "jwt", + OCIImage: "hashicorp/vault-plugin-auth-jwt", + Runtime: "gvisor", + SHA256: "8ba442dba253803685b05e35ad29dcdebc48dec16774614aa7a4ebe53c1e90e1", + DeprecationStatus: "", + Version: "v0.16.0", + }, + }, } { t.Run(name, func(t *testing.T) { mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) @@ -253,6 +268,25 @@ const getResponseOldServerVersion = `{ "auth": null }` +const getResponseOCIImageVersion = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c495241", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "args": [], + "builtin": false, + "name": "jwt", + "oci_image" : "hashicorp/vault-plugin-auth-jwt", + "runtime" : "gvisor", + "sha256": "8ba442dba253803685b05e35ad29dcdebc48dec16774614aa7a4ebe53c1e90e1", + "version": "v0.16.0" + }, + "wrap_info": null, + "warnings": null, + "auth": null +}` + func mockVaultHandlerList(w http.ResponseWriter, _ *http.Request) { _, _ 
= w.Write([]byte(listUntypedResponse)) } diff --git a/api/sys_raft.go b/api/sys_raft.go index 29bfed0f5613..f0e896271b39 100644 --- a/api/sys_raft.go +++ b/api/sys_raft.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "io" "io/ioutil" "net/http" @@ -100,6 +101,23 @@ type AutopilotState struct { OptimisticFailureTolerance int `mapstructure:"optimistic_failure_tolerance,omitempty"` } +func (a *AutopilotState) String() string { + var result string + result += fmt.Sprintf("Healthy: %t. FailureTolerance: %d. Leader: %s. OptimisticFailureTolerance: %d\n", a.Healthy, a.FailureTolerance, a.Leader, a.OptimisticFailureTolerance) + for _, s := range a.Servers { + result += fmt.Sprintf("Server: %s\n", s) + } + result += fmt.Sprintf("Voters: %v\n", a.Voters) + result += fmt.Sprintf("NonVoters: %v\n", a.NonVoters) + + for name, zone := range a.RedundancyZones { + result += fmt.Sprintf("RedundancyZone %s: %s\n", name, &zone) + } + + result += fmt.Sprintf("Upgrade: %s", a.Upgrade) + return result +} + // AutopilotServer represents the server blocks in the response of the raft // autopilot state API. type AutopilotServer struct { @@ -119,12 +137,21 @@ type AutopilotServer struct { NodeType string `mapstructure:"node_type,omitempty"` } +func (a *AutopilotServer) String() string { + return fmt.Sprintf("ID: %s. Name: %s. Address: %s. NodeStatus: %s. LastContact: %s. LastTerm: %d. LastIndex: %d. Healthy: %t. StableSince: %s. Status: %s. Version: %s. UpgradeVersion: %s. RedundancyZone: %s. NodeType: %s", + a.ID, a.Name, a.Address, a.NodeStatus, a.LastContact, a.LastTerm, a.LastIndex, a.Healthy, a.StableSince, a.Status, a.Version, a.UpgradeVersion, a.RedundancyZone, a.NodeType) +} + type AutopilotZone struct { Servers []string `mapstructure:"servers,omitempty"` Voters []string `mapstructure:"voters,omitempty"` FailureTolerance int `mapstructure:"failure_tolerance,omitempty"` } +func (a *AutopilotZone) String() string { + return fmt.Sprintf("Servers: %v. Voters: %v. FailureTolerance: %d", a.Servers, a.Voters, a.FailureTolerance) +} + type AutopilotUpgrade struct { Status string `mapstructure:"status"` TargetVersion string `mapstructure:"target_version,omitempty"` @@ -137,6 +164,17 @@ type AutopilotUpgrade struct { RedundancyZones map[string]AutopilotZoneUpgradeVersions `mapstructure:"redundancy_zones,omitempty"` } +func (a *AutopilotUpgrade) String() string { + result := fmt.Sprintf("Status: %s. TargetVersion: %s. TargetVersionVoters: %v. TargetVersionNonVoters: %v. TargetVersionReadReplicas: %v. OtherVersionVoters: %v. OtherVersionNonVoters: %v. OtherVersionReadReplicas: %v", + a.Status, a.TargetVersion, a.TargetVersionVoters, a.TargetVersionNonVoters, a.TargetVersionReadReplicas, a.OtherVersionVoters, a.OtherVersionNonVoters, a.OtherVersionReadReplicas) + + for name, zone := range a.RedundancyZones { + result += fmt.Sprintf("Redundancy Zone %s: %s", name, &zone) + } + + return result +} + type AutopilotZoneUpgradeVersions struct { TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` @@ -144,6 +182,11 @@ type AutopilotZoneUpgradeVersions struct { OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` } +func (a *AutopilotZoneUpgradeVersions) String() string { + return fmt.Sprintf("TargetVersionVoters: %v. TargetVersionNonVoters: %v. OtherVersionVoters: %v. 
OtherVersionNonVoters: %v", + a.TargetVersionVoters, a.TargetVersionNonVoters, a.OtherVersionVoters, a.OtherVersionNonVoters) +} + // RaftJoin wraps RaftJoinWithContext using context.Background. func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { return c.RaftJoinWithContext(context.Background(), opts) @@ -221,7 +264,7 @@ func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) continue } var b []byte - b, err = ioutil.ReadAll(t) + b, err = io.ReadAll(t) if err != nil || len(b) == 0 { return } @@ -276,11 +319,19 @@ func (c *Sys) RaftAutopilotState() (*AutopilotState, error) { return c.RaftAutopilotStateWithContext(context.Background()) } +// RaftAutopilotStateWithDRToken wraps RaftAutopilotStateWithContext using the given DR operation token. +func (c *Sys) RaftAutopilotStateWithDRToken(drToken string) (*AutopilotState, error) { + return c.RaftAutopilotStateWithContext(context.WithValue(context.Background(), "dr-token", drToken)) +} + // RaftAutopilotStateWithContext returns the state of the raft cluster as seen by autopilot. func (c *Sys) RaftAutopilotStateWithContext(ctx context.Context) (*AutopilotState, error) { ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() + if ctx.Value("dr-token") != nil { + c.c.SetToken(ctx.Value("dr-token").(string)) + } r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/state") resp, err := c.c.rawRequestWithContext(ctx, r) @@ -316,11 +367,20 @@ func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) { return c.RaftAutopilotConfigurationWithContext(context.Background()) } +// RaftAutopilotConfigurationWithDRToken wraps RaftAutopilotConfigurationWithContext using the given DR operation token. +func (c *Sys) RaftAutopilotConfigurationWithDRToken(drToken string) (*AutopilotConfig, error) { + return c.RaftAutopilotConfigurationWithContext(context.WithValue(context.Background(), "dr-token", drToken)) +} + // RaftAutopilotConfigurationWithContext fetches the autopilot config. func (c *Sys) RaftAutopilotConfigurationWithContext(ctx context.Context) (*AutopilotConfig, error) { ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() + if ctx.Value("dr-token") != nil { + c.c.SetToken(ctx.Value("dr-token").(string)) + } + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/configuration") resp, err := c.c.rawRequestWithContext(ctx, r) diff --git a/api/sys_seal.go b/api/sys_seal.go index 7a9c5621ed19..62002496c36b 100644 --- a/api/sys_seal.go +++ b/api/sys_seal.go @@ -109,6 +109,7 @@ type SealStatusResponse struct { ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` RecoverySeal bool `json:"recovery_seal"` + RecoverySealType string `json:"recovery_seal_type,omitempty"` StorageType string `json:"storage_type,omitempty"` HCPLinkStatus string `json:"hcp_link_status,omitempty"` HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` diff --git a/api/sys_ui_custom_message.go b/api/sys_ui_custom_message.go new file mode 100644 index 000000000000..a129efea7631 --- /dev/null +++ b/api/sys_ui_custom_message.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" +) + +const ( + // baseEndpoint is the common base URL path for all endpoints used in this + // module. 
+ baseEndpoint string = "/v1/sys/config/ui/custom-messages" +) + +// ListUICustomMessages calls ListUICustomMessagesWithContext using a background +// Context. +func (c *Sys) ListUICustomMessages(req UICustomMessageListRequest) (*Secret, error) { + return c.ListUICustomMessagesWithContext(context.Background(), req) +} + +// ListUICustomMessagesWithContext sends a request to the List custom messages +// endpoint using the provided Context and UICustomMessageListRequest value as +// the inputs. It returns a pointer to a Secret if a response was obtained from +// the server, including error responses; or an error if a response could not be +// obtained due to an error. +func (c *Sys) ListUICustomMessagesWithContext(ctx context.Context, req UICustomMessageListRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", fmt.Sprintf("%s/", baseEndpoint)) + if req.Active != nil { + r.Params.Add("active", strconv.FormatBool(*req.Active)) + } + if req.Authenticated != nil { + r.Params.Add("authenticated", strconv.FormatBool(*req.Authenticated)) + } + if req.Type != nil { + r.Params.Add("type", *req.Type) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// CreateUICustomMessage calls CreateUICustomMessageWithContext using a +// background Context. +func (c *Sys) CreateUICustomMessage(req UICustomMessageRequest) (*Secret, error) { + return c.CreateUICustomMessageWithContext(context.Background(), req) +} + +// CreateUICustomMessageWithContext sends a request to the Create custom +// messages endpoint using the provided Context and UICustomMessageRequest +// values as the inputs. It returns a pointer to a Secret if a response was +// obtained from the server, including error responses; or an error if a +// response could not be obtained due to an error. +func (c *Sys) CreateUICustomMessageWithContext(ctx context.Context, req UICustomMessageRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, baseEndpoint) + if err := r.SetJSONBody(&req); err != nil { + return nil, fmt.Errorf("error encoding request body to json: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("could not parse secret from server response: %w", err) + } + + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// ReadUICustomMessage calls ReadUICustomMessageWithContext using a background +// Context. +func (c *Sys) ReadUICustomMessage(id string) (*Secret, error) { + return c.ReadUICustomMessageWithContext(context.Background(), id) +} + +// ReadUICustomMessageWithContext sends a request to the Read custom message +// endpoint using the provided Context and id values. It returns a pointer to a +// Secret if a response was obtained from the server, including error responses; +// or an error if a response could not be obtained due to an error. 
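+// +// A brief, hedged usage sketch (the client value, ctx, and id are illustrative assumptions; the exact fields in secret.Data depend on the server's response): +// +// secret, err := client.Sys().ReadUICustomMessageWithContext(ctx, id) +// if err == nil && secret != nil { +// fmt.Println(secret.Data["title"]) // e.g. the message title, if returned +// }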
+func (c *Sys) ReadUICustomMessageWithContext(ctx context.Context, id string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", baseEndpoint, id)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("could not parse secret from server response: %w", err) + } + + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// UpdateUICustomMessage calls UpdateUICustomMessageWithContext using a +// background Context. +func (c *Sys) UpdateUICustomMessage(id string, req UICustomMessageRequest) error { + return c.UpdateUICustomMessageWithContext(context.Background(), id, req) +} + +// UpdateUICustomMessageWithContext sends a request to the Update custom message +// endpoint using the provided Context, id, and UICustomMessageRequest values. +// It returns an error if the request could not be completed successfully; +// otherwise it returns nil. +func (c *Sys) UpdateUICustomMessageWithContext(ctx context.Context, id string, req UICustomMessageRequest) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("%s/%s", baseEndpoint, id)) + if err := r.SetJSONBody(&req); err != nil { + return fmt.Errorf("error encoding request body to json: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + return nil +} + +// DeleteUICustomMessage calls DeleteUICustomMessageWithContext using a +// background Context. +func (c *Sys) DeleteUICustomMessage(id string) error { + return c.DeleteUICustomMessageWithContext(context.Background(), id) +} + +// DeleteUICustomMessageWithContext sends a request to the Delete custom message +// endpoint using the provided Context and id values. It returns an error if the +// request could not be completed successfully; otherwise it returns nil. +func (c *Sys) DeleteUICustomMessageWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("%s/%s", baseEndpoint, id)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + return nil +} + +// UICustomMessageListRequest is a struct used to contain inputs for the List +// custom messages request. Each field is optional, so their types are pointers. +// The With... methods can be used to easily set the fields with pointers to +// values. +type UICustomMessageListRequest struct { + Authenticated *bool + Type *string + Active *bool +} + +// WithAuthenticated sets the Authenticated field to a pointer referencing the +// provided bool value. +func (r *UICustomMessageListRequest) WithAuthenticated(value bool) *UICustomMessageListRequest { + r.Authenticated = &value + + return r +} + +// WithType sets the Type field to a pointer referencing the provided string +// value. 
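+// +// Because every With... method returns its receiver, the setters can be chained; a brief sketch (the client value and filter values are illustrative assumptions): +// +// req := (&UICustomMessageListRequest{}).WithActive(true).WithType("banner") +// secret, err := client.Sys().ListUICustomMessages(*req)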
+func (r *UICustomMessageListRequest) WithType(value string) *UICustomMessageListRequest { + r.Type = &value + + return r +} + +// WithActive sets the Active field to a pointer referencing the provided bool +// value. +func (r *UICustomMessageListRequest) WithActive(value bool) *UICustomMessageListRequest { + r.Active = &value + + return r +} + +// UICustomMessageRequest is a struct containing the properties of a custom +// message. The Link field can be set using the WithLink method. +type UICustomMessageRequest struct { + Title string `json:"title"` + Message string `json:"message"` + Authenticated bool `json:"authenticated"` + Type string `json:"type"` + StartTime string `json:"start_time"` + EndTime string `json:"end_time,omitempty"` + Link *uiCustomMessageLink `json:"link,omitempty"` + Options map[string]any `json:"options,omitempty"` +} + +// WithLink sets the Link field to the address of a new uiCustomMessageLink +// struct constructed from the provided title and href values. +func (r *UICustomMessageRequest) WithLink(title, href string) *UICustomMessageRequest { + r.Link = &uiCustomMessageLink{ + Title: title, + Href: href, + } + + return r +} + +// uiCustomMessageLink is a utility struct used to represent a link associated +// with a custom message. +type uiCustomMessageLink struct { + Title string + Href string +} + +// MarshalJSON encodes the state of the receiver uiCustomMessageLink as JSON and +// returns those encoded bytes or an error. +func (l uiCustomMessageLink) MarshalJSON() ([]byte, error) { + m := make(map[string]string) + + m[l.Title] = l.Href + + return json.Marshal(m) +} + +// UnmarshalJSON updates the state of the receiver uiCustomMessageLink from the +// provided JSON encoded bytes. It returns an error if there was a failure. +func (l *uiCustomMessageLink) UnmarshalJSON(b []byte) error { + m := make(map[string]string) + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + for k, v := range m { + l.Title = k + l.Href = v + break + } + + return nil +} diff --git a/api/sys_ui_custom_message_test.go b/api/sys_ui_custom_message_test.go new file mode 100644 index 000000000000..0082ad55d778 --- /dev/null +++ b/api/sys_ui_custom_message_test.go @@ -0,0 +1,193 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var messageBase64 string = base64.StdEncoding.EncodeToString([]byte("message")) + +// TestUICustomMessageJsonMarshalling verifies that json marshalling (struct to +// json) works with the uiCustomMessageRequest type. 
+func TestUICustomMessageJsonMarshalling(t *testing.T) { + for _, testcase := range []struct { + name string + request UICustomMessageRequest + expectedJSON string + }{ + { + name: "no-link-no-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Type: "banner", + Authenticated: true, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":true,"type":"banner","start_time":"2024-01-01T00:00:00.000Z"}`, messageBase64), + }, + { + name: "link-no-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Type: "modal", + Authenticated: false, + Link: &uiCustomMessageLink{ + Title: "Click here", + Href: "https://www.example.org", + }, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"}}`, messageBase64), + }, + { + name: "no-link-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Authenticated: true, + Type: "banner", + Options: map[string]any{ + "key": "value", + }, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":true,"type":"banner","start_time":"2024-01-01T00:00:00.000Z","options":{"key":"value"}}`, messageBase64), + }, + { + name: "link-and-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Authenticated: true, + Type: "banner", + Link: &uiCustomMessageLink{ + Title: "Click here", + Href: "https://www.example.org", + }, + Options: map[string]any{ + "key": "value", + }, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":true,"type":"banner","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"},"options":{"key":"value"}}`, messageBase64), + }, + } { + tc := testcase + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + bytes, err := json.Marshal(&tc.request) + assert.NoError(t, err) + assert.Equal(t, tc.expectedJSON, string(bytes)) + }) + } +} + +// TestUICustomMessageJsonUnmarshal verifies that json unmarshalling (json to +// struct) works with the uiCustomMessageRequest type. 
+func TestUICustomMessageJsonUnmarshal(t *testing.T) { + for _, testcase := range []struct { + name string + encodedBytes string + linkAssertion func(assert.TestingT, any, ...any) bool + checkLink bool + optionsAssertion func(assert.TestingT, any, ...any) bool + checkOptions bool + }{ + { + name: "no-link-no-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z"}`, messageBase64), + linkAssertion: assert.Nil, + optionsAssertion: assert.Nil, + }, + { + name: "link-no-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"}}`, messageBase64), + linkAssertion: assert.NotNil, + checkLink: true, + optionsAssertion: assert.Nil, + }, + { + name: "no-link-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","options":{"key":"value"}}`, messageBase64), + linkAssertion: assert.Nil, + optionsAssertion: assert.NotNil, + checkOptions: true, + }, + { + name: "link-and-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"},"options":{"key":"value"}}`, messageBase64), + linkAssertion: assert.NotNil, + checkLink: true, + optionsAssertion: assert.NotNil, + checkOptions: true, + }, + } { + tc := testcase + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + var request UICustomMessageRequest + + err := json.Unmarshal([]byte(tc.encodedBytes), &request) + assert.NoError(t, err) + tc.linkAssertion(t, request.Link) + tc.optionsAssertion(t, request.Options) + + if tc.checkLink { + assert.Equal(t, "Click here", request.Link.Title) + assert.Equal(t, "https://www.example.org", request.Link.Href) + } + + if tc.checkOptions { + assert.Contains(t, request.Options, "key") + } + }) + } +} + +// TestUICustomMessageListRequestOptions verifies the correct behaviour of all +// of the With... methods of the UICustomMessageListRequest. 
+func TestUICustomMessageListRequestOptions(t *testing.T) { + request := &UICustomMessageListRequest{} + assert.Nil(t, request.Active) + assert.Nil(t, request.Authenticated) + assert.Nil(t, request.Type) + + request = (&UICustomMessageListRequest{}).WithActive(true) + assert.NotNil(t, request.Active) + assert.True(t, *request.Active) + + request = (&UICustomMessageListRequest{}).WithActive(false) + assert.NotNil(t, request.Active) + assert.False(t, *request.Active) + + request = (&UICustomMessageListRequest{}).WithAuthenticated(true) + assert.NotNil(t, request.Authenticated) + assert.True(t, *request.Authenticated) + + request = (&UICustomMessageListRequest{}).WithAuthenticated(false) + assert.NotNil(t, request.Authenticated) + assert.False(t, *request.Authenticated) + + request = (&UICustomMessageListRequest{}).WithType("banner") + assert.NotNil(t, request.Type) + assert.Equal(t, "banner", *request.Type) + + request = (&UICustomMessageListRequest{}).WithType("modal") + assert.NotNil(t, request.Type) + assert.Equal(t, "modal", *request.Type) +} diff --git a/command/token/helper.go b/api/tokenhelper/helper.go similarity index 96% rename from command/token/helper.go rename to api/tokenhelper/helper.go index c8ce76326a7b..a70d3e20a22f 100644 --- a/command/token/helper.go +++ b/api/tokenhelper/helper.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package token +package tokenhelper // TokenHelper is an interface that contains basic operations that must be // implemented by a token helper diff --git a/command/token/helper_external.go b/api/tokenhelper/helper_external.go similarity index 95% rename from command/token/helper_external.go rename to api/tokenhelper/helper_external.go index 12557a4b3060..e9bfb18b8c59 100644 --- a/command/token/helper_external.go +++ b/api/tokenhelper/helper_external.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package token +package tokenhelper import ( "bytes" @@ -110,7 +110,7 @@ func (h *ExternalTokenHelper) Path() string { func (h *ExternalTokenHelper) cmd(op string) (*exec.Cmd, error) { script := strings.ReplaceAll(h.BinaryPath, "\\", "\\\\") + " " + op - cmd, err := ExecScript(script) + cmd, err := execScript(script) if err != nil { return nil, err } @@ -118,8 +118,8 @@ func (h *ExternalTokenHelper) cmd(op string) (*exec.Cmd, error) { return cmd, nil } -// ExecScript returns a command to execute a script -func ExecScript(script string) (*exec.Cmd, error) { +// execScript returns a command to execute a script +func execScript(script string) (*exec.Cmd, error) { var shell, flag string if runtime.GOOS == "windows" { shell = "cmd" diff --git a/command/token/helper_external_test.go b/api/tokenhelper/helper_external_test.go similarity index 93% rename from command/token/helper_external_test.go rename to api/tokenhelper/helper_external_test.go index d95c8890eaf3..8928c004026e 100644 --- a/command/token/helper_external_test.go +++ b/api/tokenhelper/helper_external_test.go @@ -1,12 +1,11 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package token +package tokenhelper import ( "fmt" "io" - "io/ioutil" "os" "runtime" "strings" @@ -54,10 +53,10 @@ func TestExternalTokenHelperPath(t *testing.T) { } func TestExternalTokenHelper(t *testing.T) { - Test(t, testExternalTokenHelper(t)) + test(t, testExternalTokenHelper()) } -func testExternalTokenHelper(t *testing.T) *ExternalTokenHelper { +func testExternalTokenHelper() *ExternalTokenHelper { return &ExternalTokenHelper{BinaryPath: helperPath("helper"), Env: helperEnv()} } @@ -73,7 +72,7 @@ func helperPath(s ...string) string { func helperEnv() []string { var env []string - tf, err := ioutil.TempFile("", "vault") + tf, err := os.CreateTemp("", "vault") if err != nil { panic(err) } diff --git a/command/token/helper_internal.go b/api/tokenhelper/helper_internal.go similarity index 99% rename from command/token/helper_internal.go rename to api/tokenhelper/helper_internal.go index aeb4faa9bef3..ce95aef07ce4 100644 --- a/command/token/helper_internal.go +++ b/api/tokenhelper/helper_internal.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package token +package tokenhelper import ( "bytes" diff --git a/command/token/helper_internal_test.go b/api/tokenhelper/helper_internal_test.go similarity index 92% rename from command/token/helper_internal_test.go rename to api/tokenhelper/helper_internal_test.go index e68359c82008..5250da014e2c 100644 --- a/command/token/helper_internal_test.go +++ b/api/tokenhelper/helper_internal_test.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package token +package tokenhelper import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -17,11 +16,11 @@ func TestCommand(t *testing.T) { if err != nil { t.Fatal(err) } - Test(t, helper) + test(t, helper) } func TestInternalHelperFilePerms(t *testing.T) { - tmpDir, err := ioutil.TempDir("", t.Name()) + tmpDir, err := os.MkdirTemp("", t.Name()) if err != nil { t.Fatal(err) } diff --git a/api/tokenhelper/testing.go b/api/tokenhelper/testing.go new file mode 100644 index 000000000000..577b4940f325 --- /dev/null +++ b/api/tokenhelper/testing.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tokenhelper + +import ( + "testing" +) + +// test is an unexported helper used by other tests in this package to +// verify that a TokenHelper implementation is functioning properly. +func test(t *testing.T, h TokenHelper) { + if err := h.Store("foo"); err != nil { + t.Fatalf("err: %s", err) + } + + v, err := h.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + if v != "foo" { + t.Fatalf("bad: %#v", v) + } + + if err := h.Erase(); err != nil { + t.Fatalf("err: %s", err) + } + + v, err = h.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + if v != "" { + t.Fatalf("bad: %#v", v) + } +} diff --git a/audit/audit.go b/audit/audit.go deleted file mode 100644 index 35a3d38a0558..000000000000 --- a/audit/audit.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "context" - - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -// Backend interface must be implemented for an audit -// mechanism to be made available. Audit backends can be enabled to -// sink information to different backends such as logs, file, databases, -// or other external services. -type Backend interface { - // LogRequest is used to synchronously log a request. 
This is done after the - // request is authorized but before the request is executed. The arguments - // MUST not be modified in anyway. They should be deep copied if this is - // a possibility. - LogRequest(context.Context, *logical.LogInput) error - - // LogResponse is used to synchronously log a response. This is done after - // the request is processed but before the response is sent. The arguments - // MUST not be modified in anyway. They should be deep copied if this is - // a possibility. - LogResponse(context.Context, *logical.LogInput) error - - // LogTestMessage is used to check an audit backend before adding it - // permanently. It should attempt to synchronously log the given test - // message, WITHOUT using the normal Salt (which would require a storage - // operation on creation, which is currently disallowed.) - LogTestMessage(context.Context, *logical.LogInput, map[string]string) error - - // GetHash is used to return the given data with the backend's hash, - // so that a caller can determine if a value in the audit log matches - // an expected plaintext value - GetHash(context.Context, string) (string, error) - - // Reload is called on SIGHUP for supporting backends. - Reload(context.Context) error - - // Invalidate is called for path invalidation - Invalidate(context.Context) -} - -// BackendConfig contains configuration parameters used in the factory func to -// instantiate audit backends -type BackendConfig struct { - // The view to store the salt - SaltView logical.Storage - - // The salt config that should be used for any secret obfuscation - SaltConfig *salt.Config - - // Config is the opaque user configuration provided when mounting - Config map[string]string -} - -// Factory is the factory function to create an audit backend. -type Factory func(context.Context, *BackendConfig) (Backend, error) diff --git a/audit/backend.go b/audit/backend.go new file mode 100644 index 000000000000..4bce4b6edb7d --- /dev/null +++ b/audit/backend.go @@ -0,0 +1,269 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + optionElideListResponses = "elide_list_responses" + optionExclude = "exclude" + optionFallback = "fallback" + optionFilter = "filter" + optionFormat = "format" + optionHMACAccessor = "hmac_accessor" + optionLogRaw = "log_raw" + optionPrefix = "prefix" + + TypeFile = "file" + TypeSocket = "socket" + TypeSyslog = "syslog" +) + +var _ Backend = (*backend)(nil) + +// Factory is the factory function to create an audit backend. +type Factory func(*BackendConfig, HeaderFormatter) (Backend, error) + +// Backend interface must be implemented for an audit +// mechanism to be made available. Audit backends can be enabled to +// sink information to different backends such as logs, file, databases, +// or other external services. +type Backend interface { + // Salter interface must be implemented by anything implementing Backend. + Salter + + // The PipelineReader interface allows backends to surface information about their + // nodes for node and pipeline registration. 
+ event.PipelineReader + + // IsFallback can be used to determine if this audit backend device is intended to + // be used as a fallback to catch all events that are not written when only using + // filtered pipelines. + IsFallback() bool + + // LogTestMessage is used to check an audit backend before adding it + // permanently. It should attempt to synchronously log the given test + // message, WITHOUT using the normal Salt (which would require a storage + // operation on creation). + LogTestMessage(context.Context, *logical.LogInput) error + + // Reload is called on SIGHUP for supporting backends. + Reload() error + + // Invalidate is called for path invalidation + Invalidate(context.Context) +} + +// Salter is an interface that provides a way to obtain a Salt for hashing. +type Salter interface { + // Salt returns a non-nil salt or an error. + Salt(context.Context) (*salt.Salt, error) +} + +// backend represents an audit backend's shared fields across supported devices (file, socket, syslog). +// NOTE: Use newBackend to initialize the backend. +// e.g. within NewFileBackend, NewSocketBackend, NewSyslogBackend. +type backend struct { + *backendEnt + name string + nodeIDList []eventlogger.NodeID + nodeMap map[eventlogger.NodeID]eventlogger.Node + salt *atomic.Value + saltConfig *salt.Config + saltMutex sync.RWMutex + saltView logical.Storage +} + +// newBackend will create the common backend which should be used by supported audit +// backend types (file, socket, syslog) to which they can create and add their sink. +// It handles basic validation of config and creates required pipelines nodes that +// precede the sink node. +func newBackend(headersConfig HeaderFormatter, conf *BackendConfig) (*backend, error) { + b := &backend{ + backendEnt: newBackendEnt(conf.Config), + name: conf.MountPath, + saltConfig: conf.SaltConfig, + saltView: conf.SaltView, + salt: new(atomic.Value), + nodeIDList: []eventlogger.NodeID{}, + nodeMap: make(map[eventlogger.NodeID]eventlogger.Node), + } + // Ensure we are working with the right type by explicitly storing a nil of the right type. + b.salt.Store((*salt.Salt)(nil)) + + if err := b.configureFilterNode(conf.Config[optionFilter]); err != nil { + return nil, err + } + + cfg, err := newFormatterConfig(headersConfig, conf.Config) + if err != nil { + return nil, err + } + + if err := b.configureFormatterNode(conf.MountPath, cfg, conf.Logger); err != nil { + return nil, err + } + + return b, nil +} + +// configureFormatterNode is used to configure a formatter node and associated ID on the Backend. +func (b *backend) configureFormatterNode(name string, formatConfig formatterConfig, logger hclog.Logger) error { + formatterNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for formatter node: %w: %w", ErrInternal, err) + } + + formatterNode, err := newEntryFormatter(name, formatConfig, b, logger) + if err != nil { + return fmt.Errorf("error creating formatter: %w", err) + } + + b.nodeIDList = append(b.nodeIDList, formatterNodeID) + b.nodeMap[formatterNodeID] = formatterNode + + return nil +} + +// wrapMetrics takes a sink node and augments it by wrapping it with metrics nodes. +// Metrics can be used to measure time and count. +func (b *backend) wrapMetrics(name string, id eventlogger.NodeID, n eventlogger.Node) error { + if n.Type() != eventlogger.NodeTypeSink { + return fmt.Errorf("unable to wrap node with metrics. 
%q is not a sink node: %w", name, ErrInvalidParameter) + } + + // Wrap the sink node with metrics middleware + sinkMetricTimer, err := newSinkMetricTimer(name, n) + if err != nil { + return fmt.Errorf("unable to add timing metrics to sink for path %q: %w", name, err) + } + + sinkMetricCounter, err := event.NewMetricsCounter(name, sinkMetricTimer, b.getMetricLabeler()) + if err != nil { + return fmt.Errorf("unable to add counting metrics to sink for path %q: %w", name, err) + } + + b.nodeIDList = append(b.nodeIDList, id) + b.nodeMap[id] = sinkMetricCounter + + return nil +} + +// Salt is used to provide a salt for HMAC'ing data. If the salt is not currently +// loaded from storage, then loading will be attempted to create a new salt, which +// will then be stored and returned on subsequent calls. +// NOTE: If invalidation occurs the salt will likely be cleared, forcing reload +// from storage. +func (b *backend) Salt(ctx context.Context) (*salt.Salt, error) { + s := b.salt.Load().(*salt.Salt) + if s != nil { + return s, nil + } + + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + + s = b.salt.Load().(*salt.Salt) + if s != nil { + return s, nil + } + + newSalt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) + if err != nil { + b.salt.Store((*salt.Salt)(nil)) + return nil, err + } + + b.salt.Store(newSalt) + return newSalt, nil +} + +// EventType returns the event type for the backend. +func (b *backend) EventType() eventlogger.EventType { + return event.AuditType.AsEventType() +} + +// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter. +func (b *backend) HasFiltering() bool { + if b.nodeMap == nil { + return false + } + + return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter +} + +// Name for this backend; this must correspond to the mount path for the audit device. +func (b *backend) Name() string { + return b.name +} + +// NodeIDs returns the IDs of the nodes, in the order they are required. +func (b *backend) NodeIDs() []eventlogger.NodeID { + return b.nodeIDList +} + +// Nodes returns the nodes which should be used by the event framework to process audit entries. +func (b *backend) Nodes() map[eventlogger.NodeID]eventlogger.Node { + return b.nodeMap +} + +func (b *backend) LogTestMessage(ctx context.Context, input *logical.LogInput) error { + if len(b.nodeIDList) > 0 { + return processManual(ctx, input, b.nodeIDList, b.nodeMap) + } + + return nil +} + +func (b *backend) Reload() error { + for _, n := range b.nodeMap { + if n.Type() == eventlogger.NodeTypeSink { + return n.Reopen() + } + } + + return nil +} + +func (b *backend) Invalidate(_ context.Context) { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt.Store((*salt.Salt)(nil)) +} + +// HasInvalidOptions is used to determine if a non-Enterprise version of Vault +// is being used when options exclusive to Enterprise are supplied. +func HasInvalidOptions(options map[string]string) bool { + return !constants.IsEnterprise && hasEnterpriseAuditOptions(options) +} + +// hasEnterpriseAuditOptions is used to check if any of the options supplied +// are only for use in the Enterprise version of Vault. 
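+// +// An illustrative sketch (the option value is hypothetical): +// +// opts := map[string]string{"filter": `operation == "update"`} +// _ = hasEnterpriseAuditOptions(opts) // true, since "filter" is Enterprise-only +// _ = HasInvalidOptions(opts) // true on community-edition builds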
+func hasEnterpriseAuditOptions(options map[string]string) bool { + enterpriseAuditOptions := []string{ + optionExclude, + optionFallback, + optionFilter, + } + + for _, o := range enterpriseAuditOptions { + if _, ok := options[o]; ok { + return true + } + } + + return false +} diff --git a/audit/backend_ce.go b/audit/backend_ce.go new file mode 100644 index 000000000000..58f19268395e --- /dev/null +++ b/audit/backend_ce.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +import "github.com/hashicorp/vault/internal/observability/event" + +type backendEnt struct{} + +func newBackendEnt(_ map[string]string) *backendEnt { + return &backendEnt{} +} + +func (b *backendEnt) IsFallback() bool { + return false +} + +// configureFilterNode is a no-op as filters are an Enterprise-only feature. +func (b *backend) configureFilterNode(_ string) error { + return nil +} + +func (b *backend) getMetricLabeler() event.Labeler { + return &metricLabelerAuditSink{} +} diff --git a/audit/backend_ce_test.go b/audit/backend_ce_test.go new file mode 100644 index 000000000000..bc49fe3ecabf --- /dev/null +++ b/audit/backend_ce_test.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/stretchr/testify/require" +) + +// TestBackend_configureFilterNode ensures that configureFilterNode handles various +// filter values as expected. Empty (including whitespace) strings should return +// no error but skip configuration of the node. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault. +func TestBackend_configureFilterNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + filter string + }{ + "happy": { + filter: "operation == \"update\"", + }, + "empty": { + filter: "", + }, + "spacey": { + filter: " ", + }, + "bad": { + filter: "___qwerty", + }, + "unsupported-field": { + filter: "foo == bar", + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureFilterNode(tc.filter) + require.NoError(t, err) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + }) + } +} diff --git a/audit/backend_config.go b/audit/backend_config.go new file mode 100644 index 000000000000..019280efe434 --- /dev/null +++ b/audit/backend_config.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +// BackendConfig contains configuration parameters used in the factory func to +// instantiate audit backends +type BackendConfig struct { + // The view to store the salt + SaltView logical.Storage + + // The salt config that should be used for any secret obfuscation + SaltConfig *salt.Config + + // Config is the opaque user configuration provided when mounting + Config map[string]string + + // MountPath is the path where this Backend is mounted + MountPath string + + // Logger is used to emit log messages usually captured in the server logs. 
+ Logger hclog.Logger +} + +// Validate ensures that we have the required configuration to create audit backends. +func (c *BackendConfig) Validate() error { + if c.SaltConfig == nil { + return fmt.Errorf("nil salt config: %w", ErrInvalidParameter) + } + + if c.SaltView == nil { + return fmt.Errorf("nil salt view: %w", ErrInvalidParameter) + } + + if c.Logger == nil || reflect.ValueOf(c.Logger).IsNil() { + return fmt.Errorf("nil logger: %w", ErrInvalidParameter) + } + + if c.Config == nil { + return fmt.Errorf("config cannot be nil: %w", ErrInvalidParameter) + } + + if strings.TrimSpace(c.MountPath) == "" { + return fmt.Errorf("mount path cannot be empty: %w", ErrExternalOptions) + } + + // Validate actual config specific to Vault version (Enterprise/CE). + if err := c.validate(); err != nil { + return err + } + + return nil +} diff --git a/audit/backend_config_ce.go b/audit/backend_config_ce.go new file mode 100644 index 000000000000..2b3161214d0a --- /dev/null +++ b/audit/backend_config_ce.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +import "fmt" + +// validate ensures that if we're not running Vault Enterprise, we cannot +// supply Enterprise-only audit configuration options. +func (c *BackendConfig) validate() error { + if HasInvalidOptions(c.Config) { + return fmt.Errorf("enterprise-only options supplied: %w", ErrExternalOptions) + } + + return nil +} diff --git a/audit/backend_file.go b/audit/backend_file.go new file mode 100644 index 000000000000..b321c6617699 --- /dev/null +++ b/audit/backend_file.go @@ -0,0 +1,154 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/internal/observability/event" +) + +const ( + stdout = "stdout" + discard = "discard" + + optionFilePath = "file_path" + optionMode = "mode" +) + +var _ Backend = (*fileBackend)(nil) + +type fileBackend struct { + *backend +} + +// NewFileBackend provides a wrapper to support the expectation elsewhere in Vault that +// all audit backends can be created via a factory that returns an interface (Backend). +func NewFileBackend(conf *BackendConfig, headersConfig HeaderFormatter) (be Backend, err error) { + be, err = newFileBackend(conf, headersConfig) + return +} + +// newFileBackend creates a backend and configures all nodes including a file sink. +func newFileBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*fileBackend, error) { + if headersConfig == nil || reflect.ValueOf(headersConfig).IsNil() { + return nil, fmt.Errorf("nil header formatter: %w", ErrInvalidParameter) + } + if conf == nil { + return nil, fmt.Errorf("nil config: %w", ErrInvalidParameter) + } + if err := conf.Validate(); err != nil { + return nil, err + } + + // Get file path from config or fall back to the old option ('path') for compatibility + // (see commit bac4fe0799a372ba1245db642f3f6cd1f1d02669).
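Validate performs the shared checks and then hands off to validate, whose body differs per build: the //go:build !enterprise file above rejects Enterprise-only options, while the enterprise build supplies its own implementation. A simplified, single-file sketch of that gating (names mirror the real code, but the constant and sentinel here are stand-ins; real builds flip the flag via build tags rather than at runtime):

```
package main

import (
	"errors"
	"fmt"
)

// isEnterprise stands in for constants.IsEnterprise.
const isEnterprise = false

// errExternalOptions mirrors the sentinel wrapped by the real validate().
var errExternalOptions = errors.New("invalid configuration")

var enterpriseOnlyOptions = []string{"exclude", "fallback", "filter"}

// validateOptions rejects Enterprise-only options on non-Enterprise builds.
func validateOptions(opts map[string]string) error {
	if isEnterprise {
		return nil // enterprise builds accept these options
	}
	for _, k := range enterpriseOnlyOptions {
		if _, ok := opts[k]; ok {
			return fmt.Errorf("enterprise-only options supplied: %w", errExternalOptions)
		}
	}
	return nil
}

func main() {
	err := validateOptions(map[string]string{"filter": "mount_type == kv"})
	fmt.Println(err) // enterprise-only options supplied: invalid configuration
}
```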
+ var filePath string + if p, ok := conf.Config[optionFilePath]; ok { + filePath = p + } else if p, ok = conf.Config["path"]; ok { + filePath = p + } else { + return nil, fmt.Errorf("%q is required: %w", optionFilePath, ErrExternalOptions) + } + + bec, err := newBackend(headersConfig, conf) + if err != nil { + return nil, err + } + b := &fileBackend{backend: bec} + + // normalize file path if configured for stdout + if strings.EqualFold(filePath, stdout) { + filePath = stdout + } + if strings.EqualFold(filePath, discard) { + filePath = discard + } + + // Configure the sink. + cfg, err := newFormatterConfig(headersConfig, conf.Config) + if err != nil { + return nil, err + } + + sinkOpts := []event.Option{event.WithLogger(conf.Logger)} + if mode, ok := conf.Config[optionMode]; ok { + sinkOpts = append(sinkOpts, event.WithFileMode(mode)) + } + + err = b.configureSinkNode(conf.MountPath, filePath, cfg.requiredFormat, sinkOpts...) + if err != nil { + return nil, err + } + + return b, nil +} + +// configureSinkNode is used internally by fileBackend to create and configure the +// sink node on the backend. +func (b *fileBackend) configureSinkNode(name string, filePath string, format format, opt ...event.Option) error { + name = strings.TrimSpace(name) + if name == "" { + return fmt.Errorf("name is required: %w", ErrExternalOptions) + } + + filePath = strings.TrimSpace(filePath) + if filePath == "" { + return fmt.Errorf("file path is required: %w", ErrExternalOptions) + } + + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for sink node: %w: %w", ErrInternal, err) + } + + // normalize file path if configured for stdout or discard + if strings.EqualFold(filePath, stdout) { + filePath = stdout + } else if strings.EqualFold(filePath, discard) { + filePath = discard + } + + var sinkNode eventlogger.Node + var sinkName string + + switch filePath { + case stdout: + sinkName = stdout + sinkNode, err = event.NewStdoutSinkNode(format.String()) + case discard: + sinkName = discard + sinkNode = event.NewNoopSink() + default: + // The NewFileSink function attempts to open the file and will return an error if it can't. + sinkName = name + sinkNode, err = event.NewFileSink(filePath, format.String(), opt...) + } + if err != nil { + return fmt.Errorf("file sink creation failed for path %q: %w", filePath, err) + } + + // Wrap the sink node with metrics middleware + err = b.wrapMetrics(sinkName, sinkNodeID, sinkNode) + if err != nil { + return err + } + + return nil +} + +// Reload will trigger the reload action on the sink node for this backend. +func (b *fileBackend) Reload() error { + for _, n := range b.nodeMap { + if n.Type() == eventlogger.NodeTypeSink { + return n.Reopen() + } + } + + return nil +} diff --git a/audit/backend_file_ce_test.go b/audit/backend_file_ce_test.go new file mode 100644 index 000000000000..732fbc7eb41e --- /dev/null +++ b/audit/backend_file_ce_test.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestFileBackend_newFileBackend_fallback ensures that we get the correct errors +// in CE when we try to enable a fileBackend with enterprise options like fallback +// and filter. 
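Worth noting before the tests below: the file backend reserves the literals stdout and discard (in any letter case) as special sink destinations, and treats everything else as a real file path. A small self-contained sketch of that three-way selection:

```
package main

import (
	"fmt"
	"strings"
)

// pickSink normalizes the configured path and chooses a sink kind the way
// configureSinkNode does: the literals "stdout" and "discard" (any case) are
// special, anything else is treated as a file path.
func pickSink(filePath string) (kind, path string) {
	switch {
	case strings.EqualFold(filePath, "stdout"):
		return "stdout", "stdout"
	case strings.EqualFold(filePath, "discard"):
		return "discard", "discard"
	default:
		return "file", filePath
	}
}

func main() {
	for _, p := range []string{"STDOUT", "DisCArd", "/var/log/audit.log"} {
		k, n := pickSink(p)
		fmt.Printf("%-18s -> kind=%s path=%s\n", p, k, n)
	}
}
```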
+func TestFileBackend_newFileBackend_fallback(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + backendConfig *BackendConfig + isErrorExpected bool + expectedErrorMessage string + }{ + "non-fallback-device-with-filter": { + backendConfig: &BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "false", + "file_path": discard, + "filter": "mount_type == kv", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "enterprise-only options supplied: invalid configuration", + }, + "fallback-device-with-filter": { + backendConfig: &BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + "file_path": discard, + "filter": "mount_type == kv", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "enterprise-only options supplied: invalid configuration", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + be, err := newFileBackend(tc.backendConfig, &noopHeaderFormatter{}) + + if tc.isErrorExpected { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + } else { + require.NoError(t, err) + require.NotNil(t, be) + } + }) + } +} + +// TestFileBackend_newFileBackend_FilterFormatterSink ensures that when configuring +// a backend in community edition we cannot configure a filter node. +// We can verify that we have formatter and sink nodes added to the backend. +// The order of calls influences the slice of IDs on the Backend. +func TestFileBackend_newFileBackend_FilterFormatterSink(t *testing.T) { + t.Parallel() + + cfg := map[string]string{ + "file_path": "/tmp/foo", + "mode": "0777", + "format": "json", + "filter": "mount_type == \"kv\"", + } + + backendConfig := &BackendConfig{ + SaltView: &logical.InmemStorage{}, + SaltConfig: &salt.Config{}, + Config: cfg, + MountPath: "bar", + Logger: hclog.NewNullLogger(), + } + + b, err := newFileBackend(backendConfig, &noopHeaderFormatter{}) + require.Error(t, err) + require.EqualError(t, err, "enterprise-only options supplied: invalid configuration") + + // Try without filter option + delete(cfg, "filter") + b, err = newFileBackend(backendConfig, &noopHeaderFormatter{}) + require.NoError(t, err) + + require.Len(t, b.nodeIDList, 2) + require.Len(t, b.nodeMap, 2) + + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) + + id = b.nodeIDList[1] + node = b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) +} + +// TestBackend_IsFallback ensures that no CE audit device can be a fallback. 
+func TestBackend_IsFallback(t *testing.T) { + t.Parallel() + + cfg := &BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + "file_path": discard, + }, + } + + be, err := newFileBackend(cfg, &noopHeaderFormatter{}) + require.Error(t, err) + require.EqualError(t, err, "enterprise-only options supplied: invalid configuration") + + // Remove the option and try again + delete(cfg.Config, "fallback") + + be, err = newFileBackend(cfg, &noopHeaderFormatter{}) + require.NoError(t, err) + require.NotNil(t, be) + require.Equal(t, false, be.IsFallback()) +} diff --git a/audit/backend_file_test.go b/audit/backend_file_test.go new file mode 100644 index 000000000000..a69edc469d65 --- /dev/null +++ b/audit/backend_file_test.go @@ -0,0 +1,289 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "os" + "path/filepath" + "strconv" + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestAuditFile_fileModeNew verifies that the backend Factory correctly sets +// the file mode when the mode argument is set. +func TestAuditFile_fileModeNew(t *testing.T) { + t.Parallel() + + modeStr := "0777" + mode, err := strconv.ParseUint(modeStr, 8, 32) + require.NoError(t, err) + + file := filepath.Join(t.TempDir(), "auditTest.txt") + + backendConfig := &BackendConfig{ + Config: map[string]string{ + "path": file, + "mode": modeStr, + }, + MountPath: "foo/bar", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + } + _, err = newFileBackend(backendConfig, &noopHeaderFormatter{}) + require.NoError(t, err) + + info, err := os.Stat(file) + require.NoErrorf(t, err, "cannot retrieve file mode from `Stat`") + require.Equalf(t, os.FileMode(mode), info.Mode(), "File mode does not match.") +} + +// TestAuditFile_fileModeExisting verifies that the backend Factory correctly sets +// the mode on an existing file. +func TestAuditFile_fileModeExisting(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + f, err := os.CreateTemp(dir, "auditTest.log") + require.NoErrorf(t, err, "Failure to create test file.") + + err = os.Chmod(f.Name(), 0o777) + require.NoErrorf(t, err, "Failure to chmod temp file for testing.") + + err = f.Close() + require.NoErrorf(t, err, "Failure to close temp file for test.") + + backendConfig := &BackendConfig{ + Config: map[string]string{ + "path": f.Name(), + }, + MountPath: "foo/bar", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + } + + _, err = newFileBackend(backendConfig, &noopHeaderFormatter{}) + require.NoError(t, err) + + info, err := os.Stat(f.Name()) + require.NoErrorf(t, err, "cannot retrieve file mode from `Stat`") + require.Equalf(t, os.FileMode(0o600), info.Mode(), "File mode does not match.") +} + +// TestAuditFile_fileMode0000 verifies that setting the audit file mode to +// "0000" prevents Vault from modifying the permissions of the file. 
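The mode-related tests in this file rely on octal string parsing: strconv.ParseUint with base 8 converts the mode option into an os.FileMode, and "0000" acts as a sentinel meaning "leave the existing file's permissions untouched". A sketch of that conversion, using a hypothetical helper name:

```
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseFileMode converts an octal string such as "0777" into an os.FileMode.
// "0000" is still a valid parse result; the file backend interprets it as
// "do not modify the existing file's permissions".
func parseFileMode(s string) (os.FileMode, error) {
	m, err := strconv.ParseUint(s, 8, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid mode %q: %w", s, err)
	}
	return os.FileMode(m), nil
}

func main() {
	for _, s := range []string{"0777", "0600", "0000"} {
		m, err := parseFileMode(s)
		fmt.Printf("%s -> %v (err=%v)\n", s, m, err)
	}
}
```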
+func TestAuditFile_fileMode0000(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + f, err := os.CreateTemp(dir, "auditTest.log") + require.NoErrorf(t, err, "Failure to create test file.") + + err = os.Chmod(f.Name(), 0o777) + require.NoErrorf(t, err, "Failure to chmod temp file for testing.") + + err = f.Close() + require.NoErrorf(t, err, "Failure to close temp file for test.") + + backendConfig := &BackendConfig{ + Config: map[string]string{ + "path": f.Name(), + "mode": "0000", + }, + MountPath: "foo/bar", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + } + + _, err = newFileBackend(backendConfig, &noopHeaderFormatter{}) + require.NoError(t, err) + + info, err := os.Stat(f.Name()) + require.NoErrorf(t, err, "cannot retrieve file mode from `Stat`. The error is %v", err) + require.Equalf(t, os.FileMode(0o777), info.Mode(), "File mode does not match.") +} + +// TestAuditFile_EventLogger_fileModeNew verifies that newFileBackend correctly +// sets the file mode when the mode option is supplied in the configuration. +func TestAuditFile_EventLogger_fileModeNew(t *testing.T) { + modeStr := "0777" + mode, err := strconv.ParseUint(modeStr, 8, 32) + require.NoError(t, err) + + file := filepath.Join(t.TempDir(), "auditTest.txt") + + backendConfig := &BackendConfig{ + Config: map[string]string{ + "file_path": file, + "mode": modeStr, + }, + MountPath: "foo/bar", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + } + + _, err = newFileBackend(backendConfig, &noopHeaderFormatter{}) + require.NoError(t, err) + + info, err := os.Stat(file) + require.NoError(t, err) + require.Equalf(t, os.FileMode(mode), info.Mode(), "File mode does not match.") +} + +// TestFileBackend_newFileBackend ensures that we can correctly configure the sink +// node on the Backend, and any incorrect parameters result in the relevant errors.
+func TestFileBackend_newFileBackend(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + mountPath string + filePath string + mode string + format string + wantErr bool + expectedErrMsg string + expectedName string + }{ + "name-empty": { + mountPath: "", + format: "json", + wantErr: true, + expectedErrMsg: "mount path cannot be empty: invalid configuration", + }, + "name-whitespace": { + mountPath: " ", + format: "json", + wantErr: true, + expectedErrMsg: "mount path cannot be empty: invalid configuration", + }, + "filePath-empty": { + mountPath: "foo", + filePath: "", + format: "json", + wantErr: true, + expectedErrMsg: "file path is required: invalid configuration", + }, + "filePath-whitespace": { + mountPath: "foo", + filePath: " ", + format: "json", + wantErr: true, + expectedErrMsg: "file path is required: invalid configuration", + }, + "filePath-stdout-lower": { + mountPath: "foo", + expectedName: "stdout", + filePath: "stdout", + format: "json", + }, + "filePath-stdout-upper": { + mountPath: "foo", + expectedName: "stdout", + filePath: "STDOUT", + format: "json", + }, + "filePath-stdout-mixed": { + mountPath: "foo", + expectedName: "stdout", + filePath: "StdOut", + format: "json", + }, + "filePath-discard-lower": { + mountPath: "foo", + expectedName: "discard", + filePath: "discard", + format: "json", + }, + "filePath-discard-upper": { + mountPath: "foo", + expectedName: "discard", + filePath: "DISCARD", + format: "json", + }, + "filePath-discard-mixed": { + mountPath: "foo", + expectedName: "discard", + filePath: "DisCArd", + format: "json", + }, + "format-empty": { + mountPath: "foo", + filePath: "/tmp/", + format: "", + wantErr: true, + expectedErrMsg: "unsupported \"format\": invalid configuration", + }, + "format-whitespace": { + mountPath: "foo", + filePath: "/tmp/", + format: " ", + wantErr: true, + expectedErrMsg: "unsupported \"format\": invalid configuration", + }, + "filePath-weird-with-mode-zero": { + mountPath: "foo", + filePath: "/tmp/qwerty", + format: "json", + mode: "0", + wantErr: true, + expectedErrMsg: "file sink creation failed for path \"/tmp/qwerty\": unable to determine existing file mode: stat /tmp/qwerty: no such file or directory", + }, + "happy": { + mountPath: "foo", + filePath: "/tmp/log", + mode: "", + format: "json", + wantErr: false, + expectedName: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + cfg := &BackendConfig{ + SaltView: &logical.InmemStorage{}, + SaltConfig: &salt.Config{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "file_path": tc.filePath, + "mode": tc.mode, + "format": tc.format, + }, + MountPath: tc.mountPath, + } + b, err := newFileBackend(cfg, &noopHeaderFormatter{}) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Nil(t, b) + } else { + require.NoError(t, err) + require.Len(t, b.nodeIDList, 2) // Expect formatter + the sink + require.Len(t, b.nodeMap, 2) + id := b.nodeIDList[1] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) + mc, ok := node.(*event.MetricsCounter) + require.True(t, ok) + require.Equal(t, tc.expectedName, mc.Name) + } + }) + } +} diff --git a/audit/backend_noop.go b/audit/backend_noop.go new file mode 100644 index 000000000000..b6d9e195b1c3 --- /dev/null +++ b/audit/backend_noop.go @@ -0,0 +1,358 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + _ Backend = (*NoopAudit)(nil) + _ eventlogger.Node = (*noopWrapper)(nil) +) + +// noopWrapper is designed to wrap a formatter node in order to allow access to +// bytes formatted, headers formatted and parts of the logical.LogInput. +// Some older tests relied on being able to query this information, so while those +// tests stick around we should look after them. +type noopWrapper struct { + format string + node eventlogger.Node + backend *NoopAudit +} + +// SetListener provides a callback func to the NoopAudit which can be invoked +// during processing of the Event. +// +// Deprecated: SetListener should not be used in new tests. +func (n *NoopAudit) SetListener(listener func(event *Event)) { + n.listener = listener +} + +// NoopAudit only exists to allow legacy tests to continue working. +// +// Deprecated: NoopAudit should not be used in new tests. +type NoopAudit struct { + Config *BackendConfig + + ReqErr error + ReqAuth []*logical.Auth + Req []*logical.Request + ReqHeaders []map[string][]string + ReqNonHMACKeys []string + ReqErrs []error + + RespErr error + RespAuth []*logical.Auth + RespReq []*logical.Request + Resp []*logical.Response + RespNonHMACKeys [][]string + RespReqNonHMACKeys [][]string + RespErrs []error + records [][]byte + l sync.RWMutex + salt *salt.Salt + saltMutex sync.RWMutex + + nodeIDList []eventlogger.NodeID + nodeMap map[eventlogger.NodeID]eventlogger.Node + + listener func(event *Event) +} + +// noopHeaderFormatter can be used within no-op audit devices to do nothing when +// it comes to only allowing configured headers to appear in the result. +// Whatever is passed in will be returned (nil becomes an empty map) in lowercase. +type noopHeaderFormatter struct{} + +// ApplyConfig implements the relevant interface to make noopHeaderFormatter a HeaderFormatter. +func (f *noopHeaderFormatter) ApplyConfig(_ context.Context, headers map[string][]string, _ Salter) (result map[string][]string, retErr error) { + if len(headers) < 1 { + return map[string][]string{}, nil + } + + // Make a copy of the incoming headers with everything lower so we can + // case-insensitively compare + lowerHeaders := make(map[string][]string, len(headers)) + for k, v := range headers { + lowerHeaders[strings.ToLower(k)] = v + } + + return lowerHeaders, nil +} + +// NewNoopAudit should be used to create a NoopAudit as it handles creation of a +// predictable salt and wraps eventlogger nodes so information can be retrieved on +// what they've seen or formatted. +// +// Deprecated: NewNoopAudit only exists to allow legacy tests to continue working. +func NewNoopAudit(config *BackendConfig) (*NoopAudit, error) { + view := &logical.InmemStorage{} + + // Create the salt with a known key for predictable hmac values. + se := &logical.StorageEntry{Key: "salt", Value: []byte("foo")} + err := view.Put(context.Background(), se) + if err != nil { + return nil, err + } + + // Override the salt related config settings.
+ backendConfig := &BackendConfig{ + SaltView: view, + SaltConfig: &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }, + Config: config.Config, + MountPath: config.MountPath, + } + + noopBackend := &NoopAudit{ + Config: backendConfig, + nodeIDList: make([]eventlogger.NodeID, 2), + nodeMap: make(map[eventlogger.NodeID]eventlogger.Node, 2), + } + + cfg, err := newFormatterConfig(&noopHeaderFormatter{}, nil) + if err != nil { + return nil, err + } + + formatterNodeID, err := event.GenerateNodeID() + if err != nil { + return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err) + } + + formatterNode, err := newEntryFormatter(config.MountPath, cfg, noopBackend, config.Logger) + if err != nil { + return nil, fmt.Errorf("error creating formatter: %w", err) + } + + // Wrap the formatting node, so we can get any bytes that were formatted etc. + wrappedFormatter := &noopWrapper{format: "json", node: formatterNode, backend: noopBackend} + + noopBackend.nodeIDList[0] = formatterNodeID + noopBackend.nodeMap[formatterNodeID] = wrappedFormatter + + sinkNode := event.NewNoopSink() + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err) + } + + noopBackend.nodeIDList[1] = sinkNodeID + noopBackend.nodeMap[sinkNodeID] = sinkNode + + return noopBackend, nil +} + +// NoopAuditFactory should be used when the test needs a way to access bytes that +// have been formatted by the pipeline during audit requests. +// The records parameter will be repointed to the one used within the pipeline. +// +// Deprecated: NoopAuditFactory only exists to allow legacy tests to continue working. +func NoopAuditFactory(records **[][]byte) Factory { + return func(config *BackendConfig, _ HeaderFormatter) (Backend, error) { + n, err := NewNoopAudit(config) + if err != nil { + return nil, err + } + if records != nil { + *records = &n.records + } + + return n, nil + } +} + +// Process handles the contortions required by older test code to ensure behavior. +// It will attempt to do some pre/post processing of the logical.LogInput that should +// form part of the event's payload data, as well as capturing the resulting headers +// that were formatted and track the overall bytes that a formatted event uses when +// it's ready to head down the pipeline to the sink node (a noop for us). +func (n *noopWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + n.backend.l.Lock() + defer n.backend.l.Unlock() + + var err error + + // We're expecting audit events since this is an audit device. + a, ok := e.Payload.(*Event) + if !ok { + return nil, errors.New("cannot parse payload as an audit event") + } + + if n.backend.listener != nil { + n.backend.listener(a) + } + + in := a.Data + + // Depending on the type of the audit event (request or response) we need to + // track different things. 
+ switch a.Subtype { + case RequestType: + n.backend.ReqAuth = append(n.backend.ReqAuth, in.Auth) + n.backend.Req = append(n.backend.Req, in.Request) + n.backend.ReqNonHMACKeys = in.NonHMACReqDataKeys + n.backend.ReqErrs = append(n.backend.ReqErrs, in.OuterErr) + + if n.backend.ReqErr != nil { + return nil, n.backend.ReqErr + } + case ResponseType: + n.backend.RespAuth = append(n.backend.RespAuth, in.Auth) + n.backend.RespReq = append(n.backend.RespReq, in.Request) + n.backend.Resp = append(n.backend.Resp, in.Response) + n.backend.RespErrs = append(n.backend.RespErrs, in.OuterErr) + + if in.Response != nil { + n.backend.RespNonHMACKeys = append(n.backend.RespNonHMACKeys, in.NonHMACRespDataKeys) + n.backend.RespReqNonHMACKeys = append(n.backend.RespReqNonHMACKeys, in.NonHMACReqDataKeys) + } + + if n.backend.RespErr != nil { + return nil, n.backend.RespErr + } + default: + return nil, fmt.Errorf("unknown audit event type: %q", a.Subtype) + } + + // Once we've taken note of the relevant properties of the event, we get the + // underlying (wrapped) node to process it as normal. + e, err = n.node.Process(ctx, e) + if err != nil { + return nil, fmt.Errorf("error processing wrapped node: %w", err) + } + + // Once processing has been carried out, the underlying node (a formatter node) + // should contain the output ready for the sink node. We'll get that in order + // to track how many bytes we formatted. + b, ok := e.Format(n.format) + if ok { + n.backend.records = append(n.backend.records, b) + } + + // Finally, the last bit of post-processing is to make sure that we track the + // formatted headers that would have made it to the logs via the sink node. + // They only appear in requests. + if a.Subtype == RequestType { + reqEntry := &entry{} + err = json.Unmarshal(b, &reqEntry) + if err != nil { + return nil, fmt.Errorf("unable to parse formatted audit entry data: %w", err) + } + + n.backend.ReqHeaders = append(n.backend.ReqHeaders, reqEntry.Request.Headers) + } + + // Return the event and no error in order to let the pipeline continue on. + return e, nil +} + +func (n *noopWrapper) Reopen() error { + return n.node.Reopen() +} + +func (n *noopWrapper) Type() eventlogger.NodeType { + return n.node.Type() +} + +// LogTestMessage will manually crank the handle on the nodes associated with this backend. 
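noopWrapper is a decorator: it satisfies eventlogger.Node by delegating to the wrapped formatter node while recording what flowed through, which is how these legacy test helpers expose formatted bytes. A stripped-down sketch of that shape, with a local interface standing in for eventlogger.Node:

```
package main

import (
	"context"
	"fmt"
)

// node mirrors the subset of eventlogger.Node this sketch needs.
type node interface {
	Process(ctx context.Context, payload []byte) ([]byte, error)
}

// recorder wraps another node and keeps a copy of every processed payload,
// the same way the noop audit device exposes formatted bytes to tests.
type recorder struct {
	inner   node
	records [][]byte
}

func (r *recorder) Process(ctx context.Context, payload []byte) ([]byte, error) {
	out, err := r.inner.Process(ctx, payload)
	if err != nil {
		return nil, fmt.Errorf("error processing wrapped node: %w", err)
	}
	r.records = append(r.records, out)
	return out, nil
}

// exclaim is a trivial stand-in for a formatter node.
type exclaim struct{}

func (exclaim) Process(_ context.Context, p []byte) ([]byte, error) {
	return []byte(string(p) + "!"), nil
}

func main() {
	r := &recorder{inner: exclaim{}}
	_, _ = r.Process(context.Background(), []byte("audit-entry"))
	fmt.Printf("%d record(s): %s\n", len(r.records), r.records[0])
}
```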
+func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput) error { + if len(n.nodeIDList) > 0 { + return processManual(ctx, in, n.nodeIDList, n.nodeMap) + } + + return nil +} + +func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + s, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = s + return s, nil +} + +func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) { + s, err := n.Salt(ctx) + if err != nil { + return "", err + } + return s.GetIdentifiedHMAC(data), nil +} + +func (n *NoopAudit) Reload() error { + return nil +} + +func (n *NoopAudit) Invalidate(_ context.Context) { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} + +func (n *NoopAudit) EventType() eventlogger.EventType { + return event.AuditType.AsEventType() +} + +func (n *NoopAudit) HasFiltering() bool { + return false +} + +func (n *NoopAudit) Name() string { + return n.Config.MountPath +} + +func (n *NoopAudit) Nodes() map[eventlogger.NodeID]eventlogger.Node { + return n.nodeMap +} + +func (n *NoopAudit) NodeIDs() []eventlogger.NodeID { + return n.nodeIDList +} + +func (n *NoopAudit) IsFallback() bool { + return false +} + +// Deprecated: TestNoopAudit only exists to allow legacy tests to continue working. +func TestNoopAudit(t *testing.T, path string, config map[string]string) *NoopAudit { + cfg := &BackendConfig{ + Config: config, + MountPath: path, + Logger: corehelpers.NewTestLogger(t), + } + n, err := NewNoopAudit(cfg) + if err != nil { + t.Fatal(err) + } + return n +} diff --git a/audit/backend_socket.go b/audit/backend_socket.go new file mode 100644 index 000000000000..7f5d396a927c --- /dev/null +++ b/audit/backend_socket.go @@ -0,0 +1,126 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/internal/observability/event" +) + +const ( + optionAddress = "address" + optionSocketType = "socket_type" + optionWriteTimeout = "write_timeout" +) + +var _ Backend = (*socketBackend)(nil) + +type socketBackend struct { + *backend +} + +// NewSocketBackend provides a means to create socket backend audit devices that +// satisfy the Factory pattern expected elsewhere in Vault. +func NewSocketBackend(conf *BackendConfig, headersConfig HeaderFormatter) (be Backend, err error) { + be, err = newSocketBackend(conf, headersConfig) + return +} + +// newSocketBackend creates a backend and configures all nodes including a socket sink. 
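The socket backend below defaults socket_type to "tcp" and write_timeout to "2s" when those keys are absent. The lookup-with-default idiom it repeats, extracted into a hypothetical helper:

```
package main

import "fmt"

// getOrDefault returns the configured value for key, or def when the key is
// absent. An explicitly configured empty string is returned as-is, matching
// how newSocketBackend only applies defaults on a missing key.
func getOrDefault(cfg map[string]string, key, def string) string {
	if v, ok := cfg[key]; ok {
		return v
	}
	return def
}

func main() {
	cfg := map[string]string{"address": "127.0.0.1:9090"}
	fmt.Println(getOrDefault(cfg, "socket_type", "tcp"))   // tcp
	fmt.Println(getOrDefault(cfg, "write_timeout", "2s"))  // 2s
	fmt.Println(getOrDefault(cfg, "address", "localhost")) // 127.0.0.1:9090
}
```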
+func newSocketBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*socketBackend, error) { + if headersConfig == nil || reflect.ValueOf(headersConfig).IsNil() { + return nil, fmt.Errorf("nil header formatter: %w", ErrInvalidParameter) + } + if conf == nil { + return nil, fmt.Errorf("nil config: %w", ErrInvalidParameter) + } + if err := conf.Validate(); err != nil { + return nil, err + } + + bec, err := newBackend(headersConfig, conf) + if err != nil { + return nil, err + } + + address, ok := conf.Config[optionAddress] + if !ok { + return nil, fmt.Errorf("%q is required: %w", optionAddress, ErrExternalOptions) + } + address = strings.TrimSpace(address) + if address == "" { + return nil, fmt.Errorf("%q cannot be empty: %w", optionAddress, ErrExternalOptions) + } + + socketType, ok := conf.Config[optionSocketType] + if !ok { + socketType = "tcp" + } + + writeDeadline, ok := conf.Config[optionWriteTimeout] + if !ok { + writeDeadline = "2s" + } + + sinkOpts := []event.Option{ + event.WithSocketType(socketType), + event.WithMaxDuration(writeDeadline), + event.WithLogger(conf.Logger), + } + + err = event.ValidateOptions(sinkOpts...) + if err != nil { + return nil, err + } + + b := &socketBackend{backend: bec} + + // Configure the sink. + cfg, err := newFormatterConfig(headersConfig, conf.Config) + if err != nil { + return nil, err + } + + err = b.configureSinkNode(conf.MountPath, address, cfg.requiredFormat, sinkOpts...) + if err != nil { + return nil, err + } + + return b, nil +} + +func (b *socketBackend) configureSinkNode(name string, address string, format format, opts ...event.Option) error { + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for sink node: %w", err) + } + + n, err := event.NewSocketSink(address, format.String(), opts...) + if err != nil { + return err + } + + // Wrap the sink node with metrics middleware + err = b.wrapMetrics(name, sinkNodeID, n) + if err != nil { + return err + } + + return nil +} + +// Reload will trigger the reload action on the sink node for this backend. +func (b *socketBackend) Reload() error { + for _, n := range b.nodeMap { + if n.Type() == eventlogger.NodeTypeSink { + return n.Reopen() + } + } + + return nil +} diff --git a/audit/backend_socket_test.go b/audit/backend_socket_test.go new file mode 100644 index 000000000000..f8386cc507d8 --- /dev/null +++ b/audit/backend_socket_test.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestSocketBackend_newSocketBackend ensures that we can correctly configure the sink +// node on the Backend, and any incorrect parameters result in the relevant errors. 
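The sinkOpts slice above uses functional options (event.WithSocketType, event.WithMaxDuration, event.WithLogger) that are validated before any sink is constructed. A minimal sketch of that pattern with hypothetical option names; note how a bad duration surfaces the same kind of error the test below asserts:

```
package main

import (
	"errors"
	"fmt"
	"time"
)

type sinkConfig struct {
	socketType string
	maxWait    time.Duration
}

// option mutates a sinkConfig and may reject bad input, mirroring how
// event.Option values can be validated before a sink is built.
type option func(*sinkConfig) error

func withSocketType(t string) option {
	return func(c *sinkConfig) error { c.socketType = t; return nil }
}

func withMaxDuration(raw string) option {
	return func(c *sinkConfig) error {
		d, err := time.ParseDuration(raw)
		if err != nil {
			return fmt.Errorf("unable to parse max duration: %w", err)
		}
		c.maxWait = d
		return nil
	}
}

// newSinkConfig applies options over defaults and joins any option errors.
func newSinkConfig(opts ...option) (*sinkConfig, error) {
	cfg := &sinkConfig{socketType: "tcp", maxWait: 2 * time.Second}
	var errs []error
	for _, o := range opts {
		errs = append(errs, o(cfg))
	}
	if err := errors.Join(errs...); err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	_, err := newSinkConfig(withMaxDuration("qwerty"))
	fmt.Println(err) // unable to parse max duration: time: invalid duration "qwerty"
}
```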
+func TestSocketBackend_newSocketBackend(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + mountPath string + address string + socketType string + writeDuration string + format string + wantErr bool + expectedErrMsg string + expectedName string + }{ + "name-empty": { + mountPath: "", + address: "wss://foo", + format: "json", + wantErr: true, + expectedErrMsg: "mount path cannot be empty: invalid configuration", + }, + "name-whitespace": { + mountPath: " ", + address: "wss://foo", + format: "json", + wantErr: true, + expectedErrMsg: "mount path cannot be empty: invalid configuration", + }, + "address-empty": { + mountPath: "foo", + address: "", + format: "json", + wantErr: true, + expectedErrMsg: "\"address\" cannot be empty: invalid configuration", + }, + "address-whitespace": { + mountPath: "foo", + address: " ", + format: "json", + wantErr: true, + expectedErrMsg: "\"address\" cannot be empty: invalid configuration", + }, + "format-empty": { + mountPath: "foo", + address: "wss://foo", + format: "", + wantErr: true, + expectedErrMsg: "unsupported \"format\": invalid configuration", + }, + "format-whitespace": { + mountPath: "foo", + address: "wss://foo", + format: " ", + wantErr: true, + expectedErrMsg: "unsupported \"format\": invalid configuration", + }, + "write-duration-valid": { + mountPath: "foo", + address: "wss://foo", + writeDuration: "5s", + format: "json", + wantErr: false, + expectedName: "foo", + }, + "write-duration-not-valid": { + mountPath: "foo", + address: "wss://foo", + writeDuration: "qwerty", + format: "json", + wantErr: true, + expectedErrMsg: "unable to parse max duration: invalid parameter: time: invalid duration \"qwerty\"", + }, + "happy": { + mountPath: "foo", + address: "wss://foo", + format: "json", + wantErr: false, + expectedName: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + cfg := &BackendConfig{ + SaltView: &logical.InmemStorage{}, + SaltConfig: &salt.Config{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": tc.address, + "format": tc.format, + "write_timeout": tc.writeDuration, + "socket": tc.socketType, + }, + MountPath: tc.mountPath, + } + b, err := newSocketBackend(cfg, &noopHeaderFormatter{}) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Nil(t, b) + } else { + require.NoError(t, err) + require.Len(t, b.nodeIDList, 2) // formatter + sink + require.Len(t, b.nodeMap, 2) + id := b.nodeIDList[1] // sink is 2nd + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) + mc, ok := node.(*event.MetricsCounter) + require.True(t, ok) + require.Equal(t, tc.expectedName, mc.Name) + } + }) + } +} diff --git a/audit/backend_syslog.go b/audit/backend_syslog.go new file mode 100644 index 000000000000..b0fd90efa9cb --- /dev/null +++ b/audit/backend_syslog.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/vault/internal/observability/event" +) + +const ( + optionFacility = "facility" + optionTag = "tag" +) + +var _ Backend = (*syslogBackend)(nil) + +type syslogBackend struct { + *backend +} + +// NewSyslogBackend provides a wrapper to support the expectation elsewhere in Vault that +// all audit backends can be created via a factory that returns an interface (Backend). 
+func NewSyslogBackend(conf *BackendConfig, headersConfig HeaderFormatter) (be Backend, err error) { + be, err = newSyslogBackend(conf, headersConfig) + return +} + +// newSyslogBackend creates a backend and configures all nodes including a syslog sink. +func newSyslogBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*syslogBackend, error) { + if headersConfig == nil || reflect.ValueOf(headersConfig).IsNil() { + return nil, fmt.Errorf("nil header formatter: %w", ErrInvalidParameter) + } + if conf == nil { + return nil, fmt.Errorf("nil config: %w", ErrInvalidParameter) + } + if err := conf.Validate(); err != nil { + return nil, err + } + + bec, err := newBackend(headersConfig, conf) + if err != nil { + return nil, err + } + + // Get facility or default to AUTH + facility, ok := conf.Config[optionFacility] + if !ok { + facility = "AUTH" + } + + // Get tag or default to 'vault' + tag, ok := conf.Config[optionTag] + if !ok { + tag = "vault" + } + + sinkOpts := []event.Option{ + event.WithFacility(facility), + event.WithTag(tag), + event.WithLogger(conf.Logger), + } + + err = event.ValidateOptions(sinkOpts...) + if err != nil { + return nil, err + } + + b := &syslogBackend{backend: bec} + + // Configure the sink. + cfg, err := newFormatterConfig(headersConfig, conf.Config) + if err != nil { + return nil, err + } + + err = b.configureSinkNode(conf.MountPath, cfg.requiredFormat, sinkOpts...) + if err != nil { + return nil, err + } + + return b, nil +} + +func (b *syslogBackend) configureSinkNode(name string, format format, opts ...event.Option) error { + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for sink node: %w: %w", ErrInternal, err) + } + + n, err := event.NewSyslogSink(format.String(), opts...) + if err != nil { + return fmt.Errorf("error creating syslog sink node: %w", err) + } + + err = b.wrapMetrics(name, sinkNodeID, n) + if err != nil { + return err + } + + return nil +} + +// Reload is a no-op for the syslog backend, as there is no sink to reopen. +func (b *syslogBackend) Reload() error { + return nil +} diff --git a/audit/backend_syslog_test.go b/audit/backend_syslog_test.go new file mode 100644 index 000000000000..f81c28104947 --- /dev/null +++ b/audit/backend_syslog_test.go @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestSyslogBackend_newSyslogBackend tests the ways, both good and bad, that we +// can try to create a new SyslogBackend.
+func TestSyslogBackend_newSyslogBackend(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + mountPath string + format string + tag string + facility string + wantErr bool + expectedErrMsg string + expectedName string + }{ + "name-empty": { + mountPath: "", + wantErr: true, + expectedErrMsg: "mount path cannot be empty: invalid configuration", + }, + "name-whitespace": { + mountPath: " ", + wantErr: true, + expectedErrMsg: "mount path cannot be empty: invalid configuration", + }, + "format-empty": { + mountPath: "foo", + format: "", + wantErr: true, + expectedErrMsg: "unsupported \"format\": invalid configuration", + }, + "format-whitespace": { + mountPath: "foo", + format: " ", + wantErr: true, + expectedErrMsg: "unsupported \"format\": invalid configuration", + }, + "happy": { + mountPath: "foo", + format: "json", + wantErr: false, + expectedName: "foo", + }, + "happy-tag": { + mountPath: "foo", + format: "json", + tag: "beep", + wantErr: false, + expectedName: "foo", + }, + "happy-facility": { + mountPath: "foo", + format: "json", + facility: "daemon", + wantErr: false, + expectedName: "foo", + }, + "happy-all": { + mountPath: "foo", + format: "json", + tag: "beep", + facility: "daemon", + wantErr: false, + expectedName: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + cfg := &BackendConfig{ + SaltView: &logical.InmemStorage{}, + SaltConfig: &salt.Config{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "tag": tc.tag, + "facility": tc.facility, + "format": tc.format, + }, + MountPath: tc.mountPath, + } + b, err := newSyslogBackend(cfg, &noopHeaderFormatter{}) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Nil(t, b) + } else { + require.NoError(t, err) + require.Len(t, b.nodeIDList, 2) + require.Len(t, b.nodeMap, 2) + id := b.nodeIDList[1] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) + mc, ok := node.(*event.MetricsCounter) + require.True(t, ok) + require.Equal(t, tc.expectedName, mc.Name) + } + }) + } +} diff --git a/audit/backend_test.go b/audit/backend_test.go new file mode 100644 index 000000000000..000b827f6cc7 --- /dev/null +++ b/audit/backend_test.go @@ -0,0 +1,272 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/constants" + "github.com/stretchr/testify/require" +) + +// TestBackend_newFormatterConfig ensures that all the configuration values are +// parsed correctly when trying to create a new formatterConfig via newFormatterConfig. 
+func TestBackend_newFormatterConfig(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config map[string]string + want formatterConfig + wantErr bool + expectedMessage string + }{ + "happy-path-json": { + config: map[string]string{ + "format": jsonFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: formatterConfig{ + raw: true, + hmacAccessor: true, + elideListResponses: true, + requiredFormat: "json", + }, wantErr: false, + }, + "happy-path-jsonx": { + config: map[string]string{ + "format": jsonxFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: formatterConfig{ + raw: true, + hmacAccessor: true, + elideListResponses: true, + requiredFormat: "jsonx", + }, + wantErr: false, + }, + "invalid-format": { + config: map[string]string{ + "format": " squiggly ", + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: formatterConfig{}, + wantErr: true, + expectedMessage: "unsupported \"format\": invalid configuration", + }, + "invalid-hmac-accessor": { + config: map[string]string{ + "format": jsonFormat.String(), + "hmac_accessor": "maybe", + }, + want: formatterConfig{}, + wantErr: true, + expectedMessage: "unable to parse \"hmac_accessor\": invalid configuration", + }, + "invalid-log-raw": { + config: map[string]string{ + "format": jsonFormat.String(), + "hmac_accessor": "true", + "log_raw": "maybe", + }, + want: formatterConfig{}, + wantErr: true, + expectedMessage: "unable to parse \"log_raw\": invalid configuration", + }, + "invalid-elide-bool": { + config: map[string]string{ + "format": jsonFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "maybe", + }, + want: formatterConfig{}, + wantErr: true, + expectedMessage: "unable to parse \"elide_list_responses\": invalid configuration", + }, + "prefix": { + config: map[string]string{ + "format": jsonFormat.String(), + "prefix": "foo", + }, + want: formatterConfig{ + requiredFormat: jsonFormat, + prefix: "foo", + hmacAccessor: true, + }, + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := newFormatterConfig(&noopHeaderFormatter{}, tc.config) + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedMessage) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.want.requiredFormat, got.requiredFormat) + require.Equal(t, tc.want.raw, got.raw) + require.Equal(t, tc.want.elideListResponses, got.elideListResponses) + require.Equal(t, tc.want.hmacAccessor, got.hmacAccessor) + require.Equal(t, tc.want.omitTime, got.omitTime) + require.Equal(t, tc.want.prefix, got.prefix) + }) + } +} + +// TestBackend_configureFormatterNode ensures that configureFormatterNode +// populates the nodeIDList and nodeMap on backend when given valid config. +func TestBackend_configureFormatterNode(t *testing.T) { + t.Parallel() + + b, err := newBackend(&noopHeaderFormatter{}, &BackendConfig{ + MountPath: "foo", + Logger: hclog.NewNullLogger(), + }) + require.NoError(t, err) + + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) +} + +// TestBackend_hasEnterpriseAuditOptions checks that the existence of any Enterprise +// only options in the options which can be supplied to enable an audit device can +// be flagged. 
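Several cases in this table (invalid-hmac-accessor, invalid-log-raw, invalid-elide-bool) exercise the same failure mode: a non-boolean string handed to strconv.ParseBool, with the parse error replaced by a configuration sentinel. A sketch of that parse-and-wrap step (the helper name is hypothetical; the real logic lives inside newFormatterConfig):

```
package main

import (
	"errors"
	"fmt"
	"strconv"
)

var errConfiguration = errors.New("invalid configuration")

// parseBoolOption parses an optional boolean option, returning def when the
// option is absent and a configuration error when the value is not a bool.
func parseBoolOption(cfg map[string]string, key string, def bool) (bool, error) {
	raw, ok := cfg[key]
	if !ok {
		return def, nil
	}
	v, err := strconv.ParseBool(raw)
	if err != nil {
		return false, fmt.Errorf("unable to parse %q: %w", key, errConfiguration)
	}
	return v, nil
}

func main() {
	cfg := map[string]string{"hmac_accessor": "maybe"}
	_, err := parseBoolOption(cfg, "hmac_accessor", true)
	fmt.Println(err) // unable to parse "hmac_accessor": invalid configuration
}
```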
+func TestBackend_hasEnterpriseAuditOptions(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input map[string]string + expected bool + }{ + "nil": { + expected: false, + }, + "empty": { + input: make(map[string]string), + expected: false, + }, + "non-ent-opts": { + input: map[string]string{ + "log_raw": "true", + }, + expected: false, + }, + "ent-opt-filter": { + input: map[string]string{ + "filter": "mount_type == kv", + }, + expected: true, + }, + "ent-opt-fallback": { + input: map[string]string{ + "fallback": "true", + }, + expected: true, + }, + "ent-opt-filter-and-fallback": { + input: map[string]string{ + "filter": "mount_type == kv", + "fallback": "true", + }, + expected: true, + }, + "ent-opt-exclude": { + input: map[string]string{ + "exclude": `{ + "condition": "\"/request/mount_type\" == transit", + "fields": [ "/request/data", "/response/data" ] + }`, + }, + expected: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + require.Equal(t, tc.expected, hasEnterpriseAuditOptions(tc.input)) + }) + } +} + +// TestBackend_hasInvalidAuditOptions tests that depending on whether we are running +// an Enterprise or non-Enterprise version of Vault, the options supplied to enable +// an audit device may or may not be valid. +// NOTE: In the non-Enterprise version of Vault supplying audit options such as +// 'filter' or 'fallback' is not allowed. +func TestBackend_hasInvalidAuditOptions(t *testing.T) { + tests := map[string]struct { + input map[string]string + expected bool + }{ + "non-ent-opts": { + input: map[string]string{ + "log_raw": "true", + }, + expected: false, + }, + "ent-opt": { + input: map[string]string{ + "filter": "mount_type == kv", + }, + expected: !constants.IsEnterprise, + }, + "ent-opt-filter": { + input: map[string]string{ + "filter": "mount_type == kv", + }, + expected: !constants.IsEnterprise, + }, + "ent-opt-fallback": { + input: map[string]string{ + "fallback": "true", + }, + expected: !constants.IsEnterprise, + }, + "ent-opt-filter-and-fallback": { + input: map[string]string{ + "filter": "mount_type == kv", + "fallback": "true", + }, + expected: !constants.IsEnterprise, + }, + "ent-opt-exclude": { + input: map[string]string{ + "exclude": `{ + "condition": "\"/request/mount_type\" == transit", + "fields": [ "/request/data", "/response/data" ] + }`, + }, + expected: !constants.IsEnterprise, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + require.Equal(t, tc.expected, HasInvalidOptions(tc.input)) + }) + } +} diff --git a/audit/broker.go b/audit/broker.go new file mode 100644 index 000000000000..1f9a859d3881 --- /dev/null +++ b/audit/broker.go @@ -0,0 +1,447 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + nshelper "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // timeout is the duration which should be used for context related timeouts. + timeout = 10 * time.Second +) + +var ( + _ Registrar = (*Broker)(nil) + _ Auditor = (*Broker)(nil) +) + +// Registrar interface describes a means to register and deregister audit devices. 
+type Registrar interface { + Register(backend Backend, local bool) error + Deregister(ctx context.Context, name string) error + IsRegistered(name string) bool + IsLocal(name string) (bool, error) +} + +// Auditor interface describes methods which can be used to perform auditing. +type Auditor interface { + LogRequest(ctx context.Context, input *logical.LogInput) error + LogResponse(ctx context.Context, input *logical.LogInput) error + GetHash(ctx context.Context, name string, input string) (string, error) + Invalidate(ctx context.Context, key string) +} + +// backendEntry composes a backend with additional settings. +type backendEntry struct { + // backend is the underlying audit backend. + backend Backend + + // local indicates whether this audit backend should be local to the Vault cluster. + local bool +} + +// Broker represents an audit broker which performs actions such as registering/de-registering +// backends and logging audit entries for a request or response. +// NOTE: NewBroker should be used to initialize the Broker struct. +type Broker struct { + *brokerEnt + + sync.RWMutex + + logger hclog.Logger + + // backends is the map of audit device name to its backendEntry. + backends map[string]backendEntry + + // broker is used to register pipelines for audit devices. + broker *eventlogger.Broker +} + +// NewBroker initializes a broker, which can be used to perform audit logging. +func NewBroker(logger hclog.Logger) (*Broker, error) { + if logger == nil || reflect.ValueOf(logger).IsNil() { + return nil, fmt.Errorf("cannot create a new audit broker with nil logger: %w", ErrInvalidParameter) + } + + eventBroker, err := eventlogger.NewBroker() + if err != nil { + return nil, fmt.Errorf("error creating event broker for audit events: %w", err) + } + + ent, err := newBrokerEnt() + if err != nil { + return nil, fmt.Errorf("error creating audit broker extensions: %w", err) + } + + return &Broker{ + backends: make(map[string]backendEntry), + broker: eventBroker, + brokerEnt: ent, + logger: logger, + }, nil +} + +// hasAuditPipelines can be used as a shorthand to check if a broker has any +// registered pipelines that are for the audit event type. +func hasAuditPipelines(broker *eventlogger.Broker) bool { + return broker.IsAnyPipelineRegistered(event.AuditType.AsEventType()) +} + +// isRegistered is used to check that a given audit backend isn't already registered +// and that the registration request is otherwise valid. +// This method should be used within the broker to prevent locking issues. +func (b *Broker) isRegistered(backend Backend) error { + if b.isRegisteredByName(backend.Name()) { + return fmt.Errorf("backend already registered '%s': %w", backend.Name(), ErrExternalOptions) + } + + if err := b.validateRegistrationRequest(backend); err != nil { + return err + } + + return nil +} + +// isRegisteredByName returns a boolean to indicate whether an audit backend is +// registered with the broker. +func (b *Broker) isRegisteredByName(name string) bool { + _, ok := b.backends[name] + return ok +} + +// register can be used to register a normal audit device; it will also calculate +// and configure the success threshold required for sinks. +// NOTE: register assumes that the backend which is being registered has not yet +// been added to the broker's backends.
+func (b *Broker) register(backend Backend) error { + err := registerNodesAndPipeline(b.broker, backend) + if err != nil { + return fmt.Errorf("audit pipeline registration error: %w", err) + } + + threshold := 0 + if !backend.HasFiltering() { + threshold = 1 + } else { + threshold = b.requiredSuccessThresholdSinks() + } + + // Update the success threshold now that the pipeline is registered. + err = b.broker.SetSuccessThresholdSinks(event.AuditType.AsEventType(), threshold) + if err != nil { + return fmt.Errorf("unable to configure sink success threshold (%d): %w", threshold, err) + } + + return nil +} + +// deregister can be used to deregister an audit device; it will also configure +// the success threshold required for sinks. +// NOTE: deregister assumes that the backend which is being deregistered has already +// been removed from the broker's backends. +func (b *Broker) deregister(ctx context.Context, name string) error { + threshold := b.requiredSuccessThresholdSinks() + + err := b.broker.SetSuccessThresholdSinks(event.AuditType.AsEventType(), threshold) + if err != nil { + return fmt.Errorf("unable to reconfigure sink success threshold (%d): %w", threshold, err) + } + + // The first return value, a bool, indicates whether + // RemovePipelineAndNodes encountered the error while evaluating + // pre-conditions (false) or once it started removing the pipeline and + // the nodes (true). This code doesn't care either way. + _, err = b.broker.RemovePipelineAndNodes(ctx, event.AuditType.AsEventType(), eventlogger.PipelineID(name)) + if err != nil { + return fmt.Errorf("unable to remove pipeline and nodes: %w", err) + } + + return nil +} + +// registerNodesAndPipeline registers eventlogger nodes and a pipeline, keyed by the +// backend's name, on the specified eventlogger.Broker, using the Backend to supply them. +func registerNodesAndPipeline(broker *eventlogger.Broker, b Backend) error { + for id, node := range b.Nodes() { + err := broker.RegisterNode(id, node) + if err != nil { + return fmt.Errorf("unable to register nodes for %q: %w", b.Name(), err) + } + } + + pipeline := eventlogger.Pipeline{ + PipelineID: eventlogger.PipelineID(b.Name()), + EventType: b.EventType(), + NodeIDs: b.NodeIDs(), + } + + err := broker.RegisterPipeline(pipeline) + if err != nil { + return fmt.Errorf("unable to register pipeline for %q: %w", b.Name(), err) + } + + return nil +} + +func (b *Broker) Register(backend Backend, local bool) error { + b.Lock() + defer b.Unlock() + + if backend == nil || reflect.ValueOf(backend).IsNil() { + return fmt.Errorf("backend cannot be nil: %w", ErrInvalidParameter) + } + + // If the backend is already registered, we cannot re-register it. + err := b.isRegistered(backend) + if err != nil { + return err + } + + if err := b.handlePipelineRegistration(backend); err != nil { + return err + } + + b.backends[backend.Name()] = backendEntry{ + backend: backend, + local: local, + } + + return nil +} + +func (b *Broker) Deregister(ctx context.Context, name string) error { + b.Lock() + defer b.Unlock() + + name = strings.TrimSpace(name) + if name == "" { + return fmt.Errorf("name is required: %w", ErrInvalidParameter) + } + + // If the backend isn't actually registered, then there's nothing to do. + // We don't return any error so that Deregister can be idempotent. + if !b.isRegisteredByName(name) { + return nil + } + + // Remove the Backend from the map first, so that if an error occurs while + // removing the pipeline and nodes, we can quickly exit this method with + // the error.
+ delete(b.backends, name) + + if err := b.handlePipelineDeregistration(ctx, name); err != nil { + return err + } + + return nil +} + +// LogRequest is used to ensure all the audit backends have an opportunity to +// log the given request and that *at least one* succeeds. +func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (retErr error) { + b.RLock() + defer b.RUnlock() + + // If no backends are registered then we have no devices to log the request. + if len(b.backends) < 1 { + return nil + } + + defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now()) + defer func() { + metricVal := float32(0.0) + if retErr != nil { + metricVal = 1.0 + } + metrics.IncrCounter([]string{"audit", "log_request_failure"}, metricVal) + }() + + e, err := newEvent(RequestType) + if err != nil { + return err + } + + e.Data = in + + // Get a context to use for auditing. + auditContext, auditCancel, err := getAuditContext(ctx) + if err != nil { + return err + } + defer auditCancel() + + var status eventlogger.Status + if hasAuditPipelines(b.broker) { + status, err = b.broker.Send(auditContext, event.AuditType.AsEventType(), e) + if err != nil { + return errors.Join(append([]error{err}, status.Warnings...)...) + } + } + + // Audit event ended up in at least 1 sink. + if len(status.CompleteSinks()) > 0 { + // We should log warnings to the operational logs regardless of whether + // we consider the overall auditing attempt to be successful. + if len(status.Warnings) > 0 { + b.logger.Error("log request underlying pipeline error(s)", "error", errors.Join(status.Warnings...)) + } + + return nil + } + + // There were errors from inside the pipeline and we didn't write to a sink. + if len(status.Warnings) > 0 { + return fmt.Errorf("error during audit pipeline processing: %w", errors.Join(status.Warnings...)) + } + + // Handle any additional audit that is required (Enterprise/CE dependent). + err = b.handleAdditionalAudit(auditContext, e) + if err != nil { + return err + } + + return nil +} + +// LogResponse is used to ensure all the audit backends have an opportunity to +// log the given response and that *at least one* succeeds. +func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (retErr error) { + b.RLock() + defer b.RUnlock() + + // If no backends are registered then we have no devices to send audit entries to. + if len(b.backends) < 1 { + return nil + } + + defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now()) + defer func() { + metricVal := float32(0.0) + if retErr != nil { + metricVal = 1.0 + } + metrics.IncrCounter([]string{"audit", "log_response_failure"}, metricVal) + }() + + e, err := newEvent(ResponseType) + if err != nil { + return err + } + + e.Data = in + + // Get a context to use for auditing. + auditContext, auditCancel, err := getAuditContext(ctx) + if err != nil { + return err + } + defer auditCancel() + + var status eventlogger.Status + if hasAuditPipelines(b.broker) { + status, err = b.broker.Send(auditContext, event.AuditType.AsEventType(), e) + if err != nil { + return errors.Join(append([]error{err}, status.Warnings...)...) + } + } + + // Audit event ended up in at least 1 sink. + if len(status.CompleteSinks()) > 0 { + // We should log warnings to the operational logs regardless of whether + // we consider the overall auditing attempt to be successful.
+		if len(status.Warnings) > 0 {
+			b.logger.Error("log response underlying pipeline error(s)", "error", errors.Join(status.Warnings...))
+		}
+
+		return nil
+	}
+
+	// There were errors from inside the pipeline and we didn't write to a sink.
+	if len(status.Warnings) > 0 {
+		return fmt.Errorf("error during audit pipeline processing: %w", errors.Join(status.Warnings...))
+	}
+
+	// Handle any additional audit that is required (Enterprise/CE dependent).
+	err = b.handleAdditionalAudit(auditContext, e)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *Broker) Invalidate(ctx context.Context, _ string) {
+	// For now, we ignore the key as this would only apply to salts.
+	// We just brute force it on each one.
+	b.Lock()
+	defer b.Unlock()
+
+	for _, be := range b.backends {
+		be.backend.Invalidate(ctx)
+	}
+}
+
+// IsLocal is used to check whether a given audit backend is registered as a local device.
+func (b *Broker) IsLocal(name string) (bool, error) {
+	b.RLock()
+	defer b.RUnlock()
+
+	be, ok := b.backends[name]
+	if ok {
+		return be.local, nil
+	}
+
+	return false, fmt.Errorf("unknown audit backend %q", name)
+}
+
+// GetHash returns a hash using the salt of the given backend.
+func (b *Broker) GetHash(ctx context.Context, name string, input string) (string, error) {
+	b.RLock()
+	defer b.RUnlock()
+
+	be, ok := b.backends[name]
+	if !ok {
+		return "", fmt.Errorf("unknown audit backend %q", name)
+	}
+
+	return hashString(ctx, be.backend, input)
+}
+
+// IsRegistered is used to check if a given audit backend is registered.
+func (b *Broker) IsRegistered(name string) bool {
+	b.RLock()
+	defer b.RUnlock()
+
+	return b.isRegisteredByName(name)
+}
+
+// getAuditContext extracts the namespace from the specified context and returns
+// a new context and cancellation function with a timeout, completely detached
+// from the original.
+// NOTE: When error is nil, the context.CancelFunc returned from this function
+// should be deferred immediately by the caller to prevent resource leaks.
+func getAuditContext(ctx context.Context) (context.Context, context.CancelFunc, error) {
+	ns, err := nshelper.FromContext(ctx)
+	if err != nil {
+		return nil, nil, fmt.Errorf("namespace missing from context: %w", err)
+	}
+
+	tempContext := nshelper.ContextWithNamespace(context.Background(), ns)
+	auditContext, auditCancel := context.WithTimeout(tempContext, timeout)
+
+	return auditContext, auditCancel, nil
+}
diff --git a/audit/broker_ce.go b/audit/broker_ce.go
new file mode 100644
index 000000000000..7a2d56e9922c
--- /dev/null
+++ b/audit/broker_ce.go
@@ -0,0 +1,49 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+//go:build !enterprise
+
+package audit
+
+import (
+	"context"
+	"fmt"
+)
+
+// brokerEnt provides extensions to the broker behavior, but not in the community edition.
+type brokerEnt struct{}
+
+func newBrokerEnt() (*brokerEnt, error) {
+	return &brokerEnt{}, nil
+}
+
+func (b *Broker) validateRegistrationRequest(_ Backend) error {
+	return nil
+}
+
+func (b *Broker) handlePipelineRegistration(backend Backend) error {
+	err := b.register(backend)
+	if err != nil {
+		return fmt.Errorf("unable to register device for %q: %w", backend.Name(), err)
+	}
+
+	return nil
+}
+
+func (b *Broker) handlePipelineDeregistration(ctx context.Context, name string) error {
+	return b.deregister(ctx, name)
+}
+
+// requiredSuccessThresholdSinks returns the value that should be used as the success
+// threshold in the eventlogger broker.
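+// In the community edition this reduces to (see the function below):
+//
+//	0 registered backends  -> threshold 0 (nothing to audit)
+//	1+ registered backends -> threshold 1 (at least one sink must succeed)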
+func (b *Broker) requiredSuccessThresholdSinks() int {
+	if len(b.backends) > 0 {
+		return 1
+	}
+
+	return 0
+}
+
+func (b *brokerEnt) handleAdditionalAudit(_ context.Context, _ *Event) error {
+	return nil
+}
diff --git a/audit/broker_test.go b/audit/broker_test.go
new file mode 100644
index 000000000000..a7bdc960d128
--- /dev/null
+++ b/audit/broker_test.go
@@ -0,0 +1,209 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"context"
+	"crypto/sha256"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	nshelper "github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/stretchr/testify/require"
+)
+
+// testAuditBackend will create an audit.Backend (which expects to use the eventlogger).
+// NOTE: this will create the backend; it does not care whether Enterprise-only options are in place.
+func testAuditBackend(t *testing.T, path string, config map[string]string) Backend {
+	t.Helper()
+
+	headersCfg := &HeadersConfig{
+		headerSettings: make(map[string]*headerSettings),
+		view:           nil,
+	}
+
+	view := &logical.InmemStorage{}
+	se := &logical.StorageEntry{Key: "salt", Value: []byte("juan")}
+	err := view.Put(context.Background(), se)
+	require.NoError(t, err)
+
+	cfg := &BackendConfig{
+		SaltView: view,
+		SaltConfig: &salt.Config{
+			HMAC:     sha256.New,
+			HMACType: "hmac-sha256",
+		},
+		Logger:    corehelpers.NewTestLogger(t),
+		Config:    config,
+		MountPath: path,
+	}
+
+	be, err := NewSyslogBackend(cfg, headersCfg)
+	require.NoError(t, err)
+	require.NotNil(t, be)
+
+	return be
+}
+
+// TestAuditBroker_Deregister_Multiple ensures that we can call deregister multiple
+// times without issue if there is no matching backend registered.
+func TestAuditBroker_Deregister_Multiple(t *testing.T) {
+	t.Parallel()
+
+	l := corehelpers.NewTestLogger(t)
+	a, err := NewBroker(l)
+	require.NoError(t, err)
+	require.NotNil(t, a)
+
+	err = a.Deregister(context.Background(), "foo")
+	require.NoError(t, err)
+
+	err = a.Deregister(context.Background(), "foo2")
+	require.NoError(t, err)
+}
+
+// TestAuditBroker_Register_MultipleFails checks for failure when we try to
+// re-register an audit backend.
+func TestAuditBroker_Register_MultipleFails(t *testing.T) {
+	t.Parallel()
+
+	l := corehelpers.NewTestLogger(t)
+	a, err := NewBroker(l)
+	require.NoError(t, err)
+	require.NotNil(t, a)
+
+	path := "b2-no-filter"
+	noFilterBackend := testAuditBackend(t, path, map[string]string{})
+
+	err = a.Register(noFilterBackend, false)
+	require.NoError(t, err)
+
+	err = a.Register(noFilterBackend, false)
+	require.Error(t, err)
+	require.EqualError(t, err, "backend already registered 'b2-no-filter': invalid configuration")
+}
+
+// BenchmarkAuditBroker_File_Request_DevNull attempts to register a single `file`
+// audit device on the broker, which points at /dev/null.
+// It then benchmarks how long it takes Vault to complete logging a request.
+// Since we aren't writing anything to file or doing any I/O, this really only
+// shows how Vault handles lots of calls to the broker, which trigger the
+// eventlogger pipelines that audit devices are configured as.
+// This benchmark used to live in the file package for the file backend, but once
+// the move to eventlogger was complete, there was no longer a way to create a file
+// backend and manually write to the underlying file itself. The old code did
+// formatting and writing all together, whereas with eventlogger this is split across
+// different nodes in a pipeline (think 1 audit device : 1 pipeline), each handling
+// a single responsibility, for example:
+//   - filter nodes filter events, so you can select which ones make it to your audit log
+//   - formatter nodes format the events (to JSON/JSONx, performing HMACing etc.)
+//   - sink nodes handle sending the formatted data to a file, syslog or socket
+func BenchmarkAuditBroker_File_Request_DevNull(b *testing.B) {
+	backendConfig := &BackendConfig{
+		Config: map[string]string{
+			"path": "/dev/null",
+		},
+		MountPath:  "test",
+		SaltConfig: &salt.Config{},
+		SaltView:   &logical.InmemStorage{},
+		Logger:     hclog.NewNullLogger(),
+	}
+
+	sink, err := NewFileBackend(backendConfig, nil)
+	require.NoError(b, err)
+
+	broker, err := NewBroker(nil)
+	require.NoError(b, err)
+
+	err = broker.Register(sink, false)
+	require.NoError(b, err)
+
+	in := &logical.LogInput{
+		Auth: &logical.Auth{
+			ClientToken:     "foo",
+			Accessor:        "bar",
+			EntityID:        "foobarentity",
+			DisplayName:     "testtoken",
+			NoDefaultPolicy: true,
+			Policies:        []string{"root"},
+			TokenType:       logical.TokenTypeService,
+		},
+		Request: &logical.Request{
+			Operation: logical.UpdateOperation,
+			Path:      "/foo",
+			Connection: &logical.Connection{
+				RemoteAddr: "127.0.0.1",
+			},
+			WrapInfo: &logical.RequestWrapInfo{
+				TTL: 60 * time.Second,
+			},
+			Headers: map[string][]string{
+				"foo": {"bar"},
+			},
+		},
+	}
+
+	ctx := nshelper.RootContext(context.Background())
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			if err := broker.LogRequest(ctx, in); err != nil {
+				panic(err)
+			}
+		}
+	})
+}
+
+// TestBroker_getAuditContext_NoNamespace checks that we get the right error when
+// trying to get an audit context with no namespace.
+func TestBroker_getAuditContext_NoNamespace(t *testing.T) {
+	t.Parallel()
+
+	_, _, err := getAuditContext(context.Background())
+	require.Error(t, err)
+	require.EqualError(t, err, "namespace missing from context: no namespace")
+}
+
+// TestBroker_getAuditContext checks that we get a context back which isn't linked
+// to the original context, and contains our namespace.
+func TestBroker_getAuditContext(t *testing.T) {
+	t.Parallel()
+
+	// Create a namespace.
+	ns := &nshelper.Namespace{
+		ID:   "foo",
+		Path: "foo/",
+	}
+
+	// Create a context with a namespace.
+	originalContext, originalCancel := context.WithCancel(context.Background())
+	t.Cleanup(originalCancel)
+	nsContext := nshelper.ContextWithNamespace(originalContext, ns)
+
+	// Get the audit context.
+	auditContext, auditCancel, err := getAuditContext(nsContext)
+	t.Cleanup(auditCancel)
+
+	require.NoError(t, err)
+	require.NotNil(t, auditContext)
+	require.NotNil(t, auditCancel)
+
+	// Ensure the namespace is there too.
+	val, err := nshelper.FromContext(auditContext)
+	require.NoError(t, err)
+	require.Equal(t, ns, val)
+
+	// Now cancel the original context and ensure it is done but the audit context isn't.
+	originalCancel()
+	require.NotNil(t, originalContext.Err())
+	require.Nil(t, auditContext.Err())
+
+	// Now cancel the audit context and ensure that it is done.
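+	// (auditCancel is the context.CancelFunc returned by getAuditContext;
+	// because auditContext wraps context.Background with its own timeout,
+	// this call, or the timeout expiring, is the only way it gets cancelled.)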
+ auditCancel() + require.NotNil(t, auditContext.Err()) +} diff --git a/audit/entry_filter.go b/audit/entry_filter.go new file mode 100644 index 000000000000..fbe0c1a1e729 --- /dev/null +++ b/audit/entry_filter.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-bexpr" + nshelper "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +var _ eventlogger.Node = (*entryFilter)(nil) + +// entryFilter should be used to filter audit requests and responses which should +// make it to a sink. +type entryFilter struct { + // the evaluator for the bexpr expression that should be applied by the node. + evaluator *bexpr.Evaluator +} + +// newEntryFilter should be used to create an entryFilter node. +// The filter supplied should be in bexpr format and reference fields from logical.LogInputBexpr. +func newEntryFilter(filter string) (*entryFilter, error) { + filter = strings.TrimSpace(filter) + if filter == "" { + return nil, fmt.Errorf("cannot create new audit filter with empty filter expression: %w", ErrExternalOptions) + } + + eval, err := bexpr.CreateEvaluator(filter) + if err != nil { + return nil, fmt.Errorf("cannot create new audit filter: %w: %w", ErrExternalOptions, err) + } + + // Validate the filter by attempting to evaluate it with an empty input. + // This prevents users providing a filter with a field that would error during + // matching, and block all auditable requests to Vault. + li := logical.LogInputBexpr{} + _, err = eval.Evaluate(li) + if err != nil { + return nil, fmt.Errorf("filter references an unsupported field: %s: %w", filter, ErrExternalOptions) + } + + return &entryFilter{evaluator: eval}, nil +} + +// Reopen is a no-op for the filter node. +func (*entryFilter) Reopen() error { + return nil +} + +// Type describes the type of this node (filter). +func (*entryFilter) Type() eventlogger.NodeType { + return eventlogger.NodeTypeFilter +} + +// Process will attempt to parse the incoming event data and decide whether it +// should be filtered or remain in the pipeline and passed to the next node. +func (f *entryFilter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + a, ok := e.Payload.(*Event) + if !ok { + return nil, fmt.Errorf("cannot parse event payload: %w", ErrInvalidParameter) + } + + // If we don't have data to process, then we're done. + if a.Data == nil { + return nil, nil + } + + ns, err := nshelper.FromContext(ctx) + if err != nil { + return nil, fmt.Errorf("cannot obtain namespace: %w", err) + } + + datum := a.Data.BexprDatum(ns.Path) + + result, err := f.evaluator.Evaluate(datum) + if err != nil { + return nil, fmt.Errorf("unable to evaluate filter: %w", err) + } + + if result { + // Allow this event to carry on through the pipeline. + return e, nil + } + + // End process of this pipeline. + return nil, nil +} diff --git a/audit/entry_filter_test.go b/audit/entry_filter_test.go new file mode 100644 index 000000000000..847f4ab799b1 --- /dev/null +++ b/audit/entry_filter_test.go @@ -0,0 +1,271 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/eventlogger" + nshelper "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestEntryFilter_NewEntryFilter tests that we can create entryFilter types correctly. +func TestEntryFilter_NewEntryFilter(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Filter string + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty-filter": { + Filter: "", + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create new audit filter with empty filter expression: invalid configuration", + }, + "spacey-filter": { + Filter: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create new audit filter with empty filter expression: invalid configuration", + }, + "bad-filter": { + Filter: "____", + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create new audit filter", + }, + "unsupported-field-filter": { + Filter: "foo == bar", + IsErrorExpected: true, + ExpectedErrorMessage: "filter references an unsupported field: foo == bar", + }, + "good-filter-operation": { + Filter: "operation == create", + IsErrorExpected: false, + }, + "good-filter-mount_type": { + Filter: "mount_type == kv", + IsErrorExpected: false, + }, + "good-filter-mount_point": { + Filter: "mount_point == \"/auth/userpass\"", + IsErrorExpected: false, + }, + "good-filter-namespace": { + Filter: "namespace == juan", + IsErrorExpected: false, + }, + "good-filter-path": { + Filter: "path == foo", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + f, err := newEntryFilter(tc.Filter) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.ExpectedErrorMessage) + require.Nil(t, f) + default: + require.NoError(t, err) + require.NotNil(t, f) + } + }) + } +} + +// TestEntryFilter_Reopen ensures we can reopen the filter node. +func TestEntryFilter_Reopen(t *testing.T) { + t.Parallel() + + f := &entryFilter{} + res := f.Reopen() + require.Nil(t, res) +} + +// TestEntryFilter_Type ensures we always return the right type for this node. +func TestEntryFilter_Type(t *testing.T) { + t.Parallel() + + f := &entryFilter{} + require.Equal(t, eventlogger.NodeTypeFilter, f.Type()) +} + +// TestEntryFilter_Process_ContextDone ensures that we stop processing the event +// if the context was cancelled. +func TestEntryFilter_Process_ContextDone(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + + // Explicitly cancel the context + cancel() + + l, err := newEntryFilter("operation == foo") + require.NoError(t, err) + + // Fake audit event + a, err := newEvent(RequestType) + require.NoError(t, err) + + // Fake event logger event + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + e2, err := l.Process(ctx, e) + + require.Error(t, err) + require.ErrorContains(t, err, "context canceled") + + // Ensure that the pipeline won't continue. + require.Nil(t, e2) +} + +// TestEntryFilter_Process_NilEvent ensures we receive the right error when the +// event we are trying to process is nil. 
+func TestEntryFilter_Process_NilEvent(t *testing.T) { + t.Parallel() + + l, err := newEntryFilter("operation == foo") + require.NoError(t, err) + e, err := l.Process(context.Background(), nil) + require.Error(t, err) + require.EqualError(t, err, "event is nil: invalid internal parameter") + + // Ensure that the pipeline won't continue. + require.Nil(t, e) +} + +// TestEntryFilter_Process_BadPayload ensures we receive the correct error when +// attempting to process an event with a payload that cannot be parsed back to +// an audit event. +func TestEntryFilter_Process_BadPayload(t *testing.T) { + t.Parallel() + + l, err := newEntryFilter("operation == foo") + require.NoError(t, err) + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: nil, + } + + e2, err := l.Process(context.Background(), e) + require.Error(t, err) + require.EqualError(t, err, "cannot parse event payload: invalid internal parameter") + + // Ensure that the pipeline won't continue. + require.Nil(t, e2) +} + +// TestEntryFilter_Process_NoAuditDataInPayload ensure we stop processing a pipeline +// when the data in the audit event is nil. +func TestEntryFilter_Process_NoAuditDataInPayload(t *testing.T) { + t.Parallel() + + l, err := newEntryFilter("operation == foo") + require.NoError(t, err) + + a, err := newEvent(RequestType) + require.NoError(t, err) + + // Ensure audit data is nil + a.Data = nil + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + e2, err := l.Process(context.Background(), e) + + // Make sure we get the 'nil, nil' response to stop processing this pipeline. + require.NoError(t, err) + require.Nil(t, e2) +} + +// TestEntryFilter_Process_FilterSuccess tests that when a filter matches we +// receive no error and the event is not nil so it continues in the pipeline. +func TestEntryFilter_Process_FilterSuccess(t *testing.T) { + t.Parallel() + + l, err := newEntryFilter("mount_type == juan") + require.NoError(t, err) + + a, err := newEvent(RequestType) + require.NoError(t, err) + + a.Data = &logical.LogInput{ + Request: &logical.Request{ + Operation: logical.CreateOperation, + MountType: "juan", + }, + } + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + ctx := nshelper.ContextWithNamespace(context.Background(), nshelper.RootNamespace) + + e2, err := l.Process(ctx, e) + + require.NoError(t, err) + require.NotNil(t, e2) +} + +// TestEntryFilter_Process_FilterFail tests that when a filter fails to match we +// receive no error, but also the event is nil so that the pipeline completes. 
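+// All three clauses of the filter must match for the event to pass; the
+// request below has mount_type "juan", so the "mount_type == john" clause
+// fails and Process is expected to return (nil, nil):
+//
+//	mount_type == john and operation == create and namespace == root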
+func TestEntryFilter_Process_FilterFail(t *testing.T) { + t.Parallel() + + l, err := newEntryFilter("mount_type == john and operation == create and namespace == root") + require.NoError(t, err) + + a, err := newEvent(RequestType) + require.NoError(t, err) + + a.Data = &logical.LogInput{ + Request: &logical.Request{ + Operation: logical.CreateOperation, + MountType: "juan", + }, + } + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + ctx := nshelper.ContextWithNamespace(context.Background(), nshelper.RootNamespace) + + e2, err := l.Process(ctx, e) + + require.NoError(t, err) + require.Nil(t, e2) +} diff --git a/audit/entry_formatter.go b/audit/entry_formatter.go new file mode 100644 index 000000000000..9ea6bdb92924 --- /dev/null +++ b/audit/entry_formatter.go @@ -0,0 +1,616 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "errors" + "fmt" + "reflect" + "runtime/debug" + "strings" + "time" + + "github.com/go-jose/go-jose/v3/jwt" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + nshelper "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/jefferai/jsonx" + "github.com/mitchellh/copystructure" +) + +var _ eventlogger.Node = (*entryFormatter)(nil) + +// timeProvider offers a way to supply a pre-configured time. +type timeProvider interface { + // formatTime provides the pre-configured time in a particular format. + formattedTime() string +} + +// nonPersistentSalt is used for obtaining a salt that is not persisted. +type nonPersistentSalt struct{} + +// entryFormatter should be used to format audit requests and responses. +// NOTE: Use newEntryFormatter to initialize the entryFormatter struct. +type entryFormatter struct { + config formatterConfig + salter Salter + logger hclog.Logger + name string +} + +// newEntryFormatter should be used to create an entryFormatter. +func newEntryFormatter(name string, config formatterConfig, salter Salter, logger hclog.Logger) (*entryFormatter, error) { + name = strings.TrimSpace(name) + if name == "" { + return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter) + } + + if salter == nil { + return nil, fmt.Errorf("cannot create a new audit formatter with nil salter: %w", ErrInvalidParameter) + } + + if logger == nil || reflect.ValueOf(logger).IsNil() { + return nil, fmt.Errorf("cannot create a new audit formatter with nil logger: %w", ErrInvalidParameter) + } + + return &entryFormatter{ + config: config, + salter: salter, + logger: logger, + name: name, + }, nil +} + +// Reopen is a no-op for the formatter node. +func (*entryFormatter) Reopen() error { + return nil +} + +// Type describes the type of this node (formatter). +func (*entryFormatter) Type() eventlogger.NodeType { + return eventlogger.NodeTypeFormatter +} + +// Process will attempt to parse the incoming event data into a corresponding +// audit request/response which is serialized to JSON/JSONx and stored within the event. +func (f *entryFormatter) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { + // Return early if the context was cancelled, eventlogger will not carry on + // asking nodes to process, so any sink node in the pipeline won't be called. 
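+	// (This is the conventional non-blocking poll of ctx.Done(): the default
+	// case falls through immediately when the context is still live.)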
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	// Perform validation on the event, then retrieve the underlying AuditEvent
+	// and LogInput (from the AuditEvent Data).
+	if e == nil {
+		return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
+	}
+
+	a, ok := e.Payload.(*Event)
+	if !ok {
+		return nil, fmt.Errorf("cannot parse event payload: %w", ErrInvalidParameter)
+	}
+
+	if a.Data == nil {
+		return nil, fmt.Errorf("cannot audit a '%s' event with no data: %w", a.Subtype, ErrInvalidParameter)
+	}
+
+	// Handle panics.
+	defer func() {
+		r := recover()
+		if r == nil {
+			return
+		}
+
+		path := "unknown"
+		if a.Data.Request != nil {
+			path = a.Data.Request.Path
+		}
+
+		f.logger.Error("panic during logging",
+			"request_path", path,
+			"audit_device_path", f.name,
+			"error", r,
+			"stacktrace", string(debug.Stack()))
+
+		// Ensure that we add this error onto any pre-existing error that was being returned.
+		retErr = errors.Join(retErr, fmt.Errorf("panic generating audit log: %q", f.name))
+	}()
+
+	// Using 'any' makes exclusion easier; the JSON encoder doesn't care about types.
+	var entry any
+	var err error
+	entry, err = f.createEntry(ctx, a)
+	if err != nil {
+		return nil, err
+	}
+
+	// If this pipeline has been configured with (Enterprise-only) exclusions then
+	// attempt to exclude the fields from the audit entry.
+	if f.shouldExclude() {
+		m, err := f.excludeFields(entry)
+		if err != nil {
+			return nil, fmt.Errorf("unable to exclude %s audit data from %q: %w", a.Subtype, f.name, err)
+		}
+
+		entry = m
+	}
+
+	result, err := jsonutil.EncodeJSON(entry)
+	if err != nil {
+		return nil, fmt.Errorf("unable to format %s: %w", a.Subtype, err)
+	}
+
+	if f.config.requiredFormat == jsonxFormat {
+		var err error
+		result, err = jsonx.EncodeJSONBytes(result)
+		if err != nil {
+			return nil, fmt.Errorf("unable to encode JSONx using JSON data: %w", err)
+		}
+		if result == nil {
+			return nil, fmt.Errorf("encoded JSONx was nil")
+		}
+	}
+
+	// This makes a bit of a mess of the 'format', since neither JSON nor XML
+	// (JSONx) supports a prefix just sitting there. However, including the
+	// prefix as part of the JSON object or XML document would be a breaking
+	// change to how Vault currently works.
+	if f.config.prefix != "" {
+		result = append([]byte(f.config.prefix), result...)
+	}
+
+	// Create a new event, so we can store our formatted data without conflict.
+	e2 := &eventlogger.Event{
+		Type:      e.Type,
+		CreatedAt: e.CreatedAt,
+		Formatted: make(map[string][]byte), // we are about to set this ourselves.
+		Payload:   a,
+	}
+
+	e2.FormattedAs(f.config.requiredFormat.String(), result)
+
+	return e2, nil
+}
+
+// remoteAddr safely gets the remote address, avoiding a nil pointer.
+func remoteAddr(req *logical.Request) string {
+	if req != nil && req.Connection != nil {
+		return req.Connection.RemoteAddr
+	}
+	return ""
+}
+
+// remotePort safely gets the remote port, avoiding a nil pointer.
+func remotePort(req *logical.Request) int {
+	if req != nil && req.Connection != nil {
+		return req.Connection.RemotePort
+	}
+	return 0
+}
+
+// clientCertSerialNumber attempts to retrieve the serial number of the peer
+// certificate from the request's TLS connection state.
+func clientCertSerialNumber(req *logical.Request) string { + if req == nil || req.Connection == nil { + return "" + } + + connState := req.Connection.ConnState + + if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { + return "" + } + + return connState.VerifiedChains[0][0].SerialNumber.String() +} + +// parseVaultTokenFromJWT returns a string iff the token was a JWT, and we could +// extract the original token ID from inside +func parseVaultTokenFromJWT(token string) *string { + if strings.Count(token, ".") != 2 { + return nil + } + + parsedJWT, err := jwt.ParseSigned(token) + if err != nil { + return nil + } + + var claims jwt.Claims + if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil { + return nil + } + + return &claims.ID +} + +// newTemporaryEntryFormatter creates a cloned entryFormatter instance with a non-persistent Salter. +func newTemporaryEntryFormatter(n *entryFormatter) *entryFormatter { + return &entryFormatter{ + salter: &nonPersistentSalt{}, + config: n.config, + } +} + +// Salt returns a new salt with default configuration and no storage usage, and no error. +func (s *nonPersistentSalt) Salt(_ context.Context) (*salt.Salt, error) { + return salt.NewNonpersistentSalt(), nil +} + +// clone can be used to deep clone the specified type. +func clone[V any](s V) (V, error) { + s2, err := copystructure.Copy(s) + + return s2.(V), err +} + +// newAuth takes a logical.Auth and the number of remaining client token uses +// (which should be supplied from the logical.Request's client token), and creates +// an audit auth. +// tokenRemainingUses should be the client token remaining uses to include in auth. +// This usually can be found in logical.Request.ClientTokenRemainingUses. +// NOTE: supplying a nil value for auth will result in a nil return value and +// (nil) error. The caller should check the return value before attempting to use it. +// ignore-nil-nil-function-check. 
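+//
+// A usage sketch mirroring the call made later in createEntry:
+//
+//	a, err := newAuth(data.Auth, data.Request.ClientTokenRemainingUses)
+//	if err != nil {
+//		return nil, err
+//	}
+//	if a == nil {
+//		// the event carried no auth
+//	}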
+func newAuth(input *logical.Auth, tokenRemainingUses int) (*auth, error) {
+	if input == nil {
+		return nil, nil
+	}
+
+	extNSPolicies, err := clone(input.ExternalNamespacePolicies)
+	if err != nil {
+		return nil, fmt.Errorf("unable to clone logical auth: external namespace policies: %w", err)
+	}
+
+	identityPolicies, err := clone(input.IdentityPolicies)
+	if err != nil {
+		return nil, fmt.Errorf("unable to clone logical auth: identity policies: %w", err)
+	}
+
+	metadata, err := clone(input.Metadata)
+	if err != nil {
+		return nil, fmt.Errorf("unable to clone logical auth: metadata: %w", err)
+	}
+
+	policies, err := clone(input.Policies)
+	if err != nil {
+		return nil, fmt.Errorf("unable to clone logical auth: policies: %w", err)
+	}
+
+	var polRes *policyResults
+	if input.PolicyResults != nil {
+		polRes = &policyResults{
+			Allowed: input.PolicyResults.Allowed,
+			// Zero length (with the right capacity), since we append below;
+			// a non-zero length would leave empty leading entries.
+			GrantingPolicies: make([]policyInfo, 0, len(input.PolicyResults.GrantingPolicies)),
+		}
+
+		for _, p := range input.PolicyResults.GrantingPolicies {
+			polRes.GrantingPolicies = append(polRes.GrantingPolicies, policyInfo{
+				Name:          p.Name,
+				NamespaceId:   p.NamespaceId,
+				NamespacePath: p.NamespacePath,
+				Type:          p.Type,
+			})
+		}
+	}
+
+	tokenPolicies, err := clone(input.TokenPolicies)
+	if err != nil {
+		return nil, fmt.Errorf("unable to clone logical auth: token policies: %w", err)
+	}
+
+	var tokenIssueTime string
+	if !input.IssueTime.IsZero() {
+		tokenIssueTime = input.IssueTime.Format(time.RFC3339)
+	}
+
+	return &auth{
+		Accessor:                  input.Accessor,
+		ClientToken:               input.ClientToken,
+		DisplayName:               input.DisplayName,
+		EntityCreated:             input.EntityCreated,
+		EntityID:                  input.EntityID,
+		ExternalNamespacePolicies: extNSPolicies,
+		IdentityPolicies:          identityPolicies,
+		Metadata:                  metadata,
+		NoDefaultPolicy:           input.NoDefaultPolicy,
+		NumUses:                   input.NumUses,
+		Policies:                  policies,
+		PolicyResults:             polRes,
+		RemainingUses:             tokenRemainingUses,
+		TokenPolicies:             tokenPolicies,
+		TokenIssueTime:            tokenIssueTime,
+		TokenTTL:                  int64(input.TTL.Seconds()),
+		TokenType:                 input.TokenType.String(),
+	}, nil
+}
+
+// newRequest takes a logical.Request and namespace.Namespace, transforms and
+// aggregates them into an audit request.
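+// Conversion notes from the implementation below: WrapTTL is recorded in whole
+// seconds (a 60s logical.RequestWrapInfo TTL is audited as 60), and RequestURI
+// is captured only when it differs from the request path.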
+func newRequest(req *logical.Request, ns *nshelper.Namespace) (*request, error) { + if req == nil { + return nil, fmt.Errorf("request cannot be nil") + } + + remoteAddr := remoteAddr(req) + remotePort := remotePort(req) + clientCertSerial := clientCertSerialNumber(req) + + data, err := clone(req.Data) + if err != nil { + return nil, fmt.Errorf("unable to clone logical request: data: %w", err) + } + + headers, err := clone(req.Headers) + if err != nil { + return nil, fmt.Errorf("unable to clone logical request: headers: %w", err) + } + + var reqURI string + if req.HTTPRequest != nil && req.HTTPRequest.RequestURI != req.Path { + reqURI = req.HTTPRequest.RequestURI + } + var wrapTTL int + if req.WrapInfo != nil { + wrapTTL = int(req.WrapInfo.TTL / time.Second) + } + + return &request{ + ClientCertificateSerialNumber: clientCertSerial, + ClientID: req.ClientID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + Data: data, + Headers: headers, + ID: req.ID, + MountAccessor: req.MountAccessor, + MountClass: req.MountClass(), + MountIsExternalPlugin: req.MountIsExternalPlugin(), + MountPoint: req.MountPoint, + MountRunningSha256: req.MountRunningSha256(), + MountRunningVersion: req.MountRunningVersion(), + MountType: req.MountType, + Namespace: &namespace{ + ID: ns.ID, + Path: ns.Path, + }, + Operation: req.Operation, + Path: req.Path, + PolicyOverride: req.PolicyOverride, + RemoteAddr: remoteAddr, + RemotePort: remotePort, + ReplicationCluster: req.ReplicationCluster, + RequestURI: reqURI, + WrapTTL: wrapTTL, + }, nil +} + +// newResponse takes a logical.Response and logical.Request, transforms and +// aggregates them into an audit response. +// isElisionRequired is used to indicate that response 'Data' should be elided. +// NOTE: supplying a nil value for response will result in a nil return value and +// (nil) error. The caller should check the return value before attempting to use it. +// ignore-nil-nil-function-check. +func newResponse(resp *logical.Response, req *logical.Request, isElisionRequired bool) (*response, error) { + if resp == nil { + return nil, nil + } + + if req == nil { + // Request should never be nil, even for a response. + return nil, fmt.Errorf("request cannot be nil") + } + + auth, err := newAuth(resp.Auth, req.ClientTokenRemainingUses) + if err != nil { + return nil, fmt.Errorf("unable to convert logical auth response: %w", err) + } + + var data map[string]any + if resp.Data != nil { + data = make(map[string]any, len(resp.Data)) + + if isElisionRequired { + // Performs the actual elision (ideally for list operations) of response data, + // once surrounding code has determined it should apply to a particular request. + // If the value for a key should not be elided, then it will be cloned. + for k, v := range resp.Data { + isCloneRequired := true + switch k { + case "keys": + if vSlice, ok := v.([]string); ok { + data[k] = len(vSlice) + isCloneRequired = false + } + case "key_info": + if vMap, ok := v.(map[string]any); ok { + data[k] = len(vMap) + isCloneRequired = false + } + } + + // Clone values if they weren't legitimate keys or key_info. + if isCloneRequired { + v2, err := clone(v) + if err != nil { + return nil, fmt.Errorf("unable to clone response data while eliding: %w", err) + } + data[k] = v2 + } + } + } else { + // Deep clone all values, no shortcuts here. 
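+			// (clone is copystructure-based, so the audit entry owns its own
+			// copy and later hashing cannot mutate the live response data.)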
+ data, err = clone(resp.Data) + if err != nil { + return nil, fmt.Errorf("unable to clone response data: %w", err) + } + } + } + + headers, err := clone(resp.Headers) + if err != nil { + return nil, fmt.Errorf("unable to clone logical response: headers: %w", err) + } + + var s *secret + if resp.Secret != nil { + s = &secret{LeaseID: resp.Secret.LeaseID} + } + + var wrapInfo *responseWrapInfo + if resp.WrapInfo != nil { + token := resp.WrapInfo.Token + if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil { + token = *jwtToken + } + + ttl := int(resp.WrapInfo.TTL / time.Second) + wrapInfo = &responseWrapInfo{ + TTL: ttl, + Token: token, + Accessor: resp.WrapInfo.Accessor, + CreationTime: resp.WrapInfo.CreationTime.UTC().Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, + WrappedAccessor: resp.WrapInfo.WrappedAccessor, + } + } + + warnings, err := clone(resp.Warnings) + if err != nil { + return nil, fmt.Errorf("unable to clone logical response: warnings: %w", err) + } + + return &response{ + Auth: auth, + Data: data, + Headers: headers, + MountAccessor: req.MountAccessor, + MountClass: req.MountClass(), + MountIsExternalPlugin: req.MountIsExternalPlugin(), + MountPoint: req.MountPoint, + MountRunningSha256: req.MountRunningSha256(), + MountRunningVersion: req.MountRunningVersion(), + MountType: req.MountType, + Redirect: resp.Redirect, + Secret: s, + WrapInfo: wrapInfo, + Warnings: warnings, + }, nil +} + +// createEntry takes the AuditEvent and builds an audit entry. +// The entry will be HMAC'd and elided where required. +func (f *entryFormatter) createEntry(ctx context.Context, a *Event) (*entry, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + + } + + data := a.Data + + if data.Request == nil { + // Request should never be nil, even for a response. + return nil, fmt.Errorf("unable to parse request from '%s' audit event: request cannot be nil", a.Subtype) + } + + ns, err := nshelper.FromContext(ctx) + if err != nil { + return nil, fmt.Errorf("unable to retrieve namespace from context: %w", err) + } + + auth, err := newAuth(data.Auth, data.Request.ClientTokenRemainingUses) + if err != nil { + return nil, fmt.Errorf("cannot convert auth: %w", err) + } + + req, err := newRequest(data.Request, ns) + if err != nil { + return nil, fmt.Errorf("cannot convert request: %w", err) + } + + var resp *response + if a.Subtype == ResponseType { + shouldElide := f.config.elideListResponses && req.Operation == logical.ListOperation + resp, err = newResponse(data.Response, data.Request, shouldElide) + if err != nil { + return nil, fmt.Errorf("cannot convert response: %w", err) + } + } + + var outerErr string + if data.OuterErr != nil { + outerErr = data.OuterErr.Error() + } + + entryType := data.Type + if entryType == "" { + entryType = a.Subtype.String() + } + + entry := &entry{ + Auth: auth, + Error: outerErr, + Forwarded: false, + ForwardedFrom: data.Request.ForwardedFrom, + Request: req, + Response: resp, + Type: entryType, + } + + if !f.config.omitTime { + // Use the time provider to supply the time for this entry. + entry.Time = a.timeProvider().formattedTime() + } + + // If the request is present in the input data, apply header configuration + // regardless. We shouldn't be in a situation where the header formatter isn't + // present as it's required. + if entry.Request != nil { + // Ensure that any headers in the request, are formatted as required, and are + // only present if they have been configured to appear in the audit log. + // e.g. 
via: /sys/config/auditing/request-headers/:name + entry.Request.Headers, err = f.config.headerFormatter.ApplyConfig(ctx, entry.Request.Headers, f.salter) + if err != nil { + return nil, fmt.Errorf("unable to transform headers for auditing: %w", err) + } + } + + // If the request contains a Server-Side Consistency Token (SSCT), and we + // have an auth response, overwrite the existing client token with the SSCT, + // so that the SSCT appears in the audit log for this entry. + if data.Request != nil && data.Request.InboundSSCToken != "" && entry.Auth != nil { + entry.Auth.ClientToken = data.Request.InboundSSCToken + } + + // Hash the entry if we aren't expecting raw output. + if !f.config.raw { + // Requests and responses have auth and request. + err = hashAuth(ctx, f.salter, entry.Auth, f.config.hmacAccessor) + if err != nil { + return nil, err + } + + err = hashRequest(ctx, f.salter, entry.Request, f.config.hmacAccessor, data.NonHMACReqDataKeys) + if err != nil { + return nil, err + } + + if a.Subtype == ResponseType { + if err = hashResponse(ctx, f.salter, entry.Response, f.config.hmacAccessor, data.NonHMACRespDataKeys); err != nil { + return nil, err + } + } + } + + return entry, nil +} diff --git a/audit/entry_formatter_ce.go b/audit/entry_formatter_ce.go new file mode 100644 index 000000000000..7c3f33212e69 --- /dev/null +++ b/audit/entry_formatter_ce.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +import ( + "errors" +) + +func (f *entryFormatter) shouldExclude() bool { + return false +} + +func (f *entryFormatter) excludeFields(entry any) (map[string]any, error) { + return nil, errors.New("enterprise-only feature: audit exclusion") +} diff --git a/audit/entry_formatter_ce_test.go b/audit/entry_formatter_ce_test.go new file mode 100644 index 000000000000..ffe181f4b882 --- /dev/null +++ b/audit/entry_formatter_ce_test.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +// TestEntryFormatter_excludeFields tests that we can exclude data based on the +// pre-configured conditions/fields of the EntryFormatter. It covers some scenarios +// where we expect errors due to invalid input, which is unlikely to happen in reality. +func TestEntryFormatter_excludeFields(t *testing.T) { + // Create the formatter node. + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + require.NoError(t, err) + ss := newStaticSalt(t) + + // We intentionally create the EntryFormatter manually, as we wouldn't be + // able to set exclusions via NewEntryFormatter WithExclusions option. + formatter := &entryFormatter{ + config: cfg, + salter: ss, + logger: hclog.NewNullLogger(), + name: "juan", + } + + res, err := formatter.excludeFields(nil) + require.Error(t, err) + require.EqualError(t, err, "enterprise-only feature: audit exclusion") + require.Nil(t, res) +} diff --git a/audit/entry_formatter_config.go b/audit/entry_formatter_config.go new file mode 100644 index 000000000000..aa55a2ea2d2b --- /dev/null +++ b/audit/entry_formatter_config.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "reflect" + "strconv" +) + +// formatterConfig is used to provide basic configuration to a formatter. 
+// Use newFormatterConfig to initialize the formatterConfig struct.
+type formatterConfig struct {
+	formatterConfigEnt
+
+	raw          bool
+	hmacAccessor bool
+
+	// Vault lacks pagination in its APIs. As a result, certain list operations can return **very** large responses.
+	// The user's chosen audit sinks may experience difficulty consuming audit records that swell to tens of megabytes
+	// of JSON. The responses of list operations are typically not very interesting, as they are mostly lists of keys,
+	// or, even when they include a "key_info" field, are not returning confidential information. They become even less
+	// interesting once HMAC-ed by the audit system.
+	//
+	// Some example Vault "list" operations that are prone to becoming very large in an active Vault installation are:
+	//   auth/token/accessors/
+	//   identity/entity/id/
+	//   identity/entity-alias/id/
+	//   pki/certs/
+	//
+	// This option exists to provide such users with the option to have response data elided from audit logs, only when
+	// the operation type is "list". For added safety, the elision only applies to the "keys" and "key_info" fields
+	// within the response data - these are conventionally the only fields present in a list response - see
+	// logical.ListResponse, and logical.ListResponseWithInfo. However, other fields are technically possible if a
+	// plugin author writes unusual code, and these will be preserved in the audit log even with this option enabled.
+	// The elision replaces the values of the "keys" and "key_info" fields with an integer count of the number of
+	// entries. This allows even the elided audit logs to still be useful for answering questions like
+	// "Was any data returned?" or "How many records were listed?".
+	elideListResponses bool
+
+	// omitTime should only ever be used in a testing context.
+	omitTime bool
+
+	// requiredFormat is the required/target format for the event (supported: jsonFormat and jsonxFormat).
+	requiredFormat format
+
+	// headerFormatter specifies the formatter used for headers that exist in any incoming audit request.
+	headerFormatter HeaderFormatter
+
+	// prefix specifies a prefix that should be prepended to any formatted request or response before serialization.
+	prefix string
+}
+
+// newFormatterConfig creates the configuration required by a formatter node using the config map supplied to the factory.
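+// A sketch of typical usage; the "format" and "prefix" keys appear in this
+// package's tests, while the remaining key names are assumptions based on the
+// documented audit device options:
+//
+//	cfg, err := newFormatterConfig(headerFormatter, map[string]string{
+//		"format":               "jsonx",
+//		"prefix":               "vault-audit: ",
+//		"hmac_accessor":        "true",
+//		"elide_list_responses": "true",
+//	})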
+func newFormatterConfig(headerFormatter HeaderFormatter, config map[string]string) (formatterConfig, error) { + if headerFormatter == nil || reflect.ValueOf(headerFormatter).IsNil() { + return formatterConfig{}, fmt.Errorf("header formatter is required: %w", ErrInvalidParameter) + } + + var opt []option + + if format, ok := config[optionFormat]; ok { + if !isValidFormat(format) { + return formatterConfig{}, fmt.Errorf("unsupported %q: %w", optionFormat, ErrExternalOptions) + } + + opt = append(opt, withFormat(format)) + } + + // Check if hashing of accessor is disabled + if hmacAccessorRaw, ok := config[optionHMACAccessor]; ok { + v, err := strconv.ParseBool(hmacAccessorRaw) + if err != nil { + return formatterConfig{}, fmt.Errorf("unable to parse %q: %w", optionHMACAccessor, ErrExternalOptions) + } + opt = append(opt, withHMACAccessor(v)) + } + + // Check if raw logging is enabled + if raw, ok := config[optionLogRaw]; ok { + v, err := strconv.ParseBool(raw) + if err != nil { + return formatterConfig{}, fmt.Errorf("unable to parse %q: %w", optionLogRaw, ErrExternalOptions) + } + opt = append(opt, withRaw(v)) + } + + if elideListResponsesRaw, ok := config[optionElideListResponses]; ok { + v, err := strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return formatterConfig{}, fmt.Errorf("unable to parse %q: %w", optionElideListResponses, ErrExternalOptions) + } + opt = append(opt, withElision(v)) + } + + if prefix, ok := config[optionPrefix]; ok { + opt = append(opt, withPrefix(prefix)) + } + + opts, err := getOpts(opt...) + if err != nil { + return formatterConfig{}, err + } + + fmtCfgEnt, err := newFormatterConfigEnt(config) + if err != nil { + return formatterConfig{}, err + } + + return formatterConfig{ + formatterConfigEnt: fmtCfgEnt, + headerFormatter: headerFormatter, + elideListResponses: opts.withElision, + hmacAccessor: opts.withHMACAccessor, + omitTime: opts.withOmitTime, // This must be set in code after creation. + prefix: opts.withPrefix, + raw: opts.withRaw, + requiredFormat: opts.withFormat, + }, nil +} diff --git a/audit/entry_formatter_config_ce.go b/audit/entry_formatter_config_ce.go new file mode 100644 index 000000000000..43b7307f4d71 --- /dev/null +++ b/audit/entry_formatter_config_ce.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package audit + +// formatterConfigEnt provides extensions to a formatterConfig which behave differently +// for Enterprise and community edition. +// NOTE: Use newFormatterConfigEnt to initialize the formatterConfigEnt struct. +type formatterConfigEnt struct{} + +// newFormatterConfigEnt should be used to create formatterConfigEnt. +func newFormatterConfigEnt(config map[string]string) (formatterConfigEnt, error) { + return formatterConfigEnt{}, nil +} diff --git a/audit/entry_formatter_test.go b/audit/entry_formatter_test.go new file mode 100644 index 000000000000..35bf5cb058ed --- /dev/null +++ b/audit/entry_formatter_test.go @@ -0,0 +1,1372 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-sockaddr" + nshelper "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testFormatJSONReqBasicStrFmt = ` +{ + "time": "2015-08-05T13:45:46Z", + "type": "request", + "auth": { + "client_token": "%s", + "accessor": "bar", + "display_name": "testtoken", + "policies": [ + "root" + ], + "no_default_policy": true, + "metadata": null, + "entity_id": "foobarentity", + "token_type": "service", + "token_ttl": 14400, + "token_issue_time": "2020-05-28T13:40:18-05:00" + }, + "request": { + "operation": "update", + "path": "/foo", + "data": null, + "wrap_ttl": 60, + "remote_address": "127.0.0.1", + "headers": { + "foo": [ + "bar" + ] + } + }, + "error": "this is an error" +} +` + +// testHeaderFormatter is a stub to prevent the need to import the vault package +// to bring in vault.HeadersConfig for testing. +type testHeaderFormatter struct { + shouldReturnEmpty bool +} + +// ApplyConfig satisfies the HeaderFormatter interface for testing. +// It will either return the headers it was supplied or empty headers depending +// on how it is configured. +// ignore-nil-nil-function-check. +func (f *testHeaderFormatter) ApplyConfig(_ context.Context, headers map[string][]string, salter Salter) (result map[string][]string, retErr error) { + if f.shouldReturnEmpty { + return make(map[string][]string), nil + } + + return headers, nil +} + +// testTimeProvider is just a test struct used to imitate an AuditEvent's ability +// to provide a formatted time. +type testTimeProvider struct{} + +// formattedTime always returns the same value for 22nd March 2024 at 10:00:05 (and 10 nanos). +func (p *testTimeProvider) formattedTime() string { + return time.Date(2024, time.March, 22, 10, 0o0, 5, 10, time.UTC).UTC().Format(time.RFC3339Nano) +} + +// TestNewEntryFormatter ensures we can create new entryFormatter structs. 
+func TestNewEntryFormatter(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Name string + UseStaticSalt bool + Logger hclog.Logger + Options map[string]string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedFormat format + ExpectedPrefix string + }{ + "empty-name": { + Name: "", + IsErrorExpected: true, + ExpectedErrorMessage: "name is required: invalid internal parameter", + }, + "spacey-name": { + Name: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "name is required: invalid internal parameter", + }, + "nil-salter": { + Name: "juan", + UseStaticSalt: false, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create a new audit formatter with nil salter: invalid internal parameter", + }, + "nil-logger": { + Name: "juan", + UseStaticSalt: true, + Logger: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create a new audit formatter with nil logger: invalid internal parameter", + }, + "static-salter": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + IsErrorExpected: false, + Options: map[string]string{ + "format": "json", + }, + ExpectedFormat: jsonFormat, + }, + "default": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + IsErrorExpected: false, + ExpectedFormat: jsonFormat, + }, + "config-json": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: map[string]string{ + "format": "json", + }, + IsErrorExpected: false, + ExpectedFormat: jsonFormat, + }, + "config-jsonx": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: map[string]string{ + "format": "jsonx", + }, + IsErrorExpected: false, + ExpectedFormat: jsonxFormat, + }, + "config-json-prefix": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: map[string]string{ + "prefix": "foo", + "format": "json", + }, + IsErrorExpected: false, + ExpectedFormat: jsonFormat, + ExpectedPrefix: "foo", + }, + "config-jsonx-prefix": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: map[string]string{ + "prefix": "foo", + "format": "jsonx", + }, + IsErrorExpected: false, + ExpectedFormat: jsonxFormat, + ExpectedPrefix: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + var ss Salter + if tc.UseStaticSalt { + ss = newStaticSalt(t) + } + + cfg, err := newFormatterConfig(&testHeaderFormatter{}, tc.Options) + require.NoError(t, err) + f, err := newEntryFormatter(tc.Name, cfg, ss, tc.Logger) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, f) + default: + require.NoError(t, err) + require.NotNil(t, f) + require.Equal(t, tc.ExpectedFormat, f.config.requiredFormat) + require.Equal(t, tc.ExpectedPrefix, f.config.prefix) + } + }) + } +} + +// TestEntryFormatter_Reopen ensures that we do not get an error when calling Reopen. +func TestEntryFormatter_Reopen(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + require.NoError(t, err) + + f, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, f) + require.NoError(t, f.Reopen()) +} + +// TestEntryFormatter_Type ensures that the node is a 'formatter' type. 
+func TestEntryFormatter_Type(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + require.NoError(t, err) + + f, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, f) + require.Equal(t, eventlogger.NodeTypeFormatter, f.Type()) +} + +// TestEntryFormatter_Process attempts to run the Process method to convert the +// logical.LogInput within an audit event to JSON and JSONx (entry), +func TestEntryFormatter_Process(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + IsErrorExpected bool + ExpectedErrorMessage string + Subtype subtype + RequiredFormat format + Data *logical.LogInput + RootNamespace bool + }{ + "json-request-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit a 'request' event with no data: invalid internal parameter", + Subtype: RequestType, + RequiredFormat: jsonFormat, + Data: nil, + }, + "json-response-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit a 'response' event with no data: invalid internal parameter", + Subtype: ResponseType, + RequiredFormat: jsonFormat, + Data: nil, + }, + "json-request-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'request' audit event: request cannot be nil", + Subtype: RequestType, + RequiredFormat: jsonFormat, + Data: &logical.LogInput{Type: "magic"}, + RootNamespace: true, + }, + "json-response-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'response' audit event: request cannot be nil", + Subtype: ResponseType, + RequiredFormat: jsonFormat, + Data: &logical.LogInput{Type: "magic"}, + }, + "json-request-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve namespace from context: no namespace", + Subtype: RequestType, + RequiredFormat: jsonFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "json-response-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve namespace from context: no namespace", + Subtype: ResponseType, + RequiredFormat: jsonFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "json-request-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: RequestType, + RequiredFormat: jsonFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + RootNamespace: true, + }, + "json-response-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: ResponseType, + RequiredFormat: jsonFormat, + Data: &logical.LogInput{ + Request: &logical.Request{ID: "123"}, + Response: &logical.Response{}, + }, + RootNamespace: true, + }, + "jsonx-request-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit a 'request' event with no data: invalid internal parameter", + Subtype: RequestType, + RequiredFormat: jsonxFormat, + Data: nil, + }, + "jsonx-response-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit a 'response' event with no data: invalid internal parameter", + Subtype: ResponseType, + RequiredFormat: jsonxFormat, + Data: nil, + }, + "jsonx-request-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'request' audit event: request cannot be nil", + Subtype: RequestType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{Type: "magic"}, + RootNamespace: true, + }, + 
"jsonx-response-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'response' audit event: request cannot be nil", + Subtype: ResponseType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{Type: "magic"}, + RootNamespace: true, + }, + "jsonx-request-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve namespace from context: no namespace", + Subtype: RequestType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "jsonx-response-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve namespace from context: no namespace", + Subtype: ResponseType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "jsonx-request-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: RequestType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + RootNamespace: true, + }, + "jsonx-response-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: ResponseType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{ + Request: &logical.Request{ID: "123"}, + Response: &logical.Response{}, + }, + RootNamespace: true, + }, + "no-request": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'response' audit event: request cannot be nil", + Subtype: ResponseType, + RequiredFormat: jsonxFormat, + Data: &logical.LogInput{ + Auth: &logical.Auth{}, + }, + RootNamespace: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + e := fakeEvent(t, tc.Subtype, tc.Data) + require.NotNil(t, e) + + ss := newStaticSalt(t) + cfg, err := newFormatterConfig(&testHeaderFormatter{}, map[string]string{"format": tc.RequiredFormat.String()}) + require.NoError(t, err) + + f, err := newEntryFormatter("juan", cfg, ss, corehelpers.NewTestLogger(t)) + require.NoError(t, err) + require.NotNil(t, f) + + var ctx context.Context + switch { + case tc.RootNamespace: + ctx = nshelper.RootContext(context.Background()) + default: + ctx = context.Background() + } + + processed, err := f.Process(ctx, e) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, processed) + default: + require.NoError(t, err) + require.NotNil(t, processed) + b, found := processed.Format(string(tc.RequiredFormat)) + require.True(t, found) + require.NotNil(t, b) + } + }) + } +} + +// BenchmarkAuditFileSink_Process benchmarks the entryFormatter and then event.FileSink calling Process. +// This should replicate the original benchmark testing which used to perform both of these roles together. +func BenchmarkAuditFileSink_Process(b *testing.B) { + // Base input + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + } + + ctx := nshelper.RootContext(context.Background()) + + // Create the formatter node. 
+ cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + require.NoError(b, err) + ss := newStaticSalt(b) + formatter, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(b, err) + require.NotNil(b, formatter) + + // Create the sink node. + sink, err := event.NewFileSink("/dev/null", jsonFormat.String()) + require.NoError(b, err) + require.NotNil(b, sink) + + // Generate the event + e := fakeEvent(b, RequestType, in) + require.NotNil(b, e) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + e, err = formatter.Process(ctx, e) + if err != nil { + panic(err) + } + _, err := sink.Process(ctx, e) + if err != nil { + panic(err) + } + } + }) +} + +// TestEntryFormatter_Process_Request exercises the entryFormatter, processing an event +// with varying inputs. +func TestEntryFormatter_Process_Request(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Input *logical.LogInput + ShouldOmitTime bool + IsErrorExpected bool + ExpectedErrorMessage string + RootNamespace bool + }{ + "nil": { + Input: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit a 'request' event with no data: invalid internal parameter", + }, + "basic-input": { + Input: &logical.LogInput{}, + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'request' audit event: request cannot be nil", + }, + "input-and-request-no-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve namespace from context: no namespace", + RootNamespace: false, + }, + "input-and-request-with-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: false, + RootNamespace: true, + }, + "omit-time": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + ShouldOmitTime: true, + RootNamespace: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + cfg.omitTime = tc.ShouldOmitTime + require.NoError(t, err) + f, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + + var ctx context.Context + switch { + case tc.RootNamespace: + ctx = nshelper.RootContext(context.Background()) + default: + ctx = context.Background() + } + + auditEvent, err := newEvent(RequestType) + auditEvent.setTimeProvider(&testTimeProvider{}) + require.NoError(t, err) + auditEvent.Data = tc.Input + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + e2, err := f.Process(ctx, e) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, e2) + case tc.ShouldOmitTime: + require.NoError(t, err) + require.NotNil(t, e2) + b, ok := e2.Format(jsonFormat.String()) + require.True(t, ok) + var entry *entry + err = json.Unmarshal(b, &entry) + require.NoError(t, err) + require.Zero(t, entry.Time) + default: + require.NoError(t, err) + require.NotNil(t, e2) + b, ok := e2.Format(jsonFormat.String()) + require.True(t, ok) + var entry *entry + err = json.Unmarshal(b, &entry) + require.NoError(t, err) + require.NotZero(t, entry.Time) + require.Equal(t, "2024-03-22T10:00:05.00000001Z", entry.Time) + } + }) + } +} + +// TestEntryFormatter_Process_ResponseType exercises the entryFormatter +// with
varying inputs, also checking that the time can be omitted. +func TestEntryFormatter_Process_ResponseType(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Input *logical.LogInput + ShouldOmitTime bool + IsErrorExpected bool + ExpectedErrorMessage string + RootNamespace bool + }{ + "nil": { + Input: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit a 'response' event with no data: invalid internal parameter", + }, + "basic-input": { + Input: &logical.LogInput{}, + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from 'response' audit event: request cannot be nil", + }, + "input-and-request-no-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve namespace from context: no namespace", + RootNamespace: false, + }, + "input-and-request-with-ns": { + Input: &logical.LogInput{ + Request: &logical.Request{ID: "123"}, + Response: &logical.Response{}, + }, + IsErrorExpected: false, + RootNamespace: true, + }, + "omit-time": { + Input: &logical.LogInput{ + Request: &logical.Request{ID: "123"}, + Response: &logical.Response{}, + }, + ShouldOmitTime: true, + IsErrorExpected: false, + RootNamespace: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + cfg.omitTime = tc.ShouldOmitTime + require.NoError(t, err) + f, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + + var ctx context.Context + switch { + case tc.RootNamespace: + ctx = nshelper.RootContext(context.Background()) + default: + ctx = context.Background() + } + + auditEvent, err := newEvent(ResponseType) + auditEvent.setTimeProvider(&testTimeProvider{}) + require.NoError(t, err) + auditEvent.Data = tc.Input + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + e2, err := f.Process(ctx, e) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, e2) + case tc.ShouldOmitTime: + require.NoError(t, err) + require.NotNil(t, e2) + b, ok := e2.Format(jsonFormat.String()) + require.True(t, ok) + var entry *entry + err = json.Unmarshal(b, &entry) + require.NoError(t, err) + require.Zero(t, entry.Time) + default: + require.NoError(t, err) + require.NotNil(t, e2) + b, ok := e2.Format(jsonFormat.String()) + require.True(t, ok) + var entry *entry + err = json.Unmarshal(b, &entry) + require.NoError(t, err) + require.NotZero(t, entry.Time) + require.Equal(t, "2024-03-22T10:00:05.00000001Z", entry.Time) + } + }) + } +} + +// TestEntryFormatter_Process_JSON ensures that the JSON output we get matches what +// we expect for the specified LogInput.
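+// A configured prefix (e.g. "@cee: ") is expected to appear verbatim ahead of +// the JSON document, so a formatted line resembles: +// +// @cee: {"time":"2015-08-05T13:45:46Z","type":"request",...}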
+func TestEntryFormatter_Process_JSON(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + + expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, ss.salt.GetIdentifiedHMAC("foo")) + + issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + ExpectedStr string + }{ + "auth, request": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + errors.New("this is an error"), + "", + expectedResultStr, + }, + "auth, request with prefix": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + errors.New("this is an error"), + "@cee: ", + expectedResultStr, + }, + } + + for name, tc := range cases { + cfg, err := newFormatterConfig(&testHeaderFormatter{}, map[string]string{ + "hmac_accessor": "false", + "prefix": tc.Prefix, + }) + require.NoError(t, err) + formatter, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + + in := &logical.LogInput{ + Auth: tc.Auth, + Request: tc.Req, + OuterErr: tc.Err, + } + + // Create an audit event and more generic eventlogger.event to allow us + // to process (format). 
+ auditEvent, err := newEvent(RequestType) + require.NoError(t, err) + auditEvent.Data = in + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + e2, err := formatter.Process(nshelper.RootContext(nil), e) + require.NoErrorf(t, err, "bad: %s\nerr: %s", name, err) + + jsonBytes, ok := e2.Format(jsonFormat.String()) + require.True(t, ok) + require.Positive(t, len(jsonBytes)) + + if !strings.HasPrefix(string(jsonBytes), tc.Prefix) { + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) + } + + expectedJSON := new(entry) + + if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedJSON); err != nil { + t.Fatalf("bad json: %s", err) + } + expectedJSON.Request.Namespace = &namespace{ID: "root"} + + actualJSON := new(entry) + if err := jsonutil.DecodeJSON(jsonBytes[len(tc.Prefix):], &actualJSON); err != nil { + t.Fatalf("bad json: %s", err) + } + + expectedJSON.Time = actualJSON.Time + + expectedBytes, err := json.Marshal(expectedJSON) + if err != nil { + t.Fatalf("unable to marshal json: %s", err) + } + + if !strings.HasSuffix(strings.TrimSpace(string(jsonBytes)), string(expectedBytes)) { + t.Fatalf("bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", name, string(jsonBytes), string(expectedBytes)) + } + } +} + +// TestEntryFormatter_Process_JSONx ensures that the JSONx output we get matches what +// we expect for the specified LogInput. +func TestEntryFormatter_Process_JSONx(t *testing.T) { + t.Parallel() + + s, err := salt.NewSalt(context.Background(), nil, nil) + require.NoError(t, err) + tempStaticSalt := &staticSalt{salt: s} + + fooSalted := s.GetIdentifiedHMAC("foo") + issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") + + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + Result string + ExpectedStr string + }{ + "auth, request": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + PolicyOverride: true, + }, + errors.New("this is an error"), + "", + "", + fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), + }, + "auth, request with prefix": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + NoDefaultPolicy: true, + EntityID: "foobarentity", + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + 
PolicyOverride: true, + }, + errors.New("this is an error"), + "", + "@cee: ", + fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), + }, + } + + for name, tc := range cases { + cfg, err := newFormatterConfig( + &testHeaderFormatter{}, + map[string]string{ + "format": "jsonx", + "hmac_accessor": "false", + "prefix": tc.Prefix, + }) + cfg.omitTime = true + require.NoError(t, err) + formatter, err := newEntryFormatter("juan", cfg, tempStaticSalt, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + in := &logical.LogInput{ + Auth: tc.Auth, + Request: tc.Req, + OuterErr: tc.Err, + } + + // Create an audit event and more generic eventlogger.event to allow us + // to process (format). + auditEvent, err := newEvent(RequestType) + require.NoError(t, err) + auditEvent.Data = in + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + e2, err := formatter.Process(nshelper.RootContext(nil), e) + require.NoErrorf(t, err, "bad: %s\nerr: %s", name, err) + + jsonxBytes, ok := e2.Format(jsonxFormat.String()) + require.True(t, ok) + require.Positive(t, len(jsonxBytes)) + + if !strings.HasPrefix(string(jsonxBytes), tc.Prefix) { + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) + } + + if !strings.HasSuffix(strings.TrimSpace(string(jsonxBytes)), string(tc.ExpectedStr)) { + t.Fatalf( + "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", + name, strings.TrimSpace(string(jsonxBytes)), string(tc.ExpectedStr)) + } + } +} + +// TestEntryFormatter_ElideListResponses ensures that we correctly elide data in +// responses to LIST operations. 
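+// Conceptually, eliding replaces the conventional list-response fields with
+// their sizes while leaving any other keys untouched; a rough sketch of the
+// idea (the real implementation also interacts with hashing):
+//
+//	func elide(data map[string]any) {
+//		if keys, ok := data["keys"].([]string); ok {
+//			data["keys"] = len(keys)
+//		}
+//		if keyInfo, ok := data["key_info"].(map[string]any); ok {
+//			data["key_info"] = len(keyInfo)
+//		}
+//	}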
+func TestEntryFormatter_ElideListResponses(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + inputData map[string]any + expectedData map[string]any + }{ + "nil data": { + nil, + nil, + }, + "Normal list (keys only)": { + map[string]any{ + "keys": []string{"foo", "bar", "baz"}, + }, + map[string]any{ + "keys": 3, + }, + }, + "Enhanced list (has key_info)": { + map[string]any{ + "keys": []string{"foo", "bar", "baz", "quux"}, + "key_info": map[string]any{ + "foo": "alpha", + "bar": "beta", + "baz": "gamma", + "quux": "delta", + }, + }, + map[string]any{ + "keys": 4, + "key_info": 4, + }, + }, + "Unconventional other values in a list response are not touched": { + map[string]any{ + "keys": []string{"foo", "bar"}, + "something_else": "baz", + }, + map[string]any{ + "keys": 2, + "something_else": "baz", + }, + }, + "Conventional values in a list response are not elided if their data types are unconventional": { + map[string]any{ + "keys": map[string]any{ + "You wouldn't expect keys to be a map": nil, + }, + "key_info": []string{ + "You wouldn't expect key_info to be a slice", + }, + }, + map[string]any{ + "keys": map[string]any{ + "You wouldn't expect keys to be a map": nil, + }, + "key_info": []string{ + "You wouldn't expect key_info to be a slice", + }, + }, + }, + } + + oneInterestingTestCase := tests["Enhanced list (has key_info)"] + + ss := newStaticSalt(t) + ctx := nshelper.RootContext(context.Background()) + var formatter *entryFormatter + var err error + + format := func(t *testing.T, config formatterConfig, operation logical.Operation, inputData map[string]any) *entry { + formatter, err = newEntryFormatter("juan", config, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + in := &logical.LogInput{ + Request: &logical.Request{Operation: operation}, + Response: &logical.Response{Data: inputData}, + } + + auditEvent, err := newEvent(ResponseType) + require.NoError(t, err) + auditEvent.Data = in + + entry, err := formatter.createEntry(ctx, auditEvent) + require.NoError(t, err) + require.NotNil(t, entry) + + return entry + } + + t.Run("Default case", func(t *testing.T) { + config, err := newFormatterConfig(&testHeaderFormatter{}, map[string]string{"elide_list_responses": "true"}) + require.NoError(t, err) + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + entry := format(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, formatter.hashExpectedValueForComparison(tc.expectedData), entry.Response.Data) + }) + } + }) + + t.Run("When Operation is not list, eliding does not happen", func(t *testing.T) { + config, err := newFormatterConfig(&testHeaderFormatter{}, map[string]string{"elide_list_responses": "true"}) + require.NoError(t, err) + tc := oneInterestingTestCase + entry := format(t, config, logical.ReadOperation, tc.inputData) + hashedExpected := formatter.hashExpectedValueForComparison(tc.inputData) + assert.Equal(t, hashedExpected, entry.Response.Data) + }) + + t.Run("When elideListResponses is false, eliding does not happen", func(t *testing.T) { + config, err := newFormatterConfig(&testHeaderFormatter{}, map[string]string{ + "elide_list_responses": "false", + "format": "json", + }) + require.NoError(t, err) + tc := oneInterestingTestCase + entry := format(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, formatter.hashExpectedValueForComparison(tc.inputData), entry.Response.Data) + }) + + t.Run("When raw is true, eliding still happens", func(t *testing.T) { + 
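// Raw mode skips the HMAC hashing of values, so the elided data can be +// compared directly against the expected values (no +// hashExpectedValueForComparison required). +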
config, err := newFormatterConfig(&testHeaderFormatter{}, map[string]string{ + "elide_list_responses": "true", + "format": "json", + "log_raw": "true", + }) + require.NoError(t, err) + tc := oneInterestingTestCase + entry := format(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, tc.expectedData, entry.Response.Data) + }) +} + +// TestEntryFormatter_Process_NoMutation tests that the event returned by an +// entryFormatter.Process method is not the same as the one that it accepted. +func TestEntryFormatter_Process_NoMutation(t *testing.T) { + t.Parallel() + + // Create the formatter node. + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + require.NoError(t, err) + ss := newStaticSalt(t) + formatter, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + } + + e := fakeEvent(t, RequestType, in) + + e2, err := formatter.Process(nshelper.RootContext(nil), e) + require.NoError(t, err) + require.NotNil(t, e2) + + // Ensure the pointers are different. + require.NotEqual(t, e2, e) +} + +// TestEntryFormatter_Process_Panic tries to send data into the entryFormatter +// which will currently cause a panic when a response is formatted due to the +// underlying hashing that is done with reflectwalk. +func TestEntryFormatter_Process_Panic(t *testing.T) { + t.Parallel() + + // Create the formatter node. + cfg, err := newFormatterConfig(&testHeaderFormatter{}, nil) + require.NoError(t, err) + ss := newStaticSalt(t) + formatter, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + // The secret sauce, create a bad addr. + // see: https://github.com/hashicorp/vault/issues/16462 + badAddr, err := sockaddr.NewSockAddr("10.10.10.2/32 10.10.10.3/32") + require.NoError(t, err) + + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + Data: map[string]interface{}{}, + }, + Response: &logical.Response{ + Auth: &logical.Auth{}, + Data: map[string]any{ + "token_bound_cidrs": []*sockaddr.SockAddrMarshaler{ + {SockAddr: badAddr}, + }, + }, + }, + } + + e := fakeEvent(t, ResponseType, in) + + e2, err := formatter.Process(nshelper.RootContext(nil), e) + require.Error(t, err) + require.Contains(t, err.Error(), "panic generating audit log: \"juan\"") + require.Nil(t, e2) +} + +// TestEntryFormatter_NewFormatterConfig_NilHeaderFormatter ensures we cannot +// create a formatterConfig using NewFormatterConfig if we supply a nil formatter. 
+func TestEntryFormatter_NewFormatterConfig_NilHeaderFormatter(t *testing.T) { + _, err := newFormatterConfig(nil, nil) + require.Error(t, err) +} + +// TestEntryFormatter_Process_NeverLeaksHeaders ensures that we never accidentally +// leak headers if applying them means we don't have any. This is more like a sense +// check to ensure the returned event doesn't somehow end up with the headers 'back'. +func TestEntryFormatter_Process_NeverLeaksHeaders(t *testing.T) { + t.Parallel() + + // Create the formatter node. + cfg, err := newFormatterConfig(&testHeaderFormatter{shouldReturnEmpty: true}, nil) + require.NoError(t, err) + ss := newStaticSalt(t) + formatter, err := newEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + // Set up the input and verify we have a single foo:bar header. + var input *logical.LogInput + err = json.Unmarshal([]byte(testFormatJSONReqBasicStrFmt), &input) + require.NoError(t, err) + require.NotNil(t, input) + require.ElementsMatch(t, input.Request.Headers["foo"], []string{"bar"}) + + e := fakeEvent(t, RequestType, input) + + // Process the node. + ctx := nshelper.RootContext(context.Background()) + e2, err := formatter.Process(ctx, e) + require.NoError(t, err) + require.NotNil(t, e2) + + // Now check we can retrieve the formatted JSON. + jsonFormatted, b2 := e2.Format(jsonFormat.String()) + require.True(t, b2) + require.NotNil(t, jsonFormatted) + var input2 *logical.LogInput + err = json.Unmarshal(jsonFormatted, &input2) + require.NoError(t, err) + require.NotNil(t, input2) + require.Len(t, input2.Request.Headers, 0) +} + +// hashExpectedValueForComparison replicates enough of the audit HMAC process on a piece of expected data in a test, +// so that we can use assert.Equal to compare the expected and output values. +func (f *entryFormatter) hashExpectedValueForComparison(input map[string]any) map[string]any { + // Copy input before modifying, since we may re-use the same data in another test + copied, err := copystructure.Copy(input) + if err != nil { + panic(err) + } + copiedAsMap := copied.(map[string]any) + + s, err := f.salter.Salt(context.Background()) + if err != nil { + panic(err) + } + + err = hashMap(s.GetIdentifiedHMAC, copiedAsMap, nil) + if err != nil { + panic(err) + } + + return copiedAsMap +} + +// fakeEvent will return a new fake event containing audit data based on the +// specified subtype and logical.LogInput. +func fakeEvent(tb testing.TB, subtype subtype, input *logical.LogInput) *eventlogger.Event { + tb.Helper() + + date := time.Date(2023, time.July, 11, 15, 49, 10, 0o0, time.Local) + + auditEvent, err := newEvent(subtype, + withID("123"), + withNow(date), + ) + require.NoError(tb, err) + require.NotNil(tb, auditEvent) + require.Equal(tb, "123", auditEvent.ID) + require.Equal(tb, "v0.1", auditEvent.Version) + require.Equal(tb, subtype, auditEvent.Subtype) + require.Equal(tb, date, auditEvent.Timestamp) + + auditEvent.Data = input + auditEvent.setTimeProvider(&testTimeProvider{}) + + e := &eventlogger.Event{ + Type: eventlogger.EventType(event.AuditType), + CreatedAt: auditEvent.Timestamp, + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + return e +} + +// newStaticSalt returns a new staticSalt for use in testing. +func newStaticSalt(tb testing.TB) *staticSalt { + s, err := salt.NewSalt(context.Background(), nil, nil) + require.NoError(tb, err) + + return &staticSalt{salt: s} +} + +// staticSalt is a struct which can be used to obtain a static salt.
+// A salt must be assigned when the struct is initialized. +type staticSalt struct { + salt *salt.Salt +} + +// Salt returns the static salt and no error. +func (s *staticSalt) Salt(_ context.Context) (*salt.Salt, error) { + return s.salt, nil +} diff --git a/audit/errors.go b/audit/errors.go new file mode 100644 index 000000000000..31ac9fb4fbfe --- /dev/null +++ b/audit/errors.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import "errors" + +var ( + // ErrInternal should be used to represent an unexpected error that occurred + // within the audit system. + ErrInternal = errors.New("audit system internal error") + + // ErrInvalidParameter should be used to represent an error in which the + // internal audit system is receiving invalid parameters from other parts of + // Vault which should have already been validated. + ErrInvalidParameter = errors.New("invalid internal parameter") + + // ErrExternalOptions should be used to represent an error related to + // invalid configuration provided to Vault (i.e. by the Vault Operator). + ErrExternalOptions = errors.New("invalid configuration") +) + +// ConvertToExternalError handles converting an audit-related error that was generated +// in Vault and should appear as-is in the server logs into an error that can be +// returned to calling clients (via the API/CLI). +func ConvertToExternalError(err error) error { + // If the error is an internal error, the contents will have been logged, and + // we should probably shield the caller from the details. + if errors.Is(err, ErrInternal) { + return ErrInternal + } + + return err +} diff --git a/audit/errors_test.go b/audit/errors_test.go new file mode 100644 index 000000000000..2d6314843402 --- /dev/null +++ b/audit/errors_test.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestErrors_ConvertToExternalError is used to check that we 'mute' errors which +// have an internal error in their tree. +func TestErrors_ConvertToExternalError(t *testing.T) { + t.Parallel() + + err := fmt.Errorf("wrap this error: %w", ErrInternal) + res := ConvertToExternalError(err) + require.EqualError(t, res, "audit system internal error") + + err = fmt.Errorf("test: %w", errors.New("this is just an error")) + res = ConvertToExternalError(err) + require.Equal(t, "test: this is just an error", res.Error()) +} diff --git a/audit/event.go b/audit/event.go new file mode 100644 index 000000000000..11f02e6b9e9f --- /dev/null +++ b/audit/event.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" +) + +// version defines the version of audit events. +const version = "v0.1" + +// Audit subtypes. +const ( + RequestType subtype = "AuditRequest" + ResponseType subtype = "AuditResponse" +) + +// Audit formats. +const ( + jsonFormat format = "json" + jsonxFormat format = "jsonx" +) + +// Check that Event implements timeProvider at compile time. +var _ timeProvider = (*Event)(nil) + +// Event is the audit event. +type Event struct { + ID string `json:"id"` + Version string `json:"version"` + Subtype subtype `json:"subtype"` // the subtype of the audit event.
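+ // Timestamp records when the event was created; validate rejects the zero time.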
+ Timestamp time.Time `json:"timestamp"` + Data *logical.LogInput `json:"data"` + prov timeProvider +} + +// setTimeProvider can be used to set a specific time provider which is used when +// creating an entry. +// NOTE: This is primarily used for testing to supply a known time value. +func (a *Event) setTimeProvider(t timeProvider) { + a.prov = t +} + +// timeProvider returns a configured time provider, or the default if not set. +func (a *Event) timeProvider() timeProvider { + if a.prov == nil { + return a + } + + return a.prov +} + +// format defines the output formats that audit events support. +type format string + +// subtype defines the type of audit event. +type subtype string + +// newEvent should be used to create an audit event. The subtype field is needed +// for audit events. It will generate an ID if no ID is supplied. Supported +// options: withID, withNow. +func newEvent(s subtype, opt ...option) (*Event, error) { + // Get the default options + opts, err := getOpts(opt...) + if err != nil { + return nil, err + } + + if opts.withID == "" { + var err error + + opts.withID, err = event.NewID(string(event.AuditType)) + if err != nil { + return nil, fmt.Errorf("error creating ID for event: %w", err) + } + } + + audit := &Event{ + ID: opts.withID, + Timestamp: opts.withNow, + Version: version, + Subtype: s, + } + + if err := audit.validate(); err != nil { + return nil, err + } + return audit, nil +} + +// validate attempts to ensure the audit event in its present state is valid. +func (a *Event) validate() error { + if a == nil { + return fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + if a.ID == "" { + return fmt.Errorf("missing ID: %w", ErrInvalidParameter) + } + + if a.Version != version { + return fmt.Errorf("event version unsupported: %w", ErrInvalidParameter) + } + + if a.Timestamp.IsZero() { + return fmt.Errorf("event timestamp cannot be the zero time instant: %w", ErrInvalidParameter) + } + + err := a.Subtype.validate() + if err != nil { + return err + } + + return nil +} + +// validate ensures that subtype is one of the set of allowed event subtypes. +func (t subtype) validate() error { + switch t { + case RequestType, ResponseType: + return nil + default: + return fmt.Errorf("invalid event subtype %q: %w", t, ErrInvalidParameter) + } +} + +// validate ensures that format is one of the set of allowed event formats. +func (f format) validate() error { + switch f { + case jsonFormat, jsonxFormat: + return nil + default: + return fmt.Errorf("invalid format %q: %w", f, ErrInvalidParameter) + } +} + +// String returns the string version of a format. +func (f format) String() string { + return string(f) +} + +// MetricTag returns a tag corresponding to this subtype to include in metrics. +// If a tag cannot be found the value is returned 'as-is' in string format. +func (t subtype) MetricTag() string { + switch t { + case RequestType: + return "log_request" + case ResponseType: + return "log_response" + } + + return t.String() +} + +// String returns the subtype as a human-readable string. +func (t subtype) String() string { + switch t { + case RequestType: + return "request" + case ResponseType: + return "response" + } + + return string(t) +} + +// formattedTime returns the UTC time the Event was created in the RFC3339Nano +// format (which removes trailing zeros from the seconds field). +func (a *Event) formattedTime() string { + return a.Timestamp.UTC().Format(time.RFC3339Nano) +} + +// isValidFormat provides a means to validate whether the supplied format is valid.
+// Examples of valid formats are JSON and JSONx. +func isValidFormat(v string) bool { + err := format(strings.TrimSpace(strings.ToLower(v))).validate() + return err == nil +} diff --git a/audit/event_test.go b/audit/event_test.go new file mode 100644 index 000000000000..a9f17677fdc3 --- /dev/null +++ b/audit/event_test.go @@ -0,0 +1,446 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestAuditEvent_new exercises the newEvent func to create audit events. +func TestAuditEvent_new(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Options []option + Subtype subtype + Format format + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedID string + ExpectedFormat format + ExpectedSubtype subtype + ExpectedTimestamp time.Time + IsNowExpected bool + }{ + "nil": { + Options: nil, + Subtype: subtype(""), + Format: format(""), + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"\": invalid internal parameter", + }, + "empty-option": { + Options: []option{}, + Subtype: subtype(""), + Format: format(""), + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"\": invalid internal parameter", + }, + "bad-id": { + Options: []option{withID("")}, + Subtype: ResponseType, + Format: jsonFormat, + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty", + }, + "good": { + Options: []option{ + withID("audit_123"), + withFormat(string(jsonFormat)), + withSubtype(string(ResponseType)), + withNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + }, + Subtype: RequestType, + Format: jsonxFormat, + IsErrorExpected: false, + ExpectedID: "audit_123", + ExpectedTimestamp: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + ExpectedSubtype: RequestType, + ExpectedFormat: jsonxFormat, + }, + "good-no-time": { + Options: []option{ + withID("audit_123"), + withFormat(string(jsonFormat)), + withSubtype(string(ResponseType)), + }, + Subtype: RequestType, + Format: jsonxFormat, + IsErrorExpected: false, + ExpectedID: "audit_123", + ExpectedSubtype: RequestType, + ExpectedFormat: jsonxFormat, + IsNowExpected: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + audit, err := newEvent(tc.Subtype, tc.Options...) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, audit) + default: + require.NoError(t, err) + require.NotNil(t, audit) + require.Equal(t, tc.ExpectedID, audit.ID) + require.Equal(t, tc.ExpectedSubtype, audit.Subtype) + switch { + case tc.IsNowExpected: + require.True(t, time.Now().After(audit.Timestamp)) + require.False(t, audit.Timestamp.IsZero()) + default: + require.Equal(t, tc.ExpectedTimestamp, audit.Timestamp) + } + } + }) + } +} + +// TestAuditEvent_Validate exercises the validation for an audit event. 
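+// Note that events built via newEvent (rather than by hand, as below) have +// their ID and timestamp defaulted when the withID/withNow options are +// omitted, e.g.: +// +// e, _ := newEvent(RequestType) // generates an ID and stamps time.Now()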
+func TestAuditEvent_Validate(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value *Event + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "nil": { + Value: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "event is nil: invalid internal parameter", + }, + "default": { + Value: &Event{}, + IsErrorExpected: true, + ExpectedErrorMessage: "missing ID: invalid internal parameter", + }, + "id-empty": { + Value: &Event{ + ID: "", + Version: version, + Subtype: RequestType, + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "missing ID: invalid internal parameter", + }, + "version-fiddled": { + Value: &Event{ + ID: "audit_123", + Version: "magic-v2", + Subtype: RequestType, + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "event version unsupported: invalid internal parameter", + }, + "subtype-fiddled": { + Value: &Event{ + ID: "audit_123", + Version: version, + Subtype: subtype("moon"), + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"moon\": invalid internal parameter", + }, + "default-time": { + Value: &Event{ + ID: "audit_123", + Version: version, + Subtype: ResponseType, + Timestamp: time.Time{}, + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "event timestamp cannot be the zero time instant: invalid internal parameter", + }, + "valid": { + Value: &Event{ + ID: "audit_123", + Version: version, + Subtype: ResponseType, + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := tc.Value.validate() + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + } + }) + } +} + +// TestAuditEvent_Validate_Subtype exercises the validation for an audit event's subtype. +func TestAuditEvent_Validate_Subtype(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"\": invalid internal parameter", + }, + "unsupported": { + Value: "foo", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"foo\": invalid internal parameter", + }, + "request": { + Value: "AuditRequest", + IsErrorExpected: false, + }, + "response": { + Value: "AuditResponse", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := subtype(tc.Value).validate() + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + } + }) + } +} + +// TestAuditEvent_Validate_Format exercises the validation for an audit event's format. 
+func TestAuditEvent_Validate_Format(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid format \"\": invalid internal parameter", + }, + "unsupported": { + Value: "foo", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid format \"foo\": invalid internal parameter", + }, + "json": { + Value: "json", + IsErrorExpected: false, + }, + "jsonx": { + Value: "jsonx", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := format(tc.Value).validate() + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + } + }) + } +} + +// TestAuditEvent_Subtype_MetricTag is used to ensure that we get the string value +// we expect for a subtype when we want to use it as a metrics tag. +// In some strange scenario where the subtype was never validated, it is technically +// possible to get a value that isn't related to request/response, but this shouldn't +// really be happening, so we will return it as is. +func TestAuditEvent_Subtype_MetricTag(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input string + expectedOutput string + }{ + "request": { + input: "AuditRequest", + expectedOutput: "log_request", + }, + "response": { + input: "AuditResponse", + expectedOutput: "log_response", + }, + "non-validated": { + input: "juan", + expectedOutput: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + st := subtype(tc.input) + tag := st.MetricTag() + require.Equal(t, tc.expectedOutput, tag) + }) + } +} + +// TestAuditEvent_Subtype_String is used to ensure that we get the string value +// we expect for a subtype when it is used with the Stringer interface. +// e.g. an AuditRequest subtype is 'request' +func TestAuditEvent_Subtype_String(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input string + expectedOutput string + }{ + "request": { + input: "AuditRequest", + expectedOutput: "request", + }, + "response": { + input: "AuditResponse", + expectedOutput: "response", + }, + "non-validated": { + input: "juan", + expectedOutput: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + st := subtype(tc.input) + require.Equal(t, tc.expectedOutput, st.String()) + }) + } +} + +// TestAuditEvent_formattedTime is used to check the output from the formattedTime +// method returns the correct format. +func TestAuditEvent_formattedTime(t *testing.T) { + theTime := time.Date(2024, time.March, 22, 10, 0o0, 5, 10, time.UTC) + a, err := newEvent(ResponseType, withNow(theTime)) + require.NoError(t, err) + require.NotNil(t, a) + require.Equal(t, "2024-03-22T10:00:05.00000001Z", a.formattedTime()) +} + +// TestEvent_IsValidFormat ensures that we can correctly determine valid and +// invalid formats. 
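+// Input is trimmed and lower-cased before validation (see isValidFormat), +// which is why spacey and mixed-case values appear below, e.g.: +// +// isValidFormat(" JSONX ") // true: normalized to "jsonx" +// isValidFormat("yaml") // false: only json and jsonx are accepted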
+func TestEvent_IsValidFormat(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input string + expected bool + }{ + "empty": { + input: "", + expected: false, + }, + "whitespace": { + input: " ", + expected: false, + }, + "invalid-test": { + input: "test", + expected: false, + }, + "valid-json": { + input: "json", + expected: true, + }, + "upper-json": { + input: "JSON", + expected: true, + }, + "mixed-json": { + input: "Json", + expected: true, + }, + "spacey-json": { + input: " json ", + expected: true, + }, + "valid-jsonx": { + input: "jsonx", + expected: true, + }, + "upper-jsonx": { + input: "JSONX", + expected: true, + }, + "mixed-jsonx": { + input: "JsonX", + expected: true, + }, + "spacey-jsonx": { + input: " jsonx ", + expected: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + res := isValidFormat(tc.input) + require.Equal(t, tc.expected, res) + }) + } +} diff --git a/audit/format.go b/audit/format.go deleted file mode 100644 index 83ec18ad40a0..000000000000 --- a/audit/format.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "strings" - "time" - - squarejwt "gopkg.in/square/go-jose.v2/jwt" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -type AuditFormatWriter interface { - // WriteRequest writes the request entry to the writer or returns an error. - WriteRequest(io.Writer, *AuditRequestEntry) error - // WriteResponse writes the response entry to the writer or returns an error. - WriteResponse(io.Writer, *AuditResponseEntry) error - // Salt returns a non-nil salt or an error. 
- Salt(context.Context) (*salt.Salt, error) -} - -// AuditFormatter implements the Formatter interface, and allows the underlying -// marshaller to be swapped out -type AuditFormatter struct { - AuditFormatWriter -} - -var _ Formatter = (*AuditFormatter)(nil) - -func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { - if in == nil || in.Request == nil { - return fmt.Errorf("request to request-audit a nil request") - } - - if w == nil { - return fmt.Errorf("writer for audit request is nil") - } - - if f.AuditFormatWriter == nil { - return fmt.Errorf("no format writer specified") - } - - salt, err := f.Salt(ctx) - if err != nil { - return fmt.Errorf("error fetching salt: %w", err) - } - - // Set these to the input values at first - auth := in.Auth - req := in.Request - var connState *tls.ConnectionState - if auth == nil { - auth = new(logical.Auth) - } - - if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { - connState = in.Request.Connection.ConnState - } - - if !config.Raw { - auth, err = HashAuth(salt, auth, config.HMACAccessor) - if err != nil { - return err - } - - req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) - if err != nil { - return err - } - } - - var errString string - if in.OuterErr != nil { - errString = in.OuterErr.Error() - } - - ns, err := namespace.FromContext(ctx) - if err != nil { - return err - } - - reqType := in.Type - if reqType == "" { - reqType = "request" - } - reqEntry := &AuditRequestEntry{ - Type: reqType, - Error: errString, - - Auth: &AuditAuth{ - ClientToken: auth.ClientToken, - Accessor: auth.Accessor, - DisplayName: auth.DisplayName, - Policies: auth.Policies, - TokenPolicies: auth.TokenPolicies, - IdentityPolicies: auth.IdentityPolicies, - ExternalNamespacePolicies: auth.ExternalNamespacePolicies, - NoDefaultPolicy: auth.NoDefaultPolicy, - Metadata: auth.Metadata, - EntityID: auth.EntityID, - RemainingUses: req.ClientTokenRemainingUses, - TokenType: auth.TokenType.String(), - TokenTTL: int64(auth.TTL.Seconds()), - }, - - Request: &AuditRequest{ - ID: req.ID, - ClientID: req.ClientID, - ClientToken: req.ClientToken, - ClientTokenAccessor: req.ClientTokenAccessor, - Operation: req.Operation, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - Namespace: &AuditNamespace{ - ID: ns.ID, - Path: ns.Path, - }, - Path: req.Path, - Data: req.Data, - PolicyOverride: req.PolicyOverride, - RemoteAddr: getRemoteAddr(req), - RemotePort: getRemotePort(req), - ReplicationCluster: req.ReplicationCluster, - Headers: req.Headers, - ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), - }, - } - - if !auth.IssueTime.IsZero() { - reqEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) - } - - if auth.PolicyResults != nil { - reqEntry.Auth.PolicyResults = &AuditPolicyResults{ - Allowed: auth.PolicyResults.Allowed, - } - - for _, p := range auth.PolicyResults.GrantingPolicies { - reqEntry.Auth.PolicyResults.GrantingPolicies = append(reqEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ - Name: p.Name, - NamespaceId: p.NamespaceId, - Type: p.Type, - }) - } - } - - if req.WrapInfo != nil { - reqEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) - } - - if !config.OmitTime { - reqEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) - } - - return f.AuditFormatWriter.WriteRequest(w, reqEntry) -} - -func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config FormatterConfig, in 
*logical.LogInput) error { - if in == nil || in.Request == nil { - return fmt.Errorf("request to response-audit a nil request") - } - - if w == nil { - return fmt.Errorf("writer for audit request is nil") - } - - if f.AuditFormatWriter == nil { - return fmt.Errorf("no format writer specified") - } - - salt, err := f.Salt(ctx) - if err != nil { - return fmt.Errorf("error fetching salt: %w", err) - } - - // Set these to the input values at first - auth, req, resp := in.Auth, in.Request, in.Response - if auth == nil { - auth = new(logical.Auth) - } - if resp == nil { - resp = new(logical.Response) - } - var connState *tls.ConnectionState - - if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { - connState = in.Request.Connection.ConnState - } - - elideListResponseData := config.ElideListResponses && req.Operation == logical.ListOperation - - var respData map[string]interface{} - if config.Raw { - // In the non-raw case, elision of list response data occurs inside HashResponse, to avoid redundant deep - // copies and hashing of data only to elide it later. In the raw case, we need to do it here. - if elideListResponseData && resp.Data != nil { - // Copy the data map before making changes, but we only need to go one level deep in this case - respData = make(map[string]interface{}, len(resp.Data)) - for k, v := range resp.Data { - respData[k] = v - } - - doElideListResponseData(respData) - } else { - respData = resp.Data - } - } else { - auth, err = HashAuth(salt, auth, config.HMACAccessor) - if err != nil { - return err - } - - req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) - if err != nil { - return err - } - - resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys, elideListResponseData) - if err != nil { - return err - } - - respData = resp.Data - } - - var errString string - if in.OuterErr != nil { - errString = in.OuterErr.Error() - } - - ns, err := namespace.FromContext(ctx) - if err != nil { - return err - } - - var respAuth *AuditAuth - if resp.Auth != nil { - respAuth = &AuditAuth{ - ClientToken: resp.Auth.ClientToken, - Accessor: resp.Auth.Accessor, - DisplayName: resp.Auth.DisplayName, - Policies: resp.Auth.Policies, - TokenPolicies: resp.Auth.TokenPolicies, - IdentityPolicies: resp.Auth.IdentityPolicies, - ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies, - NoDefaultPolicy: resp.Auth.NoDefaultPolicy, - Metadata: resp.Auth.Metadata, - NumUses: resp.Auth.NumUses, - EntityID: resp.Auth.EntityID, - TokenType: resp.Auth.TokenType.String(), - TokenTTL: int64(resp.Auth.TTL.Seconds()), - } - if !resp.Auth.IssueTime.IsZero() { - respAuth.TokenIssueTime = resp.Auth.IssueTime.Format(time.RFC3339) - } - } - - var respSecret *AuditSecret - if resp.Secret != nil { - respSecret = &AuditSecret{ - LeaseID: resp.Secret.LeaseID, - } - } - - var respWrapInfo *AuditResponseWrapInfo - if resp.WrapInfo != nil { - token := resp.WrapInfo.Token - if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil { - token = *jwtToken - } - respWrapInfo = &AuditResponseWrapInfo{ - TTL: int(resp.WrapInfo.TTL / time.Second), - Token: token, - Accessor: resp.WrapInfo.Accessor, - CreationTime: resp.WrapInfo.CreationTime.UTC().Format(time.RFC3339Nano), - CreationPath: resp.WrapInfo.CreationPath, - WrappedAccessor: resp.WrapInfo.WrappedAccessor, - } - } - - respType := in.Type - if respType == "" { - respType = "response" - } - respEntry := &AuditResponseEntry{ - Type: respType, - Error: errString, - Auth: &AuditAuth{ - 
ClientToken: auth.ClientToken, - Accessor: auth.Accessor, - DisplayName: auth.DisplayName, - Policies: auth.Policies, - TokenPolicies: auth.TokenPolicies, - IdentityPolicies: auth.IdentityPolicies, - ExternalNamespacePolicies: auth.ExternalNamespacePolicies, - NoDefaultPolicy: auth.NoDefaultPolicy, - Metadata: auth.Metadata, - RemainingUses: req.ClientTokenRemainingUses, - EntityID: auth.EntityID, - EntityCreated: auth.EntityCreated, - TokenType: auth.TokenType.String(), - TokenTTL: int64(auth.TTL.Seconds()), - }, - - Request: &AuditRequest{ - ID: req.ID, - ClientToken: req.ClientToken, - ClientTokenAccessor: req.ClientTokenAccessor, - ClientID: req.ClientID, - Operation: req.Operation, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - Namespace: &AuditNamespace{ - ID: ns.ID, - Path: ns.Path, - }, - Path: req.Path, - Data: req.Data, - PolicyOverride: req.PolicyOverride, - RemoteAddr: getRemoteAddr(req), - RemotePort: getRemotePort(req), - ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), - ReplicationCluster: req.ReplicationCluster, - Headers: req.Headers, - }, - - Response: &AuditResponse{ - MountType: req.MountType, - MountAccessor: req.MountAccessor, - Auth: respAuth, - Secret: respSecret, - Data: respData, - Warnings: resp.Warnings, - Redirect: resp.Redirect, - WrapInfo: respWrapInfo, - Headers: resp.Headers, - }, - } - - if auth.PolicyResults != nil { - respEntry.Auth.PolicyResults = &AuditPolicyResults{ - Allowed: auth.PolicyResults.Allowed, - } - - for _, p := range auth.PolicyResults.GrantingPolicies { - respEntry.Auth.PolicyResults.GrantingPolicies = append(respEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ - Name: p.Name, - NamespaceId: p.NamespaceId, - Type: p.Type, - }) - } - } - - if !auth.IssueTime.IsZero() { - respEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) - } - if req.WrapInfo != nil { - respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) - } - - if !config.OmitTime { - respEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) - } - - return f.AuditFormatWriter.WriteResponse(w, respEntry) -} - -// AuditRequestEntry is the structure of a request audit log entry in Audit. -type AuditRequestEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type,omitempty"` - Auth *AuditAuth `json:"auth,omitempty"` - Request *AuditRequest `json:"request,omitempty"` - Error string `json:"error,omitempty"` -} - -// AuditResponseEntry is the structure of a response audit log entry in Audit. 
-type AuditResponseEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type,omitempty"` - Auth *AuditAuth `json:"auth,omitempty"` - Request *AuditRequest `json:"request,omitempty"` - Response *AuditResponse `json:"response,omitempty"` - Error string `json:"error,omitempty"` -} - -type AuditRequest struct { - ID string `json:"id,omitempty"` - ClientID string `json:"client_id,omitempty"` - ReplicationCluster string `json:"replication_cluster,omitempty"` - Operation logical.Operation `json:"operation,omitempty"` - MountType string `json:"mount_type,omitempty"` - MountAccessor string `json:"mount_accessor,omitempty"` - ClientToken string `json:"client_token,omitempty"` - ClientTokenAccessor string `json:"client_token_accessor,omitempty"` - Namespace *AuditNamespace `json:"namespace,omitempty"` - Path string `json:"path,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - PolicyOverride bool `json:"policy_override,omitempty"` - RemoteAddr string `json:"remote_address,omitempty"` - RemotePort int `json:"remote_port,omitempty"` - WrapTTL int `json:"wrap_ttl,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` - ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` -} - -type AuditResponse struct { - Auth *AuditAuth `json:"auth,omitempty"` - MountType string `json:"mount_type,omitempty"` - MountAccessor string `json:"mount_accessor,omitempty"` - Secret *AuditSecret `json:"secret,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Redirect string `json:"redirect,omitempty"` - WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` -} - -type AuditAuth struct { - ClientToken string `json:"client_token,omitempty"` - Accessor string `json:"accessor,omitempty"` - DisplayName string `json:"display_name,omitempty"` - Policies []string `json:"policies,omitempty"` - TokenPolicies []string `json:"token_policies,omitempty"` - IdentityPolicies []string `json:"identity_policies,omitempty"` - ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` - NoDefaultPolicy bool `json:"no_default_policy,omitempty"` - PolicyResults *AuditPolicyResults `json:"policy_results,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` - NumUses int `json:"num_uses,omitempty"` - RemainingUses int `json:"remaining_uses,omitempty"` - EntityID string `json:"entity_id,omitempty"` - EntityCreated bool `json:"entity_created,omitempty"` - TokenType string `json:"token_type,omitempty"` - TokenTTL int64 `json:"token_ttl,omitempty"` - TokenIssueTime string `json:"token_issue_time,omitempty"` -} - -type AuditPolicyResults struct { - Allowed bool `json:"allowed"` - GrantingPolicies []PolicyInfo `json:"granting_policies,omitempty"` -} - -type PolicyInfo struct { - Name string `json:"name,omitempty"` - NamespaceId string `json:"namespace_id,omitempty"` - Type string `json:"type"` -} - -type AuditSecret struct { - LeaseID string `json:"lease_id,omitempty"` -} - -type AuditResponseWrapInfo struct { - TTL int `json:"ttl,omitempty"` - Token string `json:"token,omitempty"` - Accessor string `json:"accessor,omitempty"` - CreationTime string `json:"creation_time,omitempty"` - CreationPath string `json:"creation_path,omitempty"` - WrappedAccessor string `json:"wrapped_accessor,omitempty"` -} - -type AuditNamespace struct { - ID string `json:"id,omitempty"` - Path string 
`json:"path,omitempty"` -} - -// getRemoteAddr safely gets the remote address avoiding a nil pointer -func getRemoteAddr(req *logical.Request) string { - if req != nil && req.Connection != nil { - return req.Connection.RemoteAddr - } - return "" -} - -// getRemotePort safely gets the remote port avoiding a nil pointer -func getRemotePort(req *logical.Request) int { - if req != nil && req.Connection != nil { - return req.Connection.RemotePort - } - return 0 -} - -func getClientCertificateSerialNumber(connState *tls.ConnectionState) string { - if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { - return "" - } - - return connState.VerifiedChains[0][0].SerialNumber.String() -} - -// parseVaultTokenFromJWT returns a string iff the token was a JWT and we could -// extract the original token ID from inside -func parseVaultTokenFromJWT(token string) *string { - if strings.Count(token, ".") != 2 { - return nil - } - - parsedJWT, err := squarejwt.ParseSigned(token) - if err != nil { - return nil - } - - var claims squarejwt.Claims - if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil { - return nil - } - - return &claims.ID -} - -// NewTemporaryFormatter creates a formatter not backed by a persistent salt -func NewTemporaryFormatter(format, prefix string) *AuditFormatter { - temporarySalt := func(ctx context.Context) (*salt.Salt, error) { - return salt.NewNonpersistentSalt(), nil - } - ret := &AuditFormatter{} - - switch format { - case "jsonx": - ret.AuditFormatWriter = &JSONxFormatWriter{ - Prefix: prefix, - SaltFunc: temporarySalt, - } - default: - ret.AuditFormatWriter = &JSONFormatWriter{ - Prefix: prefix, - SaltFunc: temporarySalt, - } - } - return ret -} - -// doElideListResponseData performs the actual elision of list operation response data, once surrounding code has -// determined it should apply to a particular request. The data map that is passed in must be a copy that is safe to -// modify in place, but need not be a full recursive deep copy, as only top-level keys are changed. -// -// See the documentation of the controlling option in FormatterConfig for more information on the purpose. -func doElideListResponseData(data map[string]interface{}) { - for k, v := range data { - if k == "keys" { - if vSlice, ok := v.([]string); ok { - data[k] = len(vSlice) - } - } else if k == "key_info" { - if vMap, ok := v.(map[string]interface{}); ok { - data[k] = len(vMap) - } - } - } -} diff --git a/audit/format_json.go b/audit/format_json.go deleted file mode 100644 index 74f4138184a4..000000000000 --- a/audit/format_json.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "context" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/vault/sdk/helper/salt" -) - -// JSONFormatWriter is an AuditFormatWriter implementation that structures data into -// a JSON format. 
-type JSONFormatWriter struct { - Prefix string - SaltFunc func(context.Context) (*salt.Salt, error) -} - -func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { - if req == nil { - return fmt.Errorf("request entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - enc := json.NewEncoder(w) - return enc.Encode(req) -} - -func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { - if resp == nil { - return fmt.Errorf("response entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - enc := json.NewEncoder(w) - return enc.Encode(resp) -} - -func (f *JSONFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - return f.SaltFunc(ctx) -} diff --git a/audit/format_json_test.go b/audit/format_json_test.go deleted file mode 100644 index fa31cde83bea..000000000000 --- a/audit/format_json_test.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func TestFormatJSON_formatRequest(t *testing.T) { - salter, err := salt.NewSalt(context.Background(), nil, nil) - if err != nil { - t.Fatal(err) - } - saltFunc := func(context.Context) (*salt.Salt, error) { - return salter, nil - } - - expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, salter.GetIdentifiedHMAC("foo")) - - issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") - cases := map[string]struct { - Auth *logical.Auth - Req *logical.Request - Err error - Prefix string - ExpectedStr string - }{ - "auth, request": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - EntityID: "foobarentity", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - }, - errors.New("this is an error"), - "", - expectedResultStr, - }, - "auth, request with prefix": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - EntityID: "foobarentity", - DisplayName: "testtoken", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - }, - errors.New("this is an error"), - "@cee: ", - expectedResultStr, - }, - } - - for name, tc := range cases { - var buf bytes.Buffer - formatter := AuditFormatter{ - AuditFormatWriter: &JSONFormatWriter{ - Prefix: tc.Prefix, - SaltFunc: saltFunc, - }, - } - config := FormatterConfig{ - 
HMACAccessor: false, - } - in := &logical.LogInput{ - Auth: tc.Auth, - Request: tc.Req, - OuterErr: tc.Err, - } - if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil { - t.Fatalf("bad: %s\nerr: %s", name, err) - } - - if !strings.HasPrefix(buf.String(), tc.Prefix) { - t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) - } - - expectedjson := new(AuditRequestEntry) - - if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil { - t.Fatalf("bad json: %s", err) - } - expectedjson.Request.Namespace = &AuditNamespace{ID: "root"} - - actualjson := new(AuditRequestEntry) - if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil { - t.Fatalf("bad json: %s", err) - } - - expectedjson.Time = actualjson.Time - - expectedBytes, err := json.Marshal(expectedjson) - if err != nil { - t.Fatalf("unable to marshal json: %s", err) - } - - if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(expectedBytes)) { - t.Fatalf( - "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", - name, buf.String(), string(expectedBytes)) - } - } -} - -const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"no_default_policy":true,"metadata":null,"entity_id":"foobarentity","token_type":"service", "token_ttl": 14400, "token_issue_time": "2020-05-28T13:40:18-05:00"},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} -` diff --git a/audit/format_jsonx.go b/audit/format_jsonx.go deleted file mode 100644 index 20352a2deade..000000000000 --- a/audit/format_jsonx.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "context" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/jefferai/jsonx" -) - -// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into - an XML format. 
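The JSONx writer declared below reuses the JSON marshalling path and then transcodes the resulting bytes to XML via the same github.com/jefferai/jsonx helper it imports. A rough sketch of that two-step flow:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jefferai/jsonx"
)

func main() {
	// Step 1: marshal the audit entry to JSON, exactly as the JSON writer does.
	entry := map[string]interface{}{"type": "request", "path": "secret/foo"}
	jsonBytes, err := json.Marshal(entry)
	if err != nil {
		panic(err)
	}

	// Step 2: transcode the JSON bytes to JSONx (an XML encoding of JSON).
	xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(xmlBytes))
}
```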
-type JSONxFormatWriter struct { - Prefix string - SaltFunc func(context.Context) (*salt.Salt, error) -} - -func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { - if req == nil { - return fmt.Errorf("request entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - jsonBytes, err := json.Marshal(req) - if err != nil { - return err - } - - xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) - if err != nil { - return err - } - - _, err = w.Write(xmlBytes) - return err -} - -func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { - if resp == nil { - return fmt.Errorf("response entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - jsonBytes, err := json.Marshal(resp) - if err != nil { - return err - } - - xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) - if err != nil { - return err - } - - _, err = w.Write(xmlBytes) - return err -} - -func (f *JSONxFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - return f.SaltFunc(ctx) -} diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go deleted file mode 100644 index fb6046195187..000000000000 --- a/audit/format_jsonx_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func TestFormatJSONx_formatRequest(t *testing.T) { - salter, err := salt.NewSalt(context.Background(), nil, nil) - if err != nil { - t.Fatal(err) - } - saltFunc := func(context.Context) (*salt.Salt, error) { - return salter, nil - } - - fooSalted := salter.GetIdentifiedHMAC("foo") - issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") - - cases := map[string]struct { - Auth *logical.Auth - Req *logical.Request - Err error - Prefix string - Result string - ExpectedStr string - }{ - "auth, request": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - EntityID: "foobarentity", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - ID: "request", - ClientToken: "foo", - ClientTokenAccessor: "bar", - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - PolicyOverride: true, - }, - errors.New("this is an error"), - "", - "", - fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, - fooSalted, fooSalted), - }, - "auth, request with prefix": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - NoDefaultPolicy: true, - EntityID: "foobarentity", - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - ID: "request", - ClientToken: "foo", - ClientTokenAccessor: "bar", - 
Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - PolicyOverride: true, - }, - errors.New("this is an error"), - "", - "@cee: ", - fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, - fooSalted, fooSalted), - }, - } - - for name, tc := range cases { - var buf bytes.Buffer - formatter := AuditFormatter{ - AuditFormatWriter: &JSONxFormatWriter{ - Prefix: tc.Prefix, - SaltFunc: saltFunc, - }, - } - config := FormatterConfig{ - OmitTime: true, - HMACAccessor: false, - } - in := &logical.LogInput{ - Auth: tc.Auth, - Request: tc.Req, - OuterErr: tc.Err, - } - if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil { - t.Fatalf("bad: %s\nerr: %s", name, err) - } - - if !strings.HasPrefix(buf.String(), tc.Prefix) { - t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) - } - - if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) { - t.Fatalf( - "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", - name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) - } - } -} diff --git a/audit/format_test.go b/audit/format_test.go deleted file mode 100644 index 5395d916cc81..000000000000 --- a/audit/format_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "context" - "io" - "testing" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/copystructure" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type testingFormatWriter struct { - salt *salt.Salt - lastRequest *AuditRequestEntry - lastResponse *AuditResponseEntry -} - -func (fw *testingFormatWriter) WriteRequest(_ io.Writer, entry *AuditRequestEntry) error { - fw.lastRequest = entry - return nil -} - -func (fw *testingFormatWriter) WriteResponse(_ io.Writer, entry *AuditResponseEntry) error { - fw.lastResponse = entry - return nil -} - -func (fw *testingFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - if fw.salt != nil { - return fw.salt, nil - } - var err error - fw.salt, err = salt.NewSalt(ctx, nil, nil) - if err != nil { - return nil, err - } - return fw.salt, nil -} - -// hashExpectedValueForComparison replicates enough of the audit HMAC process on a piece of expected data in a test, -// so that we can use assert.Equal to compare the expected and output values. 
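In other words, the helper below re-salts the expected data with the same salt the formatter used, so expected and actual entries can be compared directly. Conceptually (a trimmed-down sketch, not the helper itself):

```
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/salt"
)

func main() {
	// An ephemeral salt, as the surrounding tests obtain via salt.NewSalt(ctx, nil, nil).
	s, err := salt.NewSalt(context.Background(), nil, nil)
	if err != nil {
		panic(err)
	}

	// Re-salt the *expected* values with the same salt the formatter used,
	// so a plain equality assertion can compare expected and actual output.
	expected := map[string]interface{}{"password": "s3cr3t"}
	expected["password"] = s.GetIdentifiedHMAC(expected["password"].(string))

	fmt.Println(expected["password"]) // e.g. hmac-sha256:...
}
```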
-func (fw *testingFormatWriter) hashExpectedValueForComparison(input map[string]interface{}) map[string]interface{} { - // Copy input before modifying, since we may re-use the same data in another test - copied, err := copystructure.Copy(input) - if err != nil { - panic(err) - } - copiedAsMap := copied.(map[string]interface{}) - - salter, err := fw.Salt(context.Background()) - if err != nil { - panic(err) - } - - err = hashMap(salter.GetIdentifiedHMAC, copiedAsMap, nil) - if err != nil { - panic(err) - } - - return copiedAsMap -} - -func TestFormatRequestErrors(t *testing.T) { - config := FormatterConfig{} - formatter := AuditFormatter{ - AuditFormatWriter: &testingFormatWriter{}, - } - - if err := formatter.FormatRequest(context.Background(), io.Discard, config, &logical.LogInput{}); err == nil { - t.Fatal("expected error due to nil request") - } - - in := &logical.LogInput{ - Request: &logical.Request{}, - } - if err := formatter.FormatRequest(context.Background(), nil, config, in); err == nil { - t.Fatal("expected error due to nil writer") - } -} - -func TestFormatResponseErrors(t *testing.T) { - config := FormatterConfig{} - formatter := AuditFormatter{ - AuditFormatWriter: &testingFormatWriter{}, - } - - if err := formatter.FormatResponse(context.Background(), io.Discard, config, &logical.LogInput{}); err == nil { - t.Fatal("expected error due to nil request") - } - - in := &logical.LogInput{ - Request: &logical.Request{}, - } - if err := formatter.FormatResponse(context.Background(), nil, config, in); err == nil { - t.Fatal("expected error due to nil writer") - } -} - -func TestElideListResponses(t *testing.T) { - tfw := testingFormatWriter{} - formatter := AuditFormatter{&tfw} - ctx := namespace.RootContext(context.Background()) - - type test struct { - name string - inputData map[string]interface{} - expectedData map[string]interface{} - } - - tests := []test{ - { - "nil data", - nil, - nil, - }, - { - "Normal list (keys only)", - map[string]interface{}{ - "keys": []string{"foo", "bar", "baz"}, - }, - map[string]interface{}{ - "keys": 3, - }, - }, - { - "Enhanced list (has key_info)", - map[string]interface{}{ - "keys": []string{"foo", "bar", "baz", "quux"}, - "key_info": map[string]interface{}{ - "foo": "alpha", - "bar": "beta", - "baz": "gamma", - "quux": "delta", - }, - }, - map[string]interface{}{ - "keys": 4, - "key_info": 4, - }, - }, - { - "Unconventional other values in a list response are not touched", - map[string]interface{}{ - "keys": []string{"foo", "bar"}, - "something_else": "baz", - }, - map[string]interface{}{ - "keys": 2, - "something_else": "baz", - }, - }, - { - "Conventional values in a list response are not elided if their data types are unconventional", - map[string]interface{}{ - "keys": map[string]interface{}{ - "You wouldn't expect keys to be a map": nil, - }, - "key_info": []string{ - "You wouldn't expect key_info to be a slice", - }, - }, - map[string]interface{}{ - "keys": map[string]interface{}{ - "You wouldn't expect keys to be a map": nil, - }, - "key_info": []string{ - "You wouldn't expect key_info to be a slice", - }, - }, - }, - } - oneInterestingTestCase := tests[2] - - formatResponse := func( - t *testing.T, - config FormatterConfig, - operation logical.Operation, - inputData map[string]interface{}, - ) { - err := formatter.FormatResponse(ctx, io.Discard, config, &logical.LogInput{ - Request: &logical.Request{Operation: operation}, - Response: &logical.Response{Data: inputData}, - }) - require.Nil(t, err) - } - - t.Run("Default case", func(t 
*testing.T) { - config := FormatterConfig{ElideListResponses: true} - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - formatResponse(t, config, logical.ListOperation, tc.inputData) - assert.Equal(t, tfw.hashExpectedValueForComparison(tc.expectedData), tfw.lastResponse.Response.Data) - }) - } - }) - - t.Run("When Operation is not list, eliding does not happen", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: true} - tc := oneInterestingTestCase - formatResponse(t, config, logical.ReadOperation, tc.inputData) - assert.Equal(t, tfw.hashExpectedValueForComparison(tc.inputData), tfw.lastResponse.Response.Data) - }) - - t.Run("When ElideListResponses is false, eliding does not happen", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: false} - tc := oneInterestingTestCase - formatResponse(t, config, logical.ListOperation, tc.inputData) - assert.Equal(t, tfw.hashExpectedValueForComparison(tc.inputData), tfw.lastResponse.Response.Data) - }) - - t.Run("When Raw is true, eliding still happens", func(t *testing.T) { - config := FormatterConfig{ElideListResponses: true, Raw: true} - tc := oneInterestingTestCase - formatResponse(t, config, logical.ListOperation, tc.inputData) - assert.Equal(t, tc.expectedData, tfw.lastResponse.Response.Data) - }) -} diff --git a/audit/formatter.go b/audit/formatter.go deleted file mode 100644 index 98c393c3b817..000000000000 --- a/audit/formatter.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package audit - -import ( - "context" - "io" - - "github.com/hashicorp/vault/sdk/logical" -) - -// Formatter is an interface that is responsible for formatting a -// request/response into some format. Formatters write their output -// to an io.Writer. -// -// It is recommended that you pass data through Hash prior to formatting it. -type Formatter interface { - FormatRequest(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error - FormatResponse(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error -} - -type FormatterConfig struct { - Raw bool - HMACAccessor bool - - // Vault lacks pagination in its APIs. As a result, certain list operations can return **very** large responses. - // The user's chosen audit sinks may experience difficulty consuming audit records that swell to tens of megabytes - // of JSON. The responses of list operations are typically not very interesting, as they are mostly lists of keys, - // or, even when they include a "key_info" field, are not returning confidential information. They become even less - // interesting once HMAC-ed by the audit system. - // - // Some example Vault "list" operations that are prone to becoming very large in an active Vault installation are: - // auth/token/accessors/ - // identity/entity/id/ - // identity/entity-alias/id/ - // pki/certs/ - // - // This option exists to provide such users with the option to have response data elided from audit logs, only when - // the operation type is "list". For added safety, the elision only applies to the "keys" and "key_info" fields - // within the response data - these are conventionally the only fields present in a list response - see - // logical.ListResponse, and logical.ListResponseWithInfo. However, other fields are technically possible if a - // plugin author writes unusual code, and these will be preserved in the audit log even with this option enabled. 
- // The elision replaces the values of the "keys" and "key_info" fields with an integer count of the number of - // entries. This allows even the elided audit logs to still be useful for answering questions like - // "Was any data returned?" or "How many records were listed?". - ElideListResponses bool - - // This should only ever be used in a testing context - OmitTime bool -} diff --git a/audit/hashstructure.go b/audit/hashstructure.go index cd4f8085d13d..da6f2fe4fa99 100644 --- a/audit/hashstructure.go +++ b/audit/hashstructure.go @@ -1,94 +1,96 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package audit import ( + "context" "encoding/json" "errors" "reflect" "time" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/copystructure" "github.com/mitchellh/reflectwalk" ) -// HashString hashes the given opaque string and returns it -func HashString(salter *salt.Salt, data string) string { - return salter.GetIdentifiedHMAC(data) +// hashString uses the Salter to hash the supplied opaque string and returns it. +func hashString(ctx context.Context, salter Salter, data string) (string, error) { + salt, err := salter.Salt(ctx) + if err != nil { + return "", err + } + + return salt.GetIdentifiedHMAC(data), nil } -// HashAuth returns a hashed copy of the logical.Auth input. -func HashAuth(salter *salt.Salt, in *logical.Auth, HMACAccessor bool) (*logical.Auth, error) { - if in == nil { - return nil, nil +// hashAuth uses the Salter to hash the supplied auth (modifying it). +// hmacAccessor is used to indicate whether the accessor should also be HMAC'd +// when present. +func hashAuth(ctx context.Context, salter Salter, auth *auth, hmacAccessor bool) error { + if auth == nil { + return nil + } + + salt, err := salter.Salt(ctx) + if err != nil { + return err } - fn := salter.GetIdentifiedHMAC - auth := *in + fn := salt.GetIdentifiedHMAC if auth.ClientToken != "" { auth.ClientToken = fn(auth.ClientToken) } - if HMACAccessor && auth.Accessor != "" { + if hmacAccessor && auth.Accessor != "" { auth.Accessor = fn(auth.Accessor) } - return &auth, nil + + return nil } -// HashRequest returns a hashed copy of the logical.Request input. -func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Request, error) { - if in == nil { - return nil, nil +// hashRequest uses the Salter to hash the supplied request (modifying it). +// hmacAccessor is used to indicate whether some accessors should also be HMAC'd +// when present. +// nonHMACDataKeys is used when hashing any 'Data' field within the request which +// prevents those specific keys from being HMAC'd. 
+func hashRequest(ctx context.Context, salter Salter, req *request, hmacAccessor bool, nonHMACDataKeys []string) error { + if req == nil { + return nil } - fn := salter.GetIdentifiedHMAC - req := *in - - if req.Auth != nil { - cp, err := copystructure.Copy(req.Auth) - if err != nil { - return nil, err - } - - req.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) - if err != nil { - return nil, err - } + salt, err := salter.Salt(ctx) + if err != nil { + return err } + fn := salt.GetIdentifiedHMAC + if req.ClientToken != "" { req.ClientToken = fn(req.ClientToken) } - if HMACAccessor && req.ClientTokenAccessor != "" { + if hmacAccessor && req.ClientTokenAccessor != "" { req.ClientTokenAccessor = fn(req.ClientTokenAccessor) } if req.Data != nil { - copy, err := copystructure.Copy(req.Data) + err = hashMap(fn, req.Data, nonHMACDataKeys) if err != nil { - return nil, err + return err } - - err = hashMap(fn, copy.(map[string]interface{}), nonHMACDataKeys) - if err != nil { - return nil, err - } - req.Data = copy.(map[string]interface{}) } - return &req, nil + return nil } -func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKeys []string) error { +func hashMap(hashFunc hashCallback, data map[string]interface{}, nonHMACDataKeys []string) error { for k, v := range data { if o, ok := v.(logical.OptMarshaler); ok { marshaled, err := o.MarshalJSONWithOptions(&logical.MarshalOptions{ - ValueHasher: fn, + ValueHasher: hashFunc, }) if err != nil { return err @@ -97,106 +99,87 @@ func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKey } } - return HashStructure(data, fn, nonHMACDataKeys) + return hashStructure(data, hashFunc, nonHMACDataKeys) } -// HashResponse returns a hashed copy of the logical.Request input. -func HashResponse( - salter *salt.Salt, - in *logical.Response, - HMACAccessor bool, - nonHMACDataKeys []string, - elideListResponseData bool, -) (*logical.Response, error) { - if in == nil { - return nil, nil +// hashResponse uses the Salter to hash the supplied response (modifying it). +// hmacAccessor is used to indicate whether some accessors should also be HMAC'd +// when present. +// nonHMACDataKeys is used when hashing any 'Data' field within the response which +// prevents those specific keys from being HMAC'd. 
+// See: /vault/docs/audit#eliding-list-response-bodies +func hashResponse(ctx context.Context, salter Salter, resp *response, hmacAccessor bool, nonHMACDataKeys []string) error { + if resp == nil { + return nil } - fn := salter.GetIdentifiedHMAC - resp := *in + salt, err := salter.Salt(ctx) + if err != nil { + return err + } - if resp.Auth != nil { - cp, err := copystructure.Copy(resp.Auth) - if err != nil { - return nil, err - } + fn := salt.GetIdentifiedHMAC - resp.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) - if err != nil { - return nil, err - } + err = hashAuth(ctx, salter, resp.Auth, hmacAccessor) + if err != nil { + return err } if resp.Data != nil { - copy, err := copystructure.Copy(resp.Data) - if err != nil { - return nil, err - } - - mapCopy := copy.(map[string]interface{}) - if b, ok := mapCopy[logical.HTTPRawBody].([]byte); ok { - mapCopy[logical.HTTPRawBody] = string(b) - } - - // Processing list response data elision takes place at this point in the code for performance reasons: - // - take advantage of the deep copy of resp.Data that was going to be done anyway for hashing - // - but elide data before potentially spending time hashing it - if elideListResponseData { - doElideListResponseData(mapCopy) + if b, ok := resp.Data[logical.HTTPRawBody].([]byte); ok { + resp.Data[logical.HTTPRawBody] = string(b) } - err = hashMap(fn, mapCopy, nonHMACDataKeys) + err = hashMap(fn, resp.Data, nonHMACDataKeys) if err != nil { - return nil, err + return err } - resp.Data = mapCopy } if resp.WrapInfo != nil { var err error - resp.WrapInfo, err = HashWrapInfo(salter, resp.WrapInfo, HMACAccessor) + err = hashWrapInfo(fn, resp.WrapInfo, hmacAccessor) if err != nil { - return nil, err + return err } } - return &resp, nil + return nil } -// HashWrapInfo returns a hashed copy of the wrapping.ResponseWrapInfo input. -func HashWrapInfo(salter *salt.Salt, in *wrapping.ResponseWrapInfo, HMACAccessor bool) (*wrapping.ResponseWrapInfo, error) { - if in == nil { - return nil, nil +// hashWrapInfo uses the supplied hashing function to hash responseWrapInfo (modifying it). +// hmacAccessor is used to indicate whether some accessors should also be HMAC'd +// when present. +func hashWrapInfo(hashFunc hashCallback, wrapInfo *responseWrapInfo, hmacAccessor bool) error { + if wrapInfo == nil { + return nil } - fn := salter.GetIdentifiedHMAC - wrapinfo := *in - - wrapinfo.Token = fn(wrapinfo.Token) + wrapInfo.Token = hashFunc(wrapInfo.Token) - if HMACAccessor { - wrapinfo.Accessor = fn(wrapinfo.Accessor) + if hmacAccessor { + wrapInfo.Accessor = hashFunc(wrapInfo.Accessor) - if wrapinfo.WrappedAccessor != "" { - wrapinfo.WrappedAccessor = fn(wrapinfo.WrappedAccessor) + if wrapInfo.WrappedAccessor != "" { + wrapInfo.WrappedAccessor = hashFunc(wrapInfo.WrappedAccessor) } } - return &wrapinfo, nil + return nil } -// HashStructure takes an interface and hashes all the values within +// hashStructure takes an interface and hashes all the values within // the structure. Only _values_ are hashed: keys of objects are not. // -// For the HashCallback, see the built-in HashCallbacks below. -func HashStructure(s interface{}, cb HashCallback, ignoredKeys []string) error { +// For the hashCallback, see the built-in HashCallbacks below. 
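A hashCallback is simply a func(string) string. The production walker below handles arbitrarily nested values via reflectwalk; the callback contract itself can be shown with a flat map (an illustrative sketch only):

```
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/salt"
)

// applyToStrings applies cb to every top-level string value, skipping ignored
// keys. The real hashStructure handles arbitrarily nested values via
// reflectwalk; this flat version only illustrates the callback contract.
func applyToStrings(data map[string]interface{}, cb func(string) string, ignored map[string]bool) {
	for k, v := range data {
		if ignored[k] {
			continue
		}
		if s, ok := v.(string); ok {
			data[k] = cb(s)
		}
	}
}

func main() {
	s, err := salt.NewSalt(context.Background(), nil, nil)
	if err != nil {
		panic(err)
	}

	data := map[string]interface{}{"token": "foo", "plain": "bar"}
	applyToStrings(data, s.GetIdentifiedHMAC, map[string]bool{"plain": true})
	fmt.Println(data) // "token" is now hmac-sha256:..., "plain" is untouched
}
```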
+func hashStructure(s interface{}, cb hashCallback, ignoredKeys []string) error { walker := &hashWalker{Callback: cb, IgnoredKeys: ignoredKeys} return reflectwalk.Walk(s, walker) } -// HashCallback is the callback called for HashStructure to hash +// hashCallback is the callback called for hashStructure to hash // a value. -type HashCallback func(string) string +type hashCallback func(string) string // hashWalker implements interfaces for the reflectwalk package // (github.com/mitchellh/reflectwalk) that can be used to automatically @@ -205,21 +188,27 @@ type hashWalker struct { // Callback is the function to call with the primitive that is // to be hashed. If there is an error, walking will be halted // immediately and the error returned. - Callback HashCallback - // IgnoreKeys are the keys that wont have the HashCallback applied + Callback hashCallback + + // IgnoreKeys are the keys that won't have the hashCallback applied IgnoredKeys []string + // MapElem appends the key itself (not the reflect.Value) to key. // The last element in key is the most recently entered map key. // Since Exit pops the last element of key, only nesting to another // structure increases the size of this slice. - key []string + key []string + lastValue reflect.Value + // Enter appends to loc and exit pops loc. The last element of loc is thus // the current location. loc []reflectwalk.Location + // Map and Slice append to cs, Exit pops the last element off cs. // The last element in cs is the most recently entered map or slice. cs []reflect.Value + // MapElem and SliceElem append to csKey. The last element in csKey is the // most recently entered map key or slice index. Since Exit pops the last // element of csKey, only nesting to another structure increases the size of diff --git a/audit/hashstructure_test.go b/audit/hashstructure_test.go index c65931f7c5be..17d611652c32 100644 --- a/audit/hashstructure_test.go +++ b/audit/hashstructure_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package audit @@ -7,18 +7,17 @@ import ( "context" "crypto/sha256" "encoding/json" - "fmt" "reflect" "testing" "time" - "github.com/go-test/deep" - + nshelper "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/copystructure" + "github.com/stretchr/testify/require" ) func TestCopy_auth(t *testing.T) { @@ -98,76 +97,97 @@ func TestCopy_response(t *testing.T) { } } -func TestHashString(t *testing.T) { +// testSalter is a structure that implements the Salter interface in a trivial +// manner. +type testSalter struct{} + +// Salt returns a salt.Salt pointer based on dummy data stored in an in-memory +// storage instance. 
+func (*testSalter) Salt(ctx context.Context) (*salt.Salt, error) { inmemStorage := &logical.InmemStorage{} - inmemStorage.Put(context.Background(), &logical.StorageEntry{ + err := inmemStorage.Put(context.Background(), &logical.StorageEntry{ Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + if err != nil { + return nil, err + } + + return salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ HMAC: sha256.New, HMACType: "hmac-sha256", }) +} + +func TestHashString(t *testing.T) { + salter := &testSalter{} + + out, err := hashString(context.Background(), salter, "foo") if err != nil { t.Fatalf("Error instantiating salt: %s", err) } - out := HashString(localSalt, "foo") if out != "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a" { - t.Fatalf("err: HashString output did not match expected") + t.Fatalf("err: hashString output did not match expected") } } func TestHashAuth(t *testing.T) { - cases := []struct { + cases := map[string]struct { Input *logical.Auth - Output *logical.Auth + Output *auth HMACAccessor bool }{ - { - &logical.Auth{ClientToken: "foo"}, - &logical.Auth{ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a"}, - false, - }, - { + "no-accessor-hmac": { &logical.Auth{ + ClientToken: "foo", + Accessor: "very-accessible", LeaseOptions: logical.LeaseOptions{ TTL: 1 * time.Hour, }, - - ClientToken: "foo", + TokenType: logical.TokenTypeService, + }, + &auth{ + ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a", + Accessor: "very-accessible", + TokenTTL: 3600, + TokenType: "service", + RemainingUses: 5, }, + false, + }, + "accessor-hmac": { &logical.Auth{ + Accessor: "very-accessible", + ClientToken: "foo", LeaseOptions: logical.LeaseOptions{ TTL: 1 * time.Hour, }, - - ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a", + TokenType: logical.TokenTypeBatch, }, - false, + &auth{ + ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a", + Accessor: "hmac-sha256:5d6d7c8da5b699ace193ea453bbf77082a8aaca42a474436509487d646a7c0af", + TokenTTL: 3600, + TokenType: "batch", + RemainingUses: 5, + }, + true, }, } inmemStorage := &logical.InmemStorage{} - inmemStorage.Put(context.Background(), &logical.StorageEntry{ + err := inmemStorage.Put(context.Background(), &logical.StorageEntry{ Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("Error instantiating salt: %s", err) - } + require.NoError(t, err) + salter := &testSalter{} for _, tc := range cases { - input := fmt.Sprintf("%#v", tc.Input) - out, err := HashAuth(localSalt, tc.Input, tc.HMACAccessor) - if err != nil { - t.Fatalf("err: %s\n\n%s", err, input) - } - if !reflect.DeepEqual(out, tc.Output) { - t.Fatalf("bad:\nInput:\n%s\nOutput:\n%#v\nExpected output:\n%#v", input, out, tc.Output) - } + auditAuth, err := newAuth(tc.Input, 5) + require.NoError(t, err) + err = hashAuth(context.Background(), salter, auditAuth, tc.HMACAccessor) + require.NoError(t, err) + require.Equal(t, tc.Output, auditAuth) } } @@ -185,7 +205,7 @@ var _ logical.OptMarshaler = &testOptMarshaler{} func TestHashRequest(t *testing.T) { cases := []struct { Input *logical.Request - Output *logical.Request + Output *request NonHMACDataKeys []string HMACAccessor bool 
}{ @@ -198,13 +218,17 @@ func TestHashRequest(t *testing.T) { "om": &testOptMarshaler{S: "bar", I: 1}, }, }, - &logical.Request{ + &request{ Data: map[string]interface{}{ "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", "baz": "foobar", "private_key_type": "hmac-sha256:995230dca56fffd310ff591aa404aab52b2abb41703c787cfa829eceb4595bf1", "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), }, + Namespace: &namespace{ + ID: nshelper.RootNamespace.ID, + Path: nshelper.RootNamespace.Path, + }, }, []string{"baz"}, false, @@ -212,98 +236,108 @@ func TestHashRequest(t *testing.T) { } inmemStorage := &logical.InmemStorage{} - inmemStorage.Put(context.Background(), &logical.StorageEntry{ + err := inmemStorage.Put(context.Background(), &logical.StorageEntry{ Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("Error instantiating salt: %s", err) - } + require.NoError(t, err) + salter := &testSalter{} for _, tc := range cases { - input := fmt.Sprintf("%#v", tc.Input) - out, err := HashRequest(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) - if err != nil { - t.Fatalf("err: %s\n\n%s", err, input) - } - if diff := deep.Equal(out, tc.Output); len(diff) > 0 { - t.Fatalf("bad:\nInput:\n%s\nDiff:\n%#v", input, diff) - } + auditReq, err := newRequest(tc.Input, nshelper.RootNamespace) + require.NoError(t, err) + err = hashRequest(context.Background(), salter, auditReq, tc.HMACAccessor, tc.NonHMACDataKeys) + require.NoError(t, err) + require.Equal(t, tc.Output, auditReq) } } func TestHashResponse(t *testing.T) { now := time.Now() - cases := []struct { - Input *logical.Response - Output *logical.Response - NonHMACDataKeys []string - HMACAccessor bool - }{ - { - &logical.Response{ - Data: map[string]interface{}{ - "foo": "bar", - "baz": "foobar", - // Responses can contain time values, so test that with - // a known fixed value. - "bar": now, - "om": &testOptMarshaler{S: "bar", I: 1}, - }, - WrapInfo: &wrapping.ResponseWrapInfo{ - TTL: 60, - Token: "bar", - Accessor: "flimflam", - CreationTime: now, - WrappedAccessor: "bar", - }, + resp := &logical.Response{ + Data: map[string]interface{}{ + "foo": "bar", + "baz": "foobar", + // Responses can contain time values, so test that with a known fixed value. 
+ "bar": now, + "om": &testOptMarshaler{S: "bar", I: 1}, + }, + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: 1 * time.Minute, + Token: "bar", + Accessor: "flimflam", + CreationTime: now, + WrappedAccessor: "bar", + }, + Auth: &logical.Auth{ + ClientToken: "hvs.QWERTY-T1q5lEjIWux1Tjx-VGqAYJdd4FZtbp1wpD5Ym9pGh4KHGh2cy5TSjRndGoxaU44NzNscm5MSlRLQXZ0ZGg", + Accessor: "ABClk9ZNLGOCuTrOEIAooJG3", + TokenType: logical.TokenTypeService, + }, + Secret: &logical.Secret{ + LeaseOptions: logical.LeaseOptions{ + TTL: 3, + MaxTTL: 5, + Renewable: false, + Increment: 1, + IssueTime: now, }, - &logical.Response{ - Data: map[string]interface{}{ - "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", - "baz": "foobar", - "bar": now.Format(time.RFC3339Nano), - "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), - }, - WrapInfo: &wrapping.ResponseWrapInfo{ - TTL: 60, - Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", - Accessor: "hmac-sha256:7c9c6fe666d0af73b3ebcfbfabe6885015558213208e6635ba104047b22f6390", - CreationTime: now, - WrappedAccessor: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", - }, + InternalData: map[string]any{ + "foo": "bar", }, - []string{"baz"}, - true, + LeaseID: "abc", + }, + } + + req := &logical.Request{MountPoint: "/foo/bar"} + req.SetMountClass("kv") + req.SetMountIsExternalPlugin(true) + req.SetMountRunningVersion("123") + req.SetMountRunningSha256("256-256!") + + nonHMACDataKeys := []string{"baz"} + + expected := &response{ + Auth: &auth{ + Accessor: "hmac-sha256:253184715b2d5a6c3a2fc7afe0d2294085f5e886a1275ca735646a6f23be2587", + ClientToken: "hmac-sha256:2ce541100a8bcd687e8ec7712c8bb4c975a8d8599c02d98945e63ecd413bf0f3", + TokenType: "service", + }, + Data: map[string]interface{}{ + "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + "baz": "foobar", + "bar": now.Format(time.RFC3339Nano), + "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), + }, + WrapInfo: &responseWrapInfo{ + TTL: 60, + Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + Accessor: "hmac-sha256:7c9c6fe666d0af73b3ebcfbfabe6885015558213208e6635ba104047b22f6390", + CreationTime: now.UTC().Format(time.RFC3339Nano), + WrappedAccessor: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + }, + MountClass: "kv", + MountIsExternalPlugin: true, + MountPoint: "/foo/bar", + MountRunningVersion: "123", + MountRunningSha256: "256-256!", + Secret: &secret{ + LeaseID: "abc", }, } inmemStorage := &logical.InmemStorage{} - inmemStorage.Put(context.Background(), &logical.StorageEntry{ + err := inmemStorage.Put(context.Background(), &logical.StorageEntry{ Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("Error instantiating salt: %s", err) - } - for _, tc := range cases { - input := fmt.Sprintf("%#v", tc.Input) - out, err := HashResponse(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys, false) - if err != nil { - t.Fatalf("err: %s\n\n%s", err, input) - } - if diff := deep.Equal(out, tc.Output); len(diff) > 0 { - t.Fatalf("bad:\nInput:\n%s\nDiff:\n%#v", input, diff) - } - } + require.NoError(t, err) + salter := &testSalter{} + auditResp, err := 
newResponse(resp, req, false) + require.NoError(t, err) + err = hashResponse(context.Background(), salter, auditResp, true, nonHMACDataKeys) + require.NoError(t, err) + require.Equal(t, expected, auditResp) } func TestHashWalker(t *testing.T) { @@ -333,7 +367,7 @@ func TestHashWalker(t *testing.T) { } for _, tc := range cases { - err := HashStructure(tc.Input, func(string) string { + err := hashStructure(tc.Input, func(string) string { return replaceText }, nil) if err != nil { @@ -387,7 +421,7 @@ func TestHashWalker_TimeStructs(t *testing.T) { } for _, tc := range cases { - err := HashStructure(tc.Input, func(s string) string { + err := hashStructure(tc.Input, func(s string) string { return s + replaceText }, nil) if err != nil { diff --git a/audit/headers.go b/audit/headers.go new file mode 100644 index 000000000000..a6ba6b00cd14 --- /dev/null +++ b/audit/headers.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/hashicorp/vault/sdk/logical" +) + +// N.B.: While we could use textproto to get the canonical mime header, HTTP/2 +// requires all headers to be converted to lower case, so we just do that. + +const ( + // auditedHeadersEntry is the key used in storage to store and retrieve the header config + auditedHeadersEntry = "audited-headers" + + // AuditedHeadersSubPath is the path used to create a sub view within storage. + AuditedHeadersSubPath = "audited-headers-config/" +) + +type durableStorer interface { + Get(ctx context.Context, key string) (*logical.StorageEntry, error) + Put(ctx context.Context, entry *logical.StorageEntry) error +} + +// HeaderFormatter is an interface defining the methods of the +// vault.HeadersConfig structure needed in this package. +type HeaderFormatter interface { + // ApplyConfig returns a map of header values that consists of the + // intersection of the provided set of header values with a configured + // set of headers and will hash headers that have been configured as such. + ApplyConfig(context.Context, map[string][]string, Salter) (map[string][]string, error) +} + +// AuditedHeadersKey returns the key at which audit header configuration is stored. +func AuditedHeadersKey() string { + return AuditedHeadersSubPath + auditedHeadersEntry +} + +type headerSettings struct { + // HMAC is used to indicate whether the value of the header should be HMAC'd. + HMAC bool `json:"hmac"` +} + +// HeadersConfig is used by the Audit Broker to write only approved +// headers to the audit logs. It uses a BarrierView to persist the settings. +type HeadersConfig struct { + // headerSettings stores the current headers that should be audited, and their settings. + headerSettings map[string]*headerSettings + + // view is the barrier view which should be used to access underlying audit header config data. + view durableStorer + + sync.RWMutex +} + +// NewHeadersConfig should be used to create HeadersConfig. +func NewHeadersConfig(view durableStorer) (*HeadersConfig, error) { + if view == nil { + return nil, fmt.Errorf("barrier view cannot be nil") + } + + // This should be the only place where the HeadersConfig struct is initialized. + // Store the view so that we can reload headers when we 'Invalidate'. + return &HeadersConfig{ + view: view, + headerSettings: make(map[string]*headerSettings), + }, nil +} + +// Header attempts to retrieve a copy of the settings associated with the specified header. 
+// The second boolean return parameter indicates whether the header existed in configuration; +// it should be checked, since when it is 'false' the returned settings will have the default values. +func (a *HeadersConfig) Header(name string) (headerSettings, bool) { + a.RLock() + defer a.RUnlock() + + var s headerSettings + v, ok := a.headerSettings[strings.ToLower(name)] + + if ok { + s.HMAC = v.HMAC + } + + return s, ok +} + +// Headers returns all existing headers along with a copy of their current settings. +func (a *HeadersConfig) Headers() map[string]headerSettings { + a.RLock() + defer a.RUnlock() + + // We know how many entries the map should have. + headers := make(map[string]headerSettings, len(a.headerSettings)) + + // Clone the headers + for name, setting := range a.headerSettings { + headers[name] = headerSettings{HMAC: setting.HMAC} + } + + return headers +} + +// Add adds or overwrites a header in the config and updates the barrier view +// NOTE: Add will acquire a write lock in order to update the underlying headers. +func (a *HeadersConfig) Add(ctx context.Context, header string, hmac bool) error { + if header == "" { + return fmt.Errorf("header value cannot be empty") + } + + // Grab a write lock + a.Lock() + defer a.Unlock() + + if a.headerSettings == nil { + a.headerSettings = make(map[string]*headerSettings, 1) + } + + a.headerSettings[strings.ToLower(header)] = &headerSettings{hmac} + entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.headerSettings) + if err != nil { + return fmt.Errorf("failed to persist audited headers config: %w", err) + } + + if err := a.view.Put(ctx, entry); err != nil { + return fmt.Errorf("failed to persist audited headers config: %w", err) + } + + return nil +} + +// Remove deletes a header out of the header config and updates the barrier view +// NOTE: Remove will acquire a write lock in order to update the underlying headers. +func (a *HeadersConfig) Remove(ctx context.Context, header string) error { + if header == "" { + return fmt.Errorf("header value cannot be empty") + } + + // Grab a write lock + a.Lock() + defer a.Unlock() + + // Nothing to delete + if len(a.headerSettings) == 0 { + return nil + } + + delete(a.headerSettings, strings.ToLower(header)) + entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.headerSettings) + if err != nil { + return fmt.Errorf("failed to persist audited headers config: %w", err) + } + + if err := a.view.Put(ctx, entry); err != nil { + return fmt.Errorf("failed to persist audited headers config: %w", err) + } + + return nil +} + +// DefaultHeaders can be used to retrieve the set of default headers that will be +// added to HeadersConfig in order to allow them to appear in audit logs in a raw +// format. If the Vault Operator adds their own setting for any of the defaults, +// their setting will be honored. +func (a *HeadersConfig) DefaultHeaders() map[string]*headerSettings { + // Support deprecated 'x-' prefix (https://datatracker.ietf.org/doc/html/rfc6648) + const correlationID = "correlation-id" + xCorrelationID := fmt.Sprintf("x-%s", correlationID) + + return map[string]*headerSettings{ + correlationID: {}, + xCorrelationID: {}, + } +} + +// Invalidate attempts to refresh the allowed audit headers and their settings. +// NOTE: Invalidate will acquire a write lock in order to update the underlying headers. +func (a *HeadersConfig) Invalidate(ctx context.Context) error { + a.Lock() + defer a.Unlock() + + // Get the actual headers entries, e.g. 
sys/audited-headers-config/audited-headers + out, err := a.view.Get(ctx, auditedHeadersEntry) + if err != nil { + return fmt.Errorf("failed to read config: %w", err) + } + + // If we cannot update the stored 'new' headers, we will clear the existing + // ones as part of invalidation. + headers := make(map[string]*headerSettings) + if out != nil { + err = out.DecodeJSON(&headers) + if err != nil { + return fmt.Errorf("failed to parse config: %w", err) + } + } + + // Ensure that we are able to case-insensitively access the headers; + // necessary for the upgrade case + lowerHeaders := make(map[string]*headerSettings, len(headers)) + for k, v := range headers { + lowerHeaders[strings.ToLower(k)] = v + } + + // Ensure that we have default headers configured to appear in the audit log. + // Add them if they're missing. + for header, setting := range a.DefaultHeaders() { + if _, ok := lowerHeaders[header]; !ok { + lowerHeaders[header] = setting + } + } + + a.headerSettings = lowerHeaders + return nil +} + +// ApplyConfig returns a map of approved headers and their values, either HMAC'd or plaintext. +// If the supplied headers are empty or nil, an empty set of headers will be returned. +func (a *HeadersConfig) ApplyConfig(ctx context.Context, headers map[string][]string, salter Salter) (result map[string][]string, retErr error) { + // Return early if we don't have headers. + if len(headers) < 1 { + return map[string][]string{}, nil + } + + // Grab a read lock + a.RLock() + defer a.RUnlock() + + // Make a copy of the incoming headers with everything lower so we can + // case-insensitively compare + lowerHeaders := make(map[string][]string, len(headers)) + for k, v := range headers { + lowerHeaders[strings.ToLower(k)] = v + } + + result = make(map[string][]string, len(a.headerSettings)) + for key, settings := range a.headerSettings { + if val, ok := lowerHeaders[key]; ok { + // copy the header values so we don't overwrite them + hVals := make([]string, len(val)) + copy(hVals, val) + + // Optionally hmac the values + if settings.HMAC { + for i, el := range hVals { + hVal, err := hashString(ctx, salter, el) + if err != nil { + return nil, err + } + hVals[i] = hVal + } + } + + result[key] = hVals + } + } + + return result, nil +} diff --git a/audit/headers_test.go b/audit/headers_test.go new file mode 100644 index 000000000000..025f4a422f8b --- /dev/null +++ b/audit/headers_test.go @@ -0,0 +1,632 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "encoding/json" + "errors" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// mockStorage is a struct that is used to mock barrier storage. +type mockStorage struct { + mock.Mock + v map[string][]byte +} + +// List implements List from BarrierStorage interface. +// ignore-nil-nil-function-check. +func (m *mockStorage) List(_ context.Context, _ string) ([]string, error) { + return nil, nil +} + +// Get implements Get from BarrierStorage interface. +// ignore-nil-nil-function-check. +func (m *mockStorage) Get(_ context.Context, key string) (*logical.StorageEntry, error) { + b, ok := m.v[key] + if !ok { + return nil, nil + } + + var entry *logical.StorageEntry + err := json.Unmarshal(b, &entry) + + return entry, err +} + +// Put implements Put from BarrierStorage interface. 
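Taken together, the HeadersConfig API above can be driven roughly as follows. This is a hypothetical usage sketch: memSalter is invented for the example, and logical.InmemStorage merely satisfies the Get/Put view the constructor needs:

```
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/sdk/helper/salt"
	"github.com/hashicorp/vault/sdk/logical"
)

// memSalter satisfies the package's Salter interface with a throwaway,
// non-persistent salt; it exists only for this sketch.
type memSalter struct{}

func (memSalter) Salt(ctx context.Context) (*salt.Salt, error) {
	return salt.NewSalt(ctx, nil, nil)
}

func main() {
	ctx := context.Background()

	// Back the config with in-memory storage; anything implementing Get/Put works.
	hc, err := audit.NewHeadersConfig(&logical.InmemStorage{})
	if err != nil {
		panic(err)
	}

	// Audit x-custom-id in plaintext and HMAC authorization.
	if err := hc.Add(ctx, "X-Custom-ID", false); err != nil {
		panic(err)
	}
	if err := hc.Add(ctx, "Authorization", true); err != nil {
		panic(err)
	}

	out, err := hc.ApplyConfig(ctx, map[string][]string{
		"X-Custom-ID":   {"abc123"},
		"Authorization": {"Bearer hvs.example"},
		"Content-Type":  {"application/json"}, // not configured, so dropped
	}, memSalter{})
	if err != nil {
		panic(err)
	}

	fmt.Println(out) // keys come back lower-cased; the authorization value is hmac-sha256:...
}
```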
+func (m *mockStorage) Put(_ context.Context, entry *logical.StorageEntry) error { + b, err := json.Marshal(entry) + if err != nil { + return err + } + + m.v[entry.Key] = b + + return nil +} + +// Delete implements Delete from BarrierStorage interface. +func (m *mockStorage) Delete(_ context.Context, _ string) error { + return nil +} + +func newMockStorage(t *testing.T) *mockStorage { + t.Helper() + + return &mockStorage{ + Mock: mock.Mock{}, + v: make(map[string][]byte), + } +} + +func mockAuditedHeadersConfig(t *testing.T) *HeadersConfig { + return &HeadersConfig{ + headerSettings: make(map[string]*headerSettings), + view: newMockStorage(t), + } +} + +func TestAuditedHeadersConfig_CRUD(t *testing.T) { + t.Parallel() + + conf := mockAuditedHeadersConfig(t) + + testAddHeaders(t, conf) + testRemoveHeaders(t, conf) +} + +func testAddHeaders(t *testing.T, conf *HeadersConfig) { + t.Helper() + + err := conf.Add(context.Background(), "X-Test-Header", false) + if err != nil { + t.Fatalf("Error when adding header to config: %s", err) + } + + settings, ok := conf.headerSettings["x-test-header"] + if !ok { + t.Fatal("Expected header to be found in config") + } + + if settings.HMAC { + t.Fatal("Expected HMAC to be set to false, got true") + } + + out, err := conf.view.Get(context.Background(), auditedHeadersEntry) + if err != nil { + t.Fatalf("Could not retrieve headers entry from config: %s", err) + } + if out == nil { + t.Fatal("nil value") + } + + headers := make(map[string]*headerSettings) + err = out.DecodeJSON(&headers) + if err != nil { + t.Fatalf("Error decoding header view: %s", err) + } + + expected := map[string]*headerSettings{ + "x-test-header": { + HMAC: false, + }, + } + + if !reflect.DeepEqual(headers, expected) { + t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers) + } + + err = conf.Add(context.Background(), "X-Vault-Header", true) + if err != nil { + t.Fatalf("Error when adding header to config: %s", err) + } + + settings, ok = conf.headerSettings["x-vault-header"] + if !ok { + t.Fatal("Expected header to be found in config") + } + + if !settings.HMAC { + t.Fatal("Expected HMAC to be set to true, got false") + } + + out, err = conf.view.Get(context.Background(), auditedHeadersEntry) + if err != nil { + t.Fatalf("Could not retrieve headers entry from config: %s", err) + } + if out == nil { + t.Fatal("nil value") + } + + headers = make(map[string]*headerSettings) + err = out.DecodeJSON(&headers) + if err != nil { + t.Fatalf("Error decoding header view: %s", err) + } + + expected["x-vault-header"] = &headerSettings{ + HMAC: true, + } + + if !reflect.DeepEqual(headers, expected) { + t.Fatalf("Expected config didn't match actual. 
Expected: %#v, Got: %#v", expected, headers) + } +} + +func testRemoveHeaders(t *testing.T, conf *HeadersConfig) { + t.Helper() + + err := conf.Remove(context.Background(), "X-Test-Header") + if err != nil { + t.Fatalf("Error when removing header from config: %s", err) + } + + _, ok := conf.headerSettings["x-test-header"] + if ok { + t.Fatal("Expected header to not be found in config") + } + + out, err := conf.view.Get(context.Background(), auditedHeadersEntry) + if err != nil { + t.Fatalf("Could not retrieve headers entry from config: %s", err) + } + if out == nil { + t.Fatal("nil value") + } + + headers := make(map[string]*headerSettings) + err = out.DecodeJSON(&headers) + if err != nil { + t.Fatalf("Error decoding header view: %s", err) + } + + expected := map[string]*headerSettings{ + "x-vault-header": { + HMAC: true, + }, + } + + if !reflect.DeepEqual(headers, expected) { + t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers) + } + + err = conf.Remove(context.Background(), "x-VaulT-Header") + if err != nil { + t.Fatalf("Error when removing header from config: %s", err) + } + + _, ok = conf.headerSettings["x-vault-header"] + if ok { + t.Fatal("Expected header to not be found in config") + } + + out, err = conf.view.Get(context.Background(), auditedHeadersEntry) + if err != nil { + t.Fatalf("Could not retrieve headers entry from config: %s", err) + } + if out == nil { + t.Fatal("nil value") + } + + headers = make(map[string]*headerSettings) + err = out.DecodeJSON(&headers) + if err != nil { + t.Fatalf("Error decoding header view: %s", err) + } + + expected = make(map[string]*headerSettings) + + if !reflect.DeepEqual(headers, expected) { + t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers) + } +} + +func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) { + t.Parallel() + + conf := mockAuditedHeadersConfig(t) + + err := conf.Add(context.Background(), "X-TesT-Header", false) + require.NoError(t, err) + err = conf.Add(context.Background(), "X-Vault-HeAdEr", true) + require.NoError(t, err) + + reqHeaders := map[string][]string{ + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, + } + + salter := &testSalter{} + + result, err := conf.ApplyConfig(context.Background(), reqHeaders, salter) + if err != nil { + t.Fatal(err) + } + + expected := map[string][]string{ + "x-test-header": {"foo"}, + "x-vault-header": {"hmac-sha256:", "hmac-sha256:"}, + } + + if len(expected) != len(result) { + t.Fatalf("Expected headers count did not match actual count: Expected count %d\n Got %d\n", len(expected), len(result)) + } + + for resultKey, resultValues := range result { + expectedValues := expected[resultKey] + + if len(expectedValues) != len(resultValues) { + t.Fatalf("Expected header values count did not match actual values count: Expected count: %d\n Got %d\n", len(expectedValues), len(resultValues)) + } + + for i, e := range expectedValues { + if e == "hmac-sha256:" { + if !strings.HasPrefix(resultValues[i], e) { + t.Fatalf("Expected headers did not match actual: Expected %#v...\n Got %#v\n", e, resultValues[i]) + } + } else { + if e != resultValues[i] { + t.Fatalf("Expected headers did not match actual: Expected %#v\n Got %#v\n", e, resultValues[i]) + } + } + } + } + + // Make sure we didn't edit the reqHeaders map + reqHeadersCopy := map[string][]string{ + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, + } + + if !reflect.DeepEqual(reqHeaders, 
+		t.Fatalf("Req headers were changed, expected %#v\n got %#v", reqHeadersCopy, reqHeaders)
+	}
+}
+
+// TestAuditedHeadersConfig_ApplyConfig_NoRequestHeaders tests the case where
+// there are no headers in the request.
+func TestAuditedHeadersConfig_ApplyConfig_NoRequestHeaders(t *testing.T) {
+	t.Parallel()
+
+	conf := mockAuditedHeadersConfig(t)
+
+	err := conf.Add(context.Background(), "X-TesT-Header", false)
+	require.NoError(t, err)
+	err = conf.Add(context.Background(), "X-Vault-HeAdEr", true)
+	require.NoError(t, err)
+
+	salter := &testSalter{}
+
+	// Test sending in nil headers first.
+	result, err := conf.ApplyConfig(context.Background(), nil, salter)
+	require.NoError(t, err)
+	require.NotNil(t, result)
+
+	result, err = conf.ApplyConfig(context.Background(), map[string][]string{}, salter)
+	require.NoError(t, err)
+	require.NotNil(t, result)
+	require.Len(t, result, 0)
+}
+
+func TestAuditedHeadersConfig_ApplyConfig_NoConfiguredHeaders(t *testing.T) {
+	t.Parallel()
+
+	conf := mockAuditedHeadersConfig(t)
+
+	reqHeaders := map[string][]string{
+		"X-Test-Header":  {"foo"},
+		"X-Vault-Header": {"bar", "bar"},
+		"Content-Type":   {"json"},
+	}
+
+	salter := &testSalter{}
+
+	result, err := conf.ApplyConfig(context.Background(), reqHeaders, salter)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(result) != 0 {
+		t.Fatalf("Expected no headers but actually got: %d\n", len(result))
+	}
+
+	// Make sure we didn't edit the reqHeaders map
+	reqHeadersCopy := map[string][]string{
+		"X-Test-Header":  {"foo"},
+		"X-Vault-Header": {"bar", "bar"},
+		"Content-Type":   {"json"},
+	}
+
+	if !reflect.DeepEqual(reqHeaders, reqHeadersCopy) {
+		t.Fatalf("Req headers were changed, expected %#v\n got %#v", reqHeadersCopy, reqHeaders)
+	}
+}
+
+// FailingSalter is an implementation of the Salter interface where the Salt
+// method always returns an error.
+type FailingSalter struct{}
+
+// Salt always returns an error.
+func (s *FailingSalter) Salt(context.Context) (*salt.Salt, error) {
+	return nil, errors.New("testing error")
+}
+
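As a reading aid for the assertions above, here is a minimal, self-contained sketch of the technique `ApplyConfig` is being tested for. It is an illustration rather than Vault's implementation; `applyHeaderConfig`, its `key` parameter, and the fixed HMAC key in `main` are all invented for the example. Configured names are stored lowercased, lookups are case-insensitive, HMAC-enabled values are replaced with `hmac-sha256:`-prefixed digests, and results are written to a fresh map.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// applyHeaderConfig mirrors the behaviour the tests above assert: only
// configured headers are returned, names are compared case-insensitively,
// HMAC-enabled values become "hmac-sha256:..." digests, and the input map
// is left untouched.
func applyHeaderConfig(config map[string]bool, key []byte, reqHeaders map[string][]string) map[string][]string {
	result := make(map[string][]string, len(config))
	for name, values := range reqHeaders {
		lower := strings.ToLower(name)
		useHMAC, ok := config[lower]
		if !ok {
			continue // header is not configured for auditing
		}
		out := make([]string, len(values))
		for i, v := range values {
			if useHMAC {
				mac := hmac.New(sha256.New, key)
				mac.Write([]byte(v))
				out[i] = "hmac-sha256:" + hex.EncodeToString(mac.Sum(nil))
			} else {
				out[i] = v
			}
		}
		result[lower] = out
	}
	return result
}

func main() {
	config := map[string]bool{"x-test-header": false, "x-vault-header": true}
	req := map[string][]string{
		"X-Test-Header":  {"foo"},
		"X-Vault-Header": {"bar", "bar"},
		"Content-Type":   {"json"}, // not configured, so dropped from the result
	}
	fmt.Println(applyHeaderConfig(config, []byte("salt"), req))
}
```

Building a new result map, rather than editing in place, is what lets the `reqHeadersCopy` comparisons above pass.

+// TestAuditedHeadersConfig_ApplyConfig_HashStringError tests the case where
+// an error is returned from hashString instead of a map of headers.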
+func TestAuditedHeadersConfig_ApplyConfig_HashStringError(t *testing.T) { + t.Parallel() + + conf := mockAuditedHeadersConfig(t) + + err := conf.Add(context.Background(), "X-TesT-Header", false) + require.NoError(t, err) + err = conf.Add(context.Background(), "X-Vault-HeAdEr", true) + require.NoError(t, err) + + reqHeaders := map[string][]string{ + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, + } + + salter := &FailingSalter{} + + _, err = conf.ApplyConfig(context.Background(), reqHeaders, salter) + if err == nil { + t.Fatal("expected error from ApplyConfig") + } +} + +func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) { + conf := &HeadersConfig{ + headerSettings: make(map[string]*headerSettings), + view: nil, + } + + conf.headerSettings = map[string]*headerSettings{ + "X-Test-Header": {false}, + "X-Vault-Header": {true}, + } + + reqHeaders := map[string][]string{ + "X-Test-Header": {"foo"}, + "X-Vault-Header": {"bar", "bar"}, + "Content-Type": {"json"}, + } + + salter := &testSalter{} + + // Reset the timer since we did a lot above + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := conf.ApplyConfig(context.Background(), reqHeaders, salter) + require.NoError(b, err) + } +} + +// TestAuditedHeaders_auditedHeadersKey is used to check the key we use to handle +// invalidation doesn't change when we weren't expecting it to. +func TestAuditedHeaders_auditedHeadersKey(t *testing.T) { + t.Parallel() + + require.Equal(t, "audited-headers-config/audited-headers", AuditedHeadersKey()) +} + +// TestAuditedHeaders_NewAuditedHeadersConfig checks supplying incorrect params to +// the constructor for HeadersConfig returns an error. +func TestAuditedHeaders_NewAuditedHeadersConfig(t *testing.T) { + t.Parallel() + + ac, err := NewHeadersConfig(nil) + require.Error(t, err) + require.Nil(t, ac) + + ac, err = NewHeadersConfig(newMockStorage(t)) + require.NoError(t, err) + require.NotNil(t, ac) +} + +// TestAuditedHeaders_invalidate ensures that we can update the headers on HeadersConfig +// when we invalidate, and load the updated headers from the view/storage. +func TestAuditedHeaders_invalidate(t *testing.T) { + t.Parallel() + + view := newMockStorage(t) + ahc, err := NewHeadersConfig(view) + require.NoError(t, err) + require.Len(t, ahc.headerSettings, 0) + + // Store some data using the view. + fakeHeaders1 := map[string]*headerSettings{"x-magic-header": {}} + fakeBytes1, err := json.Marshal(fakeHeaders1) + require.NoError(t, err) + err = view.Put(context.Background(), &logical.StorageEntry{Key: auditedHeadersEntry, Value: fakeBytes1}) + require.NoError(t, err) + + // Invalidate and check we now see the header we stored + err = ahc.Invalidate(context.Background()) + require.NoError(t, err) + require.Equal(t, len(ahc.DefaultHeaders())+1, len(ahc.headerSettings)) // (defaults + 1). + _, ok := ahc.headerSettings["x-magic-header"] + require.True(t, ok) + + // Do it again with more headers and random casing. 
+	fakeHeaders2 := map[string]*headerSettings{
+		"x-magic-header":           {},
+		"x-even-MORE-magic-header": {},
+	}
+	fakeBytes2, err := json.Marshal(fakeHeaders2)
+	require.NoError(t, err)
+	err = view.Put(context.Background(), &logical.StorageEntry{Key: auditedHeadersEntry, Value: fakeBytes2})
+	require.NoError(t, err)
+
+	// Invalidate and check we now see the headers we stored
+	err = ahc.Invalidate(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, len(ahc.DefaultHeaders())+2, len(ahc.headerSettings)) // (defaults + 2 new headers)
+	_, ok = ahc.headerSettings["x-magic-header"]
+	require.True(t, ok)
+	_, ok = ahc.headerSettings["x-even-more-magic-header"]
+	require.True(t, ok)
+}
+
+// TestAuditedHeaders_invalidate_nil_view ensures that we invalidate the headers
+// correctly (clear them) when we get nil for the storage entry from the view.
+func TestAuditedHeaders_invalidate_nil_view(t *testing.T) {
+	t.Parallel()
+
+	view := newMockStorage(t)
+	ahc, err := NewHeadersConfig(view)
+	require.NoError(t, err)
+	require.Len(t, ahc.headerSettings, 0)
+
+	// Store some data using the view.
+	fakeHeaders1 := map[string]*headerSettings{"x-magic-header": {}}
+	fakeBytes1, err := json.Marshal(fakeHeaders1)
+	require.NoError(t, err)
+	err = view.Put(context.Background(), &logical.StorageEntry{Key: auditedHeadersEntry, Value: fakeBytes1})
+	require.NoError(t, err)
+
+	// Invalidate and check we now see the header we stored
+	err = ahc.Invalidate(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, len(ahc.DefaultHeaders())+1, len(ahc.headerSettings)) // defaults + 1
+	_, ok := ahc.headerSettings["x-magic-header"]
+	require.True(t, ok)
+
+	// Swap out the view with a mock that returns nil when we try to invalidate.
+	// This should mean we end up just clearing the headers (no errors).
+	mockStorageBarrier := newMockStorage(t)
+	mockStorageBarrier.On("Get", mock.Anything, mock.Anything).Return(nil, nil)
+	ahc.view = mockStorageBarrier
+
+	// Invalidate should clear out the existing headers without error
+	err = ahc.Invalidate(context.Background())
+	require.NoError(t, err)
+	require.Equal(t, len(ahc.DefaultHeaders()), len(ahc.headerSettings)) // defaults
+}
+
+// TestAuditedHeaders_invalidate_bad_data ensures that we correctly error if the
+// underlying data cannot be parsed as expected.
+func TestAuditedHeaders_invalidate_bad_data(t *testing.T) {
+	t.Parallel()
+
+	view := newMockStorage(t)
+	ahc, err := NewHeadersConfig(view)
+	require.NoError(t, err)
+	require.Len(t, ahc.headerSettings, 0)
+
+	// Store some bad data using the view.
+	badBytes, err := json.Marshal("i am bad")
+	require.NoError(t, err)
+	err = view.Put(context.Background(), &logical.StorageEntry{Key: auditedHeadersEntry, Value: badBytes})
+	require.NoError(t, err)
+
+	// Invalidate should error because the stored data cannot be parsed.
+	err = ahc.Invalidate(context.Background())
+	require.Error(t, err)
+	require.ErrorContains(t, err, "failed to parse config")
+}
+
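The invalidation tests above all exercise the same flow. A compact, self-contained sketch of that flow follows; it is illustrative only, `rebuild` and `defaultHeaders` are invented names, and the real default set lives behind `DefaultHeaders`.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type headerSettings struct {
	HMAC bool `json:"hmac"`
}

// defaultHeaders is a hypothetical stand-in for the defaults that are always
// present after invalidation unless storage already defines them.
var defaultHeaders = map[string]*headerSettings{
	"x-correlation-id": {HMAC: false},
}

// rebuild mimics the invalidation flow the tests assert: decode the stored
// entry (nil storage clears everything), lowercase the keys, then add any
// missing defaults without overwriting stored settings.
func rebuild(stored []byte) (map[string]*headerSettings, error) {
	headers := make(map[string]*headerSettings)
	if stored != nil {
		raw := make(map[string]*headerSettings)
		if err := json.Unmarshal(stored, &raw); err != nil {
			return nil, fmt.Errorf("failed to parse config: %w", err)
		}
		for k, v := range raw {
			headers[strings.ToLower(k)] = v
		}
	}
	for k, v := range defaultHeaders {
		if _, ok := headers[k]; !ok {
			headers[k] = v
		}
	}
	return headers, nil
}

func main() {
	stored, _ := json.Marshal(map[string]*headerSettings{"X-Correlation-ID": {HMAC: true}})
	headers, err := rebuild(stored)
	fmt.Println(headers["x-correlation-id"].HMAC, err) // true <nil>: the stored setting wins over the default
}
```

Overlaying defaults only for missing keys is what the `invalidate_defaults` test below pins down: a stored `X-Correlation-ID` with HMAC enabled must not be blasted away by the default entry.

+// TestAuditedHeaders_header checks we can return a copy of settings associated with
+// an existing header, and we also know when a header wasn't found.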
+func TestAuditedHeaders_header(t *testing.T) { + t.Parallel() + + view := newMockStorage(t) + ahc, err := NewHeadersConfig(view) + require.NoError(t, err) + require.Len(t, ahc.headerSettings, 0) + + err = ahc.Add(context.Background(), "juan", true) + require.NoError(t, err) + require.Len(t, ahc.headerSettings, 1) + + s, ok := ahc.Header("juan") + require.True(t, ok) + require.Equal(t, true, s.HMAC) + + s, ok = ahc.Header("x-magic-token") + require.False(t, ok) +} + +// TestAuditedHeaders_headers checks we are able to return a copy of the existing +// configured headers. +func TestAuditedHeaders_headers(t *testing.T) { + t.Parallel() + + view := newMockStorage(t) + ahc, err := NewHeadersConfig(view) + require.NoError(t, err) + require.Len(t, ahc.headerSettings, 0) + + err = ahc.Add(context.Background(), "juan", true) + require.NoError(t, err) + err = ahc.Add(context.Background(), "john", false) + require.NoError(t, err) + require.Len(t, ahc.headerSettings, 2) + + s := ahc.Headers() + require.Len(t, s, 2) + require.Equal(t, true, s["juan"].HMAC) + require.Equal(t, false, s["john"].HMAC) +} + +// TestAuditedHeaders_invalidate_defaults checks that we ensure any 'default' headers +// are present after invalidation, and if they were loaded from storage then they +// do not get overwritten with our defaults. +func TestAuditedHeaders_invalidate_defaults(t *testing.T) { + t.Parallel() + + view := newMockStorage(t) + ahc, err := NewHeadersConfig(view) + require.NoError(t, err) + require.Len(t, ahc.headerSettings, 0) + + // Store some data using the view. + fakeHeaders1 := map[string]*headerSettings{"x-magic-header": {}} + fakeBytes1, err := json.Marshal(fakeHeaders1) + require.NoError(t, err) + err = view.Put(context.Background(), &logical.StorageEntry{Key: auditedHeadersEntry, Value: fakeBytes1}) + require.NoError(t, err) + + // Invalidate and check we now see the header we stored + err = ahc.Invalidate(context.Background()) + require.NoError(t, err) + require.Equal(t, len(ahc.DefaultHeaders())+1, len(ahc.headerSettings)) // (defaults + 1 new header) + _, ok := ahc.headerSettings["x-magic-header"] + require.True(t, ok) + s, ok := ahc.headerSettings["x-correlation-id"] + require.True(t, ok) + require.False(t, s.HMAC) + + // Add correlation ID specifically with HMAC and make sure it doesn't get blasted away. + fakeHeaders1 = map[string]*headerSettings{"x-magic-header": {}, "X-Correlation-ID": {HMAC: true}} + fakeBytes1, err = json.Marshal(fakeHeaders1) + require.NoError(t, err) + err = view.Put(context.Background(), &logical.StorageEntry{Key: auditedHeadersEntry, Value: fakeBytes1}) + require.NoError(t, err) + + // Invalidate and check we now see the header we stored + err = ahc.Invalidate(context.Background()) + require.NoError(t, err) + require.Equal(t, len(ahc.DefaultHeaders())+1, len(ahc.headerSettings)) // (defaults + 1 new header, 1 is also a default) + _, ok = ahc.headerSettings["x-magic-header"] + require.True(t, ok) + s, ok = ahc.headerSettings["x-correlation-id"] + require.True(t, ok) + require.True(t, s.HMAC) +} diff --git a/audit/nodes.go b/audit/nodes.go new file mode 100644 index 000000000000..bea1a2049e4e --- /dev/null +++ b/audit/nodes.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/vault/internal/observability/event"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// processManual will attempt to create an (audit) event with the specified data
+// and manually iterate over the supplied nodes calling Process on each until the
+// event is nil (which indicates the pipeline has completed).
+// Order of IDs in the NodeID slice determines the order they are processed.
+// (Audit) Event will be of RequestType (as opposed to ResponseType).
+// The last node must be a filter node (eventlogger.NodeTypeFilter) or
+// sink node (eventlogger.NodeTypeSink).
+func processManual(ctx context.Context, data *logical.LogInput, ids []eventlogger.NodeID, nodes map[eventlogger.NodeID]eventlogger.Node) error {
+	switch {
+	case data == nil:
+		return errors.New("data cannot be nil")
+	case len(ids) < 2:
+		return errors.New("minimum of 2 ids are required")
+	case nodes == nil:
+		return errors.New("nodes cannot be nil")
+	case len(nodes) == 0:
+		return errors.New("nodes are required")
+	}
+
+	// Create an audit event.
+	a, err := newEvent(RequestType)
+	if err != nil {
+		return err
+	}
+
+	// Insert the data into the audit event.
+	a.Data = data
+
+	// Create an eventlogger event with the audit event as the payload.
+	e := &eventlogger.Event{
+		Type:      event.AuditType.AsEventType(),
+		CreatedAt: time.Now(),
+		Formatted: make(map[string][]byte),
+		Payload:   a,
+	}
+
+	var lastSeen eventlogger.NodeType
+
+	// Process nodes in order, updating the event with the result.
+	// This means we *should* do:
+	// 1. filter (optional if configured)
+	// 2. formatter (temporary)
+	// 3. sink
+	for _, id := range ids {
+		// If the event is nil, we've completed processing the pipeline (hopefully
+		// by either a filter node or a sink node).
+		if e == nil {
+			break
+		}
+		node, ok := nodes[id]
+		if !ok {
+			return fmt.Errorf("node not found: %v", id)
+		}
+
+		switch node.Type() {
+		case eventlogger.NodeTypeFormatter:
+			// Use a temporary formatter node which doesn't persist its salt anywhere.
+			if formatNode, ok := node.(*entryFormatter); ok && formatNode != nil {
+				e, err = newTemporaryEntryFormatter(formatNode).Process(ctx, e)
+			}
+		default:
+			e, err = node.Process(ctx, e)
+		}
+
+		if err != nil {
+			return err
+		}
+
+		// Track the last node we have processed, as we should end with a filter or sink.
+		lastSeen = node.Type()
+	}
+
+	switch lastSeen {
+	case eventlogger.NodeTypeSink, eventlogger.NodeTypeFilter:
+	default:
+		return errors.New("last node must be a filter or sink")
+	}
+
+	return nil
+}
diff --git a/audit/nodes_test.go b/audit/nodes_test.go
new file mode 100644
index 000000000000..43f7eae354ac
--- /dev/null
+++ b/audit/nodes_test.go
@@ -0,0 +1,333 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"context"
+	"testing"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/go-uuid"
+	nshelper "github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/internal/observability/event"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/stretchr/testify/require"
+)
+
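For orientation before the tests, here is a toy version of the walk that `processManual` performs, with the eventlogger types reduced to a couple of lines. Every name in the sketch (`node`, `formatter`, `sink`, `run`) is invented for the illustration and is not part of the package.

```go
package main

import (
	"errors"
	"fmt"
)

type nodeType int

const (
	typeFilter nodeType = iota
	typeFormatter
	typeSink
)

// node is a stripped-down stand-in for eventlogger.Node: process either
// transforms the event or returns nil to signal that the pipeline is done.
type node interface {
	process(e *string) (*string, error)
	kind() nodeType
}

type formatter struct{}

func (formatter) process(e *string) (*string, error) {
	out := *e + " [formatted]"
	return &out, nil
}

func (formatter) kind() nodeType { return typeFormatter }

type sink struct{}

func (sink) process(e *string) (*string, error) {
	fmt.Println("sink received:", *e)
	return nil, nil // a nil event signals that processing is complete
}

func (sink) kind() nodeType { return typeSink }

// run mirrors the shape of the walk in processManual: feed each node the
// previous result in order, stop once the event has been consumed, and
// require that the walk ended on a filter or sink node.
func run(event string, nodes []node) error {
	e := &event
	last := typeFormatter
	for _, n := range nodes {
		if e == nil {
			break
		}
		var err error
		if e, err = n.process(e); err != nil {
			return err
		}
		last = n.kind()
	}
	if last != typeSink && last != typeFilter {
		return errors.New("last node must be a filter or sink")
	}
	return nil
}

func main() {
	fmt.Println(run("audit event", []node{formatter{}, sink{}}))      // <nil>
	fmt.Println(run("audit event", []node{formatter{}, formatter{}})) // last node must be a filter or sink
}
```

The second call fails for the same reason the LastNodeNotSink test below expects an error: the walk finished on a formatter.

+// TestProcessManual_NilData tests processManual when nil data is supplied.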
+func TestProcessManual_NilData(t *testing.T) { + t.Parallel() + + var ids []eventlogger.NodeID + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, formatterNode := newFormatterNode(t) + ids = append(ids, formatterId) + nodes[formatterId] = formatterNode + + // Sink node + sinkId, sinkNode := newSinkNode(t) + ids = append(ids, sinkId) + nodes[sinkId] = sinkNode + + err := processManual(nshelper.RootContext(context.Background()), nil, ids, nodes) + require.Error(t, err) + require.EqualError(t, err, "data cannot be nil") +} + +// TestProcessManual_BadIDs tests processManual when different bad values are +// supplied for the ID parameter. +func TestProcessManual_BadIDs(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + IDs []eventlogger.NodeID + ExpectedErrorMessage string + }{ + "nil": { + IDs: nil, + ExpectedErrorMessage: "minimum of 2 ids are required", + }, + "one": { + IDs: []eventlogger.NodeID{"1"}, + ExpectedErrorMessage: "minimum of 2 ids are required", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, formatterNode := newFormatterNode(t) + nodes[formatterId] = formatterNode + + // Sink node + sinkId, sinkNode := newSinkNode(t) + nodes[sinkId] = sinkNode + + // Data + requestId, err := uuid.GenerateUUID() + require.NoError(t, err) + data := newData(requestId) + + err = processManual(nshelper.RootContext(context.Background()), data, tc.IDs, nodes) + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + }) + } +} + +// TestProcessManual_NoNodes tests processManual when no nodes are supplied. +func TestProcessManual_NoNodes(t *testing.T) { + t.Parallel() + + var ids []eventlogger.NodeID + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, _ := newFormatterNode(t) + ids = append(ids, formatterId) + + // Sink node + sinkId, _ := newSinkNode(t) + ids = append(ids, sinkId) + + // Data + requestId, err := uuid.GenerateUUID() + require.NoError(t, err) + data := newData(requestId) + + err = processManual(nshelper.RootContext(context.Background()), data, ids, nodes) + require.Error(t, err) + require.EqualError(t, err, "nodes are required") +} + +// TestProcessManual_IdNodeMismatch tests processManual when IDs don't match with +// the nodes in the supplied map. +func TestProcessManual_IdNodeMismatch(t *testing.T) { + t.Parallel() + + var ids []eventlogger.NodeID + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, formatterNode := newFormatterNode(t) + ids = append(ids, formatterId) + nodes[formatterId] = formatterNode + + // Sink node + sinkId, _ := newSinkNode(t) + ids = append(ids, sinkId) + + // Data + requestId, err := uuid.GenerateUUID() + require.NoError(t, err) + data := newData(requestId) + + err = processManual(nshelper.RootContext(context.Background()), data, ids, nodes) + require.Error(t, err) + require.ErrorContains(t, err, "node not found: ") +} + +// TestProcessManual_NotEnoughNodes tests processManual when there is only one +// node provided. 
+func TestProcessManual_NotEnoughNodes(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = processManual(nshelper.RootContext(context.Background()), data, ids, nodes)
+	require.Error(t, err)
+	require.EqualError(t, err, "minimum of 2 ids are required")
+}
+
+// TestProcessManual_LastNodeNotSink tests processManual when the last node is
+// not a filter or sink node.
+func TestProcessManual_LastNodeNotSink(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Another Formatter node
+	formatterId, formatterNode = newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = processManual(nshelper.RootContext(context.Background()), data, ids, nodes)
+	require.Error(t, err)
+	require.EqualError(t, err, "last node must be a filter or sink")
+}
+
+// TestProcessManualEndWithSink ensures that the manual processing of a test
+// message works as expected with proper inputs, which means processing ends with
+// the sink node.
+func TestProcessManualEndWithSink(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Sink node
+	sinkId, sinkNode := newSinkNode(t)
+	ids = append(ids, sinkId)
+	nodes[sinkId] = sinkNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = processManual(nshelper.RootContext(context.Background()), data, ids, nodes)
+	require.NoError(t, err)
+}
+
+// TestProcessManual_EndWithFilter ensures that the manual processing of a test
+// message works as expected with proper inputs, which means processing ends at
+// the filter node (the filter consumes the event before it reaches the sink).
+func TestProcessManual_EndWithFilter(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Filter node
+	filterId, filterNode := newFilterNode(t)
+	ids = append(ids, filterId)
+	nodes[filterId] = filterNode
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Sink node
+	sinkId, sinkNode := newSinkNode(t)
+	ids = append(ids, sinkId)
+	nodes[sinkId] = sinkNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = processManual(nshelper.RootContext(context.Background()), data, ids, nodes)
+	require.NoError(t, err)
+}
+
+// newSinkNode creates a new UUID and NoopSink (sink node).
+func newSinkNode(t *testing.T) (eventlogger.NodeID, *event.NoopSink) {
+	t.Helper()
+
+	sinkId, err := event.GenerateNodeID()
+	require.NoError(t, err)
+	sinkNode := event.NewNoopSink()
+
+	return sinkId, sinkNode
+}
+
+// TestFilter is a trivial implementation of eventlogger.Node used as a placeholder
+// for Filter nodes in tests.
+type TestFilter struct{}
+
+// Process trivially filters the event, preventing it from being processed by subsequent nodes.
+func (f *TestFilter) Process(_ context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	return nil, nil
+}
+
+// Reopen does nothing.
+func (f *TestFilter) Reopen() error {
+	return nil
+}
+
+// Type returns the eventlogger.NodeTypeFilter type.
+func (f *TestFilter) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeFilter
+}
+
+// TestFormatter is a trivial implementation of the eventlogger.Node interface
+// used as a place-holder for Formatter nodes in tests.
+type TestFormatter struct{}
+
+// Process trivially formats the event by storing "test" as a byte slice under
+// the test format type.
+func (f *TestFormatter) Process(_ context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	e.FormattedAs("test", []byte("test"))
+
+	return e, nil
+}
+
+// Reopen does nothing.
+func (f *TestFormatter) Reopen() error {
+	return nil
+}
+
+// Type returns the eventlogger.NodeTypeFormatter type.
+func (f *TestFormatter) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeFormatter
+}
+
+// newFilterNode creates a new TestFilter (filter node).
+func newFilterNode(t *testing.T) (eventlogger.NodeID, *TestFilter) {
+	t.Helper()
+
+	nodeId, err := event.GenerateNodeID()
+	require.NoError(t, err)
+	node := &TestFilter{}
+
+	return nodeId, node
+}
+
+// newFormatterNode creates a new TestFormatter (formatter node).
+func newFormatterNode(t *testing.T) (eventlogger.NodeID, *TestFormatter) {
+	t.Helper()
+
+	nodeId, err := event.GenerateNodeID()
+	require.NoError(t, err)
+	node := &TestFormatter{}
+
+	return nodeId, node
+}
+
+// newData creates a sample logical.LogInput to be used as data for tests.
+func newData(id string) *logical.LogInput {
+	return &logical.LogInput{
+		Type: "request",
+		Auth: nil,
+		Request: &logical.Request{
+			ID:        id,
+			Operation: "update",
+			Path:      "sys/audit/test",
+		},
+		Response: nil,
+		OuterErr: nil,
+	}
+}
diff --git a/audit/options.go b/audit/options.go
new file mode 100644
index 000000000000..73c46afb7d0b
--- /dev/null
+++ b/audit/options.go
@@ -0,0 +1,163 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"errors"
+	"strings"
+	"time"
+)
+
+// option is how options are passed as arguments.
+type option func(*options) error
+
+// options are used to represent configuration for audit related nodes.
+type options struct {
+	withID           string
+	withNow          time.Time
+	withSubtype      subtype
+	withFormat       format
+	withPrefix       string
+	withRaw          bool
+	withElision      bool
+	withOmitTime     bool
+	withHMACAccessor bool
+}
+
+// getDefaultOptions returns options with their default values.
+func getDefaultOptions() options {
+	return options{
+		withNow:          time.Now(),
+		withFormat:       jsonFormat,
+		withHMACAccessor: true,
+	}
+}
+
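This file uses the standard functional-options idiom. A tiny, self-contained sketch of the 'last write wins' behaviour follows; `settings`, `apply`, and the validation inside this `withPrefix` are illustrative stand-ins, not the package's actual code.

```go
package main

import (
	"errors"
	"fmt"
)

type settings struct{ prefix string }

// setting mirrors the option type above: a function that mutates the
// settings struct and may reject its input.
type setting func(*settings) error

func withPrefix(p string) setting {
	return func(s *settings) error {
		if p == "" {
			return errors.New("prefix cannot be empty")
		}
		s.prefix = p
		return nil
	}
}

// apply mirrors getOpts: start from defaults, apply the options in argument
// order (so the last write wins), and fail fast on the first bad option.
func apply(opts ...setting) (settings, error) {
	s := settings{prefix: "default"}
	for _, o := range opts {
		if o == nil {
			continue
		}
		if err := o(&s); err != nil {
			return settings{}, err
		}
	}
	return s, nil
}

func main() {
	s, err := apply(withPrefix("first"), withPrefix("second"))
	fmt.Println(s.prefix, err) // second <nil>: the later option overwrote the earlier one
}
```

Because each option can validate its input, the combinator below can fail even though individual options look like plain setters; the `TestOptions_Opts` cases exercise exactly that.

+// getOpts applies each supplied option and returns the fully configured options.
+// Each option is applied in the order it appears in the argument list, so it is
+// possible to supply the same option numerous times and the 'last write wins'.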
+func getOpts(opt ...option) (options, error) { + opts := getDefaultOptions() + for _, o := range opt { + if o == nil { + continue + } + if err := o(&opts); err != nil { + return options{}, err + } + } + return opts, nil +} + +// withID provides an optional ID. +func withID(id string) option { + return func(o *options) error { + var err error + + id := strings.TrimSpace(id) + switch { + case id == "": + err = errors.New("id cannot be empty") + default: + o.withID = id + } + + return err + } +} + +// withNow provides an option to represent 'now'. +func withNow(now time.Time) option { + return func(o *options) error { + var err error + + switch { + case now.IsZero(): + err = errors.New("cannot specify 'now' to be the zero time instant") + default: + o.withNow = now + } + + return err + } +} + +// withSubtype provides an option to represent the event subtype. +func withSubtype(s string) option { + return func(o *options) error { + s := strings.TrimSpace(s) + if s == "" { + return errors.New("subtype cannot be empty") + } + parsed := subtype(s) + err := parsed.validate() + if err != nil { + return err + } + + o.withSubtype = parsed + return nil + } +} + +// withFormat provides an option to represent event format. +func withFormat(f string) option { + return func(o *options) error { + f := strings.TrimSpace(strings.ToLower(f)) + if f == "" { + // Return early, we won't attempt to apply this option if its empty. + return nil + } + + parsed := format(f) + err := parsed.validate() + if err != nil { + return err + } + + o.withFormat = parsed + return nil + } +} + +// withPrefix provides an option to represent a prefix for a file sink. +func withPrefix(prefix string) option { + return func(o *options) error { + o.withPrefix = prefix + + return nil + } +} + +// withRaw provides an option to represent whether 'raw' is required. +func withRaw(r bool) option { + return func(o *options) error { + o.withRaw = r + return nil + } +} + +// withElision provides an option to represent whether elision (...) is required. +func withElision(e bool) option { + return func(o *options) error { + o.withElision = e + return nil + } +} + +// withOmitTime provides an option to represent whether to omit time. +func withOmitTime(t bool) option { + return func(o *options) error { + o.withOmitTime = t + return nil + } +} + +// withHMACAccessor provides an option to represent whether an HMAC accessor is applicable. +func withHMACAccessor(h bool) option { + return func(o *options) error { + o.withHMACAccessor = h + return nil + } +} diff --git a/audit/options_test.go b/audit/options_test.go new file mode 100644 index 000000000000..b4a668c3a8bf --- /dev/null +++ b/audit/options_test.go @@ -0,0 +1,509 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestOptions_withFormat exercises withFormat option to ensure it performs as expected. 
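The `withFormat` test that follows asserts the exact message produced by the `format` type's `validate` method, which is defined elsewhere in this change. As a reading aid, here is a plausible reconstruction of that kind of validator; only the constant names and the error text are taken from this diff, the body itself is an assumption.

```go
package main

import "fmt"

// format mirrors the string-backed type used by these options; this validate
// body is a hypothetical reconstruction, not the actual implementation.
type format string

const (
	jsonFormat  format = "json"
	jsonxFormat format = "jsonx"
)

func (f format) validate() error {
	switch f {
	case jsonFormat, jsonxFormat:
		return nil
	default:
		return fmt.Errorf("invalid format %q: invalid internal parameter", f)
	}
}

func main() {
	fmt.Println(format("json").validate()) // <nil>
	fmt.Println(format("test").validate()) // invalid format "test": invalid internal parameter
}
```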
+func TestOptions_withFormat(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue format + }{ + "empty": { + Value: "", + IsErrorExpected: false, + ExpectedValue: format(""), + }, + "whitespace": { + Value: " ", + IsErrorExpected: false, + ExpectedValue: format(""), + }, + "invalid-test": { + Value: "test", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid format \"test\": invalid internal parameter", + }, + "valid-json": { + Value: "json", + IsErrorExpected: false, + ExpectedValue: jsonFormat, + }, + "valid-jsonx": { + Value: "jsonx", + IsErrorExpected: false, + ExpectedValue: jsonxFormat, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withFormat(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withFormat) + } + }) + } +} + +// TestOptions_withSubtype exercises withSubtype option to ensure it performs as expected. +func TestOptions_withSubtype(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue subtype + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "subtype cannot be empty", + }, + "whitespace": { + Value: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "subtype cannot be empty", + }, + "valid": { + Value: "AuditResponse", + IsErrorExpected: false, + ExpectedValue: ResponseType, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withSubtype(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withSubtype) + } + }) + } +} + +// TestOptions_withNow exercises withNow option to ensure it performs as expected. +func TestOptions_withNow(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value time.Time + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue time.Time + }{ + "default-time": { + Value: time.Time{}, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant", + }, + "valid-time": { + Value: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + IsErrorExpected: false, + ExpectedValue: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + opts := &options{} + applyOption := withNow(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withNow) + } + }) + } +} + +// TestOptions_withID exercises withID option to ensure it performs as expected. 
+func TestOptions_withID(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty", + }, + "whitespace": { + Value: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty", + }, + "valid": { + Value: "test", + IsErrorExpected: false, + ExpectedValue: "test", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withID(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withID) + } + }) + } +} + +// TestOptions_withPrefix exercises withPrefix option to ensure it performs as expected. +func TestOptions_withPrefix(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue string + }{ + "empty": { + Value: "", + IsErrorExpected: false, + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + IsErrorExpected: false, + ExpectedValue: " ", + }, + "valid": { + Value: "test", + IsErrorExpected: false, + ExpectedValue: "test", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withPrefix(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withPrefix) + } + }) + } +} + +// TestOptions_withRaw exercises withRaw option to ensure it performs as expected. +func TestOptions_withRaw(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withRaw(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRaw) + }) + } +} + +// TestOptions_withElision exercises withElision option to ensure it performs as expected. +func TestOptions_withElision(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withElision(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withElision) + }) + } +} + +// TestOptions_withHMACAccessor exercises withHMACAccessor option to ensure it performs as expected. 
+func TestOptions_withHMACAccessor(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withHMACAccessor(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withHMACAccessor) + }) + } +} + +// TestOptions_withOmitTime exercises withOmitTime option to ensure it performs as expected. +func TestOptions_withOmitTime(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := withOmitTime(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withOmitTime) + }) + } +} + +// TestOptions_Default exercises getDefaultOptions to assert the default values. +func TestOptions_Default(t *testing.T) { + t.Parallel() + + opts := getDefaultOptions() + require.NotNil(t, opts) + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) +} + +// TestOptions_Opts exercises GetOpts with various option values. +func TestOptions_Opts(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + opts []option + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedID string + ExpectedSubtype subtype + ExpectedFormat format + IsNowExpected bool + ExpectedNow time.Time + }{ + "nil-options": { + opts: nil, + IsErrorExpected: false, + IsNowExpected: true, + ExpectedFormat: jsonFormat, + }, + "empty-options": { + opts: []option{}, + IsErrorExpected: false, + IsNowExpected: true, + ExpectedFormat: jsonFormat, + }, + "with-multiple-valid-id": { + opts: []option{ + withID("qwerty"), + withID("juan"), + }, + IsErrorExpected: false, + ExpectedID: "juan", + IsNowExpected: true, + ExpectedFormat: jsonFormat, + }, + "with-multiple-valid-subtype": { + opts: []option{ + withSubtype("AuditRequest"), + withSubtype("AuditResponse"), + }, + IsErrorExpected: false, + ExpectedSubtype: ResponseType, + IsNowExpected: true, + ExpectedFormat: jsonFormat, + }, + "with-multiple-valid-format": { + opts: []option{ + withFormat("json"), + withFormat("jsonx"), + }, + IsErrorExpected: false, + ExpectedFormat: jsonxFormat, + IsNowExpected: true, + }, + "with-multiple-valid-now": { + opts: []option{ + withNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + withNow(time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local)), + }, + IsErrorExpected: false, + ExpectedNow: time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local), + IsNowExpected: false, + ExpectedFormat: jsonFormat, + }, + "with-multiple-valid-then-invalid-now": { + opts: []option{ + withNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + withNow(time.Time{}), + }, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant", + ExpectedFormat: jsonFormat, + }, + "with-multiple-valid-options": { + opts: []option{ + withID("qwerty"), + withSubtype("AuditRequest"), + withFormat("json"), + withNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + }, + IsErrorExpected: 
false, + ExpectedID: "qwerty", + ExpectedSubtype: RequestType, + ExpectedFormat: jsonFormat, + ExpectedNow: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts, err := getOpts(tc.opts...) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NotNil(t, opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedID, opts.withID) + require.Equal(t, tc.ExpectedSubtype, opts.withSubtype) + require.Equal(t, tc.ExpectedFormat, opts.withFormat) + switch { + case tc.IsNowExpected: + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) + default: + require.Equal(t, tc.ExpectedNow, opts.withNow) + } + + } + }) + } +} diff --git a/audit/sink_metric_labeler.go b/audit/sink_metric_labeler.go new file mode 100644 index 000000000000..a433fe325781 --- /dev/null +++ b/audit/sink_metric_labeler.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/internal/observability/event" +) + +var _ event.Labeler = (*metricLabelerAuditSink)(nil) + +var ( + metricLabelAuditSinkSuccess = []string{"audit", "sink", "success"} + metricLabelAuditSinkFailure = []string{"audit", "sink", "failure"} +) + +// metricLabelerAuditSink can be used to provide labels for the success or failure +// of a sink node used for a normal audit device. +type metricLabelerAuditSink struct{} + +// Labels provides the success and failure labels for an audit sink, based on the error supplied. +// Success: 'vault.audit.sink.success' +// Failure: 'vault.audit.sink.failure' +func (m metricLabelerAuditSink) Labels(_ *eventlogger.Event, err error) []string { + if err != nil { + // NOTE: a cancelled context would still result in an error. + return metricLabelAuditSinkFailure + } + + return metricLabelAuditSinkSuccess +} diff --git a/audit/sink_metric_labeler_test.go b/audit/sink_metric_labeler_test.go new file mode 100644 index 000000000000..0a5956a2d999 --- /dev/null +++ b/audit/sink_metric_labeler_test.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestMetricLabelerAuditSink_Label ensures we always get the right label based +// on the input value of the error. +func TestMetricLabelerAuditSink_Label(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + err error + expected []string + }{ + "nil": { + err: nil, + expected: []string{"audit", "sink", "success"}, + }, + "error": { + err: errors.New("I am an error"), + expected: []string{"audit", "sink", "failure"}, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + m := &metricLabelerAuditSink{} + result := m.Labels(nil, tc.err) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/audit/sink_metric_timer.go b/audit/sink_metric_timer.go new file mode 100644 index 000000000000..461a6b371730 --- /dev/null +++ b/audit/sink_metric_timer.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*sinkMetricTimer)(nil)
+
+// sinkMetricTimer is a wrapper for any kind of eventlogger.NodeTypeSink node that
+// processes events containing an audit event payload.
+// It decorates the implemented eventlogger.Node Process method in order to emit
+// timing metrics for the duration between the creation time of the event and the
+// time the node completes processing.
+type sinkMetricTimer struct {
+	name string
+	sink eventlogger.Node
+}
+
+// newSinkMetricTimer should be used to create the sinkMetricTimer.
+// It expects a node of type eventlogger.NodeTypeSink to be supplied as the sink.
+func newSinkMetricTimer(name string, sink eventlogger.Node) (*sinkMetricTimer, error) {
+	name = strings.TrimSpace(name)
+	if name == "" {
+		return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter)
+	}
+
+	if sink == nil || reflect.ValueOf(sink).IsNil() {
+		return nil, fmt.Errorf("sink node is required: %w", ErrInvalidParameter)
+	}
+
+	if sink.Type() != eventlogger.NodeTypeSink {
+		return nil, fmt.Errorf("sink node must be of type 'sink': %w", ErrInvalidParameter)
+	}
+
+	return &sinkMetricTimer{
+		name: name,
+		sink: sink,
+	}, nil
+}
+
+// Process wraps the Process method of the underlying sink (eventlogger.Node).
+// Additionally, when the supplied eventlogger.Event has an audit event as its payload,
+// it measures the elapsed time between the creation of the eventlogger.Event and
+// the completion of processing, emitting this as a metric.
+// Examples:
+// 'vault.audit.{DEVICE}.log_request'
+// 'vault.audit.{DEVICE}.log_response'
+func (s *sinkMetricTimer) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	defer func() {
+		auditEvent, ok := e.Payload.(*Event)
+		if ok {
+			metrics.MeasureSince([]string{"audit", s.name, auditEvent.Subtype.MetricTag()}, e.CreatedAt)
+		}
+	}()
+
+	return s.sink.Process(ctx, e)
+}
+
+// Reopen wraps the Reopen method of the underlying sink (eventlogger.Node).
+func (s *sinkMetricTimer) Reopen() error {
+	return s.sink.Reopen()
+}
+
+// Type wraps the Type method of the underlying sink (eventlogger.Node).
+func (s *sinkMetricTimer) Type() eventlogger.NodeType {
+	return s.sink.Type()
+}
diff --git a/audit/sink_metric_timer_test.go b/audit/sink_metric_timer_test.go
new file mode 100644
index 000000000000..8aaec172087b
--- /dev/null
+++ b/audit/sink_metric_timer_test.go
@@ -0,0 +1,68 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"testing"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/vault/internal/observability/event"
+	"github.com/stretchr/testify/require"
+)
+
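Before the tests, a minimal sketch of the decorator technique `sinkMetricTimer` uses, with the eventlogger machinery stubbed out. Every name in the sketch is invented; only the measure-from-CreatedAt idea comes from the code above.

```go
package main

import (
	"fmt"
	"time"
)

// auditEvent stands in for eventlogger.Event: a payload plus its creation time.
type auditEvent struct {
	createdAt time.Time
	payload   string
}

// node is a minimal stand-in for the wrapped eventlogger.Node.
type node interface {
	process(e *auditEvent) error
}

type stdoutSink struct{}

func (stdoutSink) process(e *auditEvent) error {
	fmt.Println("sink:", e.payload)
	return nil
}

// timedNode decorates another node and, once processing completes, reports
// the span between the event's creation and now (the role metrics.MeasureSince
// plays in sinkMetricTimer.Process).
type timedNode struct {
	name string
	next node
}

func (t *timedNode) process(e *auditEvent) error {
	defer func() {
		fmt.Printf("metric audit.%s.log_request: %s\n", t.name, time.Since(e.createdAt))
	}()
	return t.next.process(e)
}

func main() {
	n := &timedNode{name: "file", next: stdoutSink{}}
	_ = n.process(&auditEvent{createdAt: time.Now(), payload: "hello"})
}
```

The decorator satisfies the same interface as the node it wraps, so it can be slotted into a pipeline without neighbouring nodes noticing, which is also why `sinkMetricTimer` simply forwards `Reopen` and `Type`.

+// TestNewSinkMetricTimer ensures that parameters are checked correctly and errors
+// reported as expected when attempting to create a sinkMetricTimer.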
+func TestNewSinkMetricTimer(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + node eventlogger.Node + isErrorExpected bool + expectedErrorMessage string + }{ + "happy": { + name: "foo", + node: &event.FileSink{}, + isErrorExpected: false, + }, + "no-name": { + name: "", + isErrorExpected: true, + expectedErrorMessage: "name is required: invalid internal parameter", + }, + "no-node": { + name: "foo", + node: nil, + isErrorExpected: true, + expectedErrorMessage: "sink node is required: invalid internal parameter", + }, + "bad-node": { + name: "foo", + node: &entryFormatter{}, + isErrorExpected: true, + expectedErrorMessage: "sink node must be of type 'sink': invalid internal parameter", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + m, err := newSinkMetricTimer(tc.name, tc.node) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + require.Nil(t, m) + default: + require.NoError(t, err) + require.NotNil(t, m) + } + }) + } +} diff --git a/audit/types.go b/audit/types.go new file mode 100644 index 000000000000..f034c2e8eddd --- /dev/null +++ b/audit/types.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "github.com/hashicorp/vault/sdk/logical" +) + +// entry represents an audit entry. +// It could be an entry for a request or response. +type entry struct { + Auth *auth `json:"auth,omitempty"` + Error string `json:"error,omitempty"` + Forwarded bool `json:"forwarded,omitempty"` + ForwardedFrom string `json:"forwarded_from,omitempty"` // Populated in Enterprise when a request is forwarded + Request *request `json:"request,omitempty"` + Response *response `json:"response,omitempty"` + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` +} + +type request struct { + ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` + ClientID string `json:"client_id,omitempty"` + ClientToken string `json:"client_token,omitempty"` + ClientTokenAccessor string `json:"client_token_accessor,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + ID string `json:"id,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + MountClass string `json:"mount_class,omitempty"` + MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountRunningSha256 string `json:"mount_running_sha256,omitempty"` + MountRunningVersion string `json:"mount_running_version,omitempty"` + MountType string `json:"mount_type,omitempty"` + Namespace *namespace `json:"namespace,omitempty"` + Operation logical.Operation `json:"operation,omitempty"` + Path string `json:"path,omitempty"` + PolicyOverride bool `json:"policy_override,omitempty"` + RemoteAddr string `json:"remote_address,omitempty"` + RemotePort int `json:"remote_port,omitempty"` + ReplicationCluster string `json:"replication_cluster,omitempty"` + RequestURI string `json:"request_uri,omitempty"` + WrapTTL int `json:"wrap_ttl,omitempty"` +} + +type response struct { + Auth *auth `json:"auth,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + MountClass string `json:"mount_class,omitempty"` + 
MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountRunningSha256 string `json:"mount_running_sha256,omitempty"` + MountRunningVersion string `json:"mount_running_plugin_version,omitempty"` + MountType string `json:"mount_type,omitempty"` + Redirect string `json:"redirect,omitempty"` + Secret *secret `json:"secret,omitempty"` + WrapInfo *responseWrapInfo `json:"wrap_info,omitempty"` + Warnings []string `json:"warnings,omitempty"` +} + +type auth struct { + Accessor string `json:"accessor,omitempty"` + ClientToken string `json:"client_token,omitempty"` + DisplayName string `json:"display_name,omitempty"` + EntityCreated bool `json:"entity_created,omitempty"` + EntityID string `json:"entity_id,omitempty"` + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + NumUses int `json:"num_uses,omitempty"` + Policies []string `json:"policies,omitempty"` + PolicyResults *policyResults `json:"policy_results,omitempty"` + RemainingUses int `json:"remaining_uses,omitempty"` + TokenPolicies []string `json:"token_policies,omitempty"` + TokenIssueTime string `json:"token_issue_time,omitempty"` + TokenTTL int64 `json:"token_ttl,omitempty"` + TokenType string `json:"token_type,omitempty"` +} + +type policyResults struct { + Allowed bool `json:"allowed"` + GrantingPolicies []policyInfo `json:"granting_policies,omitempty"` +} + +type policyInfo struct { + Name string `json:"name,omitempty"` + NamespaceId string `json:"namespace_id,omitempty"` + NamespacePath string `json:"namespace_path,omitempty"` + Type string `json:"type"` +} + +type secret struct { + LeaseID string `json:"lease_id,omitempty"` +} + +type responseWrapInfo struct { + Accessor string `json:"accessor,omitempty"` + CreationPath string `json:"creation_path,omitempty"` + CreationTime string `json:"creation_time,omitempty"` + Token string `json:"token,omitempty"` + TTL int `json:"ttl,omitempty"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +type namespace struct { + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` +} diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 000000000000..bc2e940659e0 --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +version: v1 +plugins: + - plugin: go + out: . + opt: + - paths=source_relative + - plugin: go-grpc + out: . + opt: + - paths=source_relative + diff --git a/buf.lock b/buf.lock new file mode 100644 index 000000000000..37619defaeec --- /dev/null +++ b/buf.lock @@ -0,0 +1,8 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: hashicorp + repository: go-kms-wrapping + commit: b117606343c8401082b98ec432af4cce + digest: shake256:6d6ec23f81669bf1d380b0783e6b4b86805f28733aed46222e7358441402b71760689ea10b45592db98caa3215a115120e03b1319192dfc918f966ccdc845715 diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 000000000000..5db4eeed484a --- /dev/null +++ b/buf.yaml @@ -0,0 +1,125 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +version: v1 +breaking: + use: + - FILE +deps: + - buf.build/hashicorp/go-kms-wrapping +lint: + ignore_only: + ENUM_VALUE_PREFIX: + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication_services_ent.proto + ENUM_ZERO_VALUE_SUFFIX: + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication_services_ent.proto + FIELD_LOWER_SNAKE_CASE: + - enthelpers/wal/types_ent.proto + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/logical/identity.proto + - sdk/plugin/pb/backend.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + IMPORT_USED: + - vault/hcp_link/proto/node_status/status.proto + - vault/replication_services_ent.proto + PACKAGE_DIRECTORY_MATCH: + - builtin/logical/pki/metadata.proto + - enthelpers/merkle/types_ent.proto + - enthelpers/wal/types_ent.proto + - helper/forwarding/types.proto + - helper/identity/mfa/types.proto + - helper/identity/types.proto + - helper/storagepacker/types.proto + - physical/raft/types.proto + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/helper/pluginutil/multiplexing.proto + - sdk/logical/event.proto + - sdk/logical/identity.proto + - sdk/logical/plugin.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/activity/activity_log.proto + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/link_control/link_control.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication/replication_resolver_ent.proto + - vault/seal/multi_wrap_value.proto + - vault/tokens/token.proto + PACKAGE_SAME_DIRECTORY: + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + PACKAGE_SAME_GO_PACKAGE: + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + PACKAGE_VERSION_SUFFIX: + - builtin/logical/pki/metadata.proto + - enthelpers/merkle/types_ent.proto + - enthelpers/wal/types_ent.proto + - helper/forwarding/types.proto + - helper/identity/mfa/types.proto + - helper/identity/types.proto + - helper/storagepacker/types.proto + - physical/raft/types.proto + - sdk/database/dbplugin/database.proto + - sdk/helper/pluginutil/multiplexing.proto + - sdk/logical/event.proto + - sdk/logical/identity.proto + - sdk/logical/plugin.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/activity/activity_log.proto + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/link_control/link_control.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + - vault/seal/multi_wrap_value.proto + - vault/tokens/token.proto + RPC_REQUEST_RESPONSE_UNIQUE: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/plugin/pb/backend.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + 
RPC_REQUEST_STANDARD_NAME: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + RPC_RESPONSE_STANDARD_NAME: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + SERVICE_SUFFIX: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/helper/pluginutil/multiplexing.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/hcp_link/proto/link_control/link_control.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go deleted file mode 100644 index 2c3ef3f8e077..000000000000 --- a/builtin/audit/file/backend.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package file - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { - if conf.SaltConfig == nil { - return nil, fmt.Errorf("nil salt config") - } - if conf.SaltView == nil { - return nil, fmt.Errorf("nil salt view") - } - - path, ok := conf.Config["file_path"] - if !ok { - path, ok = conf.Config["path"] - if !ok { - return nil, fmt.Errorf("file_path is required") - } - } - - // normalize path if configured for stdout - if strings.EqualFold(path, "stdout") { - path = "stdout" - } - if strings.EqualFold(path, "discard") { - path = "discard" - } - - format, ok := conf.Config["format"] - if !ok { - format = "json" - } - switch format { - case "json", "jsonx": - default: - return nil, fmt.Errorf("unknown format type %q", format) - } - - // Check if hashing of accessor is disabled - hmacAccessor := true - if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { - value, err := strconv.ParseBool(hmacAccessorRaw) - if err != nil { - return nil, err - } - hmacAccessor = value - } - - // Check if raw logging is enabled - logRaw := false - if raw, ok := conf.Config["log_raw"]; ok { - b, err := strconv.ParseBool(raw) - if err != nil { - return nil, err - } - logRaw = b - } - - elideListResponses := false - if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { - value, err := strconv.ParseBool(elideListResponsesRaw) - if err != nil { - return nil, err - } - elideListResponses = value - } - - // Check if mode is provided - mode := os.FileMode(0o600) - if modeRaw, ok := conf.Config["mode"]; ok { - m, err := strconv.ParseUint(modeRaw, 8, 32) - if err != nil { - return nil, err - } - switch m { - case 0: - // if mode is 0000, then do not modify file mode - if path != "stdout" && path != "discard" { - fileInfo, err := os.Stat(path) - if err != nil { - return nil, err - } - mode = fileInfo.Mode() - } - default: - mode = 
os.FileMode(m) - - } - - } - - b := &Backend{ - path: path, - mode: mode, - saltConfig: conf.SaltConfig, - saltView: conf.SaltView, - salt: new(atomic.Value), - formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - ElideListResponses: elideListResponses, - }, - } - - // Ensure we are working with the right type by explicitly storing a nil of - // the right type - b.salt.Store((*salt.Salt)(nil)) - - switch format { - case "json": - b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - case "jsonx": - b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - } - - switch path { - case "stdout", "discard": - // no need to test opening file if outputting to stdout or discarding - default: - // Ensure that the file can be successfully opened for writing; - // otherwise it will be too late to catch later without problems - // (ref: https://github.com/hashicorp/vault/issues/550) - if err := b.open(); err != nil { - return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", path, err) - } - } - - return b, nil -} - -// Backend is the audit backend for the file-based audit store. -// -// NOTE: This audit backend is currently very simple: it appends to a file. -// It doesn't do anything more at the moment to assist with rotation -// or reset the write cursor, this should be done in the future. -type Backend struct { - path string - - formatter audit.AuditFormatter - formatConfig audit.FormatterConfig - - fileLock sync.RWMutex - f *os.File - mode os.FileMode - - saltMutex sync.RWMutex - salt *atomic.Value - saltConfig *salt.Config - saltView logical.Storage -} - -var _ audit.Backend = (*Backend)(nil) - -func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { - s := b.salt.Load().(*salt.Salt) - if s != nil { - return s, nil - } - - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - - s = b.salt.Load().(*salt.Salt) - if s != nil { - return s, nil - } - - newSalt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) - if err != nil { - b.salt.Store((*salt.Salt)(nil)) - return nil, err - } - - b.salt.Store(newSalt) - return newSalt, nil -} - -func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { - salt, err := b.Salt(ctx) - if err != nil { - return "", err - } - - return audit.HashString(salt, data), nil -} - -func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { - var writer io.Writer - switch b.path { - case "stdout": - writer = os.Stdout - case "discard": - return nil - } - - buf := bytes.NewBuffer(make([]byte, 0, 2000)) - err := b.formatter.FormatRequest(ctx, buf, b.formatConfig, in) - if err != nil { - return err - } - - return b.log(ctx, buf, writer) -} - -func (b *Backend) log(ctx context.Context, buf *bytes.Buffer, writer io.Writer) error { - reader := bytes.NewReader(buf.Bytes()) - - b.fileLock.Lock() - - if writer == nil { - if err := b.open(); err != nil { - b.fileLock.Unlock() - return err - } - writer = b.f - } - - if _, err := reader.WriteTo(writer); err == nil { - b.fileLock.Unlock() - return nil - } else if b.path == "stdout" { - b.fileLock.Unlock() - return err - } - - // If writing to stdout there's no real reason to think anything would have - // changed so return above. Otherwise, opportunistically try to re-open the - // FD, once per call. 
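
> Reviewer note: the `Salt` method above pairs an `atomic.Value` with a mutex. The factory seeds the value with a typed nil `(*salt.Salt)(nil)` so the `Load().(*salt.Salt)` assertion can never panic, and the lock is only taken on the miss path. A minimal standalone sketch of the same pattern, with a placeholder `Salt` struct standing in for the SDK type:

```
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Salt stands in for the real salt.Salt type from the Vault SDK.
type Salt struct{ id string }

type cache struct {
	mu  sync.Mutex
	val atomic.Value // always holds a *Salt, possibly a typed nil
}

func newCache() *cache {
	c := &cache{}
	// Store a typed nil so Load().(*Salt) is always a valid assertion.
	c.val.Store((*Salt)(nil))
	return c
}

func (c *cache) get() *Salt {
	// Fast path: no lock when a value is already cached.
	if s := c.val.Load().(*Salt); s != nil {
		return s
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Re-check under the lock; another goroutine may have won the race.
	if s := c.val.Load().(*Salt); s != nil {
		return s
	}
	s := &Salt{id: "generated-once"} // stands in for salt.NewSalt
	c.val.Store(s)
	return s
}

func main() {
	c := newCache()
	fmt.Println(c.get().id)
}
```
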
- b.f.Close() - b.f = nil - - if err := b.open(); err != nil { - b.fileLock.Unlock() - return err - } - - reader.Seek(0, io.SeekStart) - _, err := reader.WriteTo(writer) - b.fileLock.Unlock() - return err -} - -func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { - var writer io.Writer - switch b.path { - case "stdout": - writer = os.Stdout - case "discard": - return nil - } - - buf := bytes.NewBuffer(make([]byte, 0, 6000)) - err := b.formatter.FormatResponse(ctx, buf, b.formatConfig, in) - if err != nil { - return err - } - - return b.log(ctx, buf, writer) -} - -func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - var writer io.Writer - switch b.path { - case "stdout": - writer = os.Stdout - case "discard": - return nil - } - - var buf bytes.Buffer - temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - return b.log(ctx, &buf, writer) -} - -// The file lock must be held before calling this -func (b *Backend) open() error { - if b.f != nil { - return nil - } - if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil { - return err - } - - var err error - b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode) - if err != nil { - return err - } - - // Change the file mode in case the log file already existed. We special - // case /dev/null since we can't chmod it and bypass if the mode is zero - switch b.path { - case "/dev/null": - default: - if b.mode != 0 { - err = os.Chmod(b.path, b.mode) - if err != nil { - return err - } - } - } - - return nil -} - -func (b *Backend) Reload(_ context.Context) error { - switch b.path { - case "stdout", "discard": - return nil - } - - b.fileLock.Lock() - defer b.fileLock.Unlock() - - if b.f == nil { - return b.open() - } - - err := b.f.Close() - // Set to nil here so that even if we error out, on the next access open() - // will be tried - b.f = nil - if err != nil { - return err - } - - return b.open() -} - -func (b *Backend) Invalidate(_ context.Context) { - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - b.salt.Store((*salt.Salt)(nil)) -} diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go deleted file mode 100644 index ad082ace5d43..000000000000 --- a/builtin/audit/file/backend_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
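
> Reviewer note: the `log` method above (completed at the top of this hunk) writes under the file lock and, unless the sink is stdout, closes and reopens the file exactly once on failure before retrying. A distilled sketch of that retry shape; the path, mode, and helper name here are illustrative, not the backend's actual API:

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// writeWithReopen writes buf to f; if the first write fails, it reopens
// the path once and retries, mirroring the audit file backend's log().
func writeWithReopen(f *os.File, path string, buf []byte) (*os.File, error) {
	reader := bytes.NewReader(buf)
	if _, err := reader.WriteTo(f); err == nil {
		return f, nil
	}
	// Opportunistically reopen the file descriptor, once per call.
	f.Close()
	nf, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o600)
	if err != nil {
		return nil, err
	}
	// Rewind so the retry writes the full entry from the start.
	if _, err := reader.Seek(0, io.SeekStart); err != nil {
		return nf, err
	}
	_, err = reader.WriteTo(nf)
	return nf, err
}

func main() {
	f, err := os.CreateTemp("", "audit")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := writeWithReopen(f, f.Name(), []byte("entry\n")); err != nil {
		fmt.Println("write failed:", err)
	}
}
```
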
-// SPDX-License-Identifier: MPL-2.0 - -package file - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "testing" - "time" - - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func TestAuditFile_fileModeNew(t *testing.T) { - modeStr := "0777" - mode, err := strconv.ParseUint(modeStr, 8, 32) - if err != nil { - t.Fatal(err) - } - - path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new") - if err != nil { - t.Fatal(err) - } - - defer os.RemoveAll(path) - - file := filepath.Join(path, "auditTest.txt") - - config := map[string]string{ - "path": file, - "mode": modeStr, - } - - _, err = Factory(context.Background(), &audit.BackendConfig{ - SaltConfig: &salt.Config{}, - SaltView: &logical.InmemStorage{}, - Config: config, - }) - if err != nil { - t.Fatal(err) - } - - info, err := os.Stat(file) - if err != nil { - t.Fatalf("Cannot retrieve file mode from `Stat`") - } - if info.Mode() != os.FileMode(mode) { - t.Fatalf("File mode does not match.") - } -} - -func TestAuditFile_fileModeExisting(t *testing.T) { - f, err := ioutil.TempFile("", "test") - if err != nil { - t.Fatalf("Failure to create test file.") - } - defer os.Remove(f.Name()) - - err = os.Chmod(f.Name(), 0o777) - if err != nil { - t.Fatalf("Failure to chmod temp file for testing.") - } - - err = f.Close() - if err != nil { - t.Fatalf("Failure to close temp file for test.") - } - - config := map[string]string{ - "path": f.Name(), - } - - _, err = Factory(context.Background(), &audit.BackendConfig{ - Config: config, - SaltConfig: &salt.Config{}, - SaltView: &logical.InmemStorage{}, - }) - if err != nil { - t.Fatal(err) - } - - info, err := os.Stat(f.Name()) - if err != nil { - t.Fatalf("cannot retrieve file mode from `Stat`") - } - if info.Mode() != os.FileMode(0o600) { - t.Fatalf("File mode does not match.") - } -} - -func TestAuditFile_fileMode0000(t *testing.T) { - f, err := ioutil.TempFile("", "test") - if err != nil { - t.Fatalf("Failure to create test file. The error is %v", err) - } - defer os.Remove(f.Name()) - - err = os.Chmod(f.Name(), 0o777) - if err != nil { - t.Fatalf("Failure to chmod temp file for testing. The error is %v", err) - } - - err = f.Close() - if err != nil { - t.Fatalf("Failure to close temp file for test. The error is %v", err) - } - - config := map[string]string{ - "path": f.Name(), - "mode": "0000", - } - - _, err = Factory(context.Background(), &audit.BackendConfig{ - Config: config, - SaltConfig: &salt.Config{}, - SaltView: &logical.InmemStorage{}, - }) - if err != nil { - t.Fatal(err) - } - - info, err := os.Stat(f.Name()) - if err != nil { - t.Fatalf("cannot retrieve file mode from `Stat`. 
The error is %v", err) - } - if info.Mode() != os.FileMode(0o777) { - t.Fatalf("File mode does not match.") - } -} - -func BenchmarkAuditFile_request(b *testing.B) { - config := map[string]string{ - "path": "/dev/null", - } - sink, err := Factory(context.Background(), &audit.BackendConfig{ - Config: config, - SaltConfig: &salt.Config{}, - SaltView: &logical.InmemStorage{}, - }) - if err != nil { - b.Fatal(err) - } - - in := &logical.LogInput{ - Auth: &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - EntityID: "foobarentity", - DisplayName: "testtoken", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - }, - Request: &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - }, - } - - ctx := namespace.RootContext(nil) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if err := sink.LogRequest(ctx, in); err != nil { - panic(err) - } - } - }) -} diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go deleted file mode 100644 index 4c649e0e9c74..000000000000 --- a/builtin/audit/socket/backend.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package socket - -import ( - "bytes" - "context" - "fmt" - "net" - "strconv" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { - if conf.SaltConfig == nil { - return nil, fmt.Errorf("nil salt config") - } - if conf.SaltView == nil { - return nil, fmt.Errorf("nil salt view") - } - - address, ok := conf.Config["address"] - if !ok { - return nil, fmt.Errorf("address is required") - } - - socketType, ok := conf.Config["socket_type"] - if !ok { - socketType = "tcp" - } - - writeDeadline, ok := conf.Config["write_timeout"] - if !ok { - writeDeadline = "2s" - } - writeDuration, err := parseutil.ParseDurationSecond(writeDeadline) - if err != nil { - return nil, err - } - - format, ok := conf.Config["format"] - if !ok { - format = "json" - } - switch format { - case "json", "jsonx": - default: - return nil, fmt.Errorf("unknown format type %q", format) - } - - // Check if hashing of accessor is disabled - hmacAccessor := true - if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { - value, err := strconv.ParseBool(hmacAccessorRaw) - if err != nil { - return nil, err - } - hmacAccessor = value - } - - // Check if raw logging is enabled - logRaw := false - if raw, ok := conf.Config["log_raw"]; ok { - b, err := strconv.ParseBool(raw) - if err != nil { - return nil, err - } - logRaw = b - } - - elideListResponses := false - if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { - value, err := strconv.ParseBool(elideListResponsesRaw) - if err != nil { - return nil, err - } - elideListResponses = value - } - - b := &Backend{ - saltConfig: conf.SaltConfig, - saltView: conf.SaltView, - formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - ElideListResponses: elideListResponses, - }, - - writeDuration: writeDuration, - address: address, - socketType: socketType, - } - - switch format 
{ - case "json": - b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - case "jsonx": - b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - } - - return b, nil -} - -// Backend is the audit backend for the socket audit transport. -type Backend struct { - connection net.Conn - - formatter audit.AuditFormatter - formatConfig audit.FormatterConfig - - writeDuration time.Duration - address string - socketType string - - sync.Mutex - - saltMutex sync.RWMutex - salt *salt.Salt - saltConfig *salt.Config - saltView logical.Storage -} - -var _ audit.Backend = (*Backend)(nil) - -func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { - salt, err := b.Salt(ctx) - if err != nil { - return "", err - } - return audit.HashString(salt, data), nil -} - -func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - b.Lock() - defer b.Unlock() - - err := b.write(ctx, buf.Bytes()) - if err != nil { - rErr := b.reconnect(ctx) - if rErr != nil { - err = multierror.Append(err, rErr) - } else { - // Try once more after reconnecting - err = b.write(ctx, buf.Bytes()) - } - } - - return err -} - -func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - b.Lock() - defer b.Unlock() - - err := b.write(ctx, buf.Bytes()) - if err != nil { - rErr := b.reconnect(ctx) - if rErr != nil { - err = multierror.Append(err, rErr) - } else { - // Try once more after reconnecting - err = b.write(ctx, buf.Bytes()) - } - } - - return err -} - -func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - var buf bytes.Buffer - temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - b.Lock() - defer b.Unlock() - - err := b.write(ctx, buf.Bytes()) - if err != nil { - rErr := b.reconnect(ctx) - if rErr != nil { - err = multierror.Append(err, rErr) - } else { - // Try once more after reconnecting - err = b.write(ctx, buf.Bytes()) - } - } - - return err -} - -func (b *Backend) write(ctx context.Context, buf []byte) error { - if b.connection == nil { - if err := b.reconnect(ctx); err != nil { - return err - } - } - - err := b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration)) - if err != nil { - return err - } - - _, err = b.connection.Write(buf) - if err != nil { - return err - } - - return nil -} - -func (b *Backend) reconnect(ctx context.Context) error { - if b.connection != nil { - b.connection.Close() - b.connection = nil - } - - timeoutContext, cancel := context.WithTimeout(ctx, b.writeDuration) - defer cancel() - - dialer := net.Dialer{} - conn, err := dialer.DialContext(timeoutContext, b.socketType, b.address) - if err != nil { - return err - } - - b.connection = conn - - return nil -} - -func (b *Backend) Reload(ctx context.Context) error { - b.Lock() - defer b.Unlock() - - err := b.reconnect(ctx) - - return err -} - -func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { - b.saltMutex.RLock() - if b.salt != nil { - defer b.saltMutex.RUnlock() - return b.salt, nil - } - 
b.saltMutex.RUnlock() - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - if b.salt != nil { - return b.salt, nil - } - salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) - if err != nil { - return nil, err - } - b.salt = salt - return salt, nil -} - -func (b *Backend) Invalidate(_ context.Context) { - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - b.salt = nil -} diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go deleted file mode 100644 index 2da92fe2e40e..000000000000 --- a/builtin/audit/syslog/backend.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package syslog - -import ( - "bytes" - "context" - "fmt" - "strconv" - "sync" - - gsyslog "github.com/hashicorp/go-syslog" - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { - if conf.SaltConfig == nil { - return nil, fmt.Errorf("nil salt config") - } - if conf.SaltView == nil { - return nil, fmt.Errorf("nil salt view") - } - - // Get facility or default to AUTH - facility, ok := conf.Config["facility"] - if !ok { - facility = "AUTH" - } - - // Get tag or default to 'vault' - tag, ok := conf.Config["tag"] - if !ok { - tag = "vault" - } - - format, ok := conf.Config["format"] - if !ok { - format = "json" - } - switch format { - case "json", "jsonx": - default: - return nil, fmt.Errorf("unknown format type %q", format) - } - - // Check if hashing of accessor is disabled - hmacAccessor := true - if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { - value, err := strconv.ParseBool(hmacAccessorRaw) - if err != nil { - return nil, err - } - hmacAccessor = value - } - - // Check if raw logging is enabled - logRaw := false - if raw, ok := conf.Config["log_raw"]; ok { - b, err := strconv.ParseBool(raw) - if err != nil { - return nil, err - } - logRaw = b - } - - elideListResponses := false - if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { - value, err := strconv.ParseBool(elideListResponsesRaw) - if err != nil { - return nil, err - } - elideListResponses = value - } - - // Get the logger - logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag) - if err != nil { - return nil, err - } - - b := &Backend{ - logger: logger, - saltConfig: conf.SaltConfig, - saltView: conf.SaltView, - formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - ElideListResponses: elideListResponses, - }, - } - - switch format { - case "json": - b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - case "jsonx": - b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - } - - return b, nil -} - -// Backend is the audit backend for the syslog-based audit store. 
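
> Reviewer note: all three socket log methods above share one shape: lock, attempt a deadline-bounded write, and on failure reconnect and retry exactly once (the real code accumulates both errors with `go-multierror`). A condensed sketch of that pattern over a plain `net.Conn`, with an illustrative address and timeout:

```
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

type sink struct {
	conn    net.Conn
	addr    string
	timeout time.Duration
}

func (s *sink) reconnect(ctx context.Context) error {
	if s.conn != nil {
		s.conn.Close()
		s.conn = nil
	}
	dialCtx, cancel := context.WithTimeout(ctx, s.timeout)
	defer cancel()
	var d net.Dialer
	conn, err := d.DialContext(dialCtx, "tcp", s.addr)
	if err != nil {
		return err
	}
	s.conn = conn
	return nil
}

func (s *sink) write(ctx context.Context, buf []byte) error {
	if s.conn == nil {
		if err := s.reconnect(ctx); err != nil {
			return err
		}
	}
	// Bound the write so a stalled peer cannot block audit logging forever.
	if err := s.conn.SetWriteDeadline(time.Now().Add(s.timeout)); err != nil {
		return err
	}
	_, err := s.conn.Write(buf)
	return err
}

// send retries exactly once after re-establishing the connection.
func (s *sink) send(ctx context.Context, buf []byte) error {
	err := s.write(ctx, buf)
	if err != nil {
		if rErr := s.reconnect(ctx); rErr != nil {
			return fmt.Errorf("write failed: %v; reconnect failed: %v", err, rErr)
		}
		err = s.write(ctx, buf)
	}
	return err
}

func main() {
	s := &sink{addr: "127.0.0.1:9090", timeout: 2 * time.Second}
	if err := s.send(context.Background(), []byte("audit entry\n")); err != nil {
		fmt.Println("send failed:", err)
	}
}
```
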
-type Backend struct { - logger gsyslog.Syslogger - - formatter audit.AuditFormatter - formatConfig audit.FormatterConfig - - saltMutex sync.RWMutex - salt *salt.Salt - saltConfig *salt.Config - saltView logical.Storage -} - -var _ audit.Backend = (*Backend)(nil) - -func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { - salt, err := b.Salt(ctx) - if err != nil { - return "", err - } - return audit.HashString(salt, data), nil -} - -func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - // Write out to syslog - _, err := b.logger.Write(buf.Bytes()) - return err -} - -func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - // Write out to syslog - _, err := b.logger.Write(buf.Bytes()) - return err -} - -func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - var buf bytes.Buffer - temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err - } - - // Send to syslog - _, err := b.logger.Write(buf.Bytes()) - return err -} - -func (b *Backend) Reload(_ context.Context) error { - return nil -} - -func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { - b.saltMutex.RLock() - if b.salt != nil { - defer b.saltMutex.RUnlock() - return b.salt, nil - } - b.saltMutex.RUnlock() - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - if b.salt != nil { - return b.salt, nil - } - salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) - if err != nil { - return nil, err - } - b.salt = salt - return salt, nil -} - -func (b *Backend) Invalidate(_ context.Context) { - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - b.salt = nil -} diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go index 60d463f085a3..4afdd596078c 100644 --- a/builtin/credential/approle/backend.go +++ b/builtin/credential/approle/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -15,6 +15,7 @@ import ( ) const ( + operationPrefixAppRole = "app-role" secretIDPrefix = "secret_id/" secretIDLocalPrefix = "secret_id_local/" secretIDAccessorPrefix = "accessor/" diff --git a/builtin/credential/approle/backend_test.go b/builtin/credential/approle/backend_test.go index 683249e316dc..95912952d82b 100644 --- a/builtin/credential/approle/backend_test.go +++ b/builtin/credential/approle/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -8,9 +8,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/require" - "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" ) func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { diff --git a/builtin/credential/approle/cmd/approle/main.go b/builtin/credential/approle/cmd/approle/main.go index 9000ea95810a..d28cea383beb 100644 --- a/builtin/credential/approle/cmd/approle/main.go +++ b/builtin/credential/approle/cmd/approle/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
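
> Reviewer note: the socket and syslog backends both memoize their salt with the classic read-lock/upgrade dance: check under `RLock`, release, take the write lock, and check again before doing the expensive derivation. A minimal sketch, with a stand-in for `salt.NewSalt`:

```
package main

import (
	"fmt"
	"sync"
)

type lazySalt struct {
	mu   sync.RWMutex
	salt *string // stand-in for *salt.Salt
}

func (l *lazySalt) get() *string {
	l.mu.RLock()
	if l.salt != nil {
		defer l.mu.RUnlock()
		return l.salt
	}
	l.mu.RUnlock()

	l.mu.Lock()
	defer l.mu.Unlock()
	// Another goroutine may have populated the salt between the two locks.
	if l.salt != nil {
		return l.salt
	}
	s := "derived-salt" // stands in for salt.NewSalt(ctx, view, config)
	l.salt = &s
	return l.salt
}

func main() {
	var l lazySalt
	fmt.Println(*l.get())
}
```
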
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go index 33df709747cd..ed1bc2ff0c36 100644 --- a/builtin/credential/approle/path_login.go +++ b/builtin/credential/approle/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -19,6 +19,10 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "login", + }, Fields: map[string]*framework.FieldSchema{ "role_id": { Type: framework.TypeString, @@ -95,7 +99,7 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request return nil, err } if roleIDIndex == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } roleName := roleIDIndex.Name @@ -109,7 +113,7 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request return nil, err } if role == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } return logical.ResolveRoleResponse(roleName) @@ -121,7 +125,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat // RoleID must be supplied during every login roleID := strings.TrimSpace(data.Get("role_id").(string)) if roleID == "" { - return logical.ErrorResponse("missing role_id"), nil + return nil, logical.ErrInvalidCredentials } // Look for the storage entry that maps the roleID to role @@ -130,7 +134,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if roleIDIndex == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } roleName := roleIDIndex.Name @@ -144,7 +148,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if role == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } metadata := make(map[string]string) @@ -180,7 +184,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if entry == nil { - return logical.ErrorResponse("invalid secret id"), logical.ErrInvalidCredentials + return logical.ErrorResponse("invalid role or secret ID"), logical.ErrInvalidCredentials } // If a secret ID entry does not have a corresponding accessor @@ -200,7 +204,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if entry == nil { - return logical.ErrorResponse("invalid secret id"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix) @@ -213,7 +217,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, fmt.Errorf("error deleting secret ID %q from storage: %w", secretIDHMAC, err) } } - return logical.ErrorResponse("invalid secret id"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } switch { diff --git a/builtin/credential/approle/path_login_test.go b/builtin/credential/approle/path_login_test.go index 
5a09c6c4e3f4..7dd8c7f0ff14 100644 --- a/builtin/credential/approle/path_login_test.go +++ b/builtin/credential/approle/path_login_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -354,7 +354,7 @@ func TestAppRole_RoleDoesNotExist(t *testing.T) { t.Fatal("Error not part of response.") } - if !strings.Contains(errString, "invalid role ID") { + if !strings.Contains(errString, "invalid role or secret ID") { t.Fatalf("Error was not due to invalid role ID. Error: %s", errString) } } diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index a5dcfe659de7..eaffcdaa453f 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/parseip" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/cidrutil" @@ -124,6 +124,10 @@ func rolePaths(b *backend) []*framework.Path { p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role_name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "role", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -214,7 +218,7 @@ can only be set during role creation and once set, it can't be reset later.`, Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", }, "secret_id_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, @@ -229,12 +233,12 @@ can only be set during role creation and once set, it can't be reset later.`, Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, }, "token_explicit_max_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.", }, "token_max_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "The maximum lifetime of the generated token", }, @@ -244,7 +248,7 @@ can only be set during role creation and once set, it can't be reset later.`, Description: "If true, the 'default' policy will not automatically be added to generated tokens", }, "token_period": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value.", }, @@ -260,7 +264,7 @@ can only be set during role creation and once set, it can't be reset later.`, Description: "The type of token to generate, service or batch", }, "token_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "The initial ttl of the token to generate", }, @@ -270,7 +274,7 @@ can only be set during role creation and once set, it can't be reset later.`, Description: "The maximum number of times a token may be used, a value of zero means unlimited", }, "period": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: false, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, @@ -300,20 +304,13 @@ can only be set during role creation and once set, it can't be reset later.`, p, { Pattern: "role/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "roles", + }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Required: true, - }, - }, - }}, - }, }, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), @@ -321,6 +318,10 @@ can only be set during role creation and once set, it can't be reset later.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/local-secret-ids$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "local-secret-ids", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -349,6 +350,10 @@ can only be set during role creation and once set, it can't be reset later.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "policies", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -400,6 +405,10 @@ can only be set during role creation and once set, it can't be reset later.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "bound-cidr-list", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -442,6 +451,10 @@ of CIDR blocks. 
If set, specifies the blocks of IP addresses which can perform t }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-bound-cidrs$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-bound-cidrs", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -483,6 +496,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-bound-cidrs$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-bound-cidrs", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -523,6 +540,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "bind-secret-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -564,6 +585,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-num-uses", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -604,6 +629,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-ttl", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -627,7 +656,7 @@ to 0, meaning no expiration.`, Description: "OK", Fields: map[string]*framework.FieldSchema{ "secret_id_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "Duration in seconds after which the issued secret ID should expire. 
Defaults to 0, meaning no expiration.", }, @@ -645,6 +674,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "period", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -672,13 +705,13 @@ to 0, meaning no expiration.`, Description: "OK", Fields: map[string]*framework.FieldSchema{ "period": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: false, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, }, "token_period": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: defTokenFields["token_period"].Description, }, @@ -696,6 +729,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-num-uses", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -736,6 +773,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-ttl", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -758,7 +799,7 @@ to 0, meaning no expiration.`, Description: "OK", Fields: map[string]*framework.FieldSchema{ "token_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: defTokenFields["token_ttl"].Description, }, @@ -776,6 +817,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-max-ttl", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -798,7 +843,7 @@ to 0, meaning no expiration.`, Description: "OK", Fields: map[string]*framework.FieldSchema{ "token_max_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: defTokenFields["token_max_ttl"].Description, }, @@ -816,6 +861,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "role-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -852,6 +901,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -902,7 +955,7 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Description: "Accessor of the secret ID", }, "secret_id_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, @@ -917,16 +970,8 @@ Overrides secret_id_ttl role option when supplied. 
May not be longer than role's }, logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Required: true, - Type: framework.TypeStringSlice, - }, - }, - }}, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-ids", }, }, }, @@ -935,6 +980,11 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + OperationVerb: "look-up", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -958,7 +1008,7 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Description: "Accessor of the secret ID", }, "secret_id_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, @@ -1003,6 +1053,10 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "destroy", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -1011,16 +1065,23 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's "secret_id": { Type: framework.TypeString, Description: "SecretID attached to the role.", + Query: true, }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id2", + }, }, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]), @@ -1028,6 +1089,11 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-by-accessor", + OperationVerb: "look-up", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -1051,7 +1117,7 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Description: "Accessor of the secret ID", }, "secret_id_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, @@ -1096,6 +1162,10 @@ Overrides secret_id_ttl role option when supplied. 
May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "destroy", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -1104,16 +1174,23 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's "secret_id_accessor": { Type: framework.TypeString, Description: "Accessor of the SecretID", + Query: true, }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id-by-accessor", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id-by-accessor2", + }, }, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), @@ -1121,6 +1198,10 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "custom-secret-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -1176,7 +1257,7 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Description: "Accessor of the secret ID", }, "secret_id_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index a5ea9d1d5159..6b35afc725e6 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -923,7 +923,7 @@ func TestAppRoleSecretIDLookup(t *testing.T) { expected := &logical.Response{ Data: map[string]interface{}{ "http_content_type": "application/json", - "http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null}`, + "http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null,"mount_type":""}`, "http_status_code": 404, }, } diff --git a/builtin/credential/approle/path_tidy_user_id.go b/builtin/credential/approle/path_tidy_user_id.go index f677f017561e..0367a0940f29 100644 --- a/builtin/credential/approle/path_tidy_user_id.go +++ b/builtin/credential/approle/path_tidy_user_id.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
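
> Reviewer note: each approle path above now carries `framework.DisplayAttributes`, so OpenAPI operation IDs are assembled from prefix/verb/suffix rather than derived from the pattern regex; operation-level attributes override the path-level ones, which is how the update and delete variants on a single path stay unique (hence the `secret-id2` suffix). A trimmed sketch of the shape, using the field names exactly as they appear in the hunks above (the callback is a stub):

```
package approlesketch

import (
	"context"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

const operationPrefixAppRole = "app-role"

// pathRoleIDSketch mirrors the display-attribute shape of the approle
// paths in this diff; the callback is a stub, not the real handler.
func pathRoleIDSketch() *framework.Path {
	readRoleID := func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
		return &logical.Response{Data: map[string]interface{}{"role_id": "example"}}, nil
	}
	return &framework.Path{
		Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$",
		// Path-level attributes apply to every operation on this path...
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAppRole,
			OperationSuffix: "role-id",
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: readRoleID,
				// ...and operation-level attributes override them where two
				// verbs on one path would otherwise produce the same ID.
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb: "read",
				},
			},
		},
	}
}
```
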
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -20,6 +20,12 @@ func pathTidySecretID(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy/secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + OperationVerb: "tidy", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathTidySecretIDUpdate, diff --git a/builtin/credential/approle/path_tidy_user_id_test.go b/builtin/credential/approle/path_tidy_user_id_test.go index c03686e89cbf..4b932cd11f5e 100644 --- a/builtin/credential/approle/path_tidy_user_id_test.go +++ b/builtin/credential/approle/path_tidy_user_id_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle diff --git a/builtin/credential/approle/validation.go b/builtin/credential/approle/validation.go index 70f2194aa678..b99af755d818 100644 --- a/builtin/credential/approle/validation.go +++ b/builtin/credential/approle/validation.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle diff --git a/builtin/credential/approle/validation_test.go b/builtin/credential/approle/validation_test.go index 7f7366b67937..d3386aa35615 100644 --- a/builtin/credential/approle/validation_test.go +++ b/builtin/credential/approle/validation_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index 5e94db7b915d..df5f7ec23325 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
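
> Reviewer note, stepping back to the `path_login.go` hunks earlier in this diff: the distinct "invalid role ID" and "invalid secret id" messages are deliberately collapsed into one generic "invalid role or secret ID", and an empty `role_id` now returns `logical.ErrInvalidCredentials`, so callers can no longer probe which half of the credential pair was wrong. A reduced sketch of that shape; `lookupRoleID` is a hypothetical stand-in for the storage lookup:

```
package approlesketch

import (
	"context"
	"strings"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// loginSketch mirrors the uniform-error shape of pathLoginUpdate above.
func loginSketch(ctx context.Context, req *logical.Request, data *framework.FieldData,
	lookupRoleID func(string) (string, error),
) (*logical.Response, error) {
	roleID := strings.TrimSpace(data.Get("role_id").(string))
	if roleID == "" {
		// Missing credentials get the sentinel error, not a descriptive message.
		return nil, logical.ErrInvalidCredentials
	}
	name, err := lookupRoleID(roleID)
	if err != nil {
		return nil, err // internal errors are still surfaced as errors
	}
	if name == "" {
		// Same message whether the role ID or the secret ID was bad.
		return logical.ErrorResponse("invalid role or secret ID"), nil
	}
	return &logical.Response{}, nil
}
```
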
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -20,7 +20,11 @@ import ( cache "github.com/patrickmn/go-cache" ) -const amzHeaderPrefix = "X-Amz-" +const ( + amzHeaderPrefix = "X-Amz-" + amzSignedHeaders = "X-Amz-SignedHeaders" + operationPrefixAWS = "aws" +) var defaultAllowedSTSRequestHeaders = []string{ "X-Amz-Algorithm", @@ -29,7 +33,8 @@ var defaultAllowedSTSRequestHeaders = []string{ "X-Amz-Date", "X-Amz-Security-Token", "X-Amz-Signature", - "X-Amz-SignedHeaders", + amzSignedHeaders, + "X-Amz-User-Agent", } func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { @@ -126,7 +131,9 @@ func Backend(_ *logical.BackendConfig) (*backend, error) { deprecatedTerms: strings.NewReplacer( "accesslist", "whitelist", + "access-list", "whitelist", "denylist", "blacklist", + "deny-list", "blacklist", ), } @@ -312,7 +319,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag switch entity.Type { case "user": - userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName}) + userInfo, err := iamClient.GetUserWithContext(ctx, &iam.GetUserInput{UserName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -321,7 +328,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } return *userInfo.User.UserId, nil case "role": - roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName}) + roleInfo, err := iamClient.GetRoleWithContext(ctx, &iam.GetRoleInput{RoleName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -330,7 +337,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } return *roleInfo.Role.RoleId, nil case "instance-profile": - profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) + profileInfo, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -343,13 +350,33 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } } -// genDeprecatedPath will return a deprecated version of a framework.Path. The will include -// using deprecated terms in the path pattern, and marking the path as deprecated. +// genDeprecatedPath will return a deprecated version of a framework.Path. The +// path pattern and display attributes (if any) will contain deprecated terms, +// and the path will be marked as deprecated. 
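
> Reviewer note: `resolveArnToRealUniqueId` above switches from `GetUser`/`GetRole`/`GetInstanceProfile` to their `WithContext` variants, so cancelling the Vault request also cancels the in-flight AWS call. A minimal illustration against the same SDK; the user name is illustrative and credentials come from the default chain:

```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	sess := session.Must(session.NewSession())
	client := iam.New(sess)

	// Deriving a deadline here means the SDK call is abandoned when the
	// surrounding request is cancelled, instead of running to completion.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := client.GetUserWithContext(ctx, &iam.GetUserInput{
		UserName: aws.String("example-user"), // illustrative name
	})
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("user id:", aws.StringValue(out.User.UserId))
}
```
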
func (b *backend) genDeprecatedPath(path *framework.Path) *framework.Path { pathDeprecated := *path pathDeprecated.Pattern = b.deprecatedTerms.Replace(path.Pattern) pathDeprecated.Deprecated = true + if path.DisplayAttrs != nil { + deprecatedDisplayAttrs := *path.DisplayAttrs + deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationPrefix) + deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationVerb) + deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationSuffix) + pathDeprecated.DisplayAttrs = &deprecatedDisplayAttrs + } + + for i, op := range path.Operations { + if op.Properties().DisplayAttrs != nil { + deprecatedDisplayAttrs := *op.Properties().DisplayAttrs + deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationPrefix) + deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationVerb) + deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationSuffix) + deprecatedProperties := pathDeprecated.Operations[i].(*framework.PathOperation) + deprecatedProperties.DisplayAttrs = &deprecatedDisplayAttrs + } + } + return &pathDeprecated } diff --git a/builtin/credential/aws/backend_e2e_test.go b/builtin/credential/aws/backend_e2e_test.go index e8939b9d67af..ea8076b8761c 100644 --- a/builtin/credential/aws/backend_e2e_test.go +++ b/builtin/credential/aws/backend_e2e_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -8,10 +8,8 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) @@ -104,9 +102,7 @@ func TestBackend_E2E_Initialize(t *testing.T) { func setupAwsTestCluster(t *testing.T, _ context.Context) *vault.TestCluster { // create a cluster with the aws auth backend built-in - logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "aws": Factory, }, diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index dea280c00262..5770b2c5ed36 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -8,7 +8,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "os" "strings" @@ -1046,6 +1046,7 @@ This is an acceptance test. 
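
> Reviewer note: `genDeprecatedPath` above runs the pattern and display attributes through the shared `strings.Replacer`; note the clone is shallow, so the `Operations` map (and the `*PathOperation` values inside it) is still shared with the original path. A standalone illustration of the replacer half, with an illustrative path pattern:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// At each position the pairs are tried in argument order, scanning
	// left to right without overlapping matches.
	deprecatedTerms := strings.NewReplacer(
		"accesslist", "whitelist",
		"access-list", "whitelist",
		"denylist", "blacklist",
		"deny-list", "blacklist",
	)

	pattern := "roletag-denylist/" // illustrative path pattern
	fmt.Println(deprecatedTerms.Replace(pattern))
	// Output: roletag-blacklist/
}
```
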
export TEST_AWS_EC2_IAM_ROLE_ARN=$(aws iam get-role --role-name $(curl -q http://169.254.169.254/latest/meta-data/iam/security-credentials/ -S -s) --query Role.Arn --output text) export TEST_AWS_EC2_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) + If the test is not being run on an EC2 instance that has access to credentials using EC2RoleProvider, on top of the above vars, following needs to be set: @@ -1407,6 +1408,11 @@ func TestBackend_pathStsConfig(t *testing.T) { "sts_role": "arn:aws:iam:account1:role/myRole", } + data2 := map[string]interface{}{ + "sts_role": "arn:aws:iam:account2:role/myRole2", + "external_id": "fake_id", + } + stsReq.Data = data // test create operation resp, err := b.HandleRequest(context.Background(), stsReq) @@ -1440,13 +1446,28 @@ func TestBackend_pathStsConfig(t *testing.T) { stsReq.Operation = logical.CreateOperation stsReq.Path = "config/sts/account2" - stsReq.Data = data - // create another entry to test the list operation + stsReq.Data = data2 + // create another entry with alternate data to test ExternalID and LIST resp, err = b.HandleRequest(context.Background(), stsReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatal(err) } + // test second read + stsReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil { + t.Fatal(err) + } + expectedStsRole = "arn:aws:iam:account2:role/myRole2" + expectedExternalID := "fake_id" + if resp.Data["sts_role"].(string) != expectedStsRole { + t.Fatalf("bad: expected:%s\n got:%s\n", expectedStsRole, resp.Data["sts_role"].(string)) + } + if resp.Data["external_id"].(string) != expectedExternalID { + t.Fatalf("bad: expected:%s\n got:%s\n", expectedExternalID, resp.Data["external_id"].(string)) + } + stsReq.Operation = logical.ListOperation stsReq.Path = "config/sts" // test list operation @@ -1495,7 +1516,7 @@ func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[s if err != nil { return nil, err } - requestBody, err := ioutil.ReadAll(request.Body) + requestBody, err := io.ReadAll(request.Body) if err != nil { return nil, err } @@ -1504,7 +1525,7 @@ func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[s "iam_request_url": base64.StdEncoding.EncodeToString([]byte(request.URL.String())), "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson), "iam_request_body": base64.StdEncoding.EncodeToString(requestBody), - "request_role": roleName, + "role": roleName, }, nil } diff --git a/builtin/credential/aws/certificates.go b/builtin/credential/aws/certificates.go index 4b97a952b6fa..a56b757521f5 100644 --- a/builtin/credential/aws/certificates.go +++ b/builtin/credential/aws/certificates.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go index a1695574f3b6..2cc228ac54b0 100644 --- a/builtin/credential/aws/cli.go +++ b/builtin/credential/aws/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth diff --git a/builtin/credential/aws/client.go b/builtin/credential/aws/client.go index 079eabbe817f..d06900b9f123 100644 --- a/builtin/credential/aws/client.go +++ b/builtin/credential/aws/client.go @@ -1,20 +1,26 @@ // Copyright (c) HashiCorp, Inc. 
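
> Reviewer note: `buildCallerIdentityLoginData` above base64-encodes the signed `sts:GetCallerIdentity` request (URL, headers, body) into the login payload, and the role field is now `role` rather than `request_role`. A reduced sketch of the encoding half with a fabricated request; the `iam_http_request_method` key follows Vault's documented AWS login API rather than this hunk:

```
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// loginData mirrors the payload map built by buildCallerIdentityLoginData.
func loginData(req *http.Request, role string) (map[string]interface{}, error) {
	headersJSON, err := json.Marshal(req.Header)
	if err != nil {
		return nil, err
	}
	body, err := io.ReadAll(req.Body)
	if err != nil {
		return nil, err
	}
	return map[string]interface{}{
		"iam_http_request_method": req.Method,
		"iam_request_url":         base64.StdEncoding.EncodeToString([]byte(req.URL.String())),
		"iam_request_headers":     base64.StdEncoding.EncodeToString(headersJSON),
		"iam_request_body":        base64.StdEncoding.EncodeToString(body),
		"role":                    role,
	}, nil
}

func main() {
	// Illustrative stand-in for a presigned sts:GetCallerIdentity request.
	req, _ := http.NewRequest(http.MethodPost, "https://sts.amazonaws.com/",
		bytes.NewBufferString("Action=GetCallerIdentity&Version=2011-06-15"))
	req.Header.Set("X-Amz-Date", "20240101T000000Z")

	data, err := loginData(req, "dev-role")
	if err != nil {
		panic(err)
	}
	fmt.Println(data["iam_request_url"])
}
```
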
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth import ( "context" "fmt" + "strconv" + "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/sts" cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -58,6 +64,26 @@ func (b *backend) getRawClientConfig(ctx context.Context, s logical.Storage, reg credsConfig.AccessKey = config.AccessKey credsConfig.SecretKey = config.SecretKey maxRetries = config.MaxRetries + + if config.IdentityTokenAudience != "" { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get namespace from context: %w", err) + } + + fetcher := &PluginIdentityTokenFetcher{ + sys: b.System(), + logger: b.Logger(), + ns: ns, + audience: config.IdentityTokenAudience, + ttl: config.IdentityTokenTTL, + } + + sessionSuffix := strconv.FormatInt(time.Now().UnixNano(), 10) + credsConfig.RoleSessionName = fmt.Sprintf("vault-aws-auth-%s", sessionSuffix) + credsConfig.WebIdentityTokenFetcher = fetcher + credsConfig.RoleARN = config.RoleARN + } } credsConfig.HTTPClient = cleanhttp.DefaultClient() @@ -84,7 +110,7 @@ func (b *backend) getRawClientConfig(ctx context.Context, s logical.Storage, reg // It uses getRawClientConfig to obtain config for the runtime environment, and if // stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed // credentials. The credentials will expire after 15 minutes but will auto-refresh. 
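
> Reviewer note: the `getClientConfig` hunk that follows threads an `externalID` through to `AssumeRole`: when set, it is attached via an `AssumeRoleProvider` option so a trust policy's `sts:ExternalId` condition can be satisfied, and the credentials are exercised eagerly with `Get()` before the config is cached. A condensed sketch of just that branch, with an illustrative role ARN:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

// assumedCreds mirrors the branch added in getClientConfig below.
func assumedCreds(sess *session.Session, stsRole, externalID string) *credentials.Credentials {
	if externalID != "" {
		return stscreds.NewCredentials(sess, stsRole, func(p *stscreds.AssumeRoleProvider) {
			p.ExternalID = aws.String(externalID)
		})
	}
	return stscreds.NewCredentials(sess, stsRole)
}

func main() {
	sess := session.Must(session.NewSession())
	creds := assumedCreds(sess, "arn:aws:iam::123456789012:role/example", "fake_id")
	// Credentials resolve lazily; Get performs the actual AssumeRole call,
	// which doubles as a permissions check before anything is cached.
	if _, err := creds.Get(); err != nil {
		fmt.Println("assume role failed:", err)
	}
}
```
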
-func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) { +func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region, stsRole, externalID, accountID, clientType string) (*aws.Config, error) { config, err := b.getRawClientConfig(ctx, s, region, clientType) if err != nil { return nil, err @@ -105,7 +131,12 @@ func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region if err != nil { return nil, err } - assumedCredentials := stscreds.NewCredentials(sess, stsRole) + var assumedCredentials *credentials.Credentials + if externalID != "" { + assumedCredentials = stscreds.NewCredentials(sess, stsRole, func(p *stscreds.AssumeRoleProvider) { p.ExternalID = aws.String(externalID) }) + } else { + assumedCredentials = stscreds.NewCredentials(sess, stsRole) + } // Test that we actually have permissions to assume the role if _, err = assumedCredentials.Get(); err != nil { return nil, err @@ -122,7 +153,7 @@ func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region return nil, fmt.Errorf("could not obtain sts client: %w", err) } inputParams := &sts.GetCallerIdentityInput{} - identity, err := client.GetCallerIdentity(inputParams) + identity, err := client.GetCallerIdentityWithContext(ctx, inputParams) if err != nil { return nil, fmt.Errorf("unable to fetch current caller: %w", err) } @@ -180,22 +211,22 @@ func (b *backend) setCachedUserId(userId, arn string) { } } -func (b *backend) stsRoleForAccount(ctx context.Context, s logical.Storage, accountID string) (string, error) { +func (b *backend) stsRoleForAccount(ctx context.Context, s logical.Storage, accountID string) (string, string, error) { // Check if an STS configuration exists for the AWS account sts, err := b.lockedAwsStsEntry(ctx, s, accountID) if err != nil { - return "", fmt.Errorf("error fetching STS config for account ID %q: %w", accountID, err) + return "", "", fmt.Errorf("error fetching STS config for account ID %q: %w", accountID, err) } // An empty STS role signifies the master account if sts != nil { - return sts.StsRole, nil + return sts.StsRole, sts.ExternalID, nil } - return "", nil + return "", "", nil } // clientEC2 creates a client to interact with AWS EC2 API func (b *backend) clientEC2(ctx context.Context, s logical.Storage, region, accountID string) (*ec2.EC2, error) { - stsRole, err := b.stsRoleForAccount(ctx, s, accountID) + stsRole, stsExternalID, err := b.stsRoleForAccount(ctx, s, accountID) if err != nil { return nil, err } @@ -218,8 +249,7 @@ func (b *backend) clientEC2(ctx context.Context, s logical.Storage, region, acco // Create an AWS config object using a chain of providers var awsConfig *aws.Config - awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "ec2") - + awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, stsExternalID, accountID, "ec2") if err != nil { return nil, err } @@ -248,7 +278,7 @@ func (b *backend) clientEC2(ctx context.Context, s logical.Storage, region, acco // clientIAM creates a client to interact with AWS IAM API func (b *backend) clientIAM(ctx context.Context, s logical.Storage, region, accountID string) (*iam.IAM, error) { - stsRole, err := b.stsRoleForAccount(ctx, s, accountID) + stsRole, stsExternalID, err := b.stsRoleForAccount(ctx, s, accountID) if err != nil { return nil, err } @@ -278,8 +308,7 @@ func (b *backend) clientIAM(ctx context.Context, s logical.Storage, region, acco // Create an AWS config object 
using a chain of providers var awsConfig *aws.Config - awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "iam") - + awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, stsExternalID, accountID, "iam") if err != nil { return nil, err } @@ -304,3 +333,36 @@ func (b *backend) clientIAM(ctx context.Context, s logical.Storage, region, acco } return b.IAMClientsMap[region][stsRole], nil } + +// PluginIdentityTokenFetcher fetches plugin identity tokens from Vault. It is provided +// to the AWS SDK client to keep assumed role credentials refreshed through expiration. +// When the client's STS credentials expire, it will use this interface to fetch a new +// plugin identity token and exchange it for new STS credentials. +type PluginIdentityTokenFetcher struct { + sys logical.SystemView + logger hclog.Logger + audience string + ns *namespace.Namespace + ttl time.Duration +} + +var _ stscreds.TokenFetcher = (*PluginIdentityTokenFetcher)(nil) + +func (f PluginIdentityTokenFetcher) FetchToken(ctx aws.Context) ([]byte, error) { + nsCtx := namespace.ContextWithNamespace(ctx, f.ns) + resp, err := f.sys.GenerateIdentityToken(nsCtx, &pluginutil.IdentityTokenRequest{ + Audience: f.audience, + TTL: f.ttl, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate plugin identity token: %w", err) + } + f.logger.Info("fetched new plugin identity token") + + if resp.TTL < f.ttl { + f.logger.Debug("generated plugin identity token has shorter TTL than requested", + "requested", f.ttl, "actual", resp.TTL) + } + + return []byte(resp.Token.Token()), nil +} diff --git a/builtin/credential/aws/cmd/aws/main.go b/builtin/credential/aws/cmd/aws/main.go index c7fce3e33fab..8a1ecff0bee9 100644 --- a/builtin/credential/aws/cmd/aws/main.go +++ b/builtin/credential/aws/cmd/aws/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/aws/path_config_certificate.go b/builtin/credential/aws/path_config_certificate.go index 7143f991b922..1b15fcf3b574 100644 --- a/builtin/credential/aws/path_config_certificate.go +++ b/builtin/credential/aws/path_config_certificate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -21,6 +21,11 @@ func (b *backend) pathListCertificates() *framework.Path { return &framework.Path{ Pattern: "config/certificates/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "certificate-configurations", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathCertificatesList, @@ -35,6 +40,11 @@ func (b *backend) pathListCertificates() *framework.Path { func (b *backend) pathConfigCertificate() *framework.Path { return &framework.Path{ Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "cert_name": { Type: framework.TypeString, @@ -61,15 +71,29 @@ vary. 
Defaults to "pkcs7".`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "certificate", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "certificate", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "certificate-configuration", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "certificate-configuration", + }, }, }, diff --git a/builtin/credential/aws/path_config_client.go b/builtin/credential/aws/path_config_client.go index e94b355ec130..b47cee617ee0 100644 --- a/builtin/credential/aws/path_config_client.go +++ b/builtin/credential/aws/path_config_client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -8,17 +8,25 @@ import ( "errors" "net/http" "net/textproto" + "net/url" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/pluginidentityutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) func (b *backend) pathConfigClient() *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "config/client$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "access_key": { Type: framework.TypeString, @@ -56,6 +64,12 @@ func (b *backend) pathConfigClient() *framework.Path { Description: "The region ID for the sts_endpoint, if set.", }, + "use_sts_region_from_client": { + Type: framework.TypeBool, + Default: false, + Description: "Uses the STS region from client requests for making AWS STS API calls.", + }, + "iam_server_id_header_value": { Type: framework.TypeString, Default: "", @@ -73,6 +87,12 @@ func (b *backend) pathConfigClient() *framework.Path { Default: aws.UseServiceDefaultRetries, Description: "Maximum number of retries for recoverable exceptions of AWS APIs", }, + + "role_arn": { + Type: framework.TypeString, + Default: "", + Description: "Role ARN to assume for plugin identity token federation", + }, }, ExistenceCheck: b.pathConfigClientExistenceCheck, @@ -80,21 +100,38 @@ func (b *backend) pathConfigClient() *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigClientCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "client", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigClientCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "client", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigClientDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "client-configuration", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: 
b.pathConfigClientRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "client-configuration", + }, }, }, HelpSynopsis: pathConfigClientHelpSyn, HelpDescription: pathConfigClientHelpDesc, } + pluginidentityutil.AddPluginIdentityTokenFields(p.Fields) + + return p } // Establishes dichotomy of request operation between CreateOperation and UpdateOperation. @@ -142,17 +179,22 @@ func (b *backend) pathConfigClientRead(ctx context.Context, req *logical.Request return nil, nil } + configData := map[string]interface{}{ + "access_key": clientConfig.AccessKey, + "endpoint": clientConfig.Endpoint, + "iam_endpoint": clientConfig.IAMEndpoint, + "sts_endpoint": clientConfig.STSEndpoint, + "sts_region": clientConfig.STSRegion, + "use_sts_region_from_client": clientConfig.UseSTSRegionFromClient, + "iam_server_id_header_value": clientConfig.IAMServerIdHeaderValue, + "max_retries": clientConfig.MaxRetries, + "allowed_sts_header_values": clientConfig.AllowedSTSHeaderValues, + "role_arn": clientConfig.RoleARN, + } + + clientConfig.PopulatePluginIdentityTokenData(configData) return &logical.Response{ - Data: map[string]interface{}{ - "access_key": clientConfig.AccessKey, - "endpoint": clientConfig.Endpoint, - "iam_endpoint": clientConfig.IAMEndpoint, - "sts_endpoint": clientConfig.STSEndpoint, - "sts_region": clientConfig.STSRegion, - "iam_server_id_header_value": clientConfig.IAMServerIdHeaderValue, - "max_retries": clientConfig.MaxRetries, - "allowed_sts_header_values": clientConfig.AllowedSTSHeaderValues, - }, + Data: configData, }, nil } @@ -262,6 +304,14 @@ func (b *backend) pathConfigClientCreateUpdate(ctx context.Context, req *logical } } + useSTSRegionFromClientRaw, ok := data.GetOk("use_sts_region_from_client") + if ok { + if configEntry.UseSTSRegionFromClient != useSTSRegionFromClientRaw.(bool) { + changedCreds = true + configEntry.UseSTSRegionFromClient = useSTSRegionFromClientRaw.(bool) + } + } + headerValStr, ok := data.GetOk("iam_server_id_header_value") if ok { if configEntry.IAMServerIdHeaderValue != headerValStr.(string) { @@ -299,6 +349,41 @@ func (b *backend) pathConfigClientCreateUpdate(ctx context.Context, req *logical configEntry.MaxRetries = data.Get("max_retries").(int) } + roleArnStr, ok := data.GetOk("role_arn") + if ok { + if configEntry.RoleARN != roleArnStr.(string) { + changedCreds = true + configEntry.RoleARN = roleArnStr.(string) + } + } else if req.Operation == logical.CreateOperation { + configEntry.RoleARN = data.Get("role_arn").(string) + } + + if err := configEntry.ParsePluginIdentityTokenFields(data); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // handle mutual exclusivity + if configEntry.IdentityTokenAudience != "" && configEntry.AccessKey != "" { + return logical.ErrorResponse("only one of 'access_key' or 'identity_token_audience' can be set"), nil + } + + if configEntry.IdentityTokenAudience != "" && configEntry.RoleARN == "" { + return logical.ErrorResponse("role_arn must be set when identity_token_audience is set"), nil + } + + if configEntry.IdentityTokenAudience != "" { + _, err := b.System().GenerateIdentityToken(ctx, &pluginutil.IdentityTokenRequest{ + Audience: configEntry.IdentityTokenAudience, + }) + if err != nil { + if errors.Is(err, pluginidentityutil.ErrPluginWorkloadIdentityUnsupported) { + return logical.ErrorResponse(err.Error()), nil + } + return nil, err + } + } + // Since this endpoint supports both create operation and update operation, // the error checks for access_key and secret_key not being set are 
not present. // This allows calling this endpoint multiple times to provide the values. @@ -338,20 +423,27 @@ func (b *backend) configClientToEntry(conf *clientConfig) (*logical.StorageEntry // Struct to hold 'aws_access_key' and 'aws_secret_key' that are required to // interact with the AWS EC2 API. type clientConfig struct { + pluginidentityutil.PluginIdentityTokenParams + AccessKey string `json:"access_key"` SecretKey string `json:"secret_key"` Endpoint string `json:"endpoint"` IAMEndpoint string `json:"iam_endpoint"` STSEndpoint string `json:"sts_endpoint"` STSRegion string `json:"sts_region"` + UseSTSRegionFromClient bool `json:"use_sts_region_from_client"` IAMServerIdHeaderValue string `json:"iam_server_id_header_value"` AllowedSTSHeaderValues []string `json:"allowed_sts_header_values"` MaxRetries int `json:"max_retries"` + RoleARN string `json:"role_arn"` } func (c *clientConfig) validateAllowedSTSHeaderValues(headers http.Header) error { for k := range headers { h := textproto.CanonicalMIMEHeaderKey(k) + if h == "X-Amz-Signedheaders" { + h = amzSignedHeaders + } if strings.HasPrefix(h, amzHeaderPrefix) && !strutil.StrListContains(defaultAllowedSTSRequestHeaders, h) && !strutil.StrListContains(c.AllowedSTSHeaderValues, h) { @@ -361,6 +453,21 @@ func (c *clientConfig) validateAllowedSTSHeaderValues(headers http.Header) error return nil } +func (c *clientConfig) validateAllowedSTSQueryValues(params url.Values) error { + for k := range params { + h := textproto.CanonicalMIMEHeaderKey(k) + if h == "X-Amz-Signedheaders" { + h = amzSignedHeaders + } + if strings.HasPrefix(h, amzHeaderPrefix) && + !strutil.StrListContains(defaultAllowedSTSRequestHeaders, k) && + !strutil.StrListContains(c.AllowedSTSHeaderValues, k) { + return errors.New("invalid request query param: " + k) + } + } + return nil +} + const pathConfigClientHelpSyn = ` Configure AWS IAM credentials that are used to query instance and role details from the AWS API. ` diff --git a/builtin/credential/aws/path_config_client_test.go b/builtin/credential/aws/path_config_client_test.go index 4c807d1b40f8..7d5bd7920251 100644 --- a/builtin/credential/aws/path_config_client_test.go +++ b/builtin/credential/aws/path_config_client_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -7,7 +7,10 @@ import ( "context" "testing" + "github.com/hashicorp/vault/sdk/helper/pluginidentityutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" ) func TestBackend_pathConfigClient(t *testing.T) { @@ -98,7 +101,6 @@ func TestBackend_pathConfigClient(t *testing.T) { Data: data, Storage: storage, }) - if err != nil { t.Fatal(err) } @@ -130,3 +132,47 @@ func TestBackend_pathConfigClient(t *testing.T) { data["sts_region"], resp.Data["sts_region"]) } } + +// TestBackend_PathConfigClient_PluginIdentityToken tests that configuration +// of plugin WIF returns an immediate error. 
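The create/update handler above enforces two rules for WIF: `identity_token_audience` is mutually exclusive with `access_key`, and it requires `role_arn`. The test that follows pins down the unsupported-platform error path; as a complement, here is a hedged sketch of writing a valid WIF client configuration with Vault's Go API client (the mount path `aws`, the audience, and the role ARN are all assumptions):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // reads VAULT_ADDR / VAULT_TOKEN
	if err != nil {
		log.Fatal(err)
	}

	// Per the validation above: identity_token_audience cannot be combined
	// with access_key, and requires role_arn. Mount path "aws" is assumed.
	_, err = client.Logical().Write("auth/aws/config/client", map[string]interface{}{
		"identity_token_audience": "vault.example/v1/identity/oidc/plugins", // hypothetical audience
		"identity_token_ttl":      "1h",
		"role_arn":                "arn:aws:iam::123456789012:role/vault-plugin-wif",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client config written")
}
```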
+func TestBackend_PathConfigClient_PluginIdentityToken(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = &testSystemView{} + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "identity_token_ttl": int64(10), + "identity_token_audience": "test-aud", + "role_arn": "test-role-arn", + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Path: "config/client", + Data: configData, + } + + resp, err := b.HandleRequest(context.Background(), configReq) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.ErrorContains(t, resp.Error(), pluginidentityutil.ErrPluginWorkloadIdentityUnsupported.Error()) +} + +type testSystemView struct { + logical.StaticSystemView +} + +func (d testSystemView) GenerateIdentityToken(_ context.Context, _ *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + return nil, pluginidentityutil.ErrPluginWorkloadIdentityUnsupported +} diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go index ded8d9ff373e..7974f29caebe 100644 --- a/builtin/credential/aws/path_config_identity.go +++ b/builtin/credential/aws/path_config_identity.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -32,6 +32,7 @@ var ( "inferred_aws_region", "inferred_entity_id", "inferred_entity_type", + "inferred_hostname", }, } @@ -57,11 +58,16 @@ var ( func (b *backend) pathConfigIdentity() *framework.Path { return &framework.Path{ Pattern: "config/identity$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "iam_alias": { Type: framework.TypeString, Default: identityAliasIAMUniqueID, - Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasRoleID), + Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, %q and %q. 
Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn, identityAliasRoleID), }, iamAuthMetadataFields.FieldName: authmetadata.FieldSchema(iamAuthMetadataFields), "ec2_alias": { @@ -75,9 +81,16 @@ func (b *backend) pathConfigIdentity() *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: pathConfigIdentityRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-integration-configuration", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: pathConfigIdentityUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-integration", + }, }, }, @@ -138,7 +151,7 @@ func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *f iamAliasRaw, ok := data.GetOk("iam_alias") if ok { iamAlias := iamAliasRaw.(string) - allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn} + allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasIAMCanonicalArn} if !strutil.StrListContains(allowedIAMAliasValues, iamAlias) { return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", iamAlias, allowedIAMAliasValues)), nil } @@ -182,11 +195,12 @@ type identityConfig struct { } const ( - identityAliasIAMUniqueID = "unique_id" - identityAliasIAMFullArn = "full_arn" - identityAliasEC2InstanceID = "instance_id" - identityAliasEC2ImageID = "image_id" - identityAliasRoleID = "role_id" + identityAliasIAMUniqueID = "unique_id" + identityAliasIAMFullArn = "full_arn" + identityAliasIAMCanonicalArn = "canonical_arn" + identityAliasEC2InstanceID = "instance_id" + identityAliasEC2ImageID = "image_id" + identityAliasRoleID = "role_id" ) const pathConfigIdentityHelpSyn = ` diff --git a/builtin/credential/aws/path_config_identity_test.go b/builtin/credential/aws/path_config_identity_test.go index 8a7db09f2bbd..085cf18b4f7b 100644 --- a/builtin/credential/aws/path_config_identity_test.go +++ b/builtin/credential/aws/path_config_identity_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth diff --git a/builtin/credential/aws/path_config_rotate_root.go b/builtin/credential/aws/path_config_rotate_root.go index 0a28b627b8d5..0a0e64fcb000 100644 --- a/builtin/credential/aws/path_config_rotate_root.go +++ b/builtin/credential/aws/path_config_rotate_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -24,6 +24,12 @@ func (b *backend) pathConfigRotateRoot() *framework.Path { return &framework.Path{ Pattern: "config/rotate-root", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "rotate", + OperationSuffix: "root-credentials", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigRotateRootUpdate, @@ -100,7 +106,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R // Get the current user's name since it's required to create an access key. // Empty input means get the current user. 
var getUserInput iam.GetUserInput - getUserRes, err := iamClient.GetUser(&getUserInput) + getUserRes, err := iamClient.GetUserWithContext(ctx, &getUserInput) if err != nil { return nil, fmt.Errorf("error calling GetUser: %w", err) } @@ -118,7 +124,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R createAccessKeyInput := iam.CreateAccessKeyInput{ UserName: getUserRes.User.UserName, } - createAccessKeyRes, err := iamClient.CreateAccessKey(&createAccessKeyInput) + createAccessKeyRes, err := iamClient.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) if err != nil { return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) } @@ -142,7 +148,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: createAccessKeyRes.AccessKey.AccessKeyId, UserName: getUserRes.User.UserName, } - if _, err := iamClient.DeleteAccessKey(&deleteAccessKeyInput); err != nil { + if _, err := iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { // Include this error in the errs returned by this method. errs = multierror.Append(errs, fmt.Errorf("error deleting newly created but unstored access key ID %s: %s", *createAccessKeyRes.AccessKey.AccessKeyId, err)) } @@ -179,7 +185,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: aws.String(oldAccessKey), UserName: getUserRes.User.UserName, } - if _, err = iamClient.DeleteAccessKey(&deleteAccessKeyInput); err != nil { + if _, err = iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { errs = multierror.Append(errs, fmt.Errorf("error deleting old access key ID %s: %w", oldAccessKey, err)) return nil, errs } diff --git a/builtin/credential/aws/path_config_rotate_root_test.go b/builtin/credential/aws/path_config_rotate_root_test.go index 21f7f0fbbe47..d457f9787faf 100644 --- a/builtin/credential/aws/path_config_rotate_root_test.go +++ b/builtin/credential/aws/path_config_rotate_root_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
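A pattern worth calling out in the rotation hunks above: every IAM call (`GetUser`, `CreateAccessKey`, `DeleteAccessKey`) now goes through its `*WithContext` variant, so cancellation and deadlines on Vault's request context propagate into the AWS round trips. A standalone sketch of the difference, with an illustrative session and timeout:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	sess := session.Must(session.NewSession())
	client := iam.New(sess)

	// The context-aware call aborts when the context is canceled or the
	// deadline passes, instead of running to completion in the background.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := client.GetUserWithContext(ctx, &iam.GetUserInput{})
	if err != nil {
		fmt.Println("GetUser failed (or timed out):", err)
		return
	}
	fmt.Println("current IAM user:", aws.StringValue(out.User.UserName))
}
```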
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -8,6 +8,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" @@ -15,9 +16,23 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +type mockIAMClient awsutil.MockIAM + +func (m *mockIAMClient) GetUserWithContext(_ aws.Context, input *iam.GetUserInput, _ ...request.Option) (*iam.GetUserOutput, error) { + return (*awsutil.MockIAM)(m).GetUser(input) +} + +func (m *mockIAMClient) CreateAccessKeyWithContext(_ aws.Context, input *iam.CreateAccessKeyInput, _ ...request.Option) (*iam.CreateAccessKeyOutput, error) { + return (*awsutil.MockIAM)(m).CreateAccessKey(input) +} + +func (m *mockIAMClient) DeleteAccessKeyWithContext(_ aws.Context, input *iam.DeleteAccessKeyInput, _ ...request.Option) (*iam.DeleteAccessKeyOutput, error) { + return (*awsutil.MockIAM)(m).DeleteAccessKey(input) +} + func TestPathConfigRotateRoot(t *testing.T) { getIAMClient = func(sess *session.Session) iamiface.IAMAPI { - return &awsutil.MockIAM{ + return &mockIAMClient{ CreateAccessKeyOutput: &iam.CreateAccessKeyOutput{ AccessKey: &iam.AccessKey{ AccessKeyId: aws.String("fizz2"), diff --git a/builtin/credential/aws/path_config_sts.go b/builtin/credential/aws/path_config_sts.go index 58e57a800dda..d2ff2d15a3f5 100644 --- a/builtin/credential/aws/path_config_sts.go +++ b/builtin/credential/aws/path_config_sts.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -13,13 +13,19 @@ import ( // awsStsEntry is used to store details of an STS role for assumption type awsStsEntry struct { - StsRole string `json:"sts_role"` + StsRole string `json:"sts_role"` + ExternalID string `json:"external_id,omitempty"` // optional, but recommended } func (b *backend) pathListSts() *framework.Path { return &framework.Path{ Pattern: "config/sts/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "sts-role-relationships", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathStsList, @@ -34,6 +40,12 @@ func (b *backend) pathListSts() *framework.Path { func (b *backend) pathConfigSts() *framework.Path { return &framework.Path{ Pattern: "config/sts/" + framework.GenericNameRegex("account_id"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "sts-role", + }, + Fields: map[string]*framework.FieldSchema{ "account_id": { Type: framework.TypeString, @@ -46,6 +58,11 @@ instances in this account.`, Description: `AWS ARN for STS role to be assumed when interacting with the account specified. 
The Vault server must have permissions to assume this role.`, }, + "external_id": { + Type: framework.TypeString, + Description: `AWS external ID to be used when assuming the STS role.`, + Required: false, + }, }, ExistenceCheck: b.pathConfigStsExistenceCheck, @@ -181,10 +198,15 @@ func (b *backend) pathConfigStsRead(ctx context.Context, req *logical.Request, d return nil, nil } + dt := map[string]interface{}{ + "sts_role": stsEntry.StsRole, + } + if stsEntry.ExternalID != "" { + dt["external_id"] = stsEntry.ExternalID + } + return &logical.Response{ - Data: map[string]interface{}{ - "sts_role": stsEntry.StsRole, - }, + Data: dt, }, nil } @@ -219,6 +241,13 @@ func (b *backend) pathConfigStsCreateUpdate(ctx context.Context, req *logical.Re return logical.ErrorResponse("sts role cannot be empty"), nil } + stsExternalID, ok := data.GetOk("external_id") + if ok { + stsEntry.ExternalID = stsExternalID.(string) + } + + b.Logger().Info("setting sts", "account_id", accountID, "sts_role", stsEntry.StsRole, "external_id", stsEntry.ExternalID) + // save the provided STS role if err := b.nonLockedSetAwsStsEntry(ctx, req.Storage, accountID, stsEntry); err != nil { return nil, err diff --git a/builtin/credential/aws/path_config_tidy_identity_accesslist.go b/builtin/credential/aws/path_config_tidy_identity_accesslist.go index 5882fa009619..b9d194b5197d 100644 --- a/builtin/credential/aws/path_config_tidy_identity_accesslist.go +++ b/builtin/credential/aws/path_config_tidy_identity_accesslist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -18,6 +18,11 @@ const ( func (b *backend) pathConfigTidyIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("%s$", "config/tidy/identity-accesslist"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, @@ -37,15 +42,29 @@ expiration, before it is removed from the backend storage.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-access-list-tidy-operation", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-access-list-tidy-operation", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-access-list-tidy-settings", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-access-list-tidy-settings", + }, }, }, diff --git a/builtin/credential/aws/path_config_tidy_roletag_denylist.go b/builtin/credential/aws/path_config_tidy_roletag_denylist.go index 4d32327608a7..7707ff7c1f85 100644 --- a/builtin/credential/aws/path_config_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_config_tidy_roletag_denylist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
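With the new `external_id` field, a `config/sts/<account_id>` entry can carry the external ID that AssumeRole must present; per AWS convention it has to match the `sts:ExternalId` condition in the target role's trust policy. A hedged sketch of registering such an entry through Vault's Go API client (mount path, account ID, and values are assumptions):

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Register a cross-account STS role for account 123456789012. The
	// external_id must match the sts:ExternalId condition in the role's
	// trust policy; all values here are illustrative.
	_, err = client.Logical().Write("auth/aws/config/sts/123456789012", map[string]interface{}{
		"sts_role":    "arn:aws:iam::123456789012:role/vault-aws-auth",
		"external_id": "example-external-id",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```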
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -17,6 +17,11 @@ const ( func (b *backend) pathConfigTidyRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "config/tidy/roletag-denylist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, @@ -38,15 +43,29 @@ Defaults to 4320h (180 days).`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "role-tag-deny-list-tidy-operation", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "role-tag-deny-list-tidy-operation", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "role-tag-deny-list-tidy-settings", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "role-tag-deny-list-tidy-settings", + }, }, }, diff --git a/builtin/credential/aws/path_identity_accesslist.go b/builtin/credential/aws/path_identity_accesslist.go index 00bfde6229fa..8c7462bd5f6b 100644 --- a/builtin/credential/aws/path_identity_accesslist.go +++ b/builtin/credential/aws/path_identity_accesslist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -16,6 +16,12 @@ const identityAccessListStorage = "whitelist/identity/" func (b *backend) pathIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "identity-accesslist/" + framework.GenericNameRegex("instance_id"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + }, + Fields: map[string]*framework.FieldSchema{ "instance_id": { Type: framework.TypeString, @@ -42,6 +48,11 @@ func (b *backend) pathListIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "identity-accesslist/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathAccessListIdentitiesList, diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index 320230534ae2..fdb37da72b22 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -12,7 +12,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "regexp" @@ -21,15 +21,18 @@ import ( "github.com/aws/aws-sdk-go/aws" awsClient "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/builtin/credential/aws/pkcs7" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/pkcs7" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -55,6 +58,10 @@ var ( func (b *backend) pathLogin() *framework.Path { return &framework.Path{ Pattern: "login$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "login", + }, Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -89,7 +96,7 @@ significance.`, Type: framework.TypeString, Description: `HTTP method to use for the AWS request when auth_type is iam. This must match what has been signed in the -presigned request. Currently, POST is the only supported value`, +presigned request.`, }, "iam_request_url": { @@ -106,8 +113,8 @@ This must match the request body included in the signature.`, "iam_request_headers": { Type: framework.TypeHeader, Description: `Key/value pairs of headers for use in the -sts:GetCallerIdentity HTTP requests headers when auth_type is iam. Can be either -a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. +sts:GetCallerIdentity HTTP request headers when auth_type is iam. Can be either +a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. 
This must at a minimum include the headers over which AWS has included a signature.`, }, "identity": { @@ -245,9 +252,8 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, return "", nil, nil, logical.ErrorResponse("missing iam_http_request_method"), nil } - // In the future, might consider supporting GET - if method != "POST" { - return "", nil, nil, logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil + if method != http.MethodGet && method != http.MethodPost { + return "", nil, nil, logical.ErrorResponse("invalid iam_http_request_method; currently only 'GET' and 'POST' are supported"), nil } rawUrlB64 := data.Get("iam_request_url").(string) @@ -262,16 +268,12 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, if err != nil { return "", nil, nil, logical.ErrorResponse("error parsing iam_request_url"), nil } - if parsedUrl.RawQuery != "" { - // Should be no query parameters - return "", nil, nil, logical.ErrorResponse(logical.ErrInvalidRequest.Error()), nil + if err = validateLoginIamRequestUrl(method, parsedUrl); err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil } - // TODO: There are two potentially valid cases we're not yet supporting that would - // necessitate this check being changed. First, if we support GET requests. - // Second if we support presigned POST requests bodyB64 := data.Get("iam_request_body").(string) - if bodyB64 == "" { - return "", nil, nil, logical.ErrorResponse("missing iam_request_body"), nil + if bodyB64 == "" && method != http.MethodGet { + return "", nil, nil, logical.ErrorResponse("missing iam_request_body which is required for POST requests"), nil } bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64) if err != nil { @@ -289,7 +291,7 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, config, err := b.lockedClientConfigEntry(ctx, req.Storage) if err != nil { - return "", nil, nil, logical.ErrorResponse("error getting configuration"), nil + return "", nil, nil, nil, fmt.Errorf("error getting configuration: %w", err) } endpoint := "https://sts.amazonaws.com" @@ -297,7 +299,7 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, maxRetries := awsClient.DefaultRetryerMaxNumRetries if config != nil { if config.IAMServerIdHeaderValue != "" { - err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue) + err = validateVaultHeaderValue(method, headers, parsedUrl, config.IAMServerIdHeaderValue) if err != nil { return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil } @@ -305,14 +307,37 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, if err = config.validateAllowedSTSHeaderValues(headers); err != nil { return "", nil, nil, logical.ErrorResponse(err.Error()), nil } + if method == http.MethodGet { + if err = config.validateAllowedSTSQueryValues(parsedUrl.Query()); err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil + } + } if config.STSEndpoint != "" { endpoint = config.STSEndpoint } if config.MaxRetries >= 0 { maxRetries = config.MaxRetries } + + // Extract and use a regional STS endpoint + // based on the region set in the Authorization header. 
+ if config.UseSTSRegionFromClient { + clientSpecifiedRegion, err := awsRegionFromHeader(headers.Get("Authorization")) + if err != nil { + return "", nil, nil, logical.ErrorResponse("region missing from Authorization header"), nil + } + + url, err := stsRegionalEndpoint(clientSpecifiedRegion) + if err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil + } + + b.Logger().Debug("use_sts_region_from_client set; using region specified from header", "region", clientSpecifiedRegion) + endpoint = url + } } + b.Logger().Debug("submitting caller identity request", "endpoint", endpoint) callerID, err := submitCallerIdentityRequest(ctx, maxRetries, method, endpoint, parsedUrl, body, headers) if err != nil { return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil @@ -340,7 +365,7 @@ func (b *backend) pathLoginResolveRoleIam(ctx context.Context, req *logical.Requ // instanceIamRoleARN fetches the IAM role ARN associated with the given // instance profile name -func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName string) (string, error) { +func (b *backend) instanceIamRoleARN(ctx context.Context, iamClient *iam.IAM, instanceProfileName string) (string, error) { if iamClient == nil { return "", fmt.Errorf("nil iamClient") } @@ -348,7 +373,7 @@ func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName str return "", fmt.Errorf("missing instance profile name") } - profile, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{ + profile, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{ InstanceProfileName: aws.String(instanceProfileName), }) if err != nil { @@ -382,7 +407,7 @@ func (b *backend) validateInstance(ctx context.Context, s logical.Storage, insta return nil, err } - status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{ + status, err := ec2Client.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{ InstanceIds: []*string{ aws.String(instanceID), }, @@ -724,7 +749,7 @@ func (b *backend) verifyInstanceMeetsRoleRequirements(ctx context.Context, } else if iamClient == nil { return nil, fmt.Errorf("received a nil iamClient") } - iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileEntity.FriendlyName) + iamRoleARN, err := b.instanceIamRoleARN(ctx, iamClient, iamInstanceProfileEntity.FriendlyName) if err != nil { return nil, fmt.Errorf("IAM role ARN could not be fetched: %w", err) } @@ -1287,7 +1312,7 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, _ // If the login was made using the role tag, then max_ttl from tag // is cached in internal data during login and used here to cap the // max_ttl of renewal. 
- rTagMaxTTL, err := time.ParseDuration(req.Auth.Metadata["role_tag_max_ttl"]) + rTagMaxTTL, err := parseutil.ParseDurationSecond(req.Auth.Metadata["role_tag_max_ttl"]) if err != nil { return nil, err } @@ -1371,6 +1396,8 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, identityAlias = callerUniqueId case identityAliasIAMFullArn: identityAlias = callerID.Arn + case identityAliasIAMCanonicalArn: + identityAlias = entity.canonicalArn() } // If we're just looking up for MFA, return the Alias info @@ -1429,6 +1456,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, inferredEntityType := "" inferredEntityID := "" + inferredHostname := "" if roleEntry.InferredEntityType == ec2EntityType { instance, err := b.validateInstance(ctx, req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account) if err != nil { @@ -1455,6 +1483,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, inferredEntityType = ec2EntityType inferredEntityID = entity.SessionInfo + inferredHostname = *instance.PrivateDnsName } auth := &logical.Auth{ @@ -1469,6 +1498,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, "inferred_entity_id": inferredEntityID, "inferred_aws_region": roleEntry.InferredAWSRegion, "account_id": entity.AccountNumber, + "inferred_hostname": inferredHostname, }, DisplayName: entity.FriendlyName, Alias: &logical.Alias{ @@ -1490,6 +1520,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, "inferred_entity_id": inferredEntityID, "inferred_aws_region": roleEntry.InferredAWSRegion, "account_id": entity.AccountNumber, + "inferred_hostname": inferredHostname, }); err != nil { b.Logger().Warn(fmt.Sprintf("unable to set alias metadata due to %s", err)) } @@ -1508,6 +1539,31 @@ func hasWildcardBind(boundIamPrincipalARNs []string) bool { return false } +// Validate that the iam_request_url passed is valid for the STS request +func validateLoginIamRequestUrl(method string, parsedUrl *url.URL) error { + switch method { + case http.MethodGet: + actions := map[string][]string(parsedUrl.Query())["Action"] + if len(actions) == 0 { + return fmt.Errorf("no action found in request") + } + if len(actions) != 1 { + return fmt.Errorf("found multiple actions") + } + if actions[0] != "GetCallerIdentity" { + return fmt.Errorf("unexpected action parameter, %s", actions[0]) + } + return nil + case http.MethodPost: + if parsedUrl.RawQuery != "" { + return logical.ErrInvalidRequest + } + return nil + default: + return fmt.Errorf("unsupported method, %s", method) + } +} + // Validate that the iam_request_body passed is valid for the STS request func validateLoginIamRequestBody(body string) error { qs, err := url.ParseQuery(body) @@ -1544,11 +1600,11 @@ func hasValuesForEc2Auth(data *framework.FieldData) (bool, bool) { } func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) { - _, hasRequestMethod := data.GetOk("iam_http_request_method") + method, hasRequestMethod := data.GetOk("iam_http_request_method") _, hasRequestURL := data.GetOk("iam_request_url") _, hasRequestBody := data.GetOk("iam_request_body") _, hasRequestHeaders := data.GetOk("iam_request_headers") - return (hasRequestMethod && hasRequestURL && hasRequestBody && hasRequestHeaders), + return (hasRequestMethod && hasRequestURL && (method == http.MethodGet || hasRequestBody) && hasRequestHeaders), (hasRequestMethod || hasRequestURL || hasRequestBody || hasRequestHeaders) } @@ -1602,7 
+1658,7 @@ func parseIamArn(iamArn string) (*iamEntity, error) { return &entity, nil } -func validateVaultHeaderValue(headers http.Header, _ *url.URL, requiredHeaderValue string) error { +func validateVaultHeaderValue(method string, headers http.Header, parsedUrl *url.URL, requiredHeaderValue string) error { providedValue := "" for k, v := range headers { if strings.EqualFold(iamServerIdHeader, k) { @@ -1618,25 +1674,29 @@ func validateVaultHeaderValue(headers http.Header, _ *url.URL, requiredHeaderVal if providedValue != requiredHeaderValue { return fmt.Errorf("expected %q but got %q", requiredHeaderValue, providedValue) } - - if authzHeaders, ok := headers["Authorization"]; ok { - // authzHeader looks like AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-awsiam-id, Signature=... - // We need to extract out the SignedHeaders - re := regexp.MustCompile(".*SignedHeaders=([^,]+)") - authzHeader := strings.Join(authzHeaders, ",") - matches := re.FindSubmatch([]byte(authzHeader)) - if len(matches) < 1 { - return fmt.Errorf("vault header wasn't signed") - } - if len(matches) > 2 { - return fmt.Errorf("found multiple SignedHeaders components") + switch method { + case http.MethodPost: + if authzHeaders, ok := headers["Authorization"]; ok { + // authzHeader looks like AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-awsiam-id, Signature=... + // We need to extract out the SignedHeaders + re := regexp.MustCompile(".*SignedHeaders=([^,]+)") + authzHeader := strings.Join(authzHeaders, ",") + matches := re.FindSubmatch([]byte(authzHeader)) + if len(matches) < 1 { + return fmt.Errorf("vault header wasn't signed") + } + if len(matches) > 2 { + return fmt.Errorf("found multiple SignedHeaders components") + } + signedHeaders := string(matches[1]) + return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader) } - signedHeaders := string(matches[1]) - return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader) + return fmt.Errorf("missing Authorization header") + case http.MethodGet: + return ensureHeaderIsSigned(parsedUrl.Query().Get(amzSignedHeaders), iamServerIdHeader) + default: + return fmt.Errorf("unsupported method, %s", method) } - // TODO: If we support GET requests, then we need to parse the X-Amz-SignedHeaders - // argument out of the query string and search in there for the header value - return fmt.Errorf("missing Authorization header") } func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) *http.Request { @@ -1743,7 +1803,7 @@ func submitCallerIdentityRequest(ctx context.Context, maxRetries int, method, en } // we check for status code afterwards to also print out response body - responseBody, err := ioutil.ReadAll(response.Body) + responseBody, err := io.ReadAll(response.Body) if err != nil { return nil, err } @@ -1835,7 +1895,7 @@ func (b *backend) fullArn(ctx context.Context, e *iamEntity, s logical.Storage) input := iam.GetUserInput{ UserName: aws.String(e.FriendlyName), } - resp, err := client.GetUser(&input) + resp, err := client.GetUserWithContext(ctx, &input) if err != nil { return "", fmt.Errorf("error fetching user %q: %w", e.FriendlyName, err) } @@ -1849,7 +1909,7 @@ func (b *backend) fullArn(ctx context.Context, e *iamEntity, s logical.Storage) input := iam.GetRoleInput{ RoleName: aws.String(e.FriendlyName), } - resp, err := client.GetRole(&input) + resp, err := client.GetRoleWithContext(ctx, &input) if err != nil { return "", fmt.Errorf("error fetching role %q: %w", 
e.FriendlyName, err) } @@ -1879,6 +1939,43 @@ func getMetadataValue(fromAuth *logical.Auth, forKey string) (string, error) { return "", fmt.Errorf("%q not found in auth metadata", forKey) } +func awsRegionFromHeader(authorizationHeader string) (string, error) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html + // The Authorization header takes the following form. + // Authorization: AWS4-HMAC-SHA256 + // Credential=AKIAIOSFODNN7EXAMPLE/20230719/us-east-1/sts/aws4_request, + // SignedHeaders=content-length;content-type;host;x-amz-date, + // Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024 + // + // The credential is in the form of "<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request" + fields := strings.Split(authorizationHeader, " ") + for _, field := range fields { + if strings.HasPrefix(field, "Credential=") { + fields := strings.Split(field, "/") + if len(fields) < 3 { + return "", fmt.Errorf("invalid header format") + } + + region := fields[2] + return region, nil + } + } + + return "", fmt.Errorf("invalid header format") +} + +func stsRegionalEndpoint(region string) (string, error) { + stsService := sts.EndpointsID + resolver := endpoints.DefaultResolver() + resolvedEndpoint, err := resolver.EndpointFor(stsService, region, + endpoints.STSRegionalEndpointOption, + endpoints.StrictMatchingOption) + if err != nil { + return "", fmt.Errorf("unable to get regional STS endpoint for region: %v", region) + } + return resolvedEndpoint.URL, nil +} + const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID" const pathLoginSyn = ` diff --git a/builtin/credential/aws/path_login_test.go b/builtin/credential/aws/path_login_test.go index 2c0262075ad3..b5b567960767 100644 --- a/builtin/credential/aws/path_login_test.go +++ b/builtin/credential/aws/path_login_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
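`awsRegionFromHeader` and `stsRegionalEndpoint` above close the loop for GET-based logins: the region is parsed out of the SigV4 credential scope and resolved to a regional STS endpoint. On the client side, a presigned `sts:GetCallerIdentity` URL is the natural counterpart; below is a rough sketch with the v1 SDK, in the style of aws-iam-authenticator. The role name, server-ID header value, and expiry are assumptions, and this is not necessarily the exact payload the Vault CLI builds:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Build a GetCallerIdentity request and set the Vault server ID header
	// before presigning so it lands in X-Amz-SignedHeaders, which the GET
	// validation path above checks.
	req, _ := svc.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
	req.HTTPRequest.Header.Set("X-Vault-AWS-IAM-Server-ID", "vault.example.com") // if configured

	signedURL, err := req.Presign(15 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical login payload: GET requests may omit iam_request_body,
	// but iam_request_headers must still carry the signed headers.
	loginData := map[string]interface{}{
		"role":                    "dev-role-iam",
		"iam_http_request_method": "GET",
		"iam_request_url":         base64.StdEncoding.EncodeToString([]byte(signedURL)),
		"iam_request_headers":     `{"X-Vault-AWS-IAM-Server-ID": "vault.example.com"}`,
	}
	fmt.Println(loginData)
}
```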
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" ) func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) { @@ -124,9 +125,129 @@ func TestBackend_pathLogin_parseIamArn(t *testing.T) { } } -func TestBackend_validateVaultHeaderValue(t *testing.T) { +func TestBackend_validateVaultGetRequestValues(t *testing.T) { const canaryHeaderValue = "Vault-Server" - requestURL, err := url.Parse("https://sts.amazonaws.com/") + + getHeadersMissing := http.Header{ + "Host": []string{"Foo"}, + } + getHeadersInvalid := http.Header{ + "Host": []string{"Foo"}, + iamServerIdHeader: []string{"InvalidValue"}, + } + getHeadersValid := http.Header{ + "Host": []string{"Foo"}, + iamServerIdHeader: []string{canaryHeaderValue}, + } + getQueryValid := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host;x-vault-aws-iam-server-id"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Action": {"GetCallerIdentity"}, + "Version": {"2011-06-15"}, + }) + getQueryUnsigned := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Action": {"GetCallerIdentity"}, + "Version": {"2011-06-15"}, + }) + getQueryNoAction := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host;x-vault-aws-iam-server-id"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Version": {"2011-06-15"}, + }) + getQueryInvalidAction := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host;x-vault-aws-iam-server-id"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Action": {"GetSessionToken"}, + "Version": {"2011-06-15"}, + }) + getQueryMultipleActions := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host;x-vault-aws-iam-server-id"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Action": {"GetCallerIdentity;GetSessionToken"}, + "Version": {"2011-06-15"}, + }) + validGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" 
+ getQueryValid.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + unsignedGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryUnsigned.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + noActionGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryNoAction.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + invalidActionGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryInvalidAction.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + multipleActionsGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryMultipleActions.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersMissing, validGetRequestURL, canaryHeaderValue) + if err == nil { + t.Error("validated GET request with missing Vault header") + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersInvalid, validGetRequestURL, canaryHeaderValue) + if err == nil { + t.Error("validated GET request with invalid Vault header value") + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersValid, unsignedGetRequestURL, canaryHeaderValue) + if err == nil { + t.Error("validated GET request with unsigned Vault header") + } + + err = validateLoginIamRequestUrl(http.MethodGet, noActionGetRequestURL) + if err == nil { + t.Error("validated GET request with no Action parameter") + } + + err = validateLoginIamRequestUrl(http.MethodGet, multipleActionsGetRequestURL) + if err == nil { + t.Error("validated GET request with multiple Action parameters") + } + + err = validateLoginIamRequestUrl(http.MethodGet, invalidActionGetRequestURL) + if err == nil { + t.Error("validated GET request with an invalid Action parameter") + } + + err = validateLoginIamRequestUrl(http.MethodGet, validGetRequestURL) + if err != nil { + t.Errorf("did NOT validate valid GET request: %v", err) + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersValid, validGetRequestURL, canaryHeaderValue) + if err != nil { + t.Errorf("did NOT validate valid GET request: %v", err) + } +} + +func TestBackend_validateVaultPostRequestValues(t *testing.T) { + const canaryHeaderValue = "Vault-Server" + postRequestURL, err := url.Parse("https://sts.amazonaws.com/") if err != nil { t.Fatalf("error parsing test URL: %v", err) } @@ -149,39 +270,93 @@ func TestBackend_validateVaultHeaderValue(t *testing.T) { iamServerIdHeader: []string{canaryHeaderValue}, "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, } - postHeadersSplit := http.Header{ "Host": []string{"Foo"}, iamServerIdHeader: []string{canaryHeaderValue}, "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, } - err = validateVaultHeaderValue(postHeadersMissing, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersMissing, postRequestURL, canaryHeaderValue) if err == nil { t.Error("validated POST request with missing Vault header") } - err = validateVaultHeaderValue(postHeadersInvalid, requestURL, 
canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersInvalid, postRequestURL, canaryHeaderValue) if err == nil { t.Error("validated POST request with invalid Vault header value") } - err = validateVaultHeaderValue(postHeadersUnsigned, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersUnsigned, postRequestURL, canaryHeaderValue) if err == nil { t.Error("validated POST request with unsigned Vault header") } - err = validateVaultHeaderValue(postHeadersValid, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersValid, postRequestURL, canaryHeaderValue) if err != nil { t.Errorf("did NOT validate valid POST request: %v", err) } - err = validateVaultHeaderValue(postHeadersSplit, requestURL, canaryHeaderValue) + err = validateLoginIamRequestUrl(http.MethodPost, postRequestURL) + if err != nil { + t.Errorf("did NOT validate valid POST request: %v", err) + } + + err = validateVaultHeaderValue(http.MethodPost, postHeadersSplit, postRequestURL, canaryHeaderValue) if err != nil { t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err) } } +// TestBackend_pathLogin_NoClientConfig covers logging in via IAM auth when the +// client config does not exist. This is a regression test to cover potential +// panics when referencing the potentially-nil config in the login handler. For +// details see https://github.com/hashicorp/vault/issues/23361. +func TestBackend_pathLogin_NoClientConfig(t *testing.T) { + storage := new(logical.InmemStorage) + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Intentionally left out the client configuration + + roleEntry := &awsRoleEntry{ + RoleID: "foo", + Version: currentRoleStorageVersion, + AuthType: iamAuthType, + } + err = b.setRole(context.Background(), storage, testValidRoleName, roleEntry) + if err != nil { + t.Fatal(err) + } + + loginData, err := defaultLoginData() + if err != nil { + t.Fatal(err) + } + loginRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{}, + } + resp, err := b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatalf("expected nil error, got: %v", err) + } + if !resp.IsError() { + t.Fatalf("expected error response, got: %+v", resp) + } +} + // TestBackend_pathLogin_IAMHeaders tests login with iam_request_headers, // supporting both base64 encoded string and JSON headers func TestBackend_pathLogin_IAMHeaders(t *testing.T) { @@ -233,6 +408,7 @@ func TestBackend_pathLogin_IAMHeaders(t *testing.T) { "inferred_aws_region", "inferred_entity_id", "inferred_entity_type", + "inferred_hostname", }, "ec2_alias": "role_id", "ec2_metadata": []string{ @@ -439,6 +615,7 @@ func TestBackend_pathLogin_IAMRoleResolution(t *testing.T) { "inferred_aws_region", "inferred_entity_id", "inferred_entity_type", + "inferred_hostname", }, "ec2_alias": "role_id", "ec2_metadata": []string{ @@ -625,6 +802,58 @@ func TestBackend_defaultAliasMetadata(t *testing.T) { } } +func TestRegionFromHeader(t *testing.T) { + tcs := map[string]struct { + header string + expectedRegion string + expectedSTSEndpoint string + }{ + "us-east-1": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/us-east-1/sts/aws4_request, 
SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "us-east-1", + expectedSTSEndpoint: "https://sts.us-east-1.amazonaws.com", + }, + "us-west-2": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/us-west-2/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "us-west-2", + expectedSTSEndpoint: "https://sts.us-west-2.amazonaws.com", + }, + "ap-northeast-3": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/ap-northeast-3/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "ap-northeast-3", + expectedSTSEndpoint: "https://sts.ap-northeast-3.amazonaws.com", + }, + "us-gov-east-1": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/us-gov-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "us-gov-east-1", + expectedSTSEndpoint: "https://sts.us-gov-east-1.amazonaws.com", + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + region, err := awsRegionFromHeader(tc.header) + assert.NoError(t, err) + assert.Equal(t, tc.expectedRegion, region) + + stsEndpoint, err := stsRegionalEndpoint(region) + assert.NoError(t, err) + assert.Equal(t, tc.expectedSTSEndpoint, stsEndpoint) + }) + } + + t.Run("invalid-header", func(t *testing.T) { + region, err := awsRegionFromHeader("this-is-an-invalid-header/foobar") + assert.EqualError(t, err, "invalid header format") + assert.Empty(t, region) + }) + + t.Run("invalid-region", func(t *testing.T) { + endpoint, err := stsRegionalEndpoint("fake-region-1") + assert.EqualError(t, err, "unable to get regional STS endpoint for region: fake-region-1") + assert.Empty(t, endpoint) + }) +} + func defaultLoginData() (map[string]interface{}, error) { awsSession, err := session.NewSession() if err != nil { diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go index 90243619fc62..ae725e571832 100644 --- a/builtin/credential/aws/path_role.go +++ b/builtin/credential/aws/path_role.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -23,6 +23,12 @@ var currentRoleStorageVersion = 3 func (b *backend) pathRole() *framework.Path { p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "auth-role", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -81,6 +87,9 @@ auth_type is ec2 or inferred_entity_type is ec2_instance.`, given instance IDs. Can be a list or comma-separated string of EC2 instance IDs. This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "If set, defines a constraint on the EC2 instances to have one of the given instance IDs. A list of EC2 instance IDs. 
This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.", + }, }, "resolve_aws_unique_ids": { Type: framework.TypeBool, @@ -202,6 +211,11 @@ func (b *backend) pathListRole() *framework.Path { return &framework.Path{ Pattern: "role/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "auth-roles", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, @@ -217,6 +231,11 @@ func (b *backend) pathListRoles() *framework.Path { return &framework.Path{ Pattern: "roles/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "auth-roles2", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, @@ -376,7 +395,7 @@ func (b *backend) initialize(ctx context.Context, req *logical.InitializationReq return nil } -// awsVersion stores info about the the latest aws version that we have +// awsVersion stores info about the latest aws version that we have // upgraded to. type awsVersion struct { Version int `json:"version"` diff --git a/builtin/credential/aws/path_role_tag.go b/builtin/credential/aws/path_role_tag.go index e365f0307d89..3584f08f26a4 100644 --- a/builtin/credential/aws/path_role_tag.go +++ b/builtin/credential/aws/path_role_tag.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/framework" @@ -26,6 +27,12 @@ const roleTagVersion = "v1" func (b *backend) pathRoleTag() *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role-tag", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -341,7 +348,7 @@ func (b *backend) parseAndVerifyRoleTagValue(ctx context.Context, s logical.Stor return nil, err } case strings.HasPrefix(tagItem, "t="): - rTag.MaxTTL, err = time.ParseDuration(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t="))) + rTag.MaxTTL, err = parseutil.ParseDurationSecond(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t="))) if err != nil { return nil, err } diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go index 3a63d4cd3555..3d3fbc3c6f71 100644 --- a/builtin/credential/aws/path_role_test.go +++ b/builtin/credential/aws/path_role_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -304,7 +304,6 @@ func TestBackend_pathIam(t *testing.T) { Data: data, Storage: storage, }) - if err != nil { t.Fatal(err) } diff --git a/builtin/credential/aws/path_roletag_denylist.go b/builtin/credential/aws/path_roletag_denylist.go index 8a90a383ef16..131ea717058e 100644 --- a/builtin/credential/aws/path_roletag_denylist.go +++ b/builtin/credential/aws/path_roletag_denylist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -15,6 +15,12 @@ import ( func (b *backend) pathRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "roletag-denylist/(?P.*)", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role-tag-deny-list", + }, + Fields: map[string]*framework.FieldSchema{ "role_tag": { Type: framework.TypeString, @@ -45,6 +51,11 @@ func (b *backend) pathListRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "roletag-denylist/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role-tag-deny-lists", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoletagDenyListsList, diff --git a/builtin/credential/aws/path_tidy_identity_accesslist.go b/builtin/credential/aws/path_tidy_identity_accesslist.go index b1e649ce9c9d..acfff00b1a90 100644 --- a/builtin/credential/aws/path_tidy_identity_accesslist.go +++ b/builtin/credential/aws/path_tidy_identity_accesslist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -18,6 +18,13 @@ import ( func (b *backend) pathTidyIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "tidy/identity-accesslist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + OperationVerb: "tidy", + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, diff --git a/builtin/credential/aws/path_tidy_roletag_denylist.go b/builtin/credential/aws/path_tidy_roletag_denylist.go index 8bd788dcf8b4..665cb0319f1e 100644 --- a/builtin/credential/aws/path_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_tidy_roletag_denylist.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package awsauth @@ -22,6 +22,13 @@ const ( func (b *backend) pathTidyRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "tidy/roletag-denylist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role-tag-deny-list", + OperationVerb: "tidy", + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, diff --git a/builtin/credential/aws/pkcs7/verify_dsa_test.go b/builtin/credential/aws/pkcs7/verify_dsa_test.go deleted file mode 100644 index 857ea4dbf181..000000000000 --- a/builtin/credential/aws/pkcs7/verify_dsa_test.go +++ /dev/null @@ -1,181 +0,0 @@ -//go:build go1.11 || go1.12 || go1.13 || go1.14 || go1.15 - -package pkcs7 - -import ( - "crypto/x509" - "encoding/pem" - "io/ioutil" - "os" - "os/exec" - "testing" -) - -func TestVerifyEC2(t *testing.T) { - fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - p7.Certificates = []*x509.Certificate{fixture.Certificate} - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", err) - } -} - -var EC2IdentityDocumentFixture = ` ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA -JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh -eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 -cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh -bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs -bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg -OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK -ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj -aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy -YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA -AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n -dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi -IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B -CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG -CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w -LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA -AAAAAA== ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw -FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD -VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z -ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u -IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl -cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e -ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 -VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P -hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j -k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U -hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF -lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf -MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW -MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw -vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw -7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K ------END CERTIFICATE-----` - -func 
TestDSASignWithOpenSSLAndVerify(t *testing.T) { - content := []byte(` -A ship in port is safe, -but that's not what ships are built for. --- Grace Hopper`) - // write the content to a temp file - tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpContentFile.Name(), content, 0o755) - - // write the signer cert to a temp file - tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755) - - // write the signer key to a temp file - tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0o755) - - tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") - if err != nil { - t.Fatal(err) - } - // call openssl to sign the content - opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", - "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), - "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), - "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") - out, err := opensslCMD.CombinedOutput() - if err != nil { - t.Fatalf("openssl command failed with %s: %s", err, out) - } - - // verify the signed content - pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) - if err != nil { - t.Fatal(err) - } - t.Logf("%s\n", pemSignature) - derBlock, _ := pem.Decode(pemSignature) - if derBlock == nil { - t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) - } - p7, err := Parse(derBlock.Bytes) - if err != nil { - t.Fatalf("Parse encountered unexpected error: %v", err) - } - if err := p7.Verify(); err != nil { - t.Fatalf("Verify failed with error: %v", err) - } - os.Remove(tmpSignerCertFile.Name()) // clean up - os.Remove(tmpSignerKeyFile.Name()) // clean up - os.Remove(tmpContentFile.Name()) // clean up -} - -var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- -MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS -PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl -pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith -1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L -vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 -zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo -g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= ------END PRIVATE KEY-----`) - -var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- -MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV -bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD -VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du -MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r -bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE -ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC -AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD -Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE -exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii -Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 -V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI -puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl -nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp 
-rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt -1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT -ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G -CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 -qzy/7yePTlhlpj+ahMM= ------END CERTIFICATE-----`) - -type DSATestFixture struct { - Input []byte - Certificate *x509.Certificate -} - -func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { - var result DSATestFixture - var derBlock *pem.Block - pemBlock := []byte(testPEMBlock) - for { - derBlock, pemBlock = pem.Decode(pemBlock) - if derBlock == nil { - break - } - switch derBlock.Type { - case "PKCS7": - result.Input = derBlock.Bytes - case "CERTIFICATE": - result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) - } - } - - return result -} diff --git a/builtin/credential/cert/backend.go b/builtin/credential/cert/backend.go index 6c6c55ca6213..7c9b2ef55edb 100644 --- a/builtin/credential/cert/backend.go +++ b/builtin/credential/cert/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert @@ -16,11 +16,22 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" + lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/logical" ) +const ( + operationPrefixCert = "cert" + trustedCertPath = "cert/" + + defaultRoleCacheSize = 200 + defaultOcspMaxRetries = 4 + maxRoleCacheSize = 100000 +) + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { @@ -30,7 +41,12 @@ func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, } func Backend() *backend { - var b backend + // ignoring the error as it only can occur with <= 0 size + cache, _ := lru.New[string, *trusted](defaultRoleCacheSize) + b := backend{ + trustedCache: cache, + trustedCacheLocks: locksutil.CreateLocks(), + } b.Backend = &framework.Backend{ Help: backendHelp, PathsSpecial: &logical.Paths{ @@ -57,6 +73,13 @@ func Backend() *backend { return &b } +type trusted struct { + pool *x509.CertPool + trusted []*ParsedCert + trustedNonCAs []*ParsedCert + ocspConf *ocsp.VerifyConfig +} + type backend struct { *framework.Backend MapCertId *framework.PathMap @@ -66,6 +89,11 @@ type backend struct { ocspClientMutex sync.RWMutex ocspClient *ocsp.Client configUpdated atomic.Bool + + trustedCache *lru.Cache[string, *trusted] + trustedCacheDisabled atomic.Bool + trustedCacheLocks []*locksutil.LockEntry + trustedCacheFull atomic.Pointer[trusted] } func (b *backend) initialize(ctx context.Context, req *logical.InitializationRequest) error { @@ -96,6 +124,7 @@ func (b *backend) invalidate(_ context.Context, key string) { case key == "config": b.configUpdated.Store(true) } + b.flushTrustedCache() } func (b *backend) initOCSPClient(cacheSize int) { @@ -107,9 +136,21 @@ func (b *backend) initOCSPClient(cacheSize int) { func (b *backend) updatedConfig(config *config) { b.ocspClientMutex.Lock() defer b.ocspClientMutex.Unlock() + + switch { + case config.RoleCacheSize < 0: + // Just to clean up memory + b.trustedCacheDisabled.Store(true) + b.flushTrustedCache() + case config.RoleCacheSize == 0: + config.RoleCacheSize = defaultRoleCacheSize + fallthrough + default: + 
b.trustedCache.Resize(config.RoleCacheSize) + b.trustedCacheDisabled.Store(false) + } b.initOCSPClient(config.OcspCacheSize) b.configUpdated.Store(false) - return } func (b *backend) fetchCRL(ctx context.Context, storage logical.Storage, name string, crl *CRLInfo) error { @@ -159,6 +200,13 @@ func (b *backend) storeConfig(ctx context.Context, storage logical.Storage, conf return nil } +func (b *backend) flushTrustedCache() { + if b.trustedCache != nil { // defensive + b.trustedCache.Purge() + } + b.trustedCacheFull.Store(nil) +} + const backendHelp = ` The "cert" credential provider allows authentication using TLS client certificates. A client connects to Vault and uses diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index 6f2b79a9aaa0..e4affa3b5296 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert import ( "context" + "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -20,6 +21,7 @@ import ( mathrand "math/rand" "net" "net/http" + "net/http/httptest" "net/url" "os" "path/filepath" @@ -28,24 +30,22 @@ import ( "time" "github.com/go-test/deep" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-sockaddr" - - "golang.org/x/net/http2" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - - rootcerts "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/vault/builtin/logical/pki" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" + "golang.org/x/net/http2" ) const ( @@ -253,9 +253,6 @@ func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { // Enable PKI secret engine and Cert auth method coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cert": Factory, }, @@ -479,9 +476,6 @@ func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { func TestBackend_MetadataBasedACLPolicy(t *testing.T) { // Start cluster with cert auth method enabled coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cert": Factory, }, @@ -849,7 +843,7 @@ func TestBackend_NonCAExpiry(t *testing.T) { time.Sleep(5 * time.Second) // Login attempt after certificate expiry should fail - resp, err = b.HandleRequest(context.Background(), loginReq) + _, err = b.HandleRequest(context.Background(), loginReq) if err == nil { t.Fatalf("expected error due to expired certificate") } @@ -1306,6 +1300,12 @@ func TestBackend_ext_singleCert(t *testing.T) { testAccStepLoginInvalid(t, connState), testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false), testAccStepLoginInvalid(t, connState), + 
testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "hex:2.5.29.17:*87047F000002*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "hex:2.5.29.17:*87047F000001*"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.5.29.17:"}, false), + testAccStepLogin(t, connState), testAccStepReadConfig(t, config{EnableIdentityAliasMetadata: false}, connState), testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "2.1.1.1,1.2.3.45"}, false), testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}, false), @@ -1968,6 +1968,27 @@ func testAccStepCertWithExtraParams(t *testing.T, name string, cert []byte, poli } } +func testAccStepReadCertPolicy(t *testing.T, name string, expectError bool, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "certs/" + name, + ErrorOk: expectError, + Data: nil, + Check: func(resp *logical.Response) error { + if (resp == nil || len(resp.Data) == 0) && expectError { + return fmt.Errorf("expected error but received nil") + } + for key, expectedValue := range expected { + actualValue := resp.Data[key] + if expectedValue != actualValue { + return fmt.Errorf("Expected to get [%v]=[%v] but read [%v]=[%v] from server for certs/%v: %v", key, expectedValue, key, actualValue, name, resp) + } + } + return nil + }, + } +} + func testAccStepCertLease( t *testing.T, name string, cert []byte, policies string, ) logicaltest.TestStep { @@ -2040,6 +2061,11 @@ func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, if err != nil { return tls.ConnectionState{}, err } + + return testConnStateWithCert(cert, rootCAs) +} + +func testConnStateWithCert(cert tls.Certificate, rootCAs *x509.CertPool) (tls.ConnectionState, error) { listenConf := &tls.Config{ Certificates: []tls.Certificate{cert}, ClientAuth: tls.RequestClientCert, @@ -2152,12 +2178,13 @@ func Test_Renew(t *testing.T) { Raw: map[string]interface{}{ "name": "test", "certificate": ca, - "policies": "foo,bar", + // Uppercase B should not cause an issue during renewal + "token_policies": "foo,Bar", }, Schema: pathCerts(b).Fields, } - resp, err := b.pathCertWrite(context.Background(), req, fd) + _, err = b.pathCertWrite(context.Background(), req, fd) if err != nil { t.Fatal(err) } @@ -2166,7 +2193,7 @@ func Test_Renew(t *testing.T) { Raw: map[string]interface{}{}, Schema: pathLogin(b).Fields, } - resp, err = b.pathLogin(context.Background(), req, empty_login_fd) + resp, err := b.pathLogin(context.Background(), req, empty_login_fd) if err != nil { t.Fatal(err) } @@ -2193,20 +2220,20 @@ func Test_Renew(t *testing.T) { } // Change the policies -- this should fail - fd.Raw["policies"] = "zip,zap" - resp, err = b.pathCertWrite(context.Background(), req, fd) + fd.Raw["token_policies"] = "zip,zap" + _, err = b.pathCertWrite(context.Background(), req, fd) if err != nil { t.Fatal(err) } - resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) + _, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) if err == nil { t.Fatal("expected error") } // Put the policies back, this should be okay - fd.Raw["policies"] = "bar,foo" - resp, err = b.pathCertWrite(context.Background(), req, fd) + fd.Raw["token_policies"] = "bar,foo" + _, err = b.pathCertWrite(context.Background(), req, fd) if err != nil { t.Fatal(err) } 
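A note on the `hex:` extension patterns in the `testAccStepCert` steps earlier in this file's diff: `2.5.29.17` is the subjectAltName OID, and the pattern is matched against the hex-encoded DER bytes of that extension. In DER, an `iPAddress` GeneralName is the context-specific tag [7] (byte `0x87`) followed by a length byte and the raw address bytes, so `127.0.0.1` encodes as `87047F000001` and `127.0.0.2` as `87047F000002`, which is why the `*87047F000002*` step fails to log in while the `*87047F000001*` step succeeds. A minimal sketch of deriving such a pattern (`sanIPPattern` is a hypothetical helper, not part of this change):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strings"
)

// sanIPPattern builds the glob pattern the "hex:2.5.29.17:*...*" steps use
// for an IPv4 subjectAltName entry. In DER, an iPAddress GeneralName is the
// context tag [7] (0x87), a length byte, then the raw address bytes.
func sanIPPattern(ip net.IP) string {
	v4 := ip.To4()
	der := append([]byte{0x87, byte(len(v4))}, v4...)
	return "*" + strings.ToUpper(hex.EncodeToString(der)) + "*"
}

func main() {
	fmt.Println(sanIPPattern(net.ParseIP("127.0.0.1"))) // *87047F000001*
	fmt.Println(sanIPPattern(net.ParseIP("127.0.0.2"))) // *87047F000002*
}
```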
@@ -2225,7 +2252,7 @@ func Test_Renew(t *testing.T) { // Add period value to cert entry period := 350 * time.Second fd.Raw["period"] = period.String() - resp, err = b.pathCertWrite(context.Background(), req, fd) + _, err = b.pathCertWrite(context.Background(), req, fd) if err != nil { t.Fatal(err) } @@ -2246,7 +2273,7 @@ } // Delete CA, make sure we can't renew - resp, err = b.pathCertDelete(context.Background(), req, fd) + _, err = b.pathCertDelete(context.Background(), req, fd) if err != nil { t.Fatal(err) } @@ -2319,3 +2346,596 @@ func TestBackend_CertUpgrade(t *testing.T) { t.Fatal(diff) } } + +// TestOCSPFailOpenWithBadIssuer validates that we fail all different types of cert auth +// login scenarios if we encounter an OCSP verification error +func TestOCSPFailOpenWithBadIssuer(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + badCa, badCaKey := createCa(t) + + // Set up an OCSP handler + ocspHandler := func(ca *x509.Certificate, caKey crypto.Signer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + } + response, err := ocsp.CreateResponse(ca, ca, ocspRes, caKey) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + } + goodTs := httptest.NewServer(ocspHandler(caTLS.Leaf, caTLS.PrivateKey.(crypto.Signer))) + badTs := httptest.NewServer(ocspHandler(badCa, badCaKey)) + defer goodTs.Close() + defer badTs.Close() + + steps := []logicaltest.TestStep{ + // step 1/2: This should fail as we get a response from a bad root, even when ocsp_fail_open is set to true + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{badTs.URL}, + "ocsp_query_all_servers": false, + "ocsp_fail_open": true, + }), + testAccStepLoginInvalid(t, connState), + // step 3/4: This should fail as we query all the servers which will get a response with an invalid signature + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{goodTs.URL, badTs.URL}, + "ocsp_query_all_servers": true, + "ocsp_fail_open": true, + }), + testAccStepLoginInvalid(t, connState), + // step 5/6: This should fail as we will query the OCSP server with the bad root key first.
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{badTs.URL, goodTs.URL}, + "ocsp_query_all_servers": false, + "ocsp_fail_open": true, + }), + testAccStepLoginInvalid(t, connState), + // step 7/8: This should pass as we will only query the first server with the valid root signature + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{goodTs.URL, badTs.URL}, + "ocsp_query_all_servers": false, + "ocsp_fail_open": true, + }), + testAccStepLogin(t, connState), + } + + // Set up a new factory every time to keep OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOCSPWithMixedValidResponses validates the expected behavior with multiple OCSP servers configured, +// with ocsp_query_all_servers both enabled and disabled. +func TestOCSPWithMixedValidResponses(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Set up an OCSP handler + ocspHandler := func(status int) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: status, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + } + goodTs := httptest.NewServer(ocspHandler(ocsp.Good)) + revokeTs := httptest.NewServer(ocspHandler(ocsp.Revoked)) + defer goodTs.Close() + defer revokeTs.Close() + + steps := []logicaltest.TestStep{ + // step 1/2: This should pass as we will query the first server and get a valid good response, not testing + // the second configured server + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{goodTs.URL, revokeTs.URL}, + "ocsp_query_all_servers": false, + }), + testAccStepLogin(t, connState), + // step 3/4: This should fail as we will query the revoking OCSP server first and get a revoke response + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{revokeTs.URL, goodTs.URL}, + "ocsp_query_all_servers": false, + }), + testAccStepLoginInvalid(t, connState), + // step 5/6: This should fail as we will query all the OCSP servers and prefer the revoke response + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{goodTs.URL, revokeTs.URL}, + "ocsp_query_all_servers": true, + }), + testAccStepLoginInvalid(t, connState), + } + + // Set up a new factory every time to keep OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOCSPFailOpenWithGoodResponse validates the expected behavior with multiple OCSP servers configured, +// one that returns a Good response while the other is not available, along with ocsp_fail_open in multiple modes +func TestOCSPFailOpenWithGoodResponse(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Set up an OCSP handler + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + steps := []logicaltest.TestStep{ + // Step 1/2 With no proper responses from any OCSP server and fail_open set to true, we should pass validation + // as fail_open is true + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{"http://127.0.0.1:30000", "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 3/4 With no proper responses from any OCSP server and fail_open set to false, we should fail validation + // as fail_open is false + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{"http://127.0.0.1:30000", "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 5/6 With a single positive response, query all servers set to false and fail open true, pass validation + // as query all servers is false + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 7/8 With a single positive response, query all servers set to false and fail open false, pass validation + // as query all servers is false + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 9/10 With a single positive response, query all servers set to true and fail open true, pass validation + // as fail open is true + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 11/12 With a single positive response, query all servers set to true and fail open false, fail validation + // as not all servers agree + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + } + + // Set up a new factory every time to keep OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOCSPFailOpenWithRevokeResponse validates the expected behavior with multiple OCSP servers configured, +// one that returns a Revoke response while the other is not available, along with ocsp_fail_open in multiple modes +func TestOCSPFailOpenWithRevokeResponse(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Set up an OCSP handler + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Revoked, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + // With one revoking OCSP server and one unavailable server, make sure that we behave as we expect + steps := []logicaltest.TestStep{ + // Step 1/2 With a single revoke response, query all servers set to false and fail open true, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 3/4 With a single revoke response, query all servers set to false and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 5/6 With a single revoke response, query all servers set to true and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 7/8 With a single revoke response, query all servers set to true and fail open true, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + } + + // Set up a new factory every time to keep OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOCSPFailOpenWithUnknownResponse validates the expected behavior with multiple OCSP servers configured, +// one that returns an Unknown response while the other is not available, along with ocsp_fail_open in multiple modes +func TestOCSPFailOpenWithUnknownResponse(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Set up an OCSP handler + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Unknown, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + // With one OCSP server returning Unknown and one unavailable server, make sure that we behave as we expect + steps := []logicaltest.TestStep{ + // Step 1/2 With a single unknown response, query all servers set to false and fail open true, pass validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 3/4 With a single unknown response, query all servers set to false and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 5/6 With a single unknown response, query all servers set to true and fail open true, pass validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 7/8 With a single unknown response, query all servers set to true and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + } + + // Set up a new factory every time to keep OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOcspMaxRetriesUpdate verifies that the ocsp_max_retries field is properly initialized +// with our default value of 4, that legacy roles have it initialized automatically to 4, and that we +// can properly store and retrieve updates to the field.
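The defaulting that TestOcspMaxRetriesUpdate below relies on is decode-time seeding: the path_certs.go hunk later in this diff initializes `result := CertEntry{OcspMaxRetries: defaultOcspMaxRetries}` before calling `DecodeJSON`, so keys absent from a legacy storage entry keep their default instead of zeroing out. A minimal standalone sketch of that pattern, using a hypothetical trimmed-down `entry` type in place of `CertEntry`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// entry is a hypothetical trimmed-down stand-in for CertEntry.
type entry struct {
	Name           string
	OcspMaxRetries int
}

func main() {
	// Legacy storage entries predate the field, so the key is absent.
	legacy := []byte(`{"Name":"legacy"}`)

	// Seeding the struct before decoding keeps the default for absent keys;
	// json.Unmarshal only overwrites fields that appear in the input.
	e := entry{OcspMaxRetries: 4}
	if err := json.Unmarshal(legacy, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.OcspMaxRetries) // prints 4
}
```

The same effect explains the legacy-entry check at the end of the test: the stored JSON there has no OcspMaxRetries key, yet the subsequent read still reports 4.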
+func TestOcspMaxRetriesUpdate(t *testing.T) { + storage := &logical.InmemStorage{} + ctx := context.Background() + + lb, err := Factory(context.Background(), &logical.BackendConfig{ + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: 300 * time.Second, + MaxLeaseTTLVal: 1800 * time.Second, + }, + StorageView: storage, + }) + require.NoError(t, err, "failed creating backend") + + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + + data := map[string]interface{}{ + "certificate": string(pemCa), + "display_name": "test", + } + + // Test initial creation of role sets ocsp_max_retries to a default of 4 + _, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/test", + Data: data, + Storage: storage, + }) + require.NoError(t, err, "failed initial role creation request") + + resp, err := lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/test", + Storage: storage, + }) + require.NoError(t, err, "failed reading role request") + require.NotNil(t, resp) + require.Equal(t, 4, resp.Data["ocsp_max_retries"], "ocsp config didn't match expectations") + + // Test we can update the field and read it back + data["ocsp_max_retries"] = 1 + _, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/test", + Data: data, + Storage: storage, + }) + require.NoError(t, err, "failed updating role request") + + resp, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/test", + Storage: storage, + }) + require.NoError(t, err, "failed reading role request") + require.NotNil(t, resp) + require.Equal(t, 1, resp.Data["ocsp_max_retries"], "ocsp config didn't match expectations on update") + + // Verify existing storage entries get updated with a value of 4 + entry := &logical.StorageEntry{ + Key: "cert/legacy", + Value: []byte(`{"token_bound_cidrs":null,"token_explicit_max_ttl":0,"token_max_ttl":0, + "token_no_default_policy":false,"token_num_uses":0,"token_period":0, + "token_policies":null,"token_type":0,"token_ttl":0,"Name":"test", + "Certificate":"-----BEGIN CERTIFICATE-----\nMIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL\nBQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw\nMjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7\nQ7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0\nz2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x\nAHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb\n6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH\nSWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G\nA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx\n7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc\nBgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA\nwHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2\nU946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa\ncNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N\nScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ\nt2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk\nzehNe5dFTjFpylg1o6b8Ow==\n-----END CERTIFICATE-----\n", + "DisplayName":"test","Policies":null,"TTL":0,"MaxTTL":0,"Period":0, + 
"AllowedNames":null,"AllowedCommonNames":null,"AllowedDNSSANs":null, + "AllowedEmailSANs":null,"AllowedURISANs":null,"AllowedOrganizationalUnits":null, + "RequiredExtensions":null,"AllowedMetadataExtensions":null,"BoundCIDRs":null, + "OcspCaCertificates":"","OcspEnabled":false,"OcspServersOverride":null, + "OcspFailOpen":false,"OcspQueryAllServers":false}`), + } + err = storage.Put(ctx, entry) + require.NoError(t, err, "failed putting legacy storage entry") + + resp, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/legacy", + Storage: storage, + }) + require.NoError(t, err, "failed reading role request") + require.NotNil(t, resp) + require.Equal(t, 4, resp.Data["ocsp_max_retries"], "ocsp config didn't match expectations on legacy entry") +} + +func loadCerts(t *testing.T, certFile, certKey string) tls.Certificate { + caTLS, err := tls.LoadX509KeyPair(certFile, certKey) + require.NoError(t, err, "failed reading ca/key files") + + caTLS.Leaf, err = x509.ParseCertificate(caTLS.Certificate[0]) + require.NoError(t, err, "failed parsing certificate from file %s", certFile) + + return caTLS +} + +func createCa(t *testing.T) (*x509.Certificate, *ecdsa.PrivateKey) { + rootCaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated root key for CA") + + // Validate we reject CSRs that contain CN that aren't in the original order + cr := &x509.Certificate{ + Subject: pkix.Name{CommonName: "Root Cert"}, + SerialNumber: big.NewInt(1), + IsCA: true, + BasicConstraintsValid: true, + SignatureAlgorithm: x509.ECDSAWithSHA256, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning}, + } + rootCaBytes, err := x509.CreateCertificate(rand.Reader, cr, cr, &rootCaKey.PublicKey, rootCaKey) + require.NoError(t, err, "failed generating root ca") + + rootCa, err := x509.ParseCertificate(rootCaBytes) + require.NoError(t, err, "failed parsing root ca") + + return rootCa, rootCaKey +} diff --git a/builtin/credential/cert/cli.go b/builtin/credential/cert/cli.go index 3ba1e712d134..2e7a8b8e22e3 100644 --- a/builtin/credential/cert/cli.go +++ b/builtin/credential/cert/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert diff --git a/builtin/credential/cert/cmd/cert/main.go b/builtin/credential/cert/cmd/cert/main.go index 5b80a54cde77..45eb75d36caf 100644 --- a/builtin/credential/cert/cmd/cert/main.go +++ b/builtin/credential/cert/cmd/cert/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go index d2419d9a1908..19963827c555 100644 --- a/builtin/credential/cert/path_certs.go +++ b/builtin/credential/cert/path_certs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-sockaddr" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" @@ -21,22 +21,33 @@ func pathListCerts(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "certificates", + Navigation: true, + ItemType: "Certificate", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathCertList, }, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "Certificate", - }, } } func pathCerts(b *backend) *framework.Path { p := &framework.Path{ Pattern: "certs/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "certificate", + Action: "Create", + ItemType: "Certificate", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -66,6 +77,9 @@ Must be x509 PEM encoded.`, Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.", + }, }, "ocsp_fail_open": { Type: framework.TypeBool, @@ -77,6 +91,16 @@ from the AuthorityInformationAccess extension on the certificate being inspected Default: false, Description: "If set to true, rather than accepting the first successful OCSP response, query all servers and consider the certificate valid only if all servers agree.", }, + "ocsp_this_update_max_age": { + Type: framework.TypeDurationSecond, + Default: 0, + Description: "If greater than 0, specifies the maximum age of an OCSP thisUpdate field to avoid accepting old responses without a nextUpdate field.", + }, + "ocsp_max_retries": { + Type: framework.TypeInt, + Default: 4, + Description: "The number of retries the OCSP client should attempt per query.", + }, "allowed_names": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of names. @@ -84,7 +108,8 @@ At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", + Group: "Constraints", + Description: "A list of names. At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.", }, }, @@ -93,7 +118,8 @@ allowed_email_sans, allowed_uri_sans.`, Description: `A comma-separated list of names. At least one must exist in the Common Name. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", + Group: "Constraints", + Description: "A list of names. At least one must exist in the Common Name. 
Supports globbing.", }, }, @@ -102,8 +128,9 @@ At least one must exist in the Common Name. Supports globbing.`, Description: `A comma-separated list of DNS names. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed DNS SANs", - Group: "Constraints", + Name: "Allowed DNS SANs", + Group: "Constraints", + Description: "A list of DNS names. At least one must exist in the SANs. Supports globbing.", }, }, @@ -112,8 +139,9 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of Email Addresses. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed Email SANs", - Group: "Constraints", + Name: "Allowed Email SANs", + Group: "Constraints", + Description: "A list of Email Addresses. At least one must exist in the SANs. Supports globbing.", }, }, @@ -122,8 +150,9 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of URIs. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed URI SANs", - Group: "Constraints", + Name: "Allowed URI SANs", + Group: "Constraints", + Description: "A list of URIs. At least one must exist in the SANs. Supports globbing.", }, }, @@ -132,7 +161,8 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of Organizational Units names. At least one must exist in the OU field.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", + Group: "Constraints", + Description: "A list of Organizational Units names. At least one must exist in the OU field.", }, }, @@ -141,6 +171,9 @@ At least one must exist in the OU field.`, Description: `A comma-separated string or array of extensions formatted as "oid:value". Expects the extension value to be some type of ASN1 encoded string. All values much match. Supports globbing on "value".`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of extensions formatted as 'oid:value'. Expects the extension value to be some type of ASN1 encoded string. All values much match. Supports globbing on 'value'.", + }, }, "allowed_metadata_extensions": { @@ -149,6 +182,9 @@ All values much match. Supports globbing on "value".`, Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the oid numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of OID extensions. Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the OID numbers separated by a dash (-) instead of a dot (.) 
to allow usage in ACL templates.", + }, }, "display_name": { @@ -202,10 +238,6 @@ certificate.`, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "Certificate", - }, } tokenutil.AddTokenFields(p.Fields) @@ -213,7 +245,7 @@ certificate.`, } func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertEntry, error) { - entry, err := s.Get(ctx, "cert/"+strings.ToLower(n)) + entry, err := s.Get(ctx, trustedCertPath+strings.ToLower(n)) if err != nil { return nil, err } @@ -221,7 +253,7 @@ func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertE return nil, nil } - var result CertEntry + result := CertEntry{OcspMaxRetries: defaultOcspMaxRetries} // Specify our defaults if the key is missing if err := entry.DecodeJSON(&result); err != nil { return nil, err } @@ -246,7 +278,8 @@ func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertE } func (b *backend) pathCertDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "cert/"+strings.ToLower(d.Get("name").(string))) + defer b.flushTrustedCache() + err := req.Storage.Delete(ctx, trustedCertPath+strings.ToLower(d.Get("name").(string))) if err != nil { return nil, err } @@ -254,7 +287,7 @@ func (b *backend) pathCertDelete(ctx context.Context, req *logical.Request, d *f } func (b *backend) pathCertList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - certs, err := req.Storage.List(ctx, "cert/") + certs, err := req.Storage.List(ctx, trustedCertPath) if err != nil { return nil, err } @@ -281,6 +314,13 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra "allowed_organizational_units": cert.AllowedOrganizationalUnits, "required_extensions": cert.RequiredExtensions, "allowed_metadata_extensions": cert.AllowedMetadataExtensions, + "ocsp_ca_certificates": cert.OcspCaCertificates, + "ocsp_enabled": cert.OcspEnabled, + "ocsp_servers_override": cert.OcspServersOverride, + "ocsp_fail_open": cert.OcspFailOpen, + "ocsp_query_all_servers": cert.OcspQueryAllServers, + "ocsp_this_update_max_age": int64(cert.OcspThisUpdateMaxAge.Seconds()), + "ocsp_max_retries": cert.OcspMaxRetries, } cert.PopulateTokenData(data) @@ -306,6 +346,7 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra } func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + defer b.flushTrustedCache() name := strings.ToLower(d.Get("name").(string)) cert, err := b.Cert(ctx, req.Storage, name) @@ -315,7 +356,8 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr if cert == nil { cert = &CertEntry{ - Name: name, + Name: name, + OcspMaxRetries: defaultOcspMaxRetries, } } @@ -338,6 +380,19 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr if ocspQueryAll, ok := d.GetOk("ocsp_query_all_servers"); ok { cert.OcspQueryAllServers = ocspQueryAll.(bool) } + if ocspThisUpdateMaxAge, ok := d.GetOk("ocsp_this_update_max_age"); ok { + maxAgeDuration, err := parseutil.ParseDurationSecond(ocspThisUpdateMaxAge) + if err != nil { + return nil, fmt.Errorf("failed to parse ocsp_this_update_max_age: %w", err) + } + cert.OcspThisUpdateMaxAge = maxAgeDuration + } + if ocspMaxRetries, ok := d.GetOk("ocsp_max_retries"); ok { + cert.OcspMaxRetries = 
ocspMaxRetries.(int) + if cert.OcspMaxRetries < 0 { + return nil, fmt.Errorf("ocsp_max_retries can not be a negative number") + } + } if displayNameRaw, ok := d.GetOk("display_name"); ok { cert.DisplayName = displayNameRaw.(string) } @@ -448,7 +503,7 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr } // Store it - entry, err := logical.StorageEntryJSON("cert/"+name, cert) + entry, err := logical.StorageEntryJSON(trustedCertPath+name, cert) if err != nil { return nil, err } @@ -483,11 +538,13 @@ type CertEntry struct { AllowedMetadataExtensions []string BoundCIDRs []*sockaddr.SockAddrMarshaler - OcspCaCertificates string - OcspEnabled bool - OcspServersOverride []string - OcspFailOpen bool - OcspQueryAllServers bool + OcspCaCertificates string + OcspEnabled bool + OcspServersOverride []string + OcspFailOpen bool + OcspQueryAllServers bool + OcspThisUpdateMaxAge time.Duration + OcspMaxRetries int } const pathCertHelpSyn = ` diff --git a/builtin/credential/cert/path_config.go b/builtin/credential/cert/path_config.go index e0c70d588a79..1183775f6bc4 100644 --- a/builtin/credential/cert/path_config.go +++ b/builtin/credential/cert/path_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert @@ -11,11 +11,16 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const maxCacheSize = 100000 +const maxOcspCacheSize = 100000 func pathConfig(b *backend) *framework.Path { return &framework.Path{ Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + }, + Fields: map[string]*framework.FieldSchema{ "disable_binding": { Type: framework.TypeBool, @@ -32,11 +37,26 @@ func pathConfig(b *backend) *framework.Path { Default: 100, Description: `The size of the in memory OCSP response cache, shared by all configured certs`, }, + "role_cache_size": { + Type: framework.TypeInt, + Default: defaultRoleCacheSize, + Description: `The size of the in memory role cache`, + }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigWrite, - logical.ReadOperation: b.pathConfigRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, }, } } @@ -55,11 +75,18 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, dat } if cacheSizeRaw, ok := data.GetOk("ocsp_cache_size"); ok { cacheSize := cacheSizeRaw.(int) - if cacheSize < 2 || cacheSize > maxCacheSize { - return logical.ErrorResponse("invalid cache size, must be >= 2 and <= %d", maxCacheSize), nil + if cacheSize < 2 || cacheSize > maxOcspCacheSize { + return logical.ErrorResponse("invalid ocsp cache size, must be >= 2 and <= %d", maxOcspCacheSize), nil } config.OcspCacheSize = cacheSize } + if cacheSizeRaw, ok := data.GetOk("role_cache_size"); ok { + cacheSize := cacheSizeRaw.(int) + if (cacheSize < 0 && cacheSize != -1) || cacheSize > maxRoleCacheSize { + return logical.ErrorResponse("invalid role cache size, must be <= %d or -1 to disable role caching", maxRoleCacheSize), nil + } + config.RoleCacheSize = cacheSize + } if err := b.storeConfig(ctx, req.Storage, config); err != 
nil { return nil, err } @@ -76,6 +103,7 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f "disable_binding": cfg.DisableBinding, "enable_identity_alias_metadata": cfg.EnableIdentityAliasMetadata, "ocsp_cache_size": cfg.OcspCacheSize, + "role_cache_size": cfg.RoleCacheSize, } return &logical.Response{ @@ -104,4 +132,5 @@ type config struct { DisableBinding bool `json:"disable_binding"` EnableIdentityAliasMetadata bool `json:"enable_identity_alias_metadata"` OcspCacheSize int `json:"ocsp_cache_size"` + RoleCacheSize int `json:"role_cache_size"` } diff --git a/builtin/credential/cert/path_crls.go b/builtin/credential/cert/path_crls.go index 022f963b7d94..f38654869d73 100644 --- a/builtin/credential/cert/path_crls.go +++ b/builtin/credential/cert/path_crls.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert @@ -22,6 +22,10 @@ import ( func pathListCRLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "crls/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "crls", + }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathCRLsList, @@ -44,6 +48,12 @@ func (b *backend) pathCRLsList(ctx context.Context, req *logical.Request, d *fra func pathCRLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "crls/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "crl", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -180,6 +190,7 @@ func (b *backend) pathCRLDelete(ctx context.Context, req *logical.Request, d *fr b.crlUpdateMutex.Lock() defer b.crlUpdateMutex.Unlock() + defer b.flushTrustedCache() _, ok := b.crls[name] if !ok { @@ -303,6 +314,8 @@ func (b *backend) setCRL(ctx context.Context, storage logical.Storage, certList } b.crls[name] = crlInfo + b.flushTrustedCache() + return err } diff --git a/builtin/credential/cert/path_crls_test.go b/builtin/credential/cert/path_crls_test.go index 24211f5cad5b..26a47b329330 100644 --- a/builtin/credential/cert/path_crls_test.go +++ b/builtin/credential/cert/path_crls_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert @@ -19,7 +19,6 @@ import ( "time" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index bfc2450f4166..53571b26185e 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
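The new role-level OCSP tunables (`ocsp_this_update_max_age`, `ocsp_max_retries`) and the backend-wide `role_cache_size` above are ordinary write parameters. A minimal sketch of tuning them with the official Go client; the `auth/cert` mount path and an already-existing role named `web` are assumptions here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // VAULT_ADDR / VAULT_TOKEN come from the environment
	if err != nil {
		log.Fatal(err)
	}

	// Role-level OCSP knobs added in this change; unset fields keep their defaults.
	if _, err := client.Logical().Write("auth/cert/certs/web", map[string]interface{}{
		"ocsp_enabled":             true,
		"ocsp_fail_open":           false,
		"ocsp_this_update_max_age": "12h", // TypeDurationSecond also accepts plain seconds
		"ocsp_max_retries":         2,     // default is 4
	}); err != nil {
		log.Fatal(err)
	}

	// Backend-wide cache tuning; -1 disables the role cache entirely.
	if _, err := client.Logical().Write("auth/cert/config", map[string]interface{}{
		"role_cache_size": -1,
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("cert auth mount tuned")
}
```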
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert @@ -10,20 +10,23 @@ import ( "crypto/x509" "encoding/asn1" "encoding/base64" + "encoding/hex" "encoding/pem" "errors" "fmt" + "net/url" "strings" - "github.com/hashicorp/vault/sdk/helper/ocsp" - + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" - - "github.com/hashicorp/vault/sdk/helper/cidrutil" - glob "github.com/ryanuber/go-glob" + "github.com/ryanuber/go-glob" ) // ParsedCert is a certificate that has been configured as trusted @@ -32,9 +35,15 @@ type ParsedCert struct { Certificates []*x509.Certificate } +const certAuthFailMsg = "failed to match all constraints for this login certificate" + func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationVerb: "login", + }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -251,7 +260,7 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d } // Load the trusted certificates and other details - roots, trusted, trustedNonCAs, verifyConf := b.loadTrustedCerts(ctx, req.Storage, certName) + roots, trusted, trustedNonCAs, verifyConf := b.getTrustedCerts(ctx, req.Storage, certName) // Get the list of full chains matching the connection and validates the // certificate itself @@ -267,16 +276,35 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d // If trustedNonCAs is not empty it means that client had registered a non-CA cert // with the backend. + var retErr error if len(trustedNonCAs) != 0 { for _, trustedNonCA := range trustedNonCAs { tCert := trustedNonCA.Certificates[0] // Check for client cert being explicitly listed in the config (and matching other constraints) if tCert.SerialNumber.Cmp(clientCert.SerialNumber) == 0 && bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) { - matches, err := b.matchesConstraints(ctx, clientCert, trustedNonCA.Certificates, trustedNonCA, verifyConf) + pkMatch, err := certutil.ComparePublicKeysAndType(tCert.PublicKey, clientCert.PublicKey) if err != nil { return nil, nil, err } + if !pkMatch { + // Someone may be trying to pass off a forged certificate as the trusted non-CA cert. Reject early. + return nil, logical.ErrorResponse("public key mismatch of a trusted leaf certificate"), nil + } + matches, err := b.matchesConstraints(ctx, clientCert, trustedNonCA.Certificates, trustedNonCA, verifyConf) + + // matchesConstraints returns an error when OCSP verification fails, + // but some other path might still give us success. Add to the + // retErr multierror, but avoid duplicates. This way, if we reach a + // failure later, we can give additional context. + // + // XXX: If matchesConstraints is updated to generate additional, + // immediately fatal errors, we likely need to extend it to return + // another boolean (fatality) or other detection scheme. 
+ if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) { + retErr = multierror.Append(retErr, err) + } + if matches { return trustedNonCA, nil, nil } @@ -287,23 +315,37 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d // If no trusted chain was found, client is not authenticated // This check happens after checking for a matching configured non-CA certs if len(trustedChains) == 0 { - return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil + if retErr != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("%s; additionally got errors during verification: %v", certAuthFailMsg, retErr)), nil + } + + return nil, logical.ErrorResponse(certAuthFailMsg), nil } // Search for a ParsedCert that intersects with the validated chains and any additional constraints - matches := make([]*ParsedCert, 0) for _, trust := range trusted { // For each ParsedCert in the config for _, tCert := range trust.Certificates { // For each certificate in the entry for _, chain := range trustedChains { // For each root chain that we matched for _, cCert := range chain { // For each cert in the matched chain if tCert.Equal(cCert) { // ParsedCert intersects with matched chain match, err := b.matchesConstraints(ctx, clientCert, chain, trust, verifyConf) // validate client cert + matched chain against the config - if err != nil { - return nil, nil, err + + // See note above. + if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) { + retErr = multierror.Append(retErr, err) } - if match { - // Add the match to the list - matches = append(matches, trust) + + // Return the first matching entry (for backwards + // compatibility, we continue to just pick the first + // one if we have multiple matches). + // + // Here, we return directly: this means that any + // future OCSP errors would be ignored; in the future, + // if these become fatal, we could revisit this + // choice and choose the first match after evaluating + // all possible candidates. + if match && err == nil { + return trust, nil, nil } } } @@ -311,13 +353,11 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d } } - // Fail on no matches - if len(matches) == 0 { - return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil + if retErr != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("%s; additionally got errors during verification: %v", certAuthFailMsg, retErr)), nil } - // Return the first matching entry (for backwards compatibility, we continue to just pick one if multiple match) - return matches[0], nil, nil + return nil, logical.ErrorResponse(certAuthFailMsg), nil } func (b *backend) matchesConstraints(ctx context.Context, clientCert *x509.Certificate, trustedChain []*x509.Certificate, @@ -480,18 +520,43 @@ func (b *backend) matchesCertificateExtensions(clientCert *x509.Certificate, con // including its ASN.1 type tag bytes. For the sake of simplicity, assume string type // and drop the tag bytes. And get the number of bytes from the tag. 
clientExtMap := make(map[string]string, len(clientCert.Extensions)) + hexExtMap := make(map[string]string, len(clientCert.Extensions)) + for _, ext := range clientCert.Extensions { var parsedValue string - asn1.Unmarshal(ext.Value, &parsedValue) - clientExtMap[ext.Id.String()] = parsedValue + _, err := asn1.Unmarshal(ext.Value, &parsedValue) + if err != nil { + clientExtMap[ext.Id.String()] = "" + } else { + clientExtMap[ext.Id.String()] = parsedValue + } + + hexExtMap[ext.Id.String()] = hex.EncodeToString(ext.Value) } - // If any of the required extensions don'log match the constraint fails + + // If any of the required extensions don't match the constraint fails for _, requiredExt := range config.Entry.RequiredExtensions { reqExt := strings.SplitN(requiredExt, ":", 2) - clientExtValue, clientExtValueOk := clientExtMap[reqExt[0]] - if !clientExtValueOk || !glob.Glob(reqExt[1], clientExtValue) { + if len(reqExt) != 2 { return false } + + if reqExt[0] == "hex" { + reqHexExt := strings.SplitN(reqExt[1], ":", 2) + if len(reqHexExt) != 2 { + return false + } + + clientExtValue, clientExtValueOk := hexExtMap[reqHexExt[0]] + if !clientExtValueOk || !glob.Glob(strings.ToLower(reqHexExt[1]), clientExtValue) { + return false + } + } else { + clientExtValue, clientExtValueOk := clientExtMap[reqExt[0]] + if !clientExtValueOk || !glob.Glob(reqExt[1], clientExtValue) { + return false + } + } } return true } @@ -528,10 +593,45 @@ func (b *backend) certificateExtensionsMetadata(clientCert *x509.Certificate, co return metadata } +// getTrustedCerts is used to load all the trusted certificates from the backend, cached + +func (b *backend) getTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { + if !b.trustedCacheDisabled.Load() { + trusted, found := b.getTrustedCertsFromCache(certName) + if found { + return trusted.pool, trusted.trusted, trusted.trustedNonCAs, trusted.ocspConf + } + } + return b.loadTrustedCerts(ctx, storage, certName) +} + +func (b *backend) getTrustedCertsFromCache(certName string) (*trusted, bool) { + if certName == "" { + trusted := b.trustedCacheFull.Load() + if trusted != nil { + return trusted, true + } + } else if trusted, found := b.trustedCache.Get(certName); found { + return trusted, true + } + return nil, false +} + // loadTrustedCerts is used to load all the trusted certificates from the backend -func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { +func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trustedCerts []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { + lock := locksutil.LockForKey(b.trustedCacheLocks, certName) + lock.Lock() + defer lock.Unlock() + + if !b.trustedCacheDisabled.Load() { + trusted, found := b.getTrustedCertsFromCache(certName) + if found { + return trusted.pool, trusted.trusted, trusted.trustedNonCAs, trusted.ocspConf + } + } + pool = x509.NewCertPool() - trusted = make([]*ParsedCert, 0) + trustedCerts = make([]*ParsedCert, 0) trustedNonCAs = make([]*ParsedCert, 0) var names []string @@ -539,7 +639,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, names = append(names, certName) } else { var err error - names, err = storage.List(ctx, "cert/") + names, err = 
storage.List(ctx, trustedCertPath) if err != nil { b.Logger().Error("failed to list trusted certs", "error", err) return @@ -548,7 +648,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, conf = &ocsp.VerifyConfig{} for _, name := range names { - entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, "cert/")) + entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, trustedCertPath)) if err != nil { b.Logger().Error("failed to load trusted cert", "name", name, "error", err) continue @@ -577,7 +677,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, } // Create a ParsedCert entry - trusted = append(trusted, &ParsedCert{ + trustedCerts = append(trustedCerts, &ParsedCert{ Entry: entry, Certificates: parsed, }) @@ -591,6 +691,31 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, conf.OcspFailureMode = ocsp.FailOpenFalse } conf.QueryAllServers = conf.QueryAllServers || entry.OcspQueryAllServers + conf.OcspThisUpdateMaxAge = entry.OcspThisUpdateMaxAge + conf.OcspMaxRetries = entry.OcspMaxRetries + + if len(entry.OcspCaCertificates) > 0 { + certs, err := certutil.ParseCertsPEM([]byte(entry.OcspCaCertificates)) + if err != nil { + b.Logger().Error("failed to parse ocsp_ca_certificates", "name", name, "error", err) + continue + } + conf.ExtraCas = certs + } + } + } + + if !b.trustedCacheDisabled.Load() { + entry := &trusted{ + pool: pool, + trusted: trustedCerts, + trustedNonCAs: trustedNonCAs, + ocspConf: conf, + } + if certName == "" { + b.trustedCacheFull.Store(entry) + } else { + b.trustedCache.Add(certName, entry) } } return @@ -604,11 +729,49 @@ func (b *backend) checkForCertInOCSP(ctx context.Context, clientCert *x509.Certi defer b.ocspClientMutex.RUnlock() err := b.ocspClient.VerifyLeafCertificate(ctx, clientCert, chain[1], conf) if err != nil { + if ocsp.IsOcspVerificationError(err) { + // We don't want anything to override an OCSP verification error + return false, err + } + if conf.OcspFailureMode == ocsp.FailOpenTrue { + onlyNetworkErrors := b.handleOcspErrorInFailOpen(err) + if onlyNetworkErrors { + return true, nil + } + } + // We want to preserve error messages when they have additional, + // potentially useful information. Just having a revoked cert + // isn't additionally useful. + if !strings.Contains(err.Error(), "has been revoked") { + return false, err + } return false, nil } return true, nil } +func (b *backend) handleOcspErrorInFailOpen(err error) bool { + urlError := &url.Error{} + allNetworkErrors := true + if multiError, ok := err.(*multierror.Error); ok { + for _, myErr := range multiError.Errors { + if !errors.As(myErr, &urlError) { + allNetworkErrors = false + } + } + } else if !errors.As(err, &urlError) { + allNetworkErrors = false + } + + if allNetworkErrors { + b.Logger().Warn("OCSP is set to fail-open, and could not retrieve "+ + "OCSP based revocation but proceeding.", "detail", err) + return true + } + + return false +} + func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool { badChain := false for _, cert := range chain { diff --git a/builtin/credential/cert/path_login_test.go b/builtin/credential/cert/path_login_test.go index e2fde0157fa6..ad1030464f35 100644 --- a/builtin/credential/cert/path_login_test.go +++ b/builtin/credential/cert/path_login_test.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. 
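`handleOcspErrorInFailOpen` above only lets a fail-open mount proceed when every underlying error is a plain network failure. A self-contained sketch of that classification, using the same `go-multierror` and `net/url` types; the sample errors are invented:

```go
package main

import (
	"errors"
	"fmt"
	"net/url"

	"github.com/hashicorp/go-multierror"
)

// onlyNetworkErrors reports whether err consists solely of *url.Error
// values, unwrapping a go-multierror if present. This mirrors the shape
// of the handleOcspErrorInFailOpen check above.
func onlyNetworkErrors(err error) bool {
	urlErr := &url.Error{}
	if merr, ok := err.(*multierror.Error); ok {
		for _, e := range merr.Errors {
			if !errors.As(e, &urlErr) {
				return false
			}
		}
		return true
	}
	return errors.As(err, &urlErr)
}

func main() {
	netErr := &url.Error{Op: "Get", URL: "http://ocsp.example.com", Err: errors.New("connection refused")}
	parseErr := errors.New("malformed OCSP response") // not a network error

	fmt.Println(onlyNetworkErrors(netErr))                              // true: proceed under fail-open
	fmt.Println(onlyNetworkErrors(multierror.Append(netErr, parseErr))) // false: still fail closed
}
```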
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cert import ( "context" + "crypto" "crypto/tls" "crypto/x509" "crypto/x509/pkix" @@ -20,13 +21,10 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/certutil" - - "golang.org/x/crypto/ocsp" - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ocsp" ) var ocspPort int @@ -94,6 +92,10 @@ func TestCert_RoleResolve(t *testing.T) { testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), testAccStepLoginWithName(t, connState, "web"), testAccStepResolveRoleWithName(t, connState, "web"), + // Test with caching disabled + testAccStepSetRoleCacheSize(t, -1), + testAccStepLoginWithName(t, connState, "web"), + testAccStepResolveRoleWithName(t, connState, "web"), }, }) } @@ -151,10 +153,23 @@ func TestCert_RoleResolveWithoutProvidingCertName(t *testing.T) { testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), testAccStepLoginWithName(t, connState, "web"), testAccStepResolveRoleWithEmptyDataMap(t, connState, "web"), + testAccStepSetRoleCacheSize(t, -1), + testAccStepLoginWithName(t, connState, "web"), + testAccStepResolveRoleWithEmptyDataMap(t, connState, "web"), }, }) } +func testAccStepSetRoleCacheSize(t *testing.T, size int) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "role_cache_size": size, + }, + } +} + func testAccStepResolveRoleWithEmptyDataMap(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ResolveRoleOperation, @@ -188,7 +203,7 @@ func testAccStepResolveRoleExpectRoleResolutionToFail(t *testing.T, connState tl t.Fatal("Error not part of response.") } - if !strings.Contains(errString, "invalid certificate") { + if !strings.Contains(errString, certAuthFailMsg) { t.Fatalf("Error was not due to invalid role name. Error: %s", errString) } return nil @@ -216,7 +231,7 @@ func testAccStepResolveRoleOCSPFail(t *testing.T, connState tls.ConnectionState, t.Fatal("Error not part of response.") } - if !strings.Contains(errString, "no chain matching") { + if !strings.Contains(errString, certAuthFailMsg) { t.Fatalf("Error was not due to OCSP failure. 
Error: %s", errString) } return nil @@ -267,19 +282,6 @@ func TestCert_RoleResolve_RoleDoesNotExist(t *testing.T) { } func TestCert_RoleResolveOCSP(t *testing.T) { - cases := []struct { - name string - failOpen bool - certStatus int - errExpected bool - }{ - {"failFalseGoodCert", false, ocsp.Good, false}, - {"failFalseRevokedCert", false, ocsp.Revoked, true}, - {"failFalseUnknownCert", false, ocsp.Unknown, true}, - {"failTrueGoodCert", true, ocsp.Good, false}, - {"failTrueRevokedCert", true, ocsp.Revoked, true}, - {"failTrueUnknownCert", true, ocsp.Unknown, false}, - } certTemplate := &x509.Certificate{ Subject: pkix.Name{ CommonName: "example.com", @@ -318,15 +320,76 @@ func TestCert_RoleResolveOCSP(t *testing.T) { t.Fatalf("err: %v", err) } + tempDir, connState2, err := generateTestCertAndConnState(t, certTemplate) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca2, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + + issuer2 := parsePEM(ca2) + pkf2, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_key.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + pk2, err := certutil.ParsePEMBundle(string(pkf2)) + if err != nil { + t.Fatalf("err: %v", err) + } + + type caData struct { + privateKey crypto.Signer + caBytes []byte + caChain []*x509.Certificate + connState tls.ConnectionState + } + + ca1Data := caData{ + pk.PrivateKey, + ca, + issuer, + connState, + } + ca2Data := caData{ + pk2.PrivateKey, + ca2, + issuer2, + connState2, + } + + cases := []struct { + name string + failOpen bool + certStatus int + errExpected bool + caData caData + ocspCaCerts string + }{ + {name: "failFalseGoodCert", certStatus: ocsp.Good, caData: ca1Data}, + {name: "failFalseRevokedCert", certStatus: ocsp.Revoked, errExpected: true, caData: ca1Data}, + {name: "failFalseUnknownCert", certStatus: ocsp.Unknown, errExpected: true, caData: ca1Data}, + {name: "failTrueGoodCert", failOpen: true, certStatus: ocsp.Good, caData: ca1Data}, + {name: "failTrueRevokedCert", failOpen: true, certStatus: ocsp.Revoked, errExpected: true, caData: ca1Data}, + {name: "failTrueUnknownCert", failOpen: true, certStatus: ocsp.Unknown, caData: ca1Data}, + {name: "failFalseGoodCertExtraCas", certStatus: ocsp.Good, caData: ca2Data, ocspCaCerts: string(pkf2)}, + {name: "failFalseRevokedCertExtraCas", certStatus: ocsp.Revoked, errExpected: true, caData: ca2Data, ocspCaCerts: string(pkf2)}, + {name: "failFalseUnknownCertExtraCas", certStatus: ocsp.Unknown, errExpected: true, caData: ca2Data, ocspCaCerts: string(pkf2)}, + {name: "failTrueGoodCertExtraCas", failOpen: true, certStatus: ocsp.Good, caData: ca2Data, ocspCaCerts: string(pkf2)}, + {name: "failTrueRevokedCertExtraCas", failOpen: true, certStatus: ocsp.Revoked, errExpected: true, caData: ca2Data, ocspCaCerts: string(pkf2)}, + {name: "failTrueUnknownCertExtraCas", failOpen: true, certStatus: ocsp.Unknown, caData: ca2Data, ocspCaCerts: string(pkf2)}, + } + for _, c := range cases { t.Run(c.name, func(t *testing.T) { - resp, err := ocsp.CreateResponse(issuer[0], issuer[0], ocsp.Response{ + resp, err := ocsp.CreateResponse(c.caData.caChain[0], c.caData.caChain[0], ocsp.Response{ Status: c.certStatus, SerialNumber: certTemplate.SerialNumber, ProducedAt: time.Now(), ThisUpdate: time.Now(), NextUpdate: time.Now().Add(time.Hour), - }, pk.PrivateKey) + }, c.caData.privateKey) if err != nil { t.Fatal(err) } @@ -337,17 +400,18 @@ func TestCert_RoleResolveOCSP(t *testing.T) { var resolveStep 
logicaltest.TestStep var loginStep logicaltest.TestStep if c.errExpected { - loginStep = testAccStepLoginWithNameInvalid(t, connState, "web") - resolveStep = testAccStepResolveRoleOCSPFail(t, connState, "web") + loginStep = testAccStepLoginWithNameInvalid(t, c.caData.connState, "web") + resolveStep = testAccStepResolveRoleOCSPFail(t, c.caData.connState, "web") } else { - loginStep = testAccStepLoginWithName(t, connState, "web") - resolveStep = testAccStepResolveRoleWithName(t, connState, "web") + loginStep = testAccStepLoginWithName(t, c.caData.connState, "web") + resolveStep = testAccStepResolveRoleWithName(t, c.caData.connState, "web") } logicaltest.Test(t, logicaltest.TestCase{ CredentialBackend: b, Steps: []logicaltest.TestStep{ - testAccStepCertWithExtraParams(t, "web", ca, "foo", allowed{dns: "example.com"}, false, - map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), + testAccStepCertWithExtraParams(t, "web", c.caData.caBytes, "foo", allowed{dns: "example.com"}, false, + map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen, "ocsp_ca_certificates": c.ocspCaCerts}), + testAccStepReadCertPolicy(t, "web", false, map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen, "ocsp_ca_certificates": c.ocspCaCerts}), loginStep, resolveStep, }, diff --git a/builtin/credential/cert/test_responder.go b/builtin/credential/cert/test_responder.go index d68ebe080e08..56fb4727be1a 100644 --- a/builtin/credential/cert/test_responder.go +++ b/builtin/credential/cert/test_responder.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Package ocsp implements an OCSP responder based on a generic storage backend. // It provides a couple of sample implementations. @@ -19,7 +19,7 @@ import ( "encoding/base64" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -168,7 +168,7 @@ func (rs *Responder) ServeHTTP(response http.ResponseWriter, request *http.Reque return } case "POST": - requestBody, err = ioutil.ReadAll(request.Body) + requestBody, err = io.ReadAll(request.Body) if err != nil { rs.log.Log("Problem reading body of POST", err) response.WriteHeader(http.StatusBadRequest) diff --git a/builtin/credential/github/backend.go b/builtin/credential/github/backend.go index 12633f8ceb2d..6e80e7b313d6 100644 --- a/builtin/credential/github/backend.go +++ b/builtin/credential/github/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
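For the `hex:`-prefixed `required_extensions` syntax added to `matchesCertificateExtensions` earlier in `path_login.go`: a constraint of the form `hex:<oid>:<glob>` is matched against the lowercase hex encoding of the raw extension bytes, which makes it possible to pin extensions that are not ASN.1 strings. A small sketch of the matching; the OID and extension bytes are invented:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	glob "github.com/ryanuber/go-glob"
)

func main() {
	// Raw DER bytes of a hypothetical custom extension on a client cert:
	// a UTF8String (tag 0x0c) of length 7 containing "example".
	extValue := []byte{0x0c, 0x07, 'e', 'x', 'a', 'm', 'p', 'l', 'e'}

	// The glob is written in hex because it is compared against the
	// lowercase hex encoding of the raw bytes.
	requiredExt := "hex:1.3.6.1.4.1.99999.1:*6578616d706c65*"

	parts := strings.SplitN(requiredExt, ":", 2)   // ["hex", "<oid>:<glob>"]
	oidAndGlob := strings.SplitN(parts[1], ":", 2) // ["<oid>", "<glob>"]
	hexValue := hex.EncodeToString(extValue)       // "0c076578616d706c65"

	fmt.Println("oid:", oidAndGlob[0], "matches:", glob.Glob(strings.ToLower(oidAndGlob[1]), hexValue))
}
```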
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github @@ -8,12 +8,14 @@ import ( "net/url" "github.com/google/go-github/github" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" "golang.org/x/oauth2" ) +const operationPrefixGithub = "github" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { @@ -31,6 +33,32 @@ func Backend() *backend { DefaultKey: "default", } + teamMapPaths := b.TeamMap.Paths() + + teamMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "teams", + } + teamMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "team-mapping", + } + teamMapPaths[0].Operations = map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: teamMapPaths[0].Callbacks[logical.ListOperation], + Summary: teamMapPaths[0].HelpSynopsis, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: teamMapPaths[0].Callbacks[logical.ReadOperation], + Summary: teamMapPaths[0].HelpSynopsis, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "list", + OperationSuffix: "teams2", // The ReadOperation is redundant with the ListOperation + }, + }, + } + teamMapPaths[0].Callbacks = nil + b.UserMap = &framework.PolicyMap{ PathMap: framework.PathMap{ Name: "users", @@ -38,7 +66,33 @@ func Backend() *backend { DefaultKey: "default", } - allPaths := append(b.TeamMap.Paths(), b.UserMap.Paths()...) + userMapPaths := b.UserMap.Paths() + + userMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "users", + } + userMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "user-mapping", + } + userMapPaths[0].Operations = map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: userMapPaths[0].Callbacks[logical.ListOperation], + Summary: userMapPaths[0].HelpSynopsis, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: userMapPaths[0].Callbacks[logical.ReadOperation], + Summary: userMapPaths[0].HelpSynopsis, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "list", + OperationSuffix: "users2", // The ReadOperation is redundant with the ListOperation + }, + }, + } + userMapPaths[0].Callbacks = nil + + allPaths := append(teamMapPaths, userMapPaths...) b.Backend = &framework.Backend{ Help: backendHelp, diff --git a/builtin/credential/github/backend_test.go b/builtin/credential/github/backend_test.go index 6ea08ee58134..4f3dee078131 100644 --- a/builtin/credential/github/backend_test.go +++ b/builtin/credential/github/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github diff --git a/builtin/credential/github/cli.go b/builtin/credential/github/cli.go index d40f1b56d9e7..177433bde49b 100644 --- a/builtin/credential/github/cli.go +++ b/builtin/credential/github/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
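The team/user map rewiring above follows the pattern used throughout this change: legacy `Callbacks` are lifted into `Operations` so that each verb can carry its own `DisplayAttrs` for OpenAPI and UI generation. A condensed sketch of the same migration on a hypothetical `example` path:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

type backend struct{ *framework.Backend }

func (b *backend) pathExampleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	return &logical.Response{Data: map[string]interface{}{"ok": true}}, nil
}

// Legacy style: a bare Callbacks map, no per-operation metadata.
func pathExampleLegacy(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "example",
		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.ReadOperation: b.pathExampleRead,
		},
	}
}

// Migrated style: Operations lets each verb carry DisplayAttrs.
func pathExample(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "example",
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathExampleRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb: "read",
				},
			},
		},
	}
}

func main() {
	b := &backend{}
	fmt.Println(pathExampleLegacy(b).Pattern, pathExample(b).Pattern)
}
```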
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github diff --git a/builtin/credential/github/cmd/github/main.go b/builtin/credential/github/cmd/github/main.go index 499469a0f681..40a3a0002bfb 100644 --- a/builtin/credential/github/cmd/github/main.go +++ b/builtin/credential/github/cmd/github/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go index 0b1721b15bdb..abe78760fc1d 100644 --- a/builtin/credential/github/path_config.go +++ b/builtin/credential/github/path_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github @@ -20,6 +20,11 @@ import ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + }, + Fields: map[string]*framework.FieldSchema{ "organization": { Type: framework.TypeString, @@ -52,9 +57,20 @@ API-compatible authentication server.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigWrite, - logical.ReadOperation: b.pathConfigRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationVerb: "configure", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, }, } diff --git a/builtin/credential/github/path_config_test.go b/builtin/credential/github/path_config_test.go index 2f592b21f7b8..19338ff44d23 100644 --- a/builtin/credential/github/path_config_test.go +++ b/builtin/credential/github/path_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go index d802641e237c..181076a6587f 100644 --- a/builtin/credential/github/path_login.go +++ b/builtin/credential/github/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github @@ -19,6 +19,12 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "token": { Type: framework.TypeString, diff --git a/builtin/credential/github/path_login_test.go b/builtin/credential/github/path_login_test.go index 282e3fa9401d..cfc47a984345 100644 --- a/builtin/credential/github/path_login_test.go +++ b/builtin/credential/github/path_login_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package github diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index e9654b8b4921..a649bbbde49e 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
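The github login path above accepts a single `token` field. A minimal sketch of logging in through the Go client; the default `github` mount path and the `GITHUB_TOKEN` environment variable are assumptions:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Exchange a GitHub personal access token for a Vault token.
	secret, err := client.Logical().Write("auth/github/login", map[string]interface{}{
		"token": os.Getenv("GITHUB_TOKEN"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("vault token:", secret.Auth.ClientToken)
}
```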
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap @@ -7,14 +7,20 @@ import ( "context" "fmt" "strings" + "sync" + "github.com/hashicorp/cap/ldap" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/ldaputil" "github.com/hashicorp/vault/sdk/logical" ) -const errUserBindFailed = `ldap operation failed: failed to bind as user` +const ( + operationPrefixLDAP = "ldap" + errUserBindFailed = "ldap operation failed: failed to bind as user" + defaultPasswordLength = 64 // length to use for configured root password on rotations by default +) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() @@ -46,6 +52,7 @@ func Backend() *backend { pathUsers(&b), pathUsersList(&b), pathLogin(&b), + pathConfigRotateRoot(&b), }, AuthRenew: b.pathLoginRenew, @@ -57,6 +64,8 @@ func Backend() *backend { type backend struct { *framework.Backend + + mu sync.RWMutex } func (b *backend) Login(ctx context.Context, req *logical.Request, username string, password string, usernameAsAlias bool) (string, []string, *logical.Response, []string, error) { @@ -72,82 +81,25 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return "", nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil } - ldapClient := ldaputil.Client{ - Logger: b.Logger(), - LDAP: ldaputil.NewLDAP(), - } - - c, err := ldapClient.DialLDAP(cfg.ConfigEntry) + ldapClient, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg.ConfigEntry)) if err != nil { return "", nil, logical.ErrorResponse(err.Error()), nil, nil } - if c == nil { - return "", nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil, nil - } // Clean connection - defer c.Close() - - userBindDN, err := ldapClient.GetUserBindDN(cfg.ConfigEntry, c, username) - if err != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("error getting user bind DN", "error", err) - } - return "", nil, logical.ErrorResponse(errUserBindFailed), nil, nil - } - - if b.Logger().IsDebug() { - b.Logger().Debug("user binddn fetched", "username", username, "binddn", userBindDN) - } - - // Try to bind as the login user. This is where the actual authentication takes place. - if len(password) > 0 { - err = c.Bind(userBindDN, password) - } else { - err = c.UnauthenticatedBind(userBindDN) - } - if err != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("ldap bind failed", "error", err) - } - return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials - } + defer ldapClient.Close(ctx) - // We re-bind to the BindDN if it's defined because we assume - // the BindDN should be the one to search, not the user logging in. 
- if cfg.BindDN != "" && cfg.BindPassword != "" { - if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("error while attempting to re-bind with the BindDN User", "error", err) - } - return "", nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, logical.ErrInvalidCredentials - } - if b.Logger().IsDebug() { - b.Logger().Debug("re-bound to original binddn") - } - } - - userDN, err := ldapClient.GetUserDN(cfg.ConfigEntry, c, userBindDN, username) + c, err := ldapClient.Authenticate(ctx, username, password, ldap.WithGroups(), ldap.WithUserAttributes()) if err != nil { - return "", nil, logical.ErrorResponse(err.Error()), nil, nil - } - - if cfg.AnonymousGroupSearch { - c, err = ldapClient.DialLDAP(cfg.ConfigEntry) - if err != nil { - return "", nil, logical.ErrorResponse("ldap operation failed: failed to connect to LDAP server"), nil, nil + if strings.Contains(err.Error(), "discovery of user bind DN failed") || + strings.Contains(err.Error(), "unable to bind user") { + return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials } - defer c.Close() // Defer closing of this connection as the deferal above closes the other defined connection - } - ldapGroups, err := ldapClient.GetLdapGroups(cfg.ConfigEntry, c, userDN, username) - if err != nil { return "", nil, logical.ErrorResponse(err.Error()), nil, nil } - if b.Logger().IsDebug() { - b.Logger().Debug("groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups) - } + ldapGroups := c.Groups ldapResponse := &logical.Response{ Data: map[string]interface{}{}, } @@ -158,6 +110,10 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri ldapResponse.AddWarning(errString) } + for _, warning := range c.Warnings { + ldapResponse.AddWarning(string(warning)) + } + var allGroups []string canonicalUsername := username cs := *cfg.CaseSensitiveNames @@ -202,13 +158,11 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return username, policies, ldapResponse, allGroups, nil } - entityAliasAttribute, err := ldapClient.GetUserAliasAttributeValue(cfg.ConfigEntry, c, username) - if err != nil { - return "", nil, logical.ErrorResponse(err.Error()), nil, nil - } - if entityAliasAttribute == "" { + userAttrValues := c.UserAttributes[cfg.UserAttr] + if len(userAttrValues) == 0 { return "", nil, logical.ErrorResponse("missing entity alias attribute value"), nil, nil } + entityAliasAttribute := userAttrValues[0] return entityAliasAttribute, policies, ldapResponse, allGroups, nil } diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index cd4775582704..c791cb4cf795 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
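The rewritten `Login` above delegates bind-DN discovery, the user bind, and the group search to a single `Authenticate` call on the `hashicorp/cap` LDAP client. A standalone sketch of using that client directly; the server URL, the DNs, and the exact `ClientConfig` field set should all be treated as assumptions inferred from this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/cap/ldap"
)

func main() {
	ctx := context.Background()

	// Server URL and DNs are invented placeholders.
	client, err := ldap.NewClient(ctx, &ldap.ClientConfig{
		URLs:    []string{"ldap://127.0.0.1:389"},
		UserDN:  "ou=people,dc=example,dc=com",
		GroupDN: "ou=groups,dc=example,dc=com",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close(ctx)

	// One call performs the bind-DN discovery, user bind, and group
	// search that the old hand-rolled flow did step by step.
	result, err := client.Authenticate(ctx, "hermes", "hermes",
		ldap.WithGroups(), ldap.WithUserAttributes())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("groups:", result.Groups)
	fmt.Println("user attributes:", result.UserAttributes)
}
```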
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap @@ -267,7 +267,7 @@ func TestLdapAuthBackend_CaseSensitivity(t *testing.T) { } } - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() configReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -313,7 +313,7 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) { var err error b, storage := createBackendWithStorage(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() configReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -418,9 +418,77 @@ func factory(t *testing.T) logical.Backend { return b } +// TestBackend_LoginRegression_AnonBind is a test for the regression reported in +// https://github.com/hashicorp/vault/issues/26183. +func TestBackend_LoginRegression_AnonBind(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "master") + cfg.AnonymousGroupSearch = true + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + // Map Admin_staff group (from LDAP server) with foo policy + testAccStepGroup(t, "admin_staff", "foo"), + + // Map engineers group (local) with bar policy + testAccStepGroup(t, "engineers", "bar"), + + // Map hermes conrad user with local engineers group + testAccStepUser(t, "hermes conrad", "engineers"), + + // Authenticate + testAccStepLogin(t, "hermes conrad", "hermes"), + + // Verify both groups mappings can be listed back + testAccStepGroupList(t, []string{"engineers", "admin_staff"}), + + // Verify user mapping can be listed back + testAccStepUserList(t, []string{"hermes conrad"}), + }, + }) +} + +// TestBackend_LoginRegression_UserAttr is a test for the regression reported in +// https://github.com/hashicorp/vault/issues/26171. +// Vault relies on case insensitive user attribute keys for mapping user +// attributes to entity alias metadata. 
+func TestBackend_LoginRegression_UserAttr(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "master") + cfg.UserAttr = "givenName" + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + // Map Admin_staff group (from LDAP server) with foo policy + testAccStepGroup(t, "admin_staff", "foo"), + + // Map engineers group (local) with bar policy + testAccStepGroup(t, "engineers", "bar"), + + // Map hermes conrad user with local engineers group + testAccStepUser(t, "hermes", "engineers"), + + // Authenticate + testAccStepLogin(t, "hermes", "hermes"), + + // Verify both groups mappings can be listed back + testAccStepGroupList(t, []string{"engineers", "admin_staff"}), + + // Verify user mapping can be listed back + testAccStepUserList(t, []string{"hermes"}), + }, + }) +} + func TestBackend_basic(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() logicaltest.Test(t, logicaltest.TestCase{ @@ -450,7 +518,7 @@ func TestBackend_basic(t *testing.T) { func TestBackend_basic_noPolicies(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() logicaltest.Test(t, logicaltest.TestCase{ @@ -468,7 +536,7 @@ func TestBackend_basic_noPolicies(t *testing.T) { func TestBackend_basic_group_noPolicies(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() logicaltest.Test(t, logicaltest.TestCase{ @@ -489,7 +557,7 @@ func TestBackend_basic_group_noPolicies(t *testing.T) { func TestBackend_basic_authbind(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() logicaltest.Test(t, logicaltest.TestCase{ @@ -506,7 +574,7 @@ func TestBackend_basic_authbind(t *testing.T) { func TestBackend_basic_authbind_userfilter(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() // userattr not used in the userfilter should result in a warning in the response @@ -649,7 +717,7 @@ func TestBackend_basic_authbind_userfilter(t *testing.T) { func TestBackend_basic_authbind_metadata_name(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() cfg.UserAttr = "cn" @@ -714,7 +782,7 @@ func addUPNAttributeToLDAPSchemaAndUser(t *testing.T, cfg *ldaputil.ConfigEntry, func TestBackend_basic_discover(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() logicaltest.Test(t, logicaltest.TestCase{ @@ -731,7 +799,7 @@ func TestBackend_basic_discover(t *testing.T) { func TestBackend_basic_nogroupdn(t *testing.T) { b := factory(t) - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() logicaltest.Test(t, logicaltest.TestCase{ @@ -821,18 +889,20 @@ func testAccStepConfigUrl(t *testing.T, cfg *ldaputil.ConfigEntry) logicaltest.T Operation: logical.UpdateOperation, Path: "config", Data: map[string]interface{}{ - 
"url": cfg.Url, - "userattr": cfg.UserAttr, - "userdn": cfg.UserDN, - "userfilter": cfg.UserFilter, - "groupdn": cfg.GroupDN, - "groupattr": cfg.GroupAttr, - "binddn": cfg.BindDN, - "bindpass": cfg.BindPassword, - "case_sensitive_names": true, - "token_policies": "abc,xyz", - "request_timeout": cfg.RequestTimeout, - "username_as_alias": cfg.UsernameAsAlias, + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "userfilter": cfg.UserFilter, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "anonymous_group_search": cfg.AnonymousGroupSearch, + "case_sensitive_names": true, + "token_policies": "abc,xyz", + "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, + "username_as_alias": cfg.UsernameAsAlias, }, } } @@ -854,6 +924,7 @@ func testAccStepConfigUrlWithAuthBind(t *testing.T, cfg *ldaputil.ConfigEntry) l "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -874,6 +945,7 @@ func testAccStepConfigUrlWithDiscover(t *testing.T, cfg *ldaputil.ConfigEntry) l "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -891,6 +963,7 @@ func testAccStepConfigUrlNoGroupDN(t *testing.T, cfg *ldaputil.ConfigEntry) logi "discoverdn": true, "case_sensitive_names": true, "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -911,6 +984,7 @@ func testAccStepConfigUrlWarningCheck(t *testing.T, cfg *ldaputil.ConfigEntry, o "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, Check: func(response *logical.Response) error { if len(response.Warnings) == 0 { @@ -1175,7 +1249,7 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { ctx := context.Background() - cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + cleanup, cfg := ldap.PrepareTestContainer(t, "master") defer cleanup() configReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -1192,6 +1266,8 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { "token_period": "5m", "token_explicit_max_ttl": "24h", "request_timeout": cfg.RequestTimeout, + "max_page_size": cfg.MaximumPageSize, + "connection_timeout": cfg.ConnectionTimeout, }, Storage: storage, Connection: &logical.Connection{}, @@ -1233,8 +1309,10 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { CaseSensitiveNames: falseBool, UsePre111GroupCNBehavior: new(bool), RequestTimeout: cfg.RequestTimeout, + ConnectionTimeout: cfg.ConnectionTimeout, UsernameAsAlias: false, DerefAliases: "never", + MaximumPageSize: 1000, }, } diff --git a/builtin/credential/ldap/cli.go b/builtin/credential/ldap/cli.go index 772603434940..f7f4a63156d3 100644 --- a/builtin/credential/ldap/cli.go +++ b/builtin/credential/ldap/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap diff --git a/builtin/credential/ldap/cmd/ldap/main.go b/builtin/credential/ldap/cmd/ldap/main.go index 2dcb802e2092..8594cc527e76 100644 --- a/builtin/credential/ldap/cmd/ldap/main.go +++ b/builtin/credential/ldap/cmd/ldap/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go index 1497f918cc49..e24d04b295c7 100644 --- a/builtin/credential/ldap/path_config.go +++ b/builtin/credential/ldap/path_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap @@ -19,22 +19,41 @@ const userFilterWarning = "userfilter configured does not consider userattr and func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, - Fields: ldaputil.ConfigFields(), - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRead, - logical.UpdateOperation: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + Action: "Configure", + }, + + Fields: ldaputil.ConfigFields(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "auth-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure-auth", + }, + }, }, HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Configure", - }, } tokenutil.AddTokenFields(p.Fields) p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." + + p.Fields["password_policy"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Password policy to use to rotate the root password", + } + return p } @@ -105,6 +124,9 @@ func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldapConfig } func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + b.mu.RLock() + defer b.mu.RUnlock() + cfg, err := b.Config(ctx, req) if err != nil { return nil, err @@ -115,6 +137,7 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f data := cfg.PasswordlessMap() cfg.PopulateTokenData(data) + data["password_policy"] = cfg.PasswordPolicy resp := &logical.Response{ Data: data, @@ -151,6 +174,9 @@ func (b *backend) checkConfigUserFilter(cfg *ldapConfigEntry) []string { } func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + b.mu.Lock() + defer b.mu.Unlock() + cfg, err := b.Config(ctx, req) if err != nil { return nil, err @@ -181,6 +207,10 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d * return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } + if passwordPolicy, ok := d.GetOk("password_policy"); ok { + cfg.PasswordPolicy = passwordPolicy.(string) + } + entry, err := logical.StorageEntryJSON("config", cfg) if err != nil { return nil, err @@ -221,6 +251,8 @@ func (b *backend) getConfigFieldData() (*framework.FieldData, error) { type ldapConfigEntry struct { tokenutil.TokenParams *ldaputil.ConfigEntry + + PasswordPolicy string `json:"password_policy"` } const pathConfigHelpSyn = ` diff --git a/builtin/credential/ldap/path_config_rotate_root.go b/builtin/credential/ldap/path_config_rotate_root.go 
new file mode 100644
index 000000000000..e095bc35036b
--- /dev/null
+++ b/builtin/credential/ldap/path_config_rotate_root.go
@@ -0,0 +1,113 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package ldap
+
+import (
+	"context"
+
+	"github.com/go-ldap/ldap/v3"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/base62"
+	"github.com/hashicorp/vault/sdk/helper/ldaputil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathConfigRotateRoot(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config/rotate-root",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixLDAP,
+			OperationVerb:   "rotate",
+			OperationSuffix: "root-credentials",
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback:                    b.pathConfigRotateRootUpdate,
+				ForwardPerformanceSecondary: true,
+				ForwardPerformanceStandby:   true,
+			},
+		},
+
+		HelpSynopsis:    pathConfigRotateRootHelpSyn,
+		HelpDescription: pathConfigRotateRootHelpDesc,
+	}
+}
+
+func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	// Lock the backend's state (really just the config state) for mutation.
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	cfg, err := b.Config(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return logical.ErrorResponse("attempted to rotate root on an undefined config"), nil
+	}
+
+	u, p := cfg.BindDN, cfg.BindPassword
+	if u == "" || p == "" {
+		return logical.ErrorResponse("auth is not using authenticated search, no root to rotate"), nil
+	}
+
+	// Build an LDAP client from the stored configuration.
+	client := ldaputil.Client{
+		Logger: b.Logger(),
+		LDAP:   ldaputil.NewLDAP(),
+	}
+
+	conn, err := client.DialLDAP(cfg.ConfigEntry)
+	if err != nil {
+		return nil, err
+	}
+
+	err = conn.Bind(u, p)
+	if err != nil {
+		return nil, err
+	}
+
+	lreq := &ldap.ModifyRequest{
+		DN: cfg.BindDN,
+	}
+
+	var newPassword string
+	if cfg.PasswordPolicy != "" {
+		newPassword, err = b.System().GeneratePasswordFromPolicy(ctx, cfg.PasswordPolicy)
+	} else {
+		newPassword, err = base62.Random(defaultPasswordLength)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	lreq.Replace("userPassword", []string{newPassword})
+
+	err = conn.Modify(lreq)
+	if err != nil {
+		return nil, err
+	}
+	// Update the stored config with the new password.
+	cfg.BindPassword = newPassword
+	entry, err := logical.StorageEntryJSON("config", cfg)
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		// NOTE: the LDAP password has already been changed at this point; a failed write leaves the stored bindpass stale.
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+const pathConfigRotateRootHelpSyn = `
+Request to rotate the LDAP credentials used by Vault
+`
+
+const pathConfigRotateRootHelpDesc = `
+This path attempts to rotate the LDAP bindpass used by Vault for this mount.
+`
diff --git a/builtin/credential/ldap/path_config_rotate_root_test.go b/builtin/credential/ldap/path_config_rotate_root_test.go
new file mode 100644
index 000000000000..65073472ca00
--- /dev/null
+++ b/builtin/credential/ldap/path_config_rotate_root_test.go
@@ -0,0 +1,69 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package ldap
+
+import (
+	"context"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/vault/helper/testhelpers/ldap"
+	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// TestRotateRoot relies on a Docker LDAP server with a suitable person object (cn=admin,dc=planetexpress,dc=com)
+// and bind password "admin". `PrepareTestContainer` sets this up for us; see backend_test for more details.
+func TestRotateRoot(t *testing.T) {
+	if os.Getenv(logicaltest.TestEnvVar) == "" {
+		t.Skip("skipping rotate root tests because VAULT_ACC is unset")
+	}
+	ctx := context.Background()
+
+	b, store := createBackendWithStorage(t)
+	cleanup, cfg := ldap.PrepareTestContainer(t, "latest")
+	defer cleanup()
+	// set up auth config
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config",
+		Storage:   store,
+		Data: map[string]interface{}{
+			"url":      cfg.Url,
+			"binddn":   cfg.BindDN,
+			"bindpass": cfg.BindPassword,
+			"userdn":   cfg.UserDN,
+		},
+	}
+
+	resp, err := b.HandleRequest(ctx, req)
+	if err != nil {
+		t.Fatalf("failed to initialize ldap auth config: %s", err)
+	}
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to initialize ldap auth config: %s", resp.Data["error"])
+	}
+
+	req = &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/rotate-root",
+		Storage:   store,
+	}
+
+	_, err = b.HandleRequest(ctx, req)
+	if err != nil {
+		t.Fatalf("failed to rotate password: %s", err)
+	}
+
+	newCFG, err := b.Config(ctx, req)
+	if err != nil {
+		t.Fatalf("failed to read back config: %s", err)
+	}
+	if newCFG.BindDN != cfg.BindDN {
+		t.Fatalf("a value in config that should have stayed the same changed: %s", cfg.BindDN)
+	}
+	if newCFG.BindPassword == cfg.BindPassword {
+		t.Fatalf("the password should have changed, but it didn't")
+	}
+}
diff --git a/builtin/credential/ldap/path_groups.go b/builtin/credential/ldap/path_groups.go
index 5908d1b1424f..645b6428fd8e 100644
--- a/builtin/credential/ldap/path_groups.go
+++ b/builtin/credential/ldap/path_groups.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
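A quick way to exercise the new rotate-root endpoint end to end is through the standard Vault API client (`github.com/hashicorp/vault/api`). A minimal sketch follows; the `auth/ldap` mount path and the `ldap-root` policy name are illustrative assumptions, not part of this change:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Optional: point the mount at a password policy; otherwise rotate-root
	// falls back to a random base62 password of defaultPasswordLength.
	if _, err := client.Logical().Write("auth/ldap/config", map[string]interface{}{
		"password_policy": "ldap-root", // assumed to already exist
	}); err != nil {
		log.Fatal(err)
	}

	// Trigger the rotation. On success the handler returns no data: the new
	// bindpass is stored server-side and never surfaced to the caller.
	if _, err := client.Logical().Write("auth/ldap/config/rotate-root", nil); err != nil {
		log.Fatal(err)
	}
}
```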
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap @@ -16,22 +16,33 @@ func pathGroupsList(b *backend) *framework.Path { return &framework.Path{ Pattern: "groups/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "groups", + Navigation: true, + ItemType: "Group", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathGroupList, }, HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "Group", - }, } } func pathGroups(b *backend) *framework.Path { return &framework.Path{ Pattern: `groups/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "group", + Action: "Create", + ItemType: "Group", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -41,6 +52,9 @@ func pathGroups(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated to the group.", + }, }, }, @@ -52,10 +66,6 @@ func pathGroups(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "Group", - }, } } diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go index 84f68ca996d1..a1c28dea02b8 100644 --- a/builtin/credential/ldap/path_login.go +++ b/builtin/credential/ldap/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap @@ -16,6 +16,12 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -77,17 +83,8 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew password := d.Get("password").(string) effectiveUsername, policies, resp, groupNames, err := b.Login(ctx, req, username, password, cfg.UsernameAsAlias) - // Handle an internal error - if err != nil { - return nil, err - } - if resp != nil { - // Handle a logical error - if resp.IsError() { - return resp, nil - } - } else { - resp = &logical.Response{} + if err != nil || (resp != nil && resp.IsError()) { + return resp, err } auth := &logical.Auth{ diff --git a/builtin/credential/ldap/path_users.go b/builtin/credential/ldap/path_users.go index 32e4e290bdc8..55326f640862 100644 --- a/builtin/credential/ldap/path_users.go +++ b/builtin/credential/ldap/path_users.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap @@ -17,22 +17,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -42,11 +53,17 @@ func pathUsers(b *backend) *framework.Path { "groups": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of additional groups associated with the user.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of additional groups associated with the user.", + }, }, "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated with the user.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated with the user.", + }, }, }, @@ -58,10 +75,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } } diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go index a71cad22e59c..96507f7879c1 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta @@ -18,8 +18,9 @@ import ( ) const ( - mfaPushMethod = "push" - mfaTOTPMethod = "token:software:totp" + operationPrefixOkta = "okta" + mfaPushMethod = "push" + mfaTOTPMethod = "token:software:totp" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go index 85642e802a49..b347524656a8 100644 --- a/builtin/credential/okta/backend_test.go +++ b/builtin/credential/okta/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta diff --git a/builtin/credential/okta/cli.go b/builtin/credential/okta/cli.go index df252960a2c2..faa7f86f2faa 100644 --- a/builtin/credential/okta/cli.go +++ b/builtin/credential/okta/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta diff --git a/builtin/credential/okta/cmd/okta/main.go b/builtin/credential/okta/cmd/okta/main.go index e28b34a016c8..2b6c3e9496d8 100644 --- a/builtin/credential/okta/cmd/okta/main.go +++ b/builtin/credential/okta/cmd/okta/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go index 7ed38e93b14b..6bdb241b2d2e 100644 --- a/builtin/credential/okta/path_config.go +++ b/builtin/credential/okta/path_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta @@ -27,6 +27,12 @@ const ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + Action: "Configure", + }, + Fields: map[string]*framework.FieldSchema{ "organization": { Type: framework.TypeString, @@ -83,18 +89,30 @@ func pathConfig(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRead, - logical.CreateOperation: b.pathConfigWrite, - logical.UpdateOperation: b.pathConfigWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, }, ExistenceCheck: b.pathConfigExistenceCheck, HelpSynopsis: pathConfigHelp, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Configure", - }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/okta/path_groups.go b/builtin/credential/okta/path_groups.go index b7b6ac7489e2..9ae9826d309f 100644 --- a/builtin/credential/okta/path_groups.go +++ b/builtin/credential/okta/path_groups.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta @@ -16,22 +16,33 @@ func pathGroupsList(b *backend) *framework.Path { return &framework.Path{ Pattern: "groups/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "groups", + Navigation: true, + ItemType: "Group", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathGroupList, }, HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "Group", - }, } } func pathGroups(b *backend) *framework.Path { return &framework.Path{ Pattern: `groups/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "group", + Action: "Create", + ItemType: "Group", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -41,6 +52,9 @@ func pathGroups(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated to the group.", + }, }, }, @@ -52,10 +66,6 @@ func pathGroups(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "Group", - }, } } diff --git a/builtin/credential/okta/path_groups_test.go b/builtin/credential/okta/path_groups_test.go index 8e4ba8cc2d5d..f8fac1d2fed1 100644 --- a/builtin/credential/okta/path_groups_test.go +++ b/builtin/credential/okta/path_groups_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta @@ -10,7 +10,6 @@ import ( "time" "github.com/go-test/deep" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index 2e1c670a5397..5b86545d20ed 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta @@ -23,6 +23,12 @@ const ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -192,6 +198,10 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: `verify/(?P.+)`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationVerb: "verify", + }, Fields: map[string]*framework.FieldSchema{ "nonce": { Type: framework.TypeString, diff --git a/builtin/credential/okta/path_users.go b/builtin/credential/okta/path_users.go index 7f464d2a1145..d66a5ed463e3 100644 --- a/builtin/credential/okta/path_users.go +++ b/builtin/credential/okta/path_users.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package okta @@ -14,22 +14,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -55,10 +66,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } } diff --git a/builtin/credential/radius/backend.go b/builtin/credential/radius/backend.go index deec6a809ce7..40e680ebcc27 100644 --- a/builtin/credential/radius/backend.go +++ b/builtin/credential/radius/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package radius @@ -10,6 +10,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixRadius = "radius" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/credential/radius/backend_test.go b/builtin/credential/radius/backend_test.go index 4a3d09c63995..3c885008422a 100644 --- a/builtin/credential/radius/backend_test.go +++ b/builtin/credential/radius/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package radius @@ -8,13 +8,14 @@ import ( "fmt" "os" "reflect" + "runtime" "strconv" "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/logical" ) @@ -30,6 +31,10 @@ const ( ) func prepareRadiusTestContainer(t *testing.T) (func(), string, int) { + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if os.Getenv(envRadiusRadiusHost) != "" { port, _ := strconv.Atoi(os.Getenv(envRadiusPort)) return func() {}, os.Getenv(envRadiusRadiusHost), port diff --git a/builtin/credential/radius/cmd/radius/main.go b/builtin/credential/radius/cmd/radius/main.go index b3045a31a1a7..9adc5bfc78d2 100644 --- a/builtin/credential/radius/cmd/radius/main.go +++ b/builtin/credential/radius/cmd/radius/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go index eb915b76e92f..1ed33ede6c10 100644 --- a/builtin/credential/radius/path_config.go +++ b/builtin/credential/radius/path_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package radius @@ -15,6 +15,12 @@ import ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + Action: "Configure", + }, + Fields: map[string]*framework.FieldSchema{ "host": { Type: framework.TypeString, @@ -38,9 +44,10 @@ func pathConfig(b *backend) *framework.Path { "unregistered_user_policies": { Type: framework.TypeString, Default: "", - Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: empty)", + Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Policies for unregistered users", + Name: "Policies for unregistered users", + Description: "List of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", }, }, "dial_timeout": { @@ -80,17 +87,29 @@ func pathConfig(b *backend) *framework.Path { ExistenceCheck: b.configExistenceCheck, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRead, - logical.CreateOperation: b.pathConfigCreateUpdate, - logical.UpdateOperation: b.pathConfigCreateUpdate, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, }, HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Configure", - }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/radius/path_login.go b/builtin/credential/radius/path_login.go index 929e41734f9d..b38e90cdf2ae 100644 --- a/builtin/credential/radius/path_login.go +++ b/builtin/credential/radius/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package radius @@ -11,18 +11,24 @@ import ( "strings" "time" - "layeh.com/radius" - . "layeh.com/radius/rfc2865" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" + "layeh.com/radius" + . "layeh.com/radius/rfc2865" ) func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login" + framework.OptionalParamRegex("urlusername"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationVerb: "login", + OperationSuffix: "|with-username", + }, + Fields: map[string]*framework.FieldSchema{ "urlusername": { Type: framework.TypeString, diff --git a/builtin/credential/radius/path_users.go b/builtin/credential/radius/path_users.go index 948513b381b1..21ebd262f0d7 100644 --- a/builtin/credential/radius/path_users.go +++ b/builtin/credential/radius/path_users.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package radius @@ -17,22 +17,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -42,6 +53,9 @@ func pathUsers(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the user.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated to the user.", + }, }, }, @@ -56,10 +70,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } } diff --git a/builtin/credential/token/cli.go b/builtin/credential/token/cli.go index 853d6eade7c4..f3ecd97058cb 100644 --- a/builtin/credential/token/cli.go +++ b/builtin/credential/token/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package token diff --git a/builtin/credential/userpass/backend.go b/builtin/credential/userpass/backend.go index efdc178fe3b3..e361f08ca48b 100644 --- a/builtin/credential/userpass/backend.go +++ b/builtin/credential/userpass/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass @@ -10,6 +10,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixUserpass = "userpass" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go index 3df8cfa2a9bb..c1b6547b4cc6 100644 --- a/builtin/credential/userpass/backend_test.go +++ b/builtin/credential/userpass/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass @@ -12,7 +12,7 @@ import ( "time" "github.com/go-test/deep" - sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/go-sockaddr" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/helper/tokenutil" diff --git a/builtin/credential/userpass/cli.go b/builtin/credential/userpass/cli.go index e100ae9f244e..ab1f138e7806 100644 --- a/builtin/credential/userpass/cli.go +++ b/builtin/credential/userpass/cli.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass diff --git a/builtin/credential/userpass/cmd/userpass/main.go b/builtin/credential/userpass/cmd/userpass/main.go index 4747a56f4409..d8dfed7f5e10 100644 --- a/builtin/credential/userpass/cmd/userpass/main.go +++ b/builtin/credential/userpass/cmd/userpass/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/credential/userpass/path_login.go b/builtin/credential/userpass/path_login.go index 9211a8056d52..37fc7fbde570 100644 --- a/builtin/credential/userpass/path_login.go +++ b/builtin/credential/userpass/path_login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass @@ -19,6 +19,12 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login/" + framework.GenericNameRegex("username"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -61,7 +67,7 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew password := d.Get("password").(string) if password == "" { - return nil, fmt.Errorf("missing password") + return nil, logical.ErrInvalidCredentials } // Get the user and validate auth diff --git a/builtin/credential/userpass/path_user_password.go b/builtin/credential/userpass/path_user_password.go index 7172dadb6910..431028b9cf95 100644 --- a/builtin/credential/userpass/path_user_password.go +++ b/builtin/credential/userpass/path_user_password.go @@ -1,31 +1,64 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass import ( "context" "fmt" - - "golang.org/x/crypto/bcrypt" + "strings" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/bcrypt" +) + +const ( + pathUserPasswordHelpDesc = ` +This endpoint allows resetting the user's password. +` + pathUserPasswordHelpSyn = ` +Reset user's password. +` + + // The name of the username parameter supplied via the API. + paramUsername = "username" + + // The name of the password parameter supplied via the API. + paramPassword = "password" + + // The name of the password hash parameter supplied via the API. + paramPasswordHash = "password_hash" + + // The expected length of any hash generated by bcrypt. 
+ bcryptHashLength = 60 ) func pathUserPassword(b *backend) *framework.Path { return &framework.Path{ - Pattern: "users/" + framework.GenericNameRegex("username") + "/password$", + Pattern: "users/" + framework.GenericNameRegex(paramUsername) + "/password$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "reset", + OperationSuffix: "password", + }, + Fields: map[string]*framework.FieldSchema{ - "username": { + paramUsername: { Type: framework.TypeString, Description: "Username for this user.", }, - "password": { + paramPassword: { Type: framework.TypeString, Description: "Password for this user.", }, + + paramPasswordHash: { + Type: framework.TypeString, + Description: "Pre-hashed password in bcrypt format for this user.", + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -38,7 +71,7 @@ func pathUserPassword(b *backend) *framework.Path { } func (b *backend) pathUserPasswordUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - username := d.Get("username").(string) + username := d.Get(paramUsername).(string) userEntry, err := b.user(ctx, req.Storage, username) if err != nil { @@ -59,24 +92,51 @@ func (b *backend) pathUserPasswordUpdate(ctx context.Context, req *logical.Reque return nil, b.setUser(ctx, req.Storage, username, userEntry) } -func (b *backend) updateUserPassword(req *logical.Request, d *framework.FieldData, userEntry *UserEntry) (error, error) { - password := d.Get("password").(string) - if password == "" { - return fmt.Errorf("missing password"), nil +func (b *backend) updateUserPassword(_ *logical.Request, d *framework.FieldData, userEntry *UserEntry) (error, error) { + password := d.Get(paramPassword).(string) + passwordHash := d.Get(paramPasswordHash).(string) + + var hash []byte + var err error + + switch { + case password != "" && passwordHash != "": + return fmt.Errorf("%q and %q cannot be supplied together", paramPassword, paramPasswordHash), nil + case password == "" && passwordHash == "": + return fmt.Errorf("%q or %q must be supplied", paramPassword, paramPasswordHash), nil + case password != "": + hash, err = bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + case passwordHash != "": + hash, err = parsePasswordHash(passwordHash) } - // Generate a hash of the password - hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + if err != nil { return nil, err } + userEntry.PasswordHash = hash + return nil, nil } -const pathUserPasswordHelpSyn = ` -Reset user's password. -` +// parsePasswordHash is used to parse a password hash that follows the bcrypt standard. +// It examines the prefix of the string supplied to verify it complies with a supported +// version before returning the string in bytes. +func parsePasswordHash(passwordHash string) ([]byte, error) { + var res []byte -const pathUserPasswordHelpDesc = ` -This endpoint allows resetting the user's password. -` + switch { + // All bcrypt hashes should be the same length. + case len(passwordHash) != bcryptHashLength: + return nil, fmt.Errorf("password hash has incorrect length") + // See: https://en.wikipedia.org/wiki/Bcrypt for versioning history. 
+	case strings.HasPrefix(passwordHash, "$2a$"), // $2a$ (non-ASCII character support)
+		strings.HasPrefix(passwordHash, "$2y$"), // $2y$ (PHP fixed)
+		strings.HasPrefix(passwordHash, "$2b$"): // $2b$ (truncation fix)
+		res = []byte(passwordHash)
+	default:
+		return nil, fmt.Errorf("password hash has incorrect prefix")
+	}
+
+	return res, nil
+}
diff --git a/builtin/credential/userpass/path_user_password_test.go b/builtin/credential/userpass/path_user_password_test.go
new file mode 100644
index 000000000000..1501d1215d66
--- /dev/null
+++ b/builtin/credential/userpass/path_user_password_test.go
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package userpass
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// TestUserPass_ParseHash ensures that we correctly validate password hashes that
+// conform to the bcrypt standard based on the prefix of the hash.
+func TestUserPass_ParseHash(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		input                string
+		isErrorExpected      bool
+		expectedErrorMessage string
+	}{
+		"too-short": {
+			input:                "too short",
+			isErrorExpected:      true,
+			expectedErrorMessage: "password hash has incorrect length",
+		},
+		"60-spaces": {
+			input:                "                                                            ",
+			isErrorExpected:      true,
+			expectedErrorMessage: "password hash has incorrect prefix",
+		},
+		"jibberish": {
+			input:                "jibberfishjibberfishjibberfishjibberfishjibberfishjibberfish",
+			isErrorExpected:      true,
+			expectedErrorMessage: "password hash has incorrect prefix",
+		},
+		"non-ascii-prefix": {
+			input:           "$2a$qwertyjibberfishjibberfishjibberfishjibberfishjibberfish",
+			isErrorExpected: false,
+		},
+		"truncation-prefix": {
+			input:           "$2b$qwertyjibberfishjibberfishjibberfishjibberfishjibberfish",
+			isErrorExpected: false,
+		},
+		"php-only-fixed-prefix": {
+			input:           "$2y$qwertyjibberfishjibberfishjibberfishjibberfishjibberfish",
+			isErrorExpected: false,
+		},
+		"php-only-existing": {
+			input:                "$2x$qwertyjibberfishjibberfishjibberfishjibberfishjibberfish",
+			isErrorExpected:      true,
+			expectedErrorMessage: "password hash has incorrect prefix",
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			got, err := parsePasswordHash(tc.input)
+			switch {
+			case tc.isErrorExpected:
+				require.EqualError(t, err, tc.expectedErrorMessage)
+			default:
+				require.NoError(t, err)
+				require.Equal(t, tc.input, string(got))
+			}
+		})
+	}
+}
+
+// TestUserPass_BcryptHashLength ensures that using the bcrypt library to generate
+// a hash from a password always produces the same length.
+func TestUserPass_BcryptHashLength(t *testing.T) {
+	t.Parallel()
+
+	tests := []string{
+		"",
+		" ",
+		"foo",
+		"this is a long password woo",
+	}
+
+	for _, input := range tests {
+		hash, err := bcrypt.GenerateFromPassword([]byte(input), bcrypt.DefaultCost)
+		require.NoError(t, err)
+		require.Len(t, hash, bcryptHashLength)
+	}
+}
diff --git a/builtin/credential/userpass/path_user_policies.go b/builtin/credential/userpass/path_user_policies.go
index 126f1aa999e7..1dd9b9675de4 100644
--- a/builtin/credential/userpass/path_user_policies.go
+++ b/builtin/credential/userpass/path_user_policies.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
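The 60-byte length and `$2a$`/`$2b$`/`$2y$` prefix checks above match what standard bcrypt implementations emit, so a caller can produce a compliant value for the new `password_hash` parameter with stock tooling. A minimal sketch using `golang.org/x/crypto/bcrypt` (the same library the backend uses):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// GenerateFromPassword always yields a 60-byte hash with a version
	// prefix, satisfying both checks in parsePasswordHash.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret!"), bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("len=%d prefix=%s\n", len(hash), hash[:4]) // len=60 prefix=$2a$
}
```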
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass @@ -16,6 +16,13 @@ import ( func pathUserPolicies(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "update", + OperationSuffix: "policies", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -29,6 +36,9 @@ func pathUserPolicies(b *backend) *framework.Path { "token_policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies that will apply to the generated token for this user.", + }, }, }, diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go index a822c017cc31..e4c82d9d017d 100644 --- a/builtin/credential/userpass/path_users.go +++ b/builtin/credential/userpass/path_users.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass @@ -9,7 +9,7 @@ import ( "strings" "time" - sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" @@ -19,29 +19,40 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { p := &framework.Path{ - Pattern: "users/" + framework.GenericNameRegex("username"), + Pattern: "users/" + framework.GenericNameRegex(paramUsername), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ - "username": { + paramUsername: { Type: framework.TypeString, Description: "Username for this user.", }, - "password": { + paramPassword: { Type: framework.TypeString, Description: "Password for this user.", DisplayAttrs: &framework.DisplayAttributes{ @@ -49,6 +60,14 @@ func pathUsers(b *backend) *framework.Path { }, }, + paramPasswordHash: { + Type: framework.TypeString, + Description: "Pre-hashed password in bcrypt format for this user.", + DisplayAttrs: &framework.DisplayAttributes{ + Sensitive: true, + }, + }, + "policies": { Type: framework.TypeCommaStringSlice, Description: tokenutil.DeprecationText("token_policies"), @@ -85,10 +104,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } tokenutil.AddTokenFields(p.Fields) @@ -96,7 +111,7 @@ func pathUsers(b *backend) *framework.Path { } func (b *backend) userExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { - userEntry, err := b.user(ctx, req.Storage, 
d.Get("username").(string)) + userEntry, err := b.user(ctx, req.Storage, d.Get(paramUsername).(string)) if err != nil { return false, err } @@ -156,7 +171,7 @@ func (b *backend) pathUserList(ctx context.Context, req *logical.Request, d *fra } func (b *backend) pathUserDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "user/"+strings.ToLower(d.Get("username").(string))) + err := req.Storage.Delete(ctx, "user/"+strings.ToLower(d.Get(paramUsername).(string))) if err != nil { return nil, err } @@ -165,7 +180,7 @@ func (b *backend) pathUserDelete(ctx context.Context, req *logical.Request, d *f } func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - user, err := b.user(ctx, req.Storage, strings.ToLower(d.Get("username").(string))) + user, err := b.user(ctx, req.Storage, strings.ToLower(d.Get(paramUsername).(string))) if err != nil { return nil, err } @@ -196,7 +211,7 @@ func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *fra } func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - username := strings.ToLower(d.Get("username").(string)) + username := strings.ToLower(d.Get(paramUsername).(string)) userEntry, err := b.user(ctx, req.Storage, username) if err != nil { return nil, err @@ -210,7 +225,7 @@ func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if _, ok := d.GetOk("password"); ok { + if d.Get(paramPassword).(string) != "" || d.Get(paramPasswordHash).(string) != "" { userErr, intErr := b.updateUserPassword(req, d, userEntry) if intErr != nil { return nil, intErr @@ -243,11 +258,18 @@ func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d } func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - password := d.Get("password").(string) - if req.Operation == logical.CreateOperation && password == "" { - return logical.ErrorResponse("missing password"), logical.ErrInvalidRequest + password := d.Get(paramPassword).(string) + passwordHash := d.Get(paramPasswordHash).(string) + + switch { + case password != "" && passwordHash != "": + return logical.ErrorResponse(fmt.Sprintf("%q and %q cannot be supplied together", paramPassword, paramPasswordHash)), logical.ErrInvalidRequest + case password == "" && passwordHash == "" && req.Operation == logical.CreateOperation: + // Password or pre-hashed password are only required on 'create'. + return logical.ErrorResponse(fmt.Sprintf("%q or %q must be supplied", paramPassword, paramPasswordHash)), logical.ErrInvalidRequest + default: + return b.userCreateUpdate(ctx, req, d) } - return b.userCreateUpdate(ctx, req, d) } type UserEntry struct { diff --git a/builtin/credential/userpass/stepwise_test.go b/builtin/credential/userpass/stepwise_test.go index ab797ed200c9..241d7707b73f 100644 --- a/builtin/credential/userpass/stepwise_test.go +++ b/builtin/credential/userpass/stepwise_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package userpass diff --git a/builtin/logical/aws/backend.go b/builtin/logical/aws/backend.go index 34ca5cdc7f93..b33fb1b4d693 100644 --- a/builtin/logical/aws/backend.go +++ b/builtin/logical/aws/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -13,23 +13,27 @@ import ( "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" ) const ( rootConfigPath = "config/root" minAwsUserRollbackAge = 5 * time.Minute + operationPrefixAWS = "aws" + operationPrefixAWSASD = "aws-config" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() + b := Backend(conf) if err := b.Setup(ctx, conf); err != nil { return nil, err } return b, nil } -func Backend() *backend { +func Backend(_ *logical.BackendConfig) *backend { var b backend + b.credRotationQueue = queue.New() b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), @@ -38,7 +42,8 @@ func Backend() *backend { framework.WALPrefix, }, SealWrapStorage: []string{ - "config/root", + rootConfigPath, + pathStaticCreds + "/", }, }, @@ -48,6 +53,8 @@ func Backend() *backend { pathConfigLease(&b), pathRoles(&b), pathListRoles(&b), + pathStaticRoles(&b), + pathStaticCredentials(&b), pathUser(&b), }, @@ -58,7 +65,13 @@ func Backend() *backend { Invalidate: b.invalidate, WALRollback: b.walRollback, WALRollbackMinAge: minAwsUserRollbackAge, - BackendType: logical.TypeLogical, + PeriodicFunc: func(ctx context.Context, req *logical.Request) error { + if b.WriteSafeReplicationState() { + return b.rotateExpiredStaticCreds(ctx, req) + } + return nil + }, + BackendType: logical.TypeLogical, } return &b @@ -77,6 +90,10 @@ type backend struct { // to enable mocking with AWS iface for tests iamClient iamiface.IAMAPI stsClient stsiface.STSAPI + + // the age of a static role's credential is tracked by a priority queue and handled + // by the PeriodicFunc + credRotationQueue *queue.PriorityQueue } const backendHelp = ` @@ -124,7 +141,7 @@ func (b *backend) clientIAM(ctx context.Context, s logical.Storage) (iamiface.IA return b.iamClient, nil } - iamClient, err := nonCachedClientIAM(ctx, s, b.Logger()) + iamClient, err := b.nonCachedClientIAM(ctx, s, b.Logger()) if err != nil { return nil, err } @@ -151,7 +168,7 @@ func (b *backend) clientSTS(ctx context.Context, s logical.Storage) (stsiface.ST return b.stsClient, nil } - stsClient, err := nonCachedClientSTS(ctx, s, b.Logger()) + stsClient, err := b.nonCachedClientSTS(ctx, s, b.Logger()) if err != nil { return nil, err } diff --git a/builtin/logical/aws/backend_test.go b/builtin/logical/aws/backend_test.go index 5e59fdf2eb12..56cd095a3ba7 100644 --- a/builtin/logical/aws/backend_test.go +++ b/builtin/logical/aws/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -19,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" @@ -35,11 +36,28 @@ import ( var initSetup sync.Once +// This looks a bit curious. 
The policy document and the role document act +// as a logical intersection of policies. The role allows ec2:Describe* +// (among other permissions). This policy allows everything BUT +// ec2:DescribeAvailabilityZones. Thus, the logical intersection of the two +// is all ec2:Describe* EXCEPT ec2:DescribeAvailabilityZones, and so the +// describeAZs call should fail +const allowAllButDescribeAzs = `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "NotAction": "ec2:DescribeAvailabilityZones", + "Resource": "*" + } + ] +}` + type mockIAMClient struct { iamiface.IAMAPI } -func (m *mockIAMClient) CreateUser(input *iam.CreateUserInput) (*iam.CreateUserOutput, error) { +func (m *mockIAMClient) CreateUserWithContext(_ aws.Context, input *iam.CreateUserInput, _ ...request.Option) (*iam.CreateUserOutput, error) { return nil, awserr.New("Throttling", "", nil) } @@ -96,7 +114,7 @@ func TestAcceptanceBackend_basicSTS(t *testing.T) { PreCheck: func() { testAccPreCheck(t) createUser(t, userName, accessKey) - createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}) + createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}, nil) // Sleep sometime because AWS is eventually consistent // Both the createUser and createRole depend on this log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...") @@ -122,7 +140,8 @@ func TestAcceptanceBackend_basicSTS(t *testing.T) { }) } -func TestBackend_policyCrud(t *testing.T) { +// TestBackend_policyCRUD tests the CRUD operations for a policy. +func TestBackend_policyCRUD(t *testing.T) { t.Parallel() compacted, err := compactJSON(testDynamoPolicy) if err != nil { @@ -147,7 +166,7 @@ func TestBackend_throttled(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -251,23 +270,32 @@ func getAccountID() (string, error) { return *res.Account, nil } -func createRole(t *testing.T, roleName, awsAccountID string, policyARNs []string) { - const testRoleAssumePolicy = `{ +func createRole(t *testing.T, roleName, awsAccountID string, policyARNs, extraTrustPolicies []string) { + t.Helper() + + trustPolicyStmts := append([]string{ + fmt.Sprintf(` + { + "Effect":"Allow", + "Principal": { + "AWS": "arn:aws:iam::%s:root" + }, + "Action": [ + "sts:AssumeRole", + "sts:SetSourceIdentity" + ] + }`, awsAccountID), + }, + extraTrustPolicies...) 
+ + testRoleAssumePolicy := fmt.Sprintf(`{ "Version": "2012-10-17", "Statement": [ - { - "Effect":"Allow", - "Principal": { - "AWS": "arn:aws:iam::%s:root" - }, - "Action": [ - "sts:AssumeRole", - "sts:SetSourceIdentity" - ] - } +%s ] } -` +`, strings.Join(trustPolicyStmts, ",")) + awsConfig := &aws.Config{ Region: aws.String("us-east-1"), HTTPClient: cleanhttp.DefaultClient(), @@ -277,26 +305,25 @@ func createRole(t *testing.T, roleName, awsAccountID string, policyARNs []string t.Fatal(err) } svc := iam.New(sess) - trustPolicy := fmt.Sprintf(testRoleAssumePolicy, awsAccountID) params := &iam.CreateRoleInput{ - AssumeRolePolicyDocument: aws.String(trustPolicy), + AssumeRolePolicyDocument: aws.String(testRoleAssumePolicy), RoleName: aws.String(roleName), Path: aws.String("/"), } log.Printf("[INFO] AWS CreateRole: %s", roleName) - if _, err := svc.CreateRole(params); err != nil { + output, err := svc.CreateRole(params) + if err != nil { t.Fatalf("AWS CreateRole failed: %v", err) } for _, policyARN := range policyARNs { attachment := &iam.AttachRolePolicyInput{ PolicyArn: aws.String(policyARN), - RoleName: aws.String(roleName), // Required + RoleName: output.Role.RoleName, } _, err = svc.AttachRolePolicy(attachment) - if err != nil { t.Fatalf("AWS AttachRolePolicy failed: %v", err) } @@ -315,21 +342,21 @@ func createUser(t *testing.T, userName string, accessKey *awsAccessKey) { // do anything // 4. Generate API creds to get an actual access key and secret key timebombPolicyTemplate := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Deny", - "Action": "*", - "Resource": "*", - "Condition": { - "DateGreaterThan": { - "aws:CurrentTime": "%s" - } + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "*", + "Resource": "*", + "Condition": { + "DateGreaterThan": { + "aws:CurrentTime": "%s" } } - ] - } - ` + } + ] +} +` validity := time.Duration(2 * time.Hour) expiry := time.Now().Add(validity) timebombPolicy := fmt.Sprintf(timebombPolicyTemplate, expiry.Format(time.RFC3339)) @@ -464,7 +491,6 @@ func deleteTestRole(roleName string) error { log.Printf("[INFO] AWS DeleteRole: %s", roleName) _, err = svc.DeleteRole(params) - if err != nil { log.Printf("[WARN] AWS DeleteRole failed: %v", err) return err @@ -658,7 +684,7 @@ func testAccStepRotateRoot(oldAccessKey *awsAccessKey) logicaltest.TestStep { } } -func testAccStepRead(t *testing.T, path, name string, credentialTests []credentialTestFunc) logicaltest.TestStep { +func testAccStepRead(_ *testing.T, path, name string, credentialTests []credentialTestFunc) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: path + "/" + name, @@ -666,7 +692,7 @@ func testAccStepRead(t *testing.T, path, name string, credentialTests []credenti var d struct { AccessKey string `mapstructure:"access_key"` SecretKey string `mapstructure:"secret_key"` - STSToken string `mapstructure:"security_token"` + STSToken string `mapstructure:"session_token"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err @@ -683,26 +709,28 @@ func testAccStepRead(t *testing.T, path, name string, credentialTests []credenti } } -func testAccStepReadSTSResponse(name string, maximumTTL uint64) logicaltest.TestStep { +func testAccStepReadWithMFA(t *testing.T, path, name, mfaCode string, credentialTests []credentialTestFunc) logicaltest.TestStep { + step := testAccStepRead(t, path, name, credentialTests) + step.Data = map[string]interface{}{ + "mfa_code": mfaCode, + } + + return step +} + +func 
testAccStepReadSTSResponse(name string, maximumTTL time.Duration) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "creds/" + name, Check: func(resp *logical.Response) error { - if resp.Secret != nil { - return fmt.Errorf("bad: STS tokens should return a nil secret, received: %+v", resp.Secret) + if resp.Secret == nil { + return fmt.Errorf("bad: nil Secret returned") } - - if ttl, exists := resp.Data["ttl"]; exists { - ttlVal := ttl.(uint64) - - if ttlVal > maximumTTL { - return fmt.Errorf("bad: ttl of %d greater than maximum of %d", ttl, maximumTTL) - } - - return nil + ttl := resp.Secret.TTL + if ttl > maximumTTL { + return fmt.Errorf("bad: ttl of %d greater than maximum of %d", ttl/time.Second, maximumTTL/time.Second) } - - return fmt.Errorf("response data missing ttl, received: %+v", resp.Data) + return nil }, } } @@ -907,6 +935,9 @@ func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest. "permissions_boundary_arn": "", "iam_groups": []string(nil), "iam_tags": map[string]string(nil), + "mfa_serial_number": "", + "session_tags": map[string]string(nil), + "external_id": "", } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) @@ -1030,6 +1061,7 @@ func TestAcceptanceBackend_iamUserManagedInlinePoliciesGroups(t *testing.T) { "permissions_boundary_arn": "", "iam_groups": []string{groupName}, "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } logicaltest.Test(t, logicaltest.TestCase{ @@ -1074,6 +1106,7 @@ func TestAcceptanceBackend_iamUserGroups(t *testing.T) { "permissions_boundary_arn": "", "iam_groups": []string{group1Name, group2Name}, "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } logicaltest.Test(t, logicaltest.TestCase{ @@ -1103,22 +1136,7 @@ func TestAcceptanceBackend_iamUserGroups(t *testing.T) { func TestAcceptanceBackend_AssumedRoleWithPolicyDoc(t *testing.T) { t.Parallel() roleName := generateUniqueRoleName(t.Name()) - // This looks a bit curious. The policy document and the role document act - // as a logical intersection of policies. The role allows ec2:Describe* - // (among other permissions). This policy allows everything BUT - // ec2:DescribeAvailabilityZones. 
Thus, the logical intersection of the two - // is all ec2:Describe* EXCEPT ec2:DescribeAvailabilityZones, and so the - // describeAZs call should fail - allowAllButDescribeAzs := ` -{ - "Version": "2012-10-17", - "Statement": [{ - "Effect": "Allow", - "NotAction": "ec2:DescribeAvailabilityZones", - "Resource": "*" - }] -} -` + awsAccountID, err := getAccountID() if err != nil { t.Logf("Unable to retrive user via sts:GetCallerIdentity: %#v", err) @@ -1133,7 +1151,7 @@ func TestAcceptanceBackend_AssumedRoleWithPolicyDoc(t *testing.T) { AcceptanceTest: true, PreCheck: func() { testAccPreCheck(t) - createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}) + createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}, nil) // Sleep sometime because AWS is eventually consistent log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...") time.Sleep(10 * time.Second) @@ -1169,7 +1187,7 @@ func TestAcceptanceBackend_AssumedRoleWithPolicyARN(t *testing.T) { AcceptanceTest: true, PreCheck: func() { testAccPreCheck(t) - createRole(t, roleName, awsAccountID, []string{ec2PolicyArn, iamPolicyArn}) + createRole(t, roleName, awsAccountID, []string{ec2PolicyArn, iamPolicyArn}, nil) log.Printf("[WARN] Sleeping for 10 seconds waiting for AWS...") time.Sleep(10 * time.Second) }, @@ -1190,22 +1208,7 @@ func TestAcceptanceBackend_AssumedRoleWithGroups(t *testing.T) { t.Parallel() roleName := generateUniqueRoleName(t.Name()) groupName := generateUniqueGroupName(t.Name()) - // This looks a bit curious. The policy document and the role document act - // as a logical intersection of policies. The role allows ec2:Describe* - // (among other permissions). This policy allows everything BUT - // ec2:DescribeAvailabilityZones. Thus, the logical intersection of the two - // is all ec2:Describe* EXCEPT ec2:DescribeAvailabilityZones, and so the - // describeAZs call should fail - allowAllButDescribeAzs := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "NotAction": "ec2:DescribeAvailabilityZones", - "Resource": "*" - } - ] -}` + awsAccountID, err := getAccountID() if err != nil { t.Logf("Unable to retrive user via sts:GetCallerIdentity: %#v", err) @@ -1221,7 +1224,7 @@ func TestAcceptanceBackend_AssumedRoleWithGroups(t *testing.T) { AcceptanceTest: true, PreCheck: func() { testAccPreCheck(t) - createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}) + createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}, nil) createGroup(t, groupName, allowAllButDescribeAzs, []string{}) // Sleep sometime because AWS is eventually consistent log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...") @@ -1243,6 +1246,62 @@ func TestAcceptanceBackend_AssumedRoleWithGroups(t *testing.T) { }) } +// TestAcceptanceBackend_AssumedRoleWithSessionTags tests that session tags are +// passed to the assumed role. 
+func TestAcceptanceBackend_AssumedRoleWithSessionTags(t *testing.T) {
+	t.Parallel()
+	roleName := generateUniqueRoleName(t.Name())
+	awsAccountID, err := getAccountID()
+	if err != nil {
+		t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+
+	roleARN := fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)
+	roleData := map[string]interface{}{
+		"policy_document": allowAllButDescribeAzs,
+		"role_arns":       []string{roleARN},
+		"credential_type": assumedRoleCred,
+		"session_tags": map[string]string{
+			"foo": "bar",
+			"baz": "qux",
+		},
+	}
+
+	// allowSessionTagsPolicy allows the role to tag the session; it needs to be
+	// included in the trust policy.
+	allowSessionTagsPolicy := fmt.Sprintf(`
+	{
+		"Sid": "AllowPassSessionTagsAndTransitive",
+		"Effect": "Allow",
+		"Action": "sts:TagSession",
+		"Principal": {
+			"AWS": "arn:aws:iam::%s:root"
+		}
+	}
+`, awsAccountID)
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}, []string{allowSessionTagsPolicy})
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+		},
+		Teardown: func() error {
+			return deleteTestRole(roleName)
+		},
+	})
+}
+
 func TestAcceptanceBackend_FederationTokenWithPolicyARN(t *testing.T) {
 	t.Parallel()
 	userName := generateUniqueUserName(t.Name())
@@ -1324,6 +1383,87 @@ func TestAcceptanceBackend_FederationTokenWithGroups(t *testing.T) {
 	})
 }
 
+// TestAcceptanceBackend_SessionToken exercises the session_token credential type
+// end to end.
+func TestAcceptanceBackend_SessionToken(t *testing.T) {
+	t.Parallel()
+	userName := generateUniqueUserName(t.Name())
+	accessKey := &awsAccessKey{}
+
+	roleData := map[string]interface{}{
+		"credential_type": sessionTokenCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createUser(t, userName, accessKey)
+			// Sleep some time because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigWithCreds(t, accessKey),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listDynamoTablesTest}),
+		},
+		Teardown: func() error {
+			return deleteTestUser(accessKey, userName)
+		},
+	})
+}
+
+// Running this test requires a pre-made IAM user that has the necessary access
+// permissions set and an MFA device configured. That device's serial number,
+// along with the other associated values, must be supplied via the environment
+// variables read at the top of the function below.
+// For this reason, the test is currently a manually run-only acceptance test.
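The underlying call for this credential type is sts:GetSessionToken, which is where the serial number and one-time code ultimately land. A minimal sketch, again using aws-sdk-go v1 with an assumed client and illustrative names:

```
// Sketch: session_token credentials backed by an MFA device.
// serial corresponds to the role's mfa_serial_number; code is the TOTP value.
func getSessionTokenWithMFA(ctx context.Context, stsClient *sts.STS, serial, code string, ttlSeconds int64) (*sts.Credentials, error) {
	out, err := stsClient.GetSessionTokenWithContext(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(ttlSeconds), // STS enforces a 900-second minimum
		SerialNumber:    aws.String(serial),
		TokenCode:       aws.String(code),
	})
	if err != nil {
		return nil, fmt.Errorf("GetSessionToken failed: %w", err)
	}
	return out.Credentials, nil
}
```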
+func TestAcceptanceBackend_SessionTokenWithMFA(t *testing.T) {
+	t.Parallel()
+
+	serial, found := os.LookupEnv("AWS_TEST_MFA_SERIAL_NUMBER")
+	if !found {
+		t.Skipf("AWS_TEST_MFA_SERIAL_NUMBER not set, skipping")
+	}
+	code, found := os.LookupEnv("AWS_TEST_MFA_CODE")
+	if !found {
+		t.Skipf("AWS_TEST_MFA_CODE not set, skipping")
+	}
+	accessKeyID, found := os.LookupEnv("AWS_TEST_MFA_USER_ACCESS_KEY")
+	if !found {
+		t.Skipf("AWS_TEST_MFA_USER_ACCESS_KEY not set, skipping")
+	}
+	secretKey, found := os.LookupEnv("AWS_TEST_MFA_USER_SECRET_KEY")
+	if !found {
+		t.Skipf("AWS_TEST_MFA_USER_SECRET_KEY not set, skipping")
+	}
+
+	accessKey := &awsAccessKey{}
+	accessKey.AccessKeyID = accessKeyID
+	accessKey.SecretAccessKey = secretKey
+
+	roleData := map[string]interface{}{
+		"credential_type":   sessionTokenCred,
+		"mfa_serial_number": serial,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			// Sleep some time because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigWithCreds(t, accessKey),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepReadWithMFA(t, "sts", "test", code, []credentialTestFunc{listDynamoTablesTest}),
+			testAccStepReadWithMFA(t, "creds", "test", code, []credentialTestFunc{listDynamoTablesTest}),
+		},
+	})
+}
+
 func TestAcceptanceBackend_RoleDefaultSTSTTL(t *testing.T) {
 	t.Parallel()
 	roleName := generateUniqueRoleName(t.Name())
@@ -1343,7 +1483,7 @@ func TestAcceptanceBackend_RoleDefaultSTSTTL(t *testing.T) {
 		AcceptanceTest: true,
 		PreCheck: func() {
 			testAccPreCheck(t)
-			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn})
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn}, nil)
 			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
 			time.Sleep(10 * time.Second)
 		},
@@ -1351,7 +1491,7 @@
 		Steps: []logicaltest.TestStep{
 			testAccStepConfig(t),
 			testAccStepWriteRole(t, "test", roleData),
-			testAccStepReadSTSResponse("test", uint64(minAwsAssumeRoleDuration)), // allow a little slack
+			testAccStepReadSTSResponse("test", time.Duration(minAwsAssumeRoleDuration)*time.Second), // allow a little slack
 		},
 		Teardown: func() error {
 			return deleteTestRole(roleName)
@@ -1359,7 +1499,8 @@
 	})
 }
 
-func TestBackend_policyArnCrud(t *testing.T) {
+// TestBackend_policyArnCRUD tests the CRUD operations for policy ARNs.
+func TestBackend_policyArnCRUD(t *testing.T) {
 	t.Parallel()
 	logicaltest.Test(t, logicaltest.TestCase{
 		AcceptanceTest: false,
@@ -1398,6 +1539,9 @@ func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicalte
 			"permissions_boundary_arn": "",
 			"iam_groups":               []string(nil),
 			"iam_tags":                 map[string]string(nil),
+			"mfa_serial_number":        "",
+			"session_tags":             map[string]string(nil),
+			"external_id":              "",
 		}
 		if !reflect.DeepEqual(resp.Data, expected) {
 			return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected)
@@ -1418,7 +1562,8 @@ func testAccStepWriteArnRoleRef(t *testing.T, vaultRoleName, awsRoleName, awsAcc
 	}
 }
 
-func TestBackend_iamGroupsCrud(t *testing.T) {
+// TestBackend_iamGroupsCRUD tests CRUD operations for IAM groups.
+func TestBackend_iamGroupsCRUD(t *testing.T) { t.Parallel() logicaltest.Test(t, logicaltest.TestCase{ AcceptanceTest: false, @@ -1468,6 +1613,9 @@ func testAccStepReadIamGroups(t *testing.T, name string, groups []string) logica "permissions_boundary_arn": "", "iam_groups": groups, "iam_tags": map[string]string(nil), + "mfa_serial_number": "", + "session_tags": map[string]string(nil), + "external_id": "", } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) @@ -1478,7 +1626,8 @@ func testAccStepReadIamGroups(t *testing.T, name string, groups []string) logica } } -func TestBackend_iamTagsCrud(t *testing.T) { +// TestBackend_iamTagsCRUD tests the CRUD operations for IAM tags. +func TestBackend_iamTagsCRUD(t *testing.T) { logicaltest.Test(t, logicaltest.TestCase{ AcceptanceTest: false, LogicalBackend: getBackend(t), @@ -1527,6 +1676,177 @@ func testAccStepReadIamTags(t *testing.T, name string, tags map[string]string) l "permissions_boundary_arn": "", "iam_groups": []string(nil), "iam_tags": tags, + "mfa_serial_number": "", + "session_tags": map[string]string(nil), + "external_id": "", + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) + } + + return nil + }, + } +} + +// TestBackend_stsSessionTagsCRUD tests the CRUD operations for STS session tags. +func TestBackend_stsSessionTagsCRUD(t *testing.T) { + t.Parallel() + + tagParams0 := map[string]string{"tag1": "value1", "tag2": "value2"} + tagParams1 := map[string]string{"tag1": "value1", "tag2": "value4", "tag3": "value3"} + + // list of tags in the form of "key=value" + tagParamsList0 := []string{"key1=value1", "key2=value2"} + tagParamsList0Expect := map[string]string{"key1": "value1", "key2": "value2"} + tagParamsList1 := []string{"key1=value2", "key3=value4"} + tagParamsList1Expect := map[string]string{"key1": "value2", "key3": "value4"} + + type testCase struct { + name string + expectTags []map[string]string + tagsParams []any + externalIDs []string + } + + for _, tt := range []testCase{ + { + name: "mapped-only", + tagsParams: []any{ + tagParams0, + map[string]string{}, + tagParams1, + }, + expectTags: []map[string]string{ + tagParams0, + {}, + tagParams1, + }, + externalIDs: []string{"foo", "", "bar"}, + }, + { + name: "string-list-only", + tagsParams: []any{ + tagParamsList0, + tagParamsList1, + }, + expectTags: []map[string]string{ + tagParamsList0Expect, + tagParamsList1Expect, + }, + externalIDs: []string{"foo"}, + }, + { + name: "mixed-param-types", + tagsParams: []any{ + tagParams0, + tagParamsList0, + tagParams1, + tagParamsList1, + }, + expectTags: []map[string]string{ + tagParams0, + tagParamsList0Expect, + tagParams1, + tagParamsList1Expect, + }, + externalIDs: []string{"foo", "bar"}, + }, + { + name: "unset-tags", + tagsParams: []any{ + tagParams0, + map[string]string{}, + }, + expectTags: []map[string]string{ + tagParams0, + {}, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + steps := []logicaltest.TestStep{ + testAccStepConfig(t), + } + + if len(tt.tagsParams) != len(tt.expectTags) { + t.Fatalf("invalid test case: test case params and expect must have the same length") + } + + // lastNonEmptyExternalID is used to store the last non-empty external ID for the + // test case. The value will is expected to be set on the role. Setting the value + // to an empty string has no effect on update operations. 
+ var lastNonEmptyExternalID string + for idx, params := range tt.tagsParams { + var externalID string + if len(tt.externalIDs) > idx { + externalID = tt.externalIDs[idx] + } + if externalID != "" { + lastNonEmptyExternalID = externalID + } + steps = append(steps, testAccStepWriteSTSSessionTags(t, tt.name, params, externalID)) + steps = append(steps, testAccStepReadSTSSessionTags(t, tt.name, tt.expectTags[idx], lastNonEmptyExternalID, false)) + } + steps = append( + steps, + testAccStepDeletePolicy(t, tt.name), + testAccStepReadSTSSessionTags(t, tt.name, nil, "", true), + ) + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: false, + LogicalBackend: getBackend(t), + Steps: steps, + }) + }) + } +} + +func testAccStepWriteSTSSessionTags(t *testing.T, name string, tags any, externalID string) logicaltest.TestStep { + t.Helper() + + data := map[string]interface{}{ + "credential_type": assumedRoleCred, + "session_tags": tags, + } + if externalID != "" { + data["external_id"] = externalID + } + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: data, + } +} + +func testAccStepReadSTSSessionTags(t *testing.T, name string, tags any, externalID string, expectNilResp bool) logicaltest.TestStep { + t.Helper() + + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if expectNilResp { + return nil + } + return fmt.Errorf("vault response not received") + } + + expected := map[string]interface{}{ + "policy_arns": []string(nil), + "role_arns": []string(nil), + "policy_document": "", + "credential_type": assumedRoleCred, + "default_sts_ttl": int64(0), + "max_sts_ttl": int64(0), + "user_path": "", + "permissions_boundary_arn": "", + "iam_groups": []string(nil), + "iam_tags": map[string]string(nil), + "mfa_serial_number": "", + "session_tags": tags, + "external_id": externalID, } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) diff --git a/builtin/logical/aws/client.go b/builtin/logical/aws/client.go index 71d24f3abb2e..dd6a58196631 100644 --- a/builtin/logical/aws/client.go +++ b/builtin/logical/aws/client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -7,19 +7,24 @@ import ( "context" "fmt" "os" + "strconv" + "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/sts" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) // NOTE: The caller is required to ensure that b.clientMutex is at least read locked -func getRootConfig(ctx context.Context, s logical.Storage, clientType string, logger hclog.Logger) (*aws.Config, error) { +func (b *backend) getRootConfig(ctx context.Context, s logical.Storage, clientType string, logger hclog.Logger) (*aws.Config, error) { credsConfig := &awsutil.CredentialsConfig{} var endpoint string var maxRetries int = aws.UseServiceDefaultRetries @@ -44,6 +49,26 @@ func getRootConfig(ctx context.Context, s logical.Storage, clientType string, lo case clientType == "sts" && config.STSEndpoint != "": endpoint = *aws.String(config.STSEndpoint) } + + if config.IdentityTokenAudience != "" { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get namespace from context: %w", err) + } + + fetcher := &PluginIdentityTokenFetcher{ + sys: b.System(), + logger: b.Logger(), + ns: ns, + audience: config.IdentityTokenAudience, + ttl: config.IdentityTokenTTL, + } + + sessionSuffix := strconv.FormatInt(time.Now().UnixNano(), 10) + credsConfig.RoleSessionName = fmt.Sprintf("vault-aws-secrets-%s", sessionSuffix) + credsConfig.WebIdentityTokenFetcher = fetcher + credsConfig.RoleARN = config.RoleARN + } } if credsConfig.Region == "" { @@ -74,8 +99,8 @@ func getRootConfig(ctx context.Context, s logical.Storage, clientType string, lo }, nil } -func nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Logger) (*iam.IAM, error) { - awsConfig, err := getRootConfig(ctx, s, "iam", logger) +func (b *backend) nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Logger) (*iam.IAM, error) { + awsConfig, err := b.getRootConfig(ctx, s, "iam", logger) if err != nil { return nil, err } @@ -90,8 +115,8 @@ func nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Log return client, nil } -func nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Logger) (*sts.STS, error) { - awsConfig, err := getRootConfig(ctx, s, "sts", logger) +func (b *backend) nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Logger) (*sts.STS, error) { + awsConfig, err := b.getRootConfig(ctx, s, "sts", logger) if err != nil { return nil, err } @@ -105,3 +130,36 @@ func nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Log } return client, nil } + +// PluginIdentityTokenFetcher fetches plugin identity tokens from Vault. It is provided +// to the AWS SDK client to keep assumed role credentials refreshed through expiration. +// When the client's STS credentials expire, it will use this interface to fetch a new +// plugin identity token and exchange it for new STS credentials. 
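For context, `stscreds.TokenFetcher` is the hook aws-sdk-go v1 exposes for web identity federation. A fetcher like the one defined below is typically handed to a web-identity credentials provider along the following lines; this is a sketch only, since in this change the actual wiring happens inside awsutil.CredentialsConfig, which is not shown here:

```
// Sketch: plugging a stscreds.TokenFetcher into the SDK's web identity flow.
// Assumes imports of aws, credentials, stscreds, session, sts, and iam from
// github.com/aws/aws-sdk-go; fetcher, roleARN, and sessionName are illustrative.
func newWIFIAMClient(fetcher stscreds.TokenFetcher, roleARN, sessionName string) *iam.IAM {
	sess := session.Must(session.NewSession())
	provider := stscreds.NewWebIdentityRoleProviderWithOptions(
		sts.New(sess), roleARN, sessionName, fetcher,
	)
	cfg := aws.NewConfig().WithCredentials(credentials.NewCredentials(provider))
	// Clients built with cfg exchange the plugin identity token for STS
	// credentials automatically, refreshing them whenever they expire.
	return iam.New(sess, cfg)
}
```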
+type PluginIdentityTokenFetcher struct { + sys logical.SystemView + logger hclog.Logger + audience string + ns *namespace.Namespace + ttl time.Duration +} + +var _ stscreds.TokenFetcher = (*PluginIdentityTokenFetcher)(nil) + +func (f PluginIdentityTokenFetcher) FetchToken(ctx aws.Context) ([]byte, error) { + nsCtx := namespace.ContextWithNamespace(ctx, f.ns) + resp, err := f.sys.GenerateIdentityToken(nsCtx, &pluginutil.IdentityTokenRequest{ + Audience: f.audience, + TTL: f.ttl, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate plugin identity token: %w", err) + } + f.logger.Info("fetched new plugin identity token") + + if resp.TTL < f.ttl { + f.logger.Debug("generated plugin identity token has shorter TTL than requested", + "requested", f.ttl, "actual", resp.TTL) + } + + return []byte(resp.Token.Token()), nil +} diff --git a/builtin/logical/aws/cmd/aws/main.go b/builtin/logical/aws/cmd/aws/main.go index 28de1eb3f8d9..62c7efe6cf3a 100644 --- a/builtin/logical/aws/cmd/aws/main.go +++ b/builtin/logical/aws/cmd/aws/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/aws/iam_policies.go b/builtin/logical/aws/iam_policies.go index 27b6f1822651..9735a2af81a6 100644 --- a/builtin/logical/aws/iam_policies.go +++ b/builtin/logical/aws/iam_policies.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -73,7 +73,7 @@ func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGr for _, g := range iamGroups { // Collect managed policy ARNs from the IAM Group - agp, err = iamClient.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{ + agp, err = iamClient.ListAttachedGroupPoliciesWithContext(ctx, &iam.ListAttachedGroupPoliciesInput{ GroupName: aws.String(g), }) if err != nil { @@ -84,14 +84,14 @@ func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGr } // Collect inline policy names from the IAM Group - inlinePolicies, err = iamClient.ListGroupPolicies(&iam.ListGroupPoliciesInput{ + inlinePolicies, err = iamClient.ListGroupPoliciesWithContext(ctx, &iam.ListGroupPoliciesInput{ GroupName: aws.String(g), }) if err != nil { return nil, nil, err } for _, iP := range inlinePolicies.PolicyNames { - inlinePolicyDoc, err = iamClient.GetGroupPolicy(&iam.GetGroupPolicyInput{ + inlinePolicyDoc, err = iamClient.GetGroupPolicyWithContext(ctx, &iam.GetGroupPolicyInput{ GroupName: &g, PolicyName: iP, }) diff --git a/builtin/logical/aws/iam_policies_test.go b/builtin/logical/aws/iam_policies_test.go index 5840186305a0..15d0ab801649 100644 --- a/builtin/logical/aws/iam_policies_test.go +++ b/builtin/logical/aws/iam_policies_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -8,6 +8,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/hashicorp/vault/sdk/logical" @@ -29,15 +30,15 @@ type mockGroupIAMClient struct { GetGroupPolicyResp iam.GetGroupPolicyOutput } -func (m mockGroupIAMClient) ListAttachedGroupPolicies(in *iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) { +func (m mockGroupIAMClient) ListAttachedGroupPoliciesWithContext(_ aws.Context, in *iam.ListAttachedGroupPoliciesInput, _ ...request.Option) (*iam.ListAttachedGroupPoliciesOutput, error) { return &m.ListAttachedGroupPoliciesResp, nil } -func (m mockGroupIAMClient) ListGroupPolicies(in *iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) { +func (m mockGroupIAMClient) ListGroupPoliciesWithContext(_ aws.Context, in *iam.ListGroupPoliciesInput, _ ...request.Option) (*iam.ListGroupPoliciesOutput, error) { return &m.ListGroupPoliciesResp, nil } -func (m mockGroupIAMClient) GetGroupPolicy(in *iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) { +func (m mockGroupIAMClient) GetGroupPolicyWithContext(_ aws.Context, in *iam.GetGroupPolicyInput, _ ...request.Option) (*iam.GetGroupPolicyOutput, error) { return &m.GetGroupPolicyResp, nil } @@ -140,7 +141,7 @@ func Test_getGroupPolicies(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/path_config_lease.go b/builtin/logical/aws/path_config_lease.go index 05f06bb39024..0e2ad43afe80 100644 --- a/builtin/logical/aws/path_config_lease.go +++ b/builtin/logical/aws/path_config_lease.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -15,6 +16,11 @@ import ( func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "lease": { Type: framework.TypeString, @@ -27,9 +33,20 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, }, HelpSynopsis: pathConfigLeaseHelpSyn, @@ -66,12 +83,12 @@ func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *f return logical.ErrorResponse("'lease_max' is a required parameter"), nil } - lease, err := time.ParseDuration(leaseRaw) + lease, err := parseutil.ParseDurationSecond(leaseRaw) if err != nil { return logical.ErrorResponse(fmt.Sprintf( "Invalid lease: %s", err)), nil } - leaseMax, err := time.ParseDuration(leaseMaxRaw) + leaseMax, err := parseutil.ParseDurationSecond(leaseMaxRaw) if err != nil { return logical.ErrorResponse(fmt.Sprintf( "Invalid lease_max: %s", err)), nil diff --git a/builtin/logical/aws/path_config_root.go b/builtin/logical/aws/path_config_root.go index 7a531f6cc26c..93fccc370e71 100644 --- a/builtin/logical/aws/path_config_root.go +++ b/builtin/logical/aws/path_config_root.go @@ -1,13 +1,16 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws import ( "context" + "errors" "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/pluginidentityutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -15,8 +18,13 @@ import ( const defaultUserNameTemplate = `{{ if (eq .Type "STS") }}{{ printf "vault-%s-%s" (unix_time) (random 20) | truncate 32 }}{{ else }}{{ printf "vault-%s-%s-%s" (printf "%s-%s" (.DisplayName) (.PolicyName) | truncate 42) (unix_time) (random 20) | truncate 64 }}{{ end }}` func pathConfigRoot(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "config/root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "access_key": { Type: framework.TypeString, @@ -49,16 +57,34 @@ func pathConfigRoot(b *backend) *framework.Path { Type: framework.TypeString, Description: "Template to generate custom IAM usernames", }, + "role_arn": { + Type: framework.TypeString, + Description: "Role ARN to assume for plugin identity token federation", + }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRootRead, - logical.UpdateOperation: b.pathConfigRootWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRootRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "root-iam-credentials-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigRootWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "root-iam-credentials", + }, + }, }, HelpSynopsis: pathConfigRootHelpSyn, HelpDescription: pathConfigRootHelpDesc, } + pluginidentityutil.AddPluginIdentityTokenFields(p.Fields) + + return p } func (b *backend) pathConfigRootRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -86,7 +112,10 @@ func (b *backend) pathConfigRootRead(ctx context.Context, req *logical.Request, "sts_endpoint": config.STSEndpoint, "max_retries": config.MaxRetries, "username_template": config.UsernameTemplate, + "role_arn": config.RoleARN, } + + config.PopulatePluginIdentityTokenData(configData) return &logical.Response{ Data: configData, }, nil @@ -97,6 +126,7 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, iamendpoint := data.Get("iam_endpoint").(string) stsendpoint := data.Get("sts_endpoint").(string) maxretries := data.Get("max_retries").(int) + roleARN := data.Get("role_arn").(string) usernameTemplate := data.Get("username_template").(string) if usernameTemplate == "" { usernameTemplate = defaultUserNameTemplate @@ -105,7 +135,7 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, b.clientMutex.Lock() defer b.clientMutex.Unlock() - entry, err := logical.StorageEntryJSON("config/root", rootConfig{ + rc := rootConfig{ AccessKey: data.Get("access_key").(string), SecretKey: data.Get("secret_key").(string), IAMEndpoint: iamendpoint, @@ -113,7 +143,33 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, Region: region, MaxRetries: maxretries, UsernameTemplate: usernameTemplate, - }) + RoleARN: roleARN, + } + if err := rc.ParsePluginIdentityTokenFields(data); err 
!= nil { + return logical.ErrorResponse(err.Error()), nil + } + + if rc.IdentityTokenAudience != "" && rc.AccessKey != "" { + return logical.ErrorResponse("only one of 'access_key' or 'identity_token_audience' can be set"), nil + } + + if rc.IdentityTokenAudience != "" && rc.RoleARN == "" { + return logical.ErrorResponse("missing required 'role_arn' when 'identity_token_audience' is set"), nil + } + + if rc.IdentityTokenAudience != "" { + _, err := b.System().GenerateIdentityToken(ctx, &pluginutil.IdentityTokenRequest{ + Audience: rc.IdentityTokenAudience, + }) + if err != nil { + if errors.Is(err, pluginidentityutil.ErrPluginWorkloadIdentityUnsupported) { + return logical.ErrorResponse(err.Error()), nil + } + return nil, err + } + } + + entry, err := logical.StorageEntryJSON("config/root", rc) if err != nil { return nil, err } @@ -131,6 +187,8 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, } type rootConfig struct { + pluginidentityutil.PluginIdentityTokenParams + AccessKey string `json:"access_key"` SecretKey string `json:"secret_key"` IAMEndpoint string `json:"iam_endpoint"` @@ -138,6 +196,7 @@ type rootConfig struct { Region string `json:"region"` MaxRetries int `json:"max_retries"` UsernameTemplate string `json:"username_template"` + RoleARN string `json:"role_arn"` } const pathConfigRootHelpSyn = ` diff --git a/builtin/logical/aws/path_config_root_test.go b/builtin/logical/aws/path_config_root_test.go index d15dce3771a8..783745ac0ed8 100644 --- a/builtin/logical/aws/path_config_root_test.go +++ b/builtin/logical/aws/path_config_root_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -8,26 +8,33 @@ import ( "reflect" "testing" + "github.com/hashicorp/vault/sdk/helper/pluginidentityutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestBackend_PathConfigRoot(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } configData := map[string]interface{}{ - "access_key": "AKIAEXAMPLE", - "secret_key": "RandomData", - "region": "us-west-2", - "iam_endpoint": "https://iam.amazonaws.com", - "sts_endpoint": "https://sts.us-west-2.amazonaws.com", - "max_retries": 10, - "username_template": defaultUserNameTemplate, + "access_key": "AKIAEXAMPLE", + "secret_key": "RandomData", + "region": "us-west-2", + "iam_endpoint": "https://iam.amazonaws.com", + "sts_endpoint": "https://sts.us-west-2.amazonaws.com", + "max_retries": 10, + "username_template": defaultUserNameTemplate, + "role_arn": "", + "identity_token_audience": "", + "identity_token_ttl": int64(0), } configReq := &logical.Request{ @@ -52,7 +59,47 @@ func TestBackend_PathConfigRoot(t *testing.T) { } delete(configData, "secret_key") + require.Equal(t, configData, resp.Data) if !reflect.DeepEqual(resp.Data, configData) { t.Errorf("bad: expected to read config root as %#v, got %#v instead", configData, resp.Data) } } + +// TestBackend_PathConfigRoot_PluginIdentityToken tests that configuration +// of plugin WIF returns an immediate error. 
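For reference, the happy path this validation guards: an operator supplies an identity token audience and role ARN instead of static keys. A sketch of such a request, following the same conventions as the tests in this file; the field names come from this change, while the concrete values are illustrative:

```
// Sketch: configuring workload identity federation on config/root.
// Assumes b and config were set up as in the surrounding tests.
req := &logical.Request{
	Operation: logical.UpdateOperation,
	Path:      "config/root",
	Storage:   config.StorageView,
	Data: map[string]interface{}{
		"identity_token_audience": "vault.example/v1/identity/oidc/plugins",
		"identity_token_ttl":      int64(3600),
		"role_arn":                "arn:aws:iam::123456789012:role/vault-secrets",
		// access_key/secret_key must be left unset: the handler rejects
		// static keys combined with identity_token_audience.
	},
}
resp, err := b.HandleRequest(context.Background(), req)
```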
+func TestBackend_PathConfigRoot_PluginIdentityToken(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = &testSystemView{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "identity_token_ttl": int64(10), + "identity_token_audience": "test-aud", + "role_arn": "test-role-arn", + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Path: "config/root", + Data: configData, + } + + resp, err := b.HandleRequest(context.Background(), configReq) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.ErrorContains(t, resp.Error(), pluginidentityutil.ErrPluginWorkloadIdentityUnsupported.Error()) +} + +type testSystemView struct { + logical.StaticSystemView +} + +func (d testSystemView) GenerateIdentityToken(_ context.Context, _ *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + return nil, pluginidentityutil.ErrPluginWorkloadIdentityUnsupported +} diff --git a/builtin/logical/aws/path_config_rotate_root.go b/builtin/logical/aws/path_config_rotate_root.go index 212a9eb3a800..72f9c82e4d0f 100644 --- a/builtin/logical/aws/path_config_rotate_root.go +++ b/builtin/logical/aws/path_config_rotate_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -16,6 +16,13 @@ import ( func pathConfigRotateRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/rotate-root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "root-iam-credentials", + OperationVerb: "rotate", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigRotateRootUpdate, @@ -59,7 +66,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R } var getUserInput iam.GetUserInput // empty input means get current user - getUserRes, err := client.GetUser(&getUserInput) + getUserRes, err := client.GetUserWithContext(ctx, &getUserInput) if err != nil { return nil, fmt.Errorf("error calling GetUser: %w", err) } @@ -76,7 +83,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R createAccessKeyInput := iam.CreateAccessKeyInput{ UserName: getUserRes.User.UserName, } - createAccessKeyRes, err := client.CreateAccessKey(&createAccessKeyInput) + createAccessKeyRes, err := client.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) if err != nil { return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) } @@ -107,7 +114,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: aws.String(oldAccessKey), UserName: getUserRes.User.UserName, } - _, err = client.DeleteAccessKey(&deleteAccessKeyInput) + _, err = client.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput) if err != nil { return nil, fmt.Errorf("error deleting old access key: %w", err) } diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go index b28f3fa0228d..1c1ef3546aed 100644 --- a/builtin/logical/aws/path_roles.go +++ b/builtin/logical/aws/path_roles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -27,6 +27,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -39,18 +44,24 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, - Description: "Name of the policy", + Description: "Name of the role", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Policy Name", + Name: "Role Name", }, }, "credential_type": { Type: framework.TypeString, - Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred), + Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred, sessionTokenCred), }, "role_arns": { @@ -104,10 +115,26 @@ delimited key pairs.`, Value: "[key1=value1, key2=value2]", }, }, - + "session_tags": { + Type: framework.TypeKVPairs, + Description: fmt.Sprintf(`Session tags to be set for %q creds created by this role. These must be presented +as Key-Value pairs. This can be represented as a map or a list of equal sign +delimited key pairs.`, assumedRoleCred), + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Session Tags", + Value: "[key1=value1, key2=value2]", + }, + }, + "external_id": { + Type: framework.TypeString, + Description: "External ID to set when assuming the role; only valid when credential_type is " + assumedRoleCred, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "External ID", + }, + }, "default_sts_ttl": { Type: framework.TypeDurationSecond, - Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred), + Description: fmt.Sprintf("Default TTL for %s, %s, and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred, sessionTokenCred), DisplayAttrs: &framework.DisplayAttributes{ Name: "Default STS TTL", }, @@ -115,7 +142,7 @@ delimited key pairs.`, "max_sts_ttl": { Type: framework.TypeDurationSecond, - Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred), + Description: fmt.Sprintf("Max allowed TTL for %s, %s, and %s credential types", assumedRoleCred, federationTokenCred, sessionTokenCred), DisplayAttrs: &framework.DisplayAttributes{ Name: "Max STS TTL", }, @@ -150,6 +177,15 @@ delimited key pairs.`, }, Default: "/", }, + + "mfa_serial_number": { + Type: framework.TypeString, + Description: fmt.Sprintf(`Identification number or ARN of the MFA device associated with the root config user. Only valid +when credential_type is %s. 
This is only required when the IAM user has an MFA device configured.`, sessionTokenCred), + DisplayAttrs: &framework.DisplayAttributes{ + Name: "MFA Device Serial Number", + }, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -317,6 +353,18 @@ func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *f roleEntry.IAMTags = iamTags.(map[string]string) } + if serialNumber, ok := d.GetOk("mfa_serial_number"); ok { + roleEntry.SerialNumber = serialNumber.(string) + } + + if sessionTags, ok := d.GetOk("session_tags"); ok { + roleEntry.SessionTags = sessionTags.(map[string]string) + } + + if externalID, ok := d.GetOk("external_id"); ok { + roleEntry.ExternalID = externalID.(string) + } + if legacyRole != "" { roleEntry = upgradeLegacyPolicyEntry(legacyRole) if roleEntry.InvalidData != "" { @@ -503,6 +551,8 @@ type awsRoleEntry struct { PolicyDocument string `json:"policy_document"` // JSON-serialized inline policy to attach to IAM users and/or to specify as the Policy parameter in AssumeRole calls IAMGroups []string `json:"iam_groups"` // Names of IAM groups that generated IAM users will be added to IAMTags map[string]string `json:"iam_tags"` // IAM tags that will be added to the generated IAM users + SessionTags map[string]string `json:"session_tags"` // Session tags that will be added as Tags parameter in AssumedRole calls + ExternalID string `json:"external_id"` // External ID to added as ExternalID in AssumeRole calls InvalidData string `json:"invalid_data,omitempty"` // Invalid role data. Exists to support converting the legacy role data into the new format ProhibitFlexibleCredPath bool `json:"prohibit_flexible_cred_path,omitempty"` // Disallow accessing STS credentials via the creds path and vice verse Version int `json:"version"` // Version number of the role format @@ -510,6 +560,7 @@ type awsRoleEntry struct { MaxSTSTTL time.Duration `json:"max_sts_ttl"` // Max allowed TTL for STS credentials UserPath string `json:"user_path"` // The path for the IAM user when using "iam_user" credential type PermissionsBoundaryARN string `json:"permissions_boundary_arn"` // ARN of an IAM policy to attach as a permissions boundary + SerialNumber string `json:"mfa_serial_number"` // Serial number or ARN of the MFA device } func (r *awsRoleEntry) toResponseData() map[string]interface{} { @@ -520,10 +571,13 @@ func (r *awsRoleEntry) toResponseData() map[string]interface{} { "policy_document": r.PolicyDocument, "iam_groups": r.IAMGroups, "iam_tags": r.IAMTags, + "session_tags": r.SessionTags, + "external_id": r.ExternalID, "default_sts_ttl": int64(r.DefaultSTSTTL.Seconds()), "max_sts_ttl": int64(r.MaxSTSTTL.Seconds()), "user_path": r.UserPath, "permissions_boundary_arn": r.PermissionsBoundaryARN, + "mfa_serial_number": r.SerialNumber, } if r.InvalidData != "" { @@ -539,19 +593,19 @@ func (r *awsRoleEntry) validate() error { errors = multierror.Append(errors, fmt.Errorf("did not supply credential_type")) } - allowedCredentialTypes := []string{iamUserCred, assumedRoleCred, federationTokenCred} + allowedCredentialTypes := []string{iamUserCred, assumedRoleCred, federationTokenCred, sessionTokenCred} for _, credType := range r.CredentialTypes { if !strutil.StrListContains(allowedCredentialTypes, credType) { errors = multierror.Append(errors, fmt.Errorf("unrecognized credential type: %s", credType)) } } - if r.DefaultSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) { - errors = 
multierror.Append(errors, fmt.Errorf("default_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred)) + if r.DefaultSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) && !strutil.StrListContains(r.CredentialTypes, sessionTokenCred) { + errors = multierror.Append(errors, fmt.Errorf("default_sts_ttl parameter only valid for %s, %s, and %s credential types", assumedRoleCred, federationTokenCred, sessionTokenCred)) } - if r.MaxSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) { - errors = multierror.Append(errors, fmt.Errorf("max_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred)) + if r.MaxSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) && !strutil.StrListContains(r.CredentialTypes, sessionTokenCred) { + errors = multierror.Append(errors, fmt.Errorf("max_sts_ttl parameter only valid for %s, %s, and %s credential types", assumedRoleCred, federationTokenCred, sessionTokenCred)) } if r.MaxSTSTTL > 0 && @@ -565,7 +619,7 @@ func (r *awsRoleEntry) validate() error { errors = multierror.Append(errors, fmt.Errorf("user_path parameter only valid for %s credential type", iamUserCred)) } if !userPathRegex.MatchString(r.UserPath) { - errors = multierror.Append(errors, fmt.Errorf("The specified value for user_path is invalid. It must match %q regexp", userPathRegex.String())) + errors = multierror.Append(errors, fmt.Errorf("the specified value for user_path is invalid. It must match %q regexp", userPathRegex.String())) } } @@ -578,10 +632,22 @@ func (r *awsRoleEntry) validate() error { } } + if (r.PolicyDocument != "" || len(r.PolicyArns) != 0) && strutil.StrListContains(r.CredentialTypes, sessionTokenCred) { + errors = multierror.Append(errors, fmt.Errorf("cannot supply a policy or role when using credential_type %s", sessionTokenCred)) + } + if len(r.RoleArns) > 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) { errors = multierror.Append(errors, fmt.Errorf("cannot supply role_arns when credential_type isn't %s", assumedRoleCred)) } + if len(r.SessionTags) > 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) { + errors = multierror.Append(errors, fmt.Errorf("cannot supply session_tags when credential_type isn't %s", assumedRoleCred)) + } + + if r.ExternalID != "" && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) { + errors = multierror.Append(errors, fmt.Errorf("cannot supply external_id when credential_type isn't %s", assumedRoleCred)) + } + return errors.ErrorOrNil() } @@ -595,6 +661,7 @@ const ( assumedRoleCred = "assumed_role" iamUserCred = "iam_user" federationTokenCred = "federation_token" + sessionTokenCred = "session_token" ) const pathListRolesHelpSyn = `List the existing roles in this backend` diff --git a/builtin/logical/aws/path_roles_test.go b/builtin/logical/aws/path_roles_test.go index eb136b4bcf58..80328cc5f01a 100644 --- a/builtin/logical/aws/path_roles_test.go +++ b/builtin/logical/aws/path_roles_test.go @@ -1,15 +1,17 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws import ( "context" + "errors" "reflect" "strconv" "strings" "testing" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/logical" ) @@ -21,7 +23,7 @@ func TestBackend_PathListRoles(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -224,7 +226,7 @@ func TestRoleCRUDWithPermissionsBoundary(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -268,7 +270,7 @@ func TestRoleWithPermissionsBoundaryValidation(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -366,22 +368,74 @@ func TestRoleEntryValidationIamUserCred(t *testing.T) { CredentialTypes: []string{iamUserCred}, RoleArns: []string{"arn:aws:iam::123456789012:role/SomeRole"}, } - if roleEntry.validate() == nil { - t.Errorf("bad: invalid roleEntry with invalid RoleArns parameter %#v passed validation", roleEntry) - } + assertMultiError(t, roleEntry.validate(), + []error{ + errors.New( + "cannot supply role_arns when credential_type isn't assumed_role", + ), + }) roleEntry = awsRoleEntry{ CredentialTypes: []string{iamUserCred}, PolicyArns: []string{adminAccessPolicyARN}, DefaultSTSTTL: 1, } - if roleEntry.validate() == nil { - t.Errorf("bad: invalid roleEntry with unrecognized DefaultSTSTTL %#v passed validation", roleEntry) - } + assertMultiError(t, roleEntry.validate(), + []error{ + errors.New( + "default_sts_ttl parameter only valid for assumed_role, federation_token, and session_token credential types", + ), + }) roleEntry.DefaultSTSTTL = 0 + roleEntry.MaxSTSTTL = 1 - if roleEntry.validate() == nil { - t.Errorf("bad: invalid roleEntry with unrecognized MaxSTSTTL %#v passed validation", roleEntry) + assertMultiError(t, roleEntry.validate(), + []error{ + errors.New( + "max_sts_ttl parameter only valid for assumed_role, federation_token, and session_token credential types", + ), + }) + roleEntry.MaxSTSTTL = 0 + + roleEntry.SessionTags = map[string]string{ + "Key1": "Value1", + "Key2": "Value2", + } + assertMultiError(t, roleEntry.validate(), + []error{ + errors.New( + "cannot supply session_tags when credential_type isn't assumed_role", + ), + }) + roleEntry.SessionTags = nil + + roleEntry.ExternalID = "my-ext-id" + assertMultiError(t, roleEntry.validate(), + []error{ + errors.New( + "cannot supply external_id when credential_type isn't assumed_role"), + }) +} + +func assertMultiError(t *testing.T, err error, expected []error) { + t.Helper() + + if err == nil { + t.Errorf("expected error, got nil") + return + } + + var multiErr *multierror.Error + if errors.As(err, &multiErr) { + if multiErr.Len() != len(expected) { + t.Errorf("expected %d error, got %d", len(expected), multiErr.Len()) + } else { + if !reflect.DeepEqual(expected, multiErr.Errors) { + t.Errorf("expected error %q, actual %q", expected, multiErr.Errors) + } + } + } else { + t.Errorf("expected multierror, got %T", err) } } @@ -392,8 +446,13 @@ func TestRoleEntryValidationAssumedRoleCred(t *testing.T) { RoleArns: []string{"arn:aws:iam::123456789012:role/SomeRole"}, PolicyArns: 
[]string{adminAccessPolicyARN}, PolicyDocument: allowAllPolicyDocument, - DefaultSTSTTL: 2, - MaxSTSTTL: 3, + ExternalID: "my-ext-id", + SessionTags: map[string]string{ + "Key1": "Value1", + "Key2": "Value2", + }, + DefaultSTSTTL: 2, + MaxSTSTTL: 3, } if err := roleEntry.validate(); err != nil { t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err) diff --git a/builtin/logical/aws/path_static_creds.go b/builtin/logical/aws/path_static_creds.go new file mode 100644 index 000000000000..14fca7cd6de4 --- /dev/null +++ b/builtin/logical/aws/path_static_creds.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "fmt" + "net/http" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + pathStaticCreds = "static-creds" + + paramAccessKeyID = "access_key" + paramSecretsAccessKey = "secret_key" +) + +type awsCredentials struct { + AccessKeyID string `json:"access_key" structs:"access_key" mapstructure:"access_key"` + SecretAccessKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"` +} + +func pathStaticCredentials(b *backend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("%s/%s", pathStaticCreds, framework.GenericNameWithAtRegex(paramRoleName)), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathStaticCredsRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + paramAccessKeyID: { + Type: framework.TypeString, + Description: descAccessKeyID, + }, + paramSecretsAccessKey: { + Type: framework.TypeString, + Description: descSecretAccessKey, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathStaticCredsHelpSyn, + HelpDescription: pathStaticCredsHelpDesc, + } +} + +func (b *backend) pathStaticCredsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName, ok := data.GetOk(paramRoleName) + if !ok { + return nil, fmt.Errorf("missing %q parameter", paramRoleName) + } + + entry, err := req.Storage.Get(ctx, formatCredsStoragePath(roleName.(string))) + if err != nil { + return nil, fmt.Errorf("failed to read credentials for role %q: %w", roleName, err) + } + if entry == nil { + return nil, nil + } + + var credentials awsCredentials + if err := entry.DecodeJSON(&credentials); err != nil { + return nil, fmt.Errorf("failed to decode credentials: %w", err) + } + + return &logical.Response{ + Data: structs.New(credentials).Map(), + }, nil +} + +func formatCredsStoragePath(roleName string) string { + return fmt.Sprintf("%s/%s", pathStaticCreds, roleName) +} + +const pathStaticCredsHelpSyn = `Retrieve static credentials from the named role.` + +const pathStaticCredsHelpDesc = ` +This path reads AWS credentials for a certain static role. 
The keys are rotated +periodically according to their configuration, and will return the same password +until they are rotated.` + +const ( + descAccessKeyID = "The access key of the AWS Credential" + descSecretAccessKey = "The secret key of the AWS Credential" +) diff --git a/builtin/logical/aws/path_static_creds_test.go b/builtin/logical/aws/path_static_creds_test.go new file mode 100644 index 000000000000..e128a5f0730a --- /dev/null +++ b/builtin/logical/aws/path_static_creds_test.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "reflect" + "testing" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// TestStaticCredsRead verifies that we can correctly read a cred that exists, and correctly _not read_ +// a cred that does not exist. +func TestStaticCredsRead(t *testing.T) { + // setup + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() // for brevity later + + // insert a cred to get + creds := &awsCredentials{ + AccessKeyID: "foo", + SecretAccessKey: "bar", + } + entry, err := logical.StorageEntryJSON(formatCredsStoragePath("test"), creds) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + // cases + cases := []struct { + name string + roleName string + expectedError error + expectedResponse *logical.Response + }{ + { + name: "get existing creds", + roleName: "test", + expectedResponse: &logical.Response{ + Data: structs.New(creds).Map(), + }, + }, + { + name: "get non-existent creds", + roleName: "this-doesnt-exist", + // returns nil, nil + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + b := Backend(config) + + req := &logical.Request{ + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.roleName, + }, + } + resp, err := b.pathStaticCredsRead(bgCTX, req, staticCredsFieldData(req.Data)) + + if err != c.expectedError { + t.Fatalf("got error %q, but expected %q", err, c.expectedError) + } + if !reflect.DeepEqual(resp, c.expectedResponse) { + t.Fatalf("got response %v, but expected %v", resp, c.expectedResponse) + } + }) + } +} + +func staticCredsFieldData(data map[string]interface{}) *framework.FieldData { + schema := map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + } + + return &framework.FieldData{ + Raw: data, + Schema: schema, + } +} diff --git a/builtin/logical/aws/path_static_roles.go b/builtin/logical/aws/path_static_roles.go new file mode 100644 index 000000000000..f07eab54ab18 --- /dev/null +++ b/builtin/logical/aws/path_static_roles.go @@ -0,0 +1,348 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + pathStaticRole = "static-roles" + + paramRoleName = "name" + paramUsername = "username" + paramRotationPeriod = "rotation_period" +) + +type staticRoleEntry struct { + Name string `json:"name" structs:"name" mapstructure:"name"` + ID string `json:"id" structs:"id" mapstructure:"id"` + Username string `json:"username" structs:"username" mapstructure:"username"` + RotationPeriod time.Duration `json:"rotation_period" structs:"rotation_period" mapstructure:"rotation_period"` +} + +func pathStaticRoles(b *backend) *framework.Path { + roleResponse := map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + }, + }}, + } + + return &framework.Path{ + Pattern: fmt.Sprintf("%s/%s", pathStaticRole, framework.GenericNameWithAtRegex(paramRoleName)), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesRead, + Responses: roleResponse, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesWrite, + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + Responses: roleResponse, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesDelete, + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: http.StatusText(http.StatusNoContent), + }}, + }, + }, + }, + + HelpSynopsis: pathStaticRolesHelpSyn, + HelpDescription: pathStaticRolesHelpDesc, + } +} + +func (b *backend) pathStaticRolesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName, ok := data.GetOk(paramRoleName) + if !ok { + return nil, fmt.Errorf("missing %q parameter", paramRoleName) + } + + b.roleMutex.RLock() + defer b.roleMutex.RUnlock() + + entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string))) + if err != nil { + return nil, fmt.Errorf("failed to read configuration for static role %q: %w", roleName, err) + } + if entry == nil { + return nil, nil + } + + var config staticRoleEntry + if err := entry.DecodeJSON(&config); err != nil { + return nil, fmt.Errorf("failed to decode configuration for static role %q: %w", roleName, err) + } + + return &logical.Response{ + Data: formatResponse(config), + }, nil +} + +func (b *backend) pathStaticRolesWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Create & validate config 
from request parameters
+	config := staticRoleEntry{}
+	isCreate := req.Operation == logical.CreateOperation
+
+	if rawRoleName, ok := data.GetOk(paramRoleName); ok {
+		config.Name = rawRoleName.(string)
+
+		if err := b.validateRoleName(config.Name); err != nil {
+			return nil, err
+		}
+	} else {
+		return logical.ErrorResponse("missing %q parameter", paramRoleName), nil
+	}
+
+	// retrieve old role value
+	entry, err := req.Storage.Get(ctx, formatRoleStoragePath(config.Name))
+	if err != nil {
+		return nil, fmt.Errorf("couldn't check storage for pre-existing role: %w", err)
+	}
+
+	if entry != nil {
+		err = entry.DecodeJSON(&config)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't convert existing role into config struct: %w", err)
+		}
+	} else {
+		// if we couldn't find an entry, this is a create event
+		isCreate = true
+	}
+
+	// other params are optional if we're not creating
+
+	if rawUsername, ok := data.GetOk(paramUsername); ok {
+		config.Username = rawUsername.(string)
+
+		if err := b.validateIAMUserExists(ctx, req.Storage, &config, isCreate); err != nil {
+			return nil, err
+		}
+	} else if isCreate {
+		return logical.ErrorResponse("missing %q parameter", paramUsername), nil
+	}
+
+	if rawRotationPeriod, ok := data.GetOk(paramRotationPeriod); ok {
+		config.RotationPeriod = time.Duration(rawRotationPeriod.(int)) * time.Second
+
+		if err := b.validateRotationPeriod(config.RotationPeriod); err != nil {
+			return nil, err
+		}
+	} else if isCreate {
+		return logical.ErrorResponse("missing %q parameter", paramRotationPeriod), nil
+	}
+
+	b.roleMutex.Lock()
+	defer b.roleMutex.Unlock()
+
+	// Upsert role config
+	newRole, err := logical.StorageEntryJSON(formatRoleStoragePath(config.Name), config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal object to JSON: %w", err)
+	}
+	err = req.Storage.Put(ctx, newRole)
+	if err != nil {
+		return nil, fmt.Errorf("failed to save object in storage: %w", err)
+	}
+
+	// Bootstrap initial set of keys if they did not exist before. AWS Secret Access Keys can only be obtained on creation,
+	// so we need to bootstrap new roles with a new initial set of keys to be able to serve valid credentials to Vault clients.
+	existingCreds, err := req.Storage.Get(ctx, formatCredsStoragePath(config.Name))
+	if err != nil {
+		return nil, fmt.Errorf("unable to verify if credentials already exist for role %q: %w", config.Name, err)
+	}
+	if existingCreds == nil {
+		err := b.createCredential(ctx, req.Storage, config, false)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create new credentials for role %q: %w", config.Name, err)
+		}
+
+		err = b.credRotationQueue.Push(&queue.Item{
+			Key:      config.Name,
+			Value:    config,
+			Priority: time.Now().Add(config.RotationPeriod).Unix(),
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", config.Name, err)
+		}
+	} else {
+		// creds already exist, so all we need to do is update the rotation schedule.
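+		// The queue is keyed by role name: pop the existing entry, attach
+		// the updated config, and re-push it with the next rotation time
+		// (now + rotation_period) as its priority.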
+		i, err := b.credRotationQueue.PopByKey(config.Name)
+		if err != nil {
+			return nil, fmt.Errorf("expected an item with name %q, but got an error: %w", config.Name, err)
+		}
+		i.Value = config
+		// update the next rotation to occur at now + the new rotation period
+		i.Priority = time.Now().Add(config.RotationPeriod).Unix()
+		err = b.credRotationQueue.Push(i)
+		if err != nil {
+			return nil, fmt.Errorf("failed to add updated item into the rotation queue for role %q: %w", config.Name, err)
+		}
+	}
+
+	return &logical.Response{
+		Data: formatResponse(config),
+	}, nil
+}
+
+func (b *backend) pathStaticRolesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName, ok := data.GetOk(paramRoleName)
+	if !ok {
+		return nil, fmt.Errorf("missing %q parameter", paramRoleName)
+	}
+
+	b.roleMutex.Lock()
+	defer b.roleMutex.Unlock()
+
+	entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string)))
+	if err != nil {
+		return nil, fmt.Errorf("couldn't locate role in storage due to error: %w", err)
+	}
+	// no entry in storage and no error: the role is already deleted
+	if entry == nil {
+		return nil, nil
+	}
+	var cfg staticRoleEntry
+	err = entry.DecodeJSON(&cfg)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't convert storage entry to role config: %w", err)
+	}
+
+	err = b.deleteCredential(ctx, req.Storage, cfg, false)
+	if err != nil {
+		return nil, fmt.Errorf("failed to clean credentials while deleting role %q: %w", roleName.(string), err)
+	}
+
+	// delete from the queue
+	_, err = b.credRotationQueue.PopByKey(cfg.Name)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't delete key from queue: %w", err)
+	}
+
+	return nil, req.Storage.Delete(ctx, formatRoleStoragePath(roleName.(string)))
+}
+
+func (b *backend) validateRoleName(name string) error {
+	if name == "" {
+		return errors.New("empty role name attribute given")
+	}
+	return nil
+}
+
+// validateIAMUserExists checks the user information we have for the role against the information on AWS. On a create, it uses the username
+// to retrieve the user information and _sets_ the userID. On update, it validates the userID and username.
+func (b *backend) validateIAMUserExists(ctx context.Context, storage logical.Storage, entry *staticRoleEntry, isCreate bool) error {
+	c, err := b.clientIAM(ctx, storage)
+	if err != nil {
+		return fmt.Errorf("unable to validate username %q: %w", entry.Username, err)
+	}
+
+	// we don't really care about the content of the result, just that it's not an error
+	out, err := c.GetUser(&iam.GetUserInput{
+		UserName: aws.String(entry.Username),
+	})
+	if err != nil {
+		return fmt.Errorf("unable to validate username %q: %w", entry.Username, err)
+	}
+	if out.User == nil {
+		return fmt.Errorf("unable to validate username %q: GetUser returned no matching user", entry.Username)
+	}
+	if *out.User.UserName != entry.Username {
+		return fmt.Errorf("AWS GetUser returned a username, but it didn't match: %q was requested, but %q was returned", entry.Username, *out.User.UserName)
+	}
+
+	if !isCreate && *out.User.UserId != entry.ID {
+		return fmt.Errorf("AWS GetUser returned a user, but the ID did not match: %q was requested, but %q was returned", entry.ID, *out.User.UserId)
+	} else {
+		// if this is an insert, store the userID. This is the immutable part of an IAM user, but it's not exactly user-friendly.
+		// So, we allow users to specify usernames, but on updates we'll use the ID as a verification cross-check.
+		entry.ID = *out.User.UserId
+	}
+
+	return nil
+}
+
+const (
+	minAllowableRotationPeriod = 1 * time.Minute
+)
+
+func (b *backend) validateRotationPeriod(period time.Duration) error {
+	if period < minAllowableRotationPeriod {
+		return fmt.Errorf("role rotation period out of range: must be at least %.2f seconds", minAllowableRotationPeriod.Seconds())
+	}
+	return nil
+}
+
+func formatResponse(cfg staticRoleEntry) map[string]interface{} {
+	response := structs.New(cfg).Map()
+	response[paramRotationPeriod] = int64(cfg.RotationPeriod.Seconds())
+
+	return response
+}
+
+func formatRoleStoragePath(roleName string) string {
+	return fmt.Sprintf("%s/%s", pathStaticRole, roleName)
+}
+
+const pathStaticRolesHelpSyn = `
+Manage static roles for AWS.
+`
+
+const pathStaticRolesHelpDesc = `
+This path lets you manage static roles (users) for the AWS secret backend.
+A static role is associated with a single IAM user, and manages the access
+keys based on a rotation period, automatically rotating the credential. If
+the IAM user has multiple access keys, the oldest key will be rotated.
+`
+
+const (
+	descRoleName       = "The name of this role."
+	descUsername       = "The IAM user to adopt as a static role."
+	descRotationPeriod = `Period by which to rotate the backing credential of the adopted user.
+This can be a Go duration (e.g., '1m', '24h'), or an integer number of seconds.`
+)
diff --git a/builtin/logical/aws/path_static_roles_test.go b/builtin/logical/aws/path_static_roles_test.go
new file mode 100644
index 000000000000..0244d6a39c7a
--- /dev/null
+++ b/builtin/logical/aws/path_static_roles_test.go
@@ -0,0 +1,545 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package aws
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/hashicorp/go-secure-stdlib/awsutil"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/queue"
+)
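
The `rotation_period` field above is declared as `framework.TypeDurationSecond`, which is why the handlers can assert the raw value to `int`: the framework normalizes both duration strings and integer seconds before the callback runs. A small sketch of that coercion, assuming only the documented `FieldData` behavior:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/framework"
)

func main() {
	schema := map[string]*framework.FieldSchema{
		"rotation_period": {Type: framework.TypeDurationSecond},
	}

	// Both spellings from the field description normalize to integer seconds.
	for _, raw := range []interface{}{"24h", 86400} {
		d := &framework.FieldData{
			Raw:    map[string]interface{}{"rotation_period": raw},
			Schema: schema,
		}
		seconds := d.Get("rotation_period").(int)
		fmt.Println(time.Duration(seconds) * time.Second) // 24h0m0s both times
	}
}
```
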
+// TestStaticRolesValidation verifies that valid requests pass validation and that invalid requests fail validation.
+// This includes the user already existing in IAM, and the rotation period being sufficiently long.
+func TestStaticRolesValidation(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	bgCTX := context.Background() // for brevity
+
+	cases := []struct {
+		name        string
+		opts        []awsutil.MockIAMOption
+		requestData map[string]interface{}
+		isError     bool
+	}{
+		{
+			name: "all good",
+			opts: []awsutil.MockIAMOption{
+				awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}),
+				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+					AccessKey: &iam.AccessKey{
+						AccessKeyId:     aws.String("abcdefghijklmnopqrstuvwxyz"),
+						SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"),
+						UserName:        aws.String("jane-doe"),
+					},
+				}),
+				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+					AccessKeyMetadata: []*iam.AccessKeyMetadata{},
+					IsTruncated:       aws.Bool(false),
+				}),
+			},
+			requestData: map[string]interface{}{
+				"name":            "test",
+				"username":        "jane-doe",
+				"rotation_period": "1d",
+			},
+		},
+		{
+			name: "bad user",
+			opts: []awsutil.MockIAMOption{
+				awsutil.WithGetUserError(errors.New("oh no")),
+			},
+			requestData: map[string]interface{}{
+				"name":            "test",
+				"username":        "jane-doe",
+				"rotation_period": "24h",
+			},
+			isError: true,
+		},
+		{
+			name: "user mismatch",
+			opts: []awsutil.MockIAMOption{
+				awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("ms-impostor"), UserId: aws.String("fake-id")}}),
+			},
+			requestData: map[string]interface{}{
+				"name":            "test",
+				"username":        "jane-doe",
+				"rotation_period": "1d2h",
+			},
+			isError: true,
+		},
+		{
+			name: "bad rotation period",
+			opts: []awsutil.MockIAMOption{
+				awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}),
+			},
+			requestData: map[string]interface{}{
+				"name":            "test",
+				"username":        "jane-doe",
+				"rotation_period": "45s",
+			},
+			isError: true,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			b := Backend(config)
+			miam, err := awsutil.NewMockIAM(c.opts...)(nil)
+			if err != nil {
+				t.Fatal(err)
+			}
+			b.iamClient = miam
+			if err := b.Setup(bgCTX, config); err != nil {
+				t.Fatal(err)
+			}
+			req := &logical.Request{
+				Operation: logical.UpdateOperation,
+				Storage:   config.StorageView,
+				Data:      c.requestData,
+				Path:      "static-roles/test",
+			}
+			_, err = b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data))
+			if c.isError && err == nil {
+				t.Fatal("expected an error but didn't get one")
+			} else if !c.isError && err != nil {
+				t.Fatalf("got an unexpected error: %s", err)
+			}
+		})
+	}
+}
+
+// TestStaticRolesWrite validates that we can write a new entry for a new static role, and that we correctly
+// do not write if the request is invalid in some way.
+func TestStaticRolesWrite(t *testing.T) {
+	bgCTX := context.Background()
+
+	cases := []struct {
+		name string
+		// objects to return from the mock IAM: you'll need a GetUserOutput (to validate
+		// the existence of the user being written), the keys the user has already been
+		// assigned, and the new key Vault requests.
+		opts []awsutil.MockIAMOption
+		// the name, username if updating, and rotation_period of the user. This is the inbound request the code would get.
+		data          map[string]interface{}
+		expectedError bool
+		findUser      bool
+		// if data sends the name "johnny", we'll match the existing user seeded below, whose rotation period is 24 hours.
+ isUpdate bool + newPriority int64 // update time of new item in queue, skip if isUpdate false. There is a wiggle room of 5 seconds + // so the deltas between the old and the new update time should be larger than that to ensure the difference + // can be detected. + }{ + { + name: "happy path", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("jane-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d", + }, + // writes role, writes cred + findUser: true, + }, + { + name: "no aws user", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserError(errors.New("no such user, etc etc")), + }, + data: map[string]interface{}{ + "name": "test", + "username": "a-nony-mous", + "rotation_period": "15s", + }, + expectedError: true, + }, + { + name: "update existing user, decreased rotation duration", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("john-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "johnny", + "rotation_period": "19m", + }, + findUser: true, + isUpdate: true, + newPriority: time.Now().Add(19 * time.Minute).Unix(), + }, + { + name: "update existing user, increased rotation duration", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("john-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "johnny", + "rotation_period": "40h", + }, + findUser: true, + isUpdate: true, + newPriority: time.Now().Add(40 * time.Hour).Unix(), + }, + } + + // if a user exists (user doesn't exist is tested in validation) + // we'll check how many keys the user has - if it's two, we delete one. 
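
All of these tests stub AWS with `awsutil.NewMockIAM`, which returns a factory for a canned `iamiface.IAMAPI`; the tests call the factory with a nil session because nothing ever reaches the network. A self-contained sketch of the pattern (the user values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/hashicorp/go-secure-stdlib/awsutil"
)

func main() {
	// Each WithXxxOutput option pre-cans the response for one API call.
	miam, err := awsutil.NewMockIAM(
		awsutil.WithGetUserOutput(&iam.GetUserOutput{
			User: &iam.User{
				UserName: aws.String("jane-doe"),
				UserId:   aws.String("unique-id"),
			},
		}),
	)(nil) // nil session: the mock never talks to AWS
	if err != nil {
		panic(err)
	}

	out, err := miam.GetUser(&iam.GetUserInput{UserName: aws.String("jane-doe")})
	if err != nil {
		panic(err)
	}
	fmt.Println(*out.User.UserId) // unique-id
}
```
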
+ + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + miam, err := awsutil.NewMockIAM( + c.opts..., + )(nil) + if err != nil { + t.Fatal(err) + } + + b := Backend(config) + b.iamClient = miam + if err := b.Setup(bgCTX, config); err != nil { + t.Fatal(err) + } + + // put a role in storage for update tests + staticRole := staticRoleEntry{ + Name: "johnny", + Username: "john-doe", + ID: "unique-id", + RotationPeriod: 24 * time.Hour, + } + entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: c.data, + Path: "static-roles/" + c.data["name"].(string), + } + + r, err := b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data)) + if c.expectedError && err == nil { + t.Fatal(err) + } else if c.expectedError { + return // save us some if statements + } + + if err != nil { + t.Fatalf("got an error back unexpectedly: %s", err) + } + + if c.findUser && r == nil { + t.Fatal("response was nil, but it shouldn't have been") + } + + role, err := config.StorageView.Get(bgCTX, req.Path) + if c.findUser && (err != nil || role == nil) { + t.Fatalf("couldn't find the role we should have stored: %s", err) + } + var actualData staticRoleEntry + err = role.DecodeJSON(&actualData) + if err != nil { + t.Fatalf("couldn't convert storage data to role entry: %s", err) + } + + // construct expected data + var expectedData staticRoleEntry + fieldData := staticRoleFieldData(c.data) + if c.isUpdate { + // data is johnny + c.data + expectedData = staticRole + } + + var actualItem *queue.Item + if c.isUpdate { + actualItem, _ = b.credRotationQueue.PopByKey(expectedData.Name) + } + + if u, ok := fieldData.GetOk("username"); ok { + expectedData.Username = u.(string) + } + if r, ok := fieldData.GetOk("rotation_period"); ok { + expectedData.RotationPeriod = time.Duration(r.(int)) * time.Second + } + if n, ok := fieldData.GetOk("name"); ok { + expectedData.Name = n.(string) + } + + // validate fields + if eu, au := expectedData.Username, actualData.Username; eu != au { + t.Fatalf("mismatched username, expected %q but got %q", eu, au) + } + if er, ar := expectedData.RotationPeriod, actualData.RotationPeriod; er != ar { + t.Fatalf("mismatched rotation period, expected %q but got %q", er, ar) + } + if en, an := expectedData.Name, actualData.Name; en != an { + t.Fatalf("mismatched role name, expected %q, but got %q", en, an) + } + + // one-off to avoid importing/casting + abs := func(x int64) int64 { + if x < 0 { + return -x + } + return x + } + + if c.isUpdate { + if ep, ap := c.newPriority, actualItem.Priority; abs(ep-ap) > 5 { // 5 second wiggle room for how long the test takes + t.Fatalf("mismatched updated priority, expected %d but got %d", ep, ap) + } + } + }) + } +} + +// TestStaticRoleRead validates that we can read a configured role and correctly do not read anything if we +// request something that doesn't exist. 
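
For the read tests below, it helps to see the exact shape `formatResponse` produces: the `structs` tags on `staticRoleEntry` become the map keys, and `rotation_period` is flattened to integer seconds rather than a `time.Duration`. An in-package sketch (it relies on the definitions added earlier in this patch):

```go
func ExampleFormatResponse() {
	cfg := staticRoleEntry{
		Name:           "test",
		ID:             "unique-id",
		Username:       "jane-doe",
		RotationPeriod: 24 * time.Hour,
	}

	resp := formatResponse(cfg)
	fmt.Println(resp["name"])            // test
	fmt.Println(resp["username"])        // jane-doe
	fmt.Println(resp["rotation_period"]) // 86400 (int64 seconds, not a Duration)
}
```
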
+func TestStaticRoleRead(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() + + // test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe" + cases := []struct { + name string + roleName string + found bool + }{ + { + name: "role name exists", + roleName: "test", + found: true, + }, + { + name: "role name not found", + roleName: "toast", + found: false, // implied, but set for clarity + }, + } + + staticRole := staticRoleEntry{ + Name: "test", + Username: "jane-doe", + RotationPeriod: 24 * time.Hour, + } + entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + req := &logical.Request{ + Operation: logical.ReadOperation, + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.roleName, + }, + Path: formatRoleStoragePath(c.roleName), + } + + b := Backend(config) + + r, err := b.pathStaticRolesRead(bgCTX, req, staticRoleFieldData(req.Data)) + if err != nil { + t.Fatal(err) + } + if c.found { + if r == nil { + t.Fatal("response was nil, but it shouldn't have been") + } + } else { + if r != nil { + t.Fatal("response should have been nil on a non-existent role") + } + } + }) + } +} + +// TestStaticRoleDelete validates that we correctly remove a role on a delete request, and that we correctly do not +// remove anything if a role does not exist with that name. +func TestStaticRoleDelete(t *testing.T) { + bgCTX := context.Background() + + // test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe" + cases := []struct { + name string + role string + found bool + }{ + { + name: "role found", + role: "test", + found: true, + }, + { + name: "role not found", + role: "tossed", + found: false, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + // fake an IAM + var iamfunc awsutil.IAMAPIFunc + if !c.found { + iamfunc = awsutil.NewMockIAM(awsutil.WithDeleteAccessKeyError(errors.New("shouldn't have called delete"))) + } else { + iamfunc = awsutil.NewMockIAM() + } + miam, err := iamfunc(nil) + if err != nil { + t.Fatalf("couldn't initialize mockiam: %s", err) + } + + b := Backend(config) + b.iamClient = miam + + // put in storage + staticRole := staticRoleEntry{ + Name: "test", + Username: "jane-doe", + RotationPeriod: 24 * time.Hour, + } + entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + l, err := config.StorageView.List(bgCTX, "") + if err != nil || len(l) != 1 { + t.Fatalf("couldn't add an entry to storage during test setup: %s", err) + } + + // put in queue + err = b.credRotationQueue.Push(&queue.Item{ + Key: staticRole.Name, + Value: staticRole, + Priority: time.Now().Add(90 * time.Hour).Unix(), + }) + if err != nil { + t.Fatalf("couldn't add items to pq") + } + + req := &logical.Request{ + Operation: logical.ReadOperation, + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.role, + }, + Path: formatRoleStoragePath(c.role), + } + + r, err := b.pathStaticRolesDelete(bgCTX, 
req, staticRoleFieldData(req.Data)) + if err != nil { + t.Fatal(err) + } + if r != nil { + t.Fatal("response wasn't nil, but it should have been") + } + + l, err = config.StorageView.List(bgCTX, "") + if err != nil { + t.Fatal(err) + } + if c.found && len(l) != 0 { + t.Fatal("size of role storage is non zero after delete") + } else if !c.found && len(l) != 1 { + t.Fatal("size of role storage changed after what should have been no deletion") + } + + if c.found && b.credRotationQueue.Len() != 0 { + t.Fatal("size of queue is non-zero after delete") + } else if !c.found && b.credRotationQueue.Len() != 1 { + t.Fatal("size of queue changed after what should have been no deletion") + } + }) + } +} + +func staticRoleFieldData(data map[string]interface{}) *framework.FieldData { + schema := map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + } + + return &framework.FieldData{ + Raw: data, + Schema: schema, + } +} diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go index ca5e1a295519..430f7754eec9 100644 --- a/builtin/logical/aws/path_user.go +++ b/builtin/logical/aws/path_user.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -21,6 +21,12 @@ import ( func pathUser(b *backend) *framework.Path { return &framework.Path{ Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "generate", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -29,21 +35,38 @@ func pathUser(b *backend) *framework.Path { "role_arn": { Type: framework.TypeString, Description: "ARN of role to assume when credential_type is " + assumedRoleCred, + Query: true, }, "ttl": { Type: framework.TypeDurationSecond, Description: "Lifetime of the returned credentials in seconds", Default: 3600, + Query: true, }, "role_session_name": { Type: framework.TypeString, Description: "Session name to use when assuming role. 
Max chars: 64", + Query: true, + }, + "mfa_code": { + Type: framework.TypeString, + Description: "MFA code to provide for session tokens", }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathCredsRead, - logical.UpdateOperation: b.pathCredsRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathCredsRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "credentials|sts-credentials", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCredsRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "credentials-with-parameters|sts-credentials-with-parameters", + }, + }, }, HelpSynopsis: pathUserHelpSyn, @@ -88,6 +111,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr roleArn := d.Get("role_arn").(string) roleSessionName := d.Get("role_session_name").(string) + mfaCode := d.Get("mfa_code").(string) var credentialType string switch { @@ -133,9 +157,11 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr case !strutil.StrListContains(role.RoleArns, roleArn): return logical.ErrorResponse(fmt.Sprintf("role_arn %q not in allowed role arns for Vault role %q", roleArn, roleName)), nil } - return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName) + return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName, role.SessionTags, role.ExternalID) case federationTokenCred: return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl) + case sessionTokenCred: + return b.getSessionToken(ctx, req.Storage, role.SerialNumber, mfaCode, ttl) default: return logical.ErrorResponse(fmt.Sprintf("unknown credential_type: %q", credentialType)), nil } @@ -155,7 +181,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } // Get information about this user - groupsResp, err := client.ListGroupsForUser(&iam.ListGroupsForUserInput{ + groupsResp, err := client.ListGroupsForUserWithContext(ctx, &iam.ListGroupsForUserInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -194,7 +220,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k groups := groupsResp.Groups // Inline (user) policies - policiesResp, err := client.ListUserPolicies(&iam.ListUserPoliciesInput{ + policiesResp, err := client.ListUserPoliciesWithContext(ctx, &iam.ListUserPoliciesInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -204,7 +230,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k policies := policiesResp.PolicyNames // Attached managed policies - manPoliciesResp, err := client.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{ + manPoliciesResp, err := client.ListAttachedUserPoliciesWithContext(ctx, &iam.ListAttachedUserPoliciesInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -213,7 +239,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } manPolicies := manPoliciesResp.AttachedPolicies - keysResp, err := client.ListAccessKeys(&iam.ListAccessKeysInput{ + keysResp, err := client.ListAccessKeysWithContext(ctx, &iam.ListAccessKeysInput{ UserName: 
aws.String(username), MaxItems: aws.Int64(1000), }) @@ -224,7 +250,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Revoke all keys for _, k := range keys { - _, err = client.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + _, err = client.DeleteAccessKeyWithContext(ctx, &iam.DeleteAccessKeyInput{ AccessKeyId: k.AccessKeyId, UserName: aws.String(username), }) @@ -235,7 +261,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Detach managed policies for _, p := range manPolicies { - _, err = client.DetachUserPolicy(&iam.DetachUserPolicyInput{ + _, err = client.DetachUserPolicyWithContext(ctx, &iam.DetachUserPolicyInput{ UserName: aws.String(username), PolicyArn: p.PolicyArn, }) @@ -246,7 +272,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Delete any inline (user) policies for _, p := range policies { - _, err = client.DeleteUserPolicy(&iam.DeleteUserPolicyInput{ + _, err = client.DeleteUserPolicyWithContext(ctx, &iam.DeleteUserPolicyInput{ UserName: aws.String(username), PolicyName: p, }) @@ -257,7 +283,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Remove the user from all their groups for _, g := range groups { - _, err = client.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{ + _, err = client.RemoveUserFromGroupWithContext(ctx, &iam.RemoveUserFromGroupInput{ GroupName: g.GroupName, UserName: aws.String(username), }) @@ -267,7 +293,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } // Delete the user - _, err = client.DeleteUser(&iam.DeleteUserInput{ + _, err = client.DeleteUserWithContext(ctx, &iam.DeleteUserInput{ UserName: aws.String(username), }) if err != nil { diff --git a/builtin/logical/aws/rollback.go b/builtin/logical/aws/rollback.go index 847ecd1c258b..6136db9baae6 100644 --- a/builtin/logical/aws/rollback.go +++ b/builtin/logical/aws/rollback.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws diff --git a/builtin/logical/aws/rotation.go b/builtin/logical/aws/rotation.go new file mode 100644 index 000000000000..0e9e22fc8273 --- /dev/null +++ b/builtin/logical/aws/rotation.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +// rotateExpiredStaticCreds will pop expired credentials (credentials whose priority +// represents a time before the present), rotate the associated credential, and push +// them back onto the queue with the new priority. +func (b *backend) rotateExpiredStaticCreds(ctx context.Context, req *logical.Request) error { + var errs *multierror.Error + + for { + keepGoing, err := b.rotateCredential(ctx, req.Storage) + if err != nil { + errs = multierror.Append(errs, err) + } + if !keepGoing { + if errs.ErrorOrNil() != nil { + return fmt.Errorf("error(s) occurred while rotating expired static credentials: %w", errs) + } else { + return nil + } + } + } +} + +// rotateCredential pops an element from the priority queue, and if it is expired, rotate and re-push. +// If a cred was ready for rotation, return true, otherwise return false. 
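
This hunk doesn't show what actually invokes `rotateExpiredStaticCreds`; presumably the backend registers it so that Vault's periodic ticker (roughly once a minute per mount) drains any due items. A sketch under that assumption, using the `framework.Backend.PeriodicFunc` hook (the real wiring lives in the backend constructor, outside this hunk):

```go
// Illustrative only: the actual registration is not part of this diff hunk.
func wireRotation(b *backend) *framework.Backend {
	return &framework.Backend{
		// Vault calls PeriodicFunc about once per minute for each
		// mount; each tick pops and rotates every overdue credential.
		PeriodicFunc: func(ctx context.Context, req *logical.Request) error {
			return b.rotateExpiredStaticCreds(ctx, req)
		},
	}
}
```
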
+func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (wasReady bool, err error) {
+	// If the queue is empty, or the first item does not need a rotation yet (priority is the next rotation timestamp), there is nothing to do.
+	item, err := b.credRotationQueue.Pop()
+	if err != nil {
+		// the queue is just empty, which is fine.
+		if errors.Is(err, queue.ErrEmpty) {
+			return false, nil
+		}
+		return false, fmt.Errorf("failed to pop from queue: %w", err)
+	}
+	if item.Priority > time.Now().Unix() {
+		// no rotation required
+		// push the item back into the priority queue
+		err = b.credRotationQueue.Push(item)
+		if err != nil {
+			return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", item.Key, err)
+		}
+		return false, nil
+	}
+
+	cfg := item.Value.(staticRoleEntry)
+
+	err = b.createCredential(ctx, storage, cfg, true)
+	if err != nil {
+		// put it back in the queue with a backoff
+		item.Priority = time.Now().Add(10 * time.Second).Unix()
+		innerErr := b.credRotationQueue.Push(item)
+		if innerErr != nil {
+			return true, fmt.Errorf("failed to add item into the rotation queue for role %q(%w), while attempting to recover from failure to create credential: %w", cfg.Name, innerErr, err)
+		}
+		// there was one that "should have" rotated, so we want to keep looking further down the queue
+		return true, err
+	}
+
+	// set new priority and re-queue
+	item.Priority = time.Now().Add(cfg.RotationPeriod).Unix()
+	err = b.credRotationQueue.Push(item)
+	if err != nil {
+		return true, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err)
+	}
+
+	return true, nil
+}
+
+// createCredential will create a new IAM credential, deleting the oldest one if necessary.
+func (b *backend) createCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
+	iamClient, err := b.clientIAM(ctx, storage)
+	if err != nil {
+		return fmt.Errorf("unable to get the AWS IAM client: %w", err)
+	}
+
+	// IAM users can have at most 2 sets of keys at a time.
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html)
+	// Ideally we would get this value through an API check, but I'm not sure one exists.
+	const maxAllowedKeys = 2
+
+	err = b.validateIAMUserExists(ctx, storage, &cfg, false)
+	if err != nil {
+		return fmt.Errorf("iam user didn't exist, or username/userid didn't match: %w", err)
+	}
+
+	accessKeys, err := iamClient.ListAccessKeys(&iam.ListAccessKeysInput{
+		UserName: aws.String(cfg.Username),
+	})
+	if err != nil {
+		return fmt.Errorf("unable to list existing access keys for IAM user %q: %w", cfg.Username, err)
+	}
+
+	// If we have the maximum number of keys, we have to delete one to make another (so we can get the credentials).
+	// We'll delete the oldest one.
+	//
+	// Since this check relies on a pre-coded maximum, it's a bit fragile. If the number goes up, we risk deleting
+	// a key when we didn't need to. If this number goes down, we'll start throwing errors because we think we're
+	// allowed to create a key and aren't. In either case, adjusting the constant should be sufficient to fix things.
+ if len(accessKeys.AccessKeyMetadata) >= maxAllowedKeys { + oldestKey := accessKeys.AccessKeyMetadata[0] + + for i := 1; i < len(accessKeys.AccessKeyMetadata); i++ { + if accessKeys.AccessKeyMetadata[i].CreateDate.Before(*oldestKey.CreateDate) { + oldestKey = accessKeys.AccessKeyMetadata[i] + } + } + + _, err := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + AccessKeyId: oldestKey.AccessKeyId, + UserName: oldestKey.UserName, + }) + if err != nil { + return fmt.Errorf("unable to delete oldest access keys for user %q: %w", cfg.Username, err) + } + } + + // Create new set of keys + out, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{ + UserName: aws.String(cfg.Username), + }) + if err != nil { + return fmt.Errorf("unable to create new access keys for user %q: %w", cfg.Username, err) + } + + // Persist new keys + entry, err := logical.StorageEntryJSON(formatCredsStoragePath(cfg.Name), &awsCredentials{ + AccessKeyID: *out.AccessKey.AccessKeyId, + SecretAccessKey: *out.AccessKey.SecretAccessKey, + }) + if err != nil { + return fmt.Errorf("failed to marshal object to JSON: %w", err) + } + if shouldLockStorage { + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + } + err = storage.Put(ctx, entry) + if err != nil { + return fmt.Errorf("failed to save object in storage: %w", err) + } + + return nil +} + +// delete credential will remove the credential associated with the role from storage. +func (b *backend) deleteCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error { + // synchronize storage access if we didn't in the caller. + if shouldLockStorage { + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + } + + key, err := storage.Get(ctx, formatCredsStoragePath(cfg.Name)) + if err != nil { + return fmt.Errorf("couldn't find key in storage: %w", err) + } + // no entry, so i guess we deleted it already + if key == nil { + return nil + } + var creds awsCredentials + err = key.DecodeJSON(&creds) + if err != nil { + return fmt.Errorf("couldn't decode storage entry to a valid credential: %w", err) + } + + err = storage.Delete(ctx, formatCredsStoragePath(cfg.Name)) + if err != nil { + return fmt.Errorf("couldn't delete from storage: %w", err) + } + + // because we have the information, this is the one we created, so it's safe for us to delete. + _, err = b.iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String(creds.AccessKeyID), + UserName: aws.String(cfg.Username), + }) + if err != nil { + return fmt.Errorf("couldn't delete from IAM: %w", err) + } + + return nil +} diff --git a/builtin/logical/aws/rotation_test.go b/builtin/logical/aws/rotation_test.go new file mode 100644 index 000000000000..51de3141b0ad --- /dev/null +++ b/builtin/logical/aws/rotation_test.go @@ -0,0 +1,439 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/iam/iamiface" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +// TestRotation verifies that the rotation code and priority queue correctly selects and rotates credentials +// for static secrets. 
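
TestRotation (below) seeds queue items that are already "aged out": the priority is computed as now − age + rotation period, so any credential older than its period lands in the past and is immediately due. A worked sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()

	age := 5 * time.Second            // how long ago the credential was created
	rotationPeriod := 2 * time.Second // how often it should rotate

	// Same arithmetic the test uses to seed Item.Priority.
	priority := now.Add(-age).Add(rotationPeriod).Unix()

	// The deadline is 3 seconds in the past, so rotateCredential
	// sees priority <= now and rotates the credential immediately.
	fmt.Println(priority <= now.Unix()) // true
}
```
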
+func TestRotation(t *testing.T) { + bgCTX := context.Background() + + type credToInsert struct { + config staticRoleEntry // role configuration from a normal createRole request + age time.Duration // how old the cred should be - if this is longer than the config.RotationPeriod, + // the cred is 'pre-expired' + + changed bool // whether we expect the cred to change - this is technically redundant to a comparison between + // rotationPeriod and age. + } + + // due to a limitation with the mockIAM implementation, any cred you want to rotate must have + // username jane-doe and userid unique-id, since we can only pre-can one exact response to GetUser + cases := []struct { + name string + creds []credToInsert + }{ + { + name: "refresh one", + creds: []credToInsert{ + { + config: staticRoleEntry{ + Name: "test", + Username: "jane-doe", + ID: "unique-id", + RotationPeriod: 2 * time.Second, + }, + age: 5 * time.Second, + changed: true, + }, + }, + }, + { + name: "refresh none", + creds: []credToInsert{ + { + config: staticRoleEntry{ + Name: "test", + Username: "jane-doe", + ID: "unique-id", + RotationPeriod: 1 * time.Minute, + }, + age: 5 * time.Second, + changed: false, + }, + }, + }, + { + name: "refresh one of two", + creds: []credToInsert{ + { + config: staticRoleEntry{ + Name: "toast", + Username: "john-doe", + ID: "other-id", + RotationPeriod: 1 * time.Minute, + }, + age: 5 * time.Second, + changed: false, + }, + { + config: staticRoleEntry{ + Name: "test", + Username: "jane-doe", + ID: "unique-id", + RotationPeriod: 1 * time.Second, + }, + age: 5 * time.Second, + changed: true, + }, + }, + }, + { + name: "no creds to rotate", + creds: []credToInsert{}, + }, + } + + ak := "long-access-key-id" + oldSecret := "abcdefghijklmnopqrstuvwxyz" + newSecret := "zyxwvutsrqponmlkjihgfedcba" + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + + // insert all our creds + for i, cred := range c.creds { + + // all the creds will be the same for every user, but that's okay + // since what we care about is whether they changed on a single-user basis. 
+			miam, err := awsutil.NewMockIAM(
+				// a single empty key entry for the existing user
+				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+					AccessKeyMetadata: []*iam.AccessKeyMetadata{
+						{},
+					},
+				}),
+				// initial key to store
+				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+					AccessKey: &iam.AccessKey{
+						AccessKeyId:     aws.String(ak),
+						SecretAccessKey: aws.String(oldSecret),
+					},
+				}),
+				awsutil.WithGetUserOutput(&iam.GetUserOutput{
+					User: &iam.User{
+						UserId:   aws.String(cred.config.ID),
+						UserName: aws.String(cred.config.Username),
+					},
+				}),
+			)(nil)
+			if err != nil {
+				t.Fatalf("couldn't initialize mock IAM handler: %s", err)
+			}
+			b.iamClient = miam
+
+			err = b.createCredential(bgCTX, config.StorageView, cred.config, true)
+			if err != nil {
+				t.Fatalf("couldn't insert credential %d: %s", i, err)
+			}
+
+			item := &queue.Item{
+				Key:      cred.config.Name,
+				Value:    cred.config,
+				Priority: time.Now().Add(-1 * cred.age).Add(cred.config.RotationPeriod).Unix(),
+			}
+			err = b.credRotationQueue.Push(item)
+			if err != nil {
+				t.Fatalf("couldn't push item onto queue: %s", err)
+			}
+		}
+
+		// update aws responses, same argument for why it's okay every cred will be the same
+		miam, err := awsutil.NewMockIAM(
+			// old key
+			awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+				AccessKeyMetadata: []*iam.AccessKeyMetadata{
+					{
+						AccessKeyId: aws.String(ak),
+					},
+				},
+			}),
+			// new key
+			awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+				AccessKey: &iam.AccessKey{
+					AccessKeyId:     aws.String(ak),
+					SecretAccessKey: aws.String(newSecret),
+				},
+			}),
+			awsutil.WithGetUserOutput(&iam.GetUserOutput{
+				User: &iam.User{
+					UserId:   aws.String("unique-id"),
+					UserName: aws.String("jane-doe"),
+				},
+			}),
+		)(nil)
+		if err != nil {
+			t.Fatalf("couldn't initialize mock IAM handler: %s", err)
+		}
+		b.iamClient = miam
+
+		req := &logical.Request{
+			Storage: config.StorageView,
+		}
+		err = b.rotateExpiredStaticCreds(bgCTX, req)
+		if err != nil {
+			t.Fatalf("got an error rotating credentials: %s", err)
+		}
+
+		// check our credentials
+		for i, cred := range c.creds {
+			entry, err := config.StorageView.Get(bgCTX, formatCredsStoragePath(cred.config.Name))
+			if err != nil {
+				t.Fatalf("got an error retrieving credentials %d", i)
+			}
+			var out awsCredentials
+			err = entry.DecodeJSON(&out)
+			if err != nil {
+				t.Fatalf("could not unmarshal storage view entry for cred %d to an aws credential: %s", i, err)
+			}
+
+			if cred.changed && out.SecretAccessKey != newSecret {
+				t.Fatalf("expected the key for cred %d to have changed, but it hasn't", i)
+			} else if !cred.changed && out.SecretAccessKey != oldSecret {
+				t.Fatalf("expected the key for cred %d to have stayed the same, but it changed", i)
+			}
+		}
+		})
+	}
+}
+
+type fakeIAM struct {
+	iamiface.IAMAPI
+	delReqs []*iam.DeleteAccessKeyInput
+}
+
+func (f *fakeIAM) DeleteAccessKey(r *iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) {
+	f.delReqs = append(f.delReqs, r)
+	return f.IAMAPI.DeleteAccessKey(r)
+}
+
+// TestCreateCredential verifies that credential creation only deletes an existing credential when it needs to
+// (i.e., the user already has two keys on IAM), and that it correctly deletes the oldest one.
+func TestCreateCredential(t *testing.T) { + cases := []struct { + name string + username string + id string + deletedKey string + opts []awsutil.MockIAMOption + }{ + { + name: "zero keys", + username: "jane-doe", + id: "unique-id", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + }), + // delete should _not_ be called + awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + { + name: "one key", + username: "jane-doe", + id: "unique-id", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{ + {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Now())}, + }, + }), + // delete should _not_ be called + awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + { + name: "two keys", + username: "jane-doe", + id: "unique-id", + deletedKey: "foo", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{ + {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Time{})}, + {AccessKeyId: aws.String("bar"), CreateDate: aws.Time(time.Now())}, + }, + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + miam, err := awsutil.NewMockIAM( + c.opts..., + )(nil) + if err != nil { + t.Fatal(err) + } + fiam := &fakeIAM{ + IAMAPI: miam, + } + + b := Backend(config) + b.iamClient = fiam + + err = b.createCredential(context.Background(), config.StorageView, staticRoleEntry{Username: c.username, ID: c.id}, true) + if err != nil { + t.Fatalf("got an error we didn't expect: %q", err) + } + + if c.deletedKey != "" { + if len(fiam.delReqs) != 1 { + t.Fatalf("called the wrong number of deletes (called %d deletes)", len(fiam.delReqs)) + } + actualKey := *fiam.delReqs[0].AccessKeyId + if c.deletedKey != actualKey { + t.Fatalf("we deleted the wrong key: %q instead of %q", actualKey, c.deletedKey) + } + } + }) + } +} + +// TestRequeueOnError verifies that in the case of an error, the entry will still be in the queue for later rotation +func TestRequeueOnError(t *testing.T) { + bgCTX := context.Background() + + cred := staticRoleEntry{ + Name: "test", + Username: "jane-doe", + RotationPeriod: 30 * time.Minute, + } + + ak := "long-access-key-id" + oldSecret := "abcdefghijklmnopqrstuvwxyz" 
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+
+	b := Backend(config)
+
+	// go through the process of adding a key
+	miam, err := awsutil.NewMockIAM(
+		awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+			AccessKeyMetadata: []*iam.AccessKeyMetadata{
+				{},
+			},
+		}),
+		// initial key to store
+		awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+			AccessKey: &iam.AccessKey{
+				AccessKeyId:     aws.String(ak),
+				SecretAccessKey: aws.String(oldSecret),
+			},
+		}),
+		awsutil.WithGetUserOutput(&iam.GetUserOutput{
+			User: &iam.User{
+				UserId:   aws.String(cred.ID),
+				UserName: aws.String(cred.Username),
+			},
+		}),
+	)(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	b.iamClient = miam
+
+	err = b.createCredential(bgCTX, config.StorageView, cred, true)
+	if err != nil {
+		t.Fatalf("couldn't insert credential: %s", err)
+	}
+
+	// put the cred in the queue but age it out
+	item := &queue.Item{
+		Key:      cred.Name,
+		Value:    cred,
+		Priority: time.Now().Add(-10 * time.Minute).Unix(),
+	}
+	err = b.credRotationQueue.Push(item)
+	if err != nil {
+		t.Fatalf("couldn't push item onto queue: %s", err)
+	}
+
+	// update the mock iam with the next requests
+	miam, err = awsutil.NewMockIAM(
+		awsutil.WithGetUserError(errors.New("oh no")),
+	)(nil)
+	if err != nil {
+		t.Fatalf("couldn't initialize the mock iam: %s", err)
+	}
+	b.iamClient = miam
+
+	// now rotate, but it will fail
+	r, e := b.rotateCredential(bgCTX, config.StorageView)
+	if !r {
+		t.Fatalf("rotate credential should return true in this case, but it didn't")
+	}
+	if e == nil {
+		t.Fatalf("we expected an error when rotating a credential, but didn't get one")
+	}
+	// the queue should be updated though
+	i, e := b.credRotationQueue.PopByKey(cred.Name)
+	if e != nil {
+		t.Fatalf("queue error: %s", e)
+	}
+	delta := time.Now().Add(10*time.Second).Unix() - i.Priority
+	if delta < -5 || delta > 5 {
+		t.Fatalf("priority should be within 5 seconds of our backoff interval")
+	}
+}
diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go
index 9b8a2bc9b8b1..a9a9290cc5b7 100644
--- a/builtin/logical/aws/secret_access_keys.go
+++ b/builtin/logical/aws/secret_access_keys.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -9,15 +9,14 @@ import ( "regexp" "time" - "github.com/hashicorp/go-secure-stdlib/awsutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/template" - "github.com/hashicorp/vault/sdk/logical" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/template" + "github.com/hashicorp/vault/sdk/logical" ) const ( @@ -38,9 +37,14 @@ func secretAccessKeys(b *backend) *framework.Secret { Type: framework.TypeString, Description: "Secret Key", }, + "session_token": { + Type: framework.TypeString, + Description: "Session Token", + }, "security_token": { Type: framework.TypeString, Description: "Security Token", + Deprecated: true, }, }, @@ -153,25 +157,88 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, return logical.ErrorResponse("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred), nil } - tokenResp, err := stsClient.GetFederationToken(getTokenInput) + tokenResp, err := stsClient.GetFederationTokenWithContext(ctx, getTokenInput) if err != nil { return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err) } - // STS credentials cannot be revoked so do not create a lease - return &logical.Response{ - Data: map[string]interface{}{ - "access_key": *tokenResp.Credentials.AccessKeyId, - "secret_key": *tokenResp.Credentials.SecretAccessKey, - "security_token": *tokenResp.Credentials.SessionToken, - "ttl": uint64(tokenResp.Credentials.Expiration.Sub(time.Now()).Seconds()), - }, - }, nil + // While STS credentials cannot be revoked/renewed, we will still create a lease since users are + // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. + // + ttl := time.Until(*tokenResp.Credentials.Expiration) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "security_token": *tokenResp.Credentials.SessionToken, + "session_token": *tokenResp.Credentials.SessionToken, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "username": username, + "policy": policy, + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = ttl + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil +} + +// NOTE: Getting session tokens with or without MFA/TOTP has behavior that can cause confusion. +// When an AWS IAM user has a policy attached requiring an MFA code by use of "aws:MultiFactorAuthPresent": "true", +// then credentials may still be returned without an MFA code provided. +// If a Vault role associated with the IAM user is configured without both an mfa_serial_number and +// the mfa_code is not given, the API call is successful and returns credentials. These credentials +// are scoped to any resources in the policy that do NOT have "aws:MultiFactorAuthPresent": "true" set and +// accessing resources with it set will be denied. +// This is expected behavior, as the policy may have a mix of permissions, some requiring MFA and others not. 
+// If an mfa_serial_number is set on the Vault role, then a valid mfa_code MUST be provided to succeed. +func (b *backend) getSessionToken(ctx context.Context, s logical.Storage, serialNumber, mfaCode string, lifeTimeInSeconds int64) (*logical.Response, error) { + stsClient, err := b.clientSTS(ctx, s) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + getTokenInput := &sts.GetSessionTokenInput{ + DurationSeconds: &lifeTimeInSeconds, + } + if serialNumber != "" { + getTokenInput.SerialNumber = &serialNumber + } + if mfaCode != "" { + getTokenInput.TokenCode = &mfaCode + } + + tokenResp, err := stsClient.GetSessionToken(getTokenInput) + if err != nil { + return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err) + } + + ttl := time.Until(*tokenResp.Credentials.Expiration) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "session_token": *tokenResp.Credentials.SessionToken, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = time.Until(*tokenResp.Credentials.Expiration) + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil } func (b *backend) assumeRole(ctx context.Context, s logical.Storage, displayName, roleName, roleArn, policy string, policyARNs []string, - iamGroups []string, lifeTimeInSeconds int64, roleSessionName string) (*logical.Response, error, + iamGroups []string, lifeTimeInSeconds int64, roleSessionName string, sessionTags map[string]string, externalID string) (*logical.Response, error, ) { // grab any IAM group policies associated with the vault role, both inline // and managed @@ -228,21 +295,48 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, if len(policyARNs) > 0 { assumeRoleInput.SetPolicyArns(convertPolicyARNs(policyARNs)) } - tokenResp, err := stsClient.AssumeRole(assumeRoleInput) + if externalID != "" { + assumeRoleInput.SetExternalId(externalID) + } + var tags []*sts.Tag + for k, v := range sessionTags { + tags = append(tags, + &sts.Tag{ + Key: aws.String(k), + Value: aws.String(v), + }, + ) + } + assumeRoleInput.SetTags(tags) + tokenResp, err := stsClient.AssumeRoleWithContext(ctx, assumeRoleInput) if err != nil { return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err) } - // STS credentials cannot be revoked so do not create a lease - return &logical.Response{ - Data: map[string]interface{}{ - "access_key": *tokenResp.Credentials.AccessKeyId, - "secret_key": *tokenResp.Credentials.SecretAccessKey, - "security_token": *tokenResp.Credentials.SessionToken, - "arn": *tokenResp.AssumedRoleUser.Arn, - "ttl": uint64(tokenResp.Credentials.Expiration.Sub(time.Now()).Seconds()), - }, - }, nil + // While STS credentials cannot be revoked/renewed, we will still create a lease since users are + // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. 
+ // + ttl := time.Until(*tokenResp.Credentials.Expiration) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "security_token": *tokenResp.Credentials.SessionToken, + "session_token": *tokenResp.Credentials.SessionToken, + "arn": *tokenResp.AssumedRoleUser.Arn, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "username": roleSessionName, + "policy": roleArn, + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = ttl + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil } func readConfig(ctx context.Context, storage logical.Storage) (rootConfig, error) { @@ -314,7 +408,7 @@ func (b *backend) secretAccessKeysCreate( } // Create the user - _, err = iamClient.CreateUser(createUserRequest) + _, err = iamClient.CreateUserWithContext(ctx, createUserRequest) if err != nil { if walErr := framework.DeleteWAL(ctx, s, walID); walErr != nil { iamErr := fmt.Errorf("error creating IAM user: %w", err) @@ -325,7 +419,7 @@ func (b *backend) secretAccessKeysCreate( for _, arn := range role.PolicyArns { // Attach existing policy against user - _, err = iamClient.AttachUserPolicy(&iam.AttachUserPolicyInput{ + _, err = iamClient.AttachUserPolicyWithContext(ctx, &iam.AttachUserPolicyInput{ UserName: aws.String(username), PolicyArn: aws.String(arn), }) @@ -336,7 +430,7 @@ func (b *backend) secretAccessKeysCreate( } if role.PolicyDocument != "" { // Add new inline user policy against user - _, err = iamClient.PutUserPolicy(&iam.PutUserPolicyInput{ + _, err = iamClient.PutUserPolicyWithContext(ctx, &iam.PutUserPolicyInput{ UserName: aws.String(username), PolicyName: aws.String(policyName), PolicyDocument: aws.String(role.PolicyDocument), @@ -348,7 +442,7 @@ func (b *backend) secretAccessKeysCreate( for _, group := range role.IAMGroups { // Add user to IAM groups - _, err = iamClient.AddUserToGroup(&iam.AddUserToGroupInput{ + _, err = iamClient.AddUserToGroupWithContext(ctx, &iam.AddUserToGroupInput{ UserName: aws.String(username), GroupName: aws.String(group), }) @@ -367,18 +461,17 @@ func (b *backend) secretAccessKeysCreate( } if len(tags) > 0 { - _, err = iamClient.TagUser(&iam.TagUserInput{ + _, err = iamClient.TagUserWithContext(ctx, &iam.TagUserInput{ Tags: tags, UserName: &username, }) - if err != nil { return logical.ErrorResponse("Error adding tags to user: %s", err), awsutil.CheckAWSError(err) } } // Create the keys - keyResp, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{ + keyResp, err := iamClient.CreateAccessKeyWithContext(ctx, &iam.CreateAccessKeyInput{ UserName: aws.String(username), }) if err != nil { @@ -394,9 +487,9 @@ func (b *backend) secretAccessKeysCreate( // Return the info! 
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ - "access_key": *keyResp.AccessKey.AccessKeyId, - "secret_key": *keyResp.AccessKey.SecretAccessKey, - "security_token": nil, + "access_key": *keyResp.AccessKey.AccessKeyId, + "secret_key": *keyResp.AccessKey.SecretAccessKey, + "session_token": nil, }, map[string]interface{}{ "username": username, "policy": role, diff --git a/builtin/logical/aws/secret_access_keys_test.go b/builtin/logical/aws/secret_access_keys_test.go index 9c56e673fdcc..8c6804d94641 100644 --- a/builtin/logical/aws/secret_access_keys_test.go +++ b/builtin/logical/aws/secret_access_keys_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -120,7 +120,7 @@ func TestGenUsername(t *testing.T) { func TestReadConfig_DefaultTemplate(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -164,7 +164,7 @@ func TestReadConfig_DefaultTemplate(t *testing.T) { func TestReadConfig_CustomTemplate(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/stepwise_test.go b/builtin/logical/aws/stepwise_test.go index b6f1ffea81d5..dff852859f90 100644 --- a/builtin/logical/aws/stepwise_test.go +++ b/builtin/logical/aws/stepwise_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -70,7 +70,7 @@ func testAccStepwiseRead(t *testing.T, path, name string, credentialTests []cred var d struct { AccessKey string `mapstructure:"access_key"` SecretKey string `mapstructure:"secret_key"` - STSToken string `mapstructure:"security_token"` + STSToken string `mapstructure:"session_token"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err diff --git a/builtin/logical/consul/backend.go b/builtin/logical/consul/backend.go index ea2cd369e1ad..52aeb3cbceec 100644 --- a/builtin/logical/consul/backend.go +++ b/builtin/logical/consul/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -10,6 +10,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixConsul = "consul" + // ReportedVersion is used to report a specific version to Vault. var ReportedVersion = "" diff --git a/builtin/logical/consul/backend_test.go b/builtin/logical/consul/backend_test.go index 94ce864d965f..aa377f26e084 100644 --- a/builtin/logical/consul/backend_test.go +++ b/builtin/logical/consul/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -849,6 +849,22 @@ func TestBackend_Roles(t *testing.T) { } } +func TestBackend_Enterprise_Diff_Namespace_Revocation(t *testing.T) { + if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense { + t.Skip("Skipping: No enterprise license found") + } + + testBackendEntDiffNamespaceRevocation(t) +} + +func TestBackend_Enterprise_Diff_Partition_Revocation(t *testing.T) { + if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense { + t.Skip("Skipping: No enterprise license found") + } + + testBackendEntDiffPartitionRevocation(t) +} + func TestBackend_Enterprise_Namespace(t *testing.T) { if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense { t.Skip("Skipping: No enterprise license found") @@ -865,6 +881,268 @@ func TestBackend_Enterprise_Partition(t *testing.T) { testBackendEntPartition(t) } +func testBackendEntDiffNamespaceRevocation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true) + defer cleanup() + + // Perform additional Consul configuration + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = consulConfig.Address() + consulapiConfig.Token = consulConfig.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + // Create Policy in default namespace to manage ACLs in a different + // namespace + nsPol := &consulapi.ACLPolicy{ + Name: "diff-ns-test", + Description: "policy to test management of ACLs in one ns from another", + Rules: `namespace "ns1" { + acl="write" + } + `, + } + pol, _, err := client.ACL().PolicyCreate(nsPol, nil) + if err != nil { + t.Fatal(err) + } + + // Create new Token in default namespace with new ACL + cToken, _, err := client.ACL().TokenCreate( + &consulapi.ACLToken{ + Policies: []*consulapi.ACLLink{{ID: pol.ID}}, + }, nil) + if err != nil { + t.Fatal(err) + } + + // Write backend config + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": cToken.SecretID, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in namespace "ns1" + req.Path = "roles/test-ns" + req.Data = map[string]interface{}{ + "consul_policies": []string{"ns-test"}, + "lease": "6h", + "consul_namespace": "ns1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Get Token + req.Operation = logical.ReadOperation + req.Path = "creds/test-ns" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + // Verify Secret + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + ConsulNamespace string `mapstructure:"consul_namespace"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.ConsulNamespace != "ns1" { + t.Fatalf("Failed to access namespace") + } + + // Revoke the credential + req.Operation = 
logical.RevokeOperation + req.Secret = generatedSecret + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("Revocation failed: %v", err) + } + + // Build a management client and verify that the token does not exist anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + Namespace: "ns1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + +func testBackendEntDiffPartitionRevocation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true) + defer cleanup() + + // Perform additional Consul configuration + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = consulConfig.Address() + consulapiConfig.Token = consulConfig.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + // Create Policy in default partition to manage ACLs in a different + // partition + partPol := &consulapi.ACLPolicy{ + Name: "diff-part-test", + Description: "policy to test management of ACLs in one part from another", + Rules: `partition "part1" { + acl="write" + } + `, + } + pol, _, err := client.ACL().PolicyCreate(partPol, nil) + if err != nil { + t.Fatal(err) + } + + // Create new Token in default partition with new ACL + cToken, _, err := client.ACL().TokenCreate( + &consulapi.ACLToken{ + Policies: []*consulapi.ACLLink{{ID: pol.ID}}, + }, nil) + if err != nil { + t.Fatal(err) + } + + // Write backend config + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": cToken.SecretID, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in partition "part1" + req.Path = "roles/test-part" + req.Data = map[string]interface{}{ + "consul_policies": []string{"part-test"}, + "lease": "6h", + "partition": "part1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Get Token + req.Operation = logical.ReadOperation + req.Path = "creds/test-part" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + // Verify Secret + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + Partition string `mapstructure:"partition"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.Partition != "part1" { + t.Fatalf("Failed to access partition") + } + + // Revoke the credential + req.Operation = logical.RevokeOperation + req.Secret = generatedSecret + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("Revocation failed: %v", err) + } + + // Build a management client and 
verify that the token does not exist anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + Partition: "part1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + func testBackendEntNamespace(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} diff --git a/builtin/logical/consul/client.go b/builtin/logical/consul/client.go index 1e30c660271c..8a98200af17c 100644 --- a/builtin/logical/consul/client.go +++ b/builtin/logical/consul/client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul diff --git a/builtin/logical/consul/cmd/consul/main.go b/builtin/logical/consul/cmd/consul/main.go index f42a535b9587..6f0dfe45c3a5 100644 --- a/builtin/logical/consul/cmd/consul/main.go +++ b/builtin/logical/consul/cmd/consul/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/consul/path_config.go b/builtin/logical/consul/path_config.go index ac76ce1a8ff3..11da1f222c66 100644 --- a/builtin/logical/consul/path_config.go +++ b/builtin/logical/consul/path_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -15,6 +15,11 @@ import ( func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + }, + Fields: map[string]*framework.FieldSchema{ "address": { Type: framework.TypeString, @@ -55,9 +60,20 @@ must be x509 PEM encoded and if this is set you need to also set client_cert.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigAccessRead, - logical.UpdateOperation: b.pathConfigAccessWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "access-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, }, } } @@ -119,7 +135,7 @@ func (b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Reques } token, _, err := client.ACL().Bootstrap() if err != nil { - return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs"), err + return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs: %s", err), nil } config.Token = token.SecretID } diff --git a/builtin/logical/consul/path_roles.go b/builtin/logical/consul/path_roles.go index dbb279da768d..1341544ea34f 100644 --- a/builtin/logical/consul/path_roles.go +++ b/builtin/logical/consul/path_roles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
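The path_config.go hunk above is the template for a migration the remaining consul path files repeat: the deprecated Callbacks map becomes an Operations map, where each verb carries its own DisplayAttrs that drive OpenAPI operation-ID generation. Reduced to its shape, with a hypothetical path and handler name:

```go
// Shape of the Callbacks -> Operations migration shown above; "example" and
// pathExampleRead are placeholders, the structure is what the diff introduces.
func pathExample(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "example/" + framework.GenericNameRegex("name"),
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixConsul, // shared backend-wide prefix
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathExampleRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "example", // per-operation suffix
				},
			},
		},
	}
}
```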
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -17,6 +17,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -26,6 +31,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/consul/path_token.go b/builtin/logical/consul/path_token.go index bba0c70dc7c0..6cddd1fddc7c 100644 --- a/builtin/logical/consul/path_token.go +++ b/builtin/logical/consul/path_token.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -21,6 +21,13 @@ const ( func pathToken(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, diff --git a/builtin/logical/consul/path_token_test.go b/builtin/logical/consul/path_token_test.go index 77e7f29ab128..7f5ac3d2b687 100644 --- a/builtin/logical/consul/path_token_test.go +++ b/builtin/logical/consul/path_token_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul diff --git a/builtin/logical/consul/secret_token.go b/builtin/logical/consul/secret_token.go index f2f206b7026a..f2219f0790b6 100644 --- a/builtin/logical/consul/secret_token.go +++ b/builtin/logical/consul/secret_token.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -7,6 +7,7 @@ import ( "context" "fmt" + "github.com/hashicorp/consul/api" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -84,6 +85,24 @@ func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d version = versionRaw.(string) } + // Extract Consul Namespace and Partition info from secret + var revokeWriteOptions *api.WriteOptions + var namespace, partition string + + namespaceRaw, ok := req.Data["consul_namespace"] + if ok { + namespace = namespaceRaw.(string) + } + partitionRaw, ok := req.Data["partition"] + if ok { + partition = partitionRaw.(string) + } + + revokeWriteOptions = &api.WriteOptions{ + Namespace: namespace, + Partition: partition, + } + switch version { case "": // Pre 1.4 tokens @@ -92,7 +111,7 @@ func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d return nil, err } case tokenPolicyType: - _, err := c.ACL().TokenDelete(tokenRaw.(string), nil) + _, err := c.ACL().TokenDelete(tokenRaw.(string), revokeWriteOptions) if err != nil { return nil, err } diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index f75f9ac1d523..5e6bfada625e 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -1,12 +1,14 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database import ( "context" + "errors" "fmt" "net/rpc" + "strconv" "strings" "sync" "time" @@ -15,7 +17,9 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/logical/database/schedule" "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/syncmap" "github.com/hashicorp/vault/internalshared/configutil" v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" @@ -27,19 +31,25 @@ import ( ) const ( - databaseConfigPath = "config/" - databaseRolePath = "role/" - databaseStaticRolePath = "static-role/" - minRootCredRollbackAge = 1 * time.Minute + operationPrefixDatabase = "database" + databaseConfigPath = "config/" + databaseRolePath = "role/" + databaseStaticRolePath = "static-role/" + minRootCredRollbackAge = 1 * time.Minute ) type dbPluginInstance struct { sync.RWMutex database databaseVersionWrapper - id string - name string - closed bool + id string + name string + runningPluginVersion string + closed bool +} + +func (dbi *dbPluginInstance) ID() string { + return dbi.id } func (dbi *dbPluginInstance) Close() error { @@ -62,7 +72,7 @@ func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, b.credRotationQueue = queue.New() // Load queue and kickoff new periodic ticker - go b.initQueue(b.queueCtx, conf, conf.System.ReplicationState()) + go b.initQueue(b.queueCtx, conf) // collect metrics on number of plugin instances var err error @@ -100,6 +110,7 @@ func Backend(conf *logical.BackendConfig) *databaseBackend { pathListPluginConnection(&b), pathConfigurePluginConnection(&b), pathResetConnection(&b), + pathReloadPlugin(&b), }, pathListRoles(&b), pathRoles(&b), @@ -118,25 +129,19 @@ func Backend(conf *logical.BackendConfig) *databaseBackend { } b.logger = conf.Logger - b.connections = make(map[string]*dbPluginInstance) + b.connections = syncmap.NewSyncMap[string, *dbPluginInstance]() b.queueCtx, 
b.cancelQueueCtx = context.WithCancel(context.Background()) b.roleLocks = locksutil.CreateLocks() + b.schedule = &schedule.DefaultSchedule{} + return &b } func (b *databaseBackend) collectPluginInstanceGaugeValues(context.Context) ([]metricsutil.GaugeLabelValues, error) { // copy the map so we can release the lock - connMapCopy := func() map[string]*dbPluginInstance { - b.connLock.RLock() - defer b.connLock.RUnlock() - mapCopy := map[string]*dbPluginInstance{} - for k, v := range b.connections { - mapCopy[k] = v - } - return mapCopy - }() + connectionsCopy := b.connections.Values() counts := map[string]int{} - for _, v := range connMapCopy { + for _, v := range connectionsCopy { dbType, err := v.database.Type() if err != nil { // there's a chance this will already be closed since we don't hold the lock @@ -155,11 +160,10 @@ func (b *databaseBackend) collectPluginInstanceGaugeValues(context.Context) ([]m } type databaseBackend struct { - // connLock is used to synchronize access to the connections map - connLock sync.RWMutex // connections holds configured database connections by config name - connections map[string]*dbPluginInstance - logger log.Logger + createConnectionLock sync.Mutex + connections *syncmap.SyncMap[string, *dbPluginInstance] + logger log.Logger *framework.Backend // credRotationQueue is an in-memory priority queue used to track Static Roles @@ -180,49 +184,8 @@ type databaseBackend struct { // the running gauge collection process gaugeCollectionProcess *metricsutil.GaugeCollectionProcess gaugeCollectionProcessStop sync.Once -} - -func (b *databaseBackend) connGet(name string) *dbPluginInstance { - b.connLock.RLock() - defer b.connLock.RUnlock() - return b.connections[name] -} - -func (b *databaseBackend) connPop(name string) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi, ok := b.connections[name] - if ok { - delete(b.connections, name) - } - return dbi -} -func (b *databaseBackend) connPopIfEqual(name, id string) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi, ok := b.connections[name] - if ok && dbi.id == id { - delete(b.connections, name) - return dbi - } - return nil -} - -func (b *databaseBackend) connPut(name string, newDbi *dbPluginInstance) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi := b.connections[name] - b.connections[name] = newDbi - return dbi -} - -func (b *databaseBackend) connClear() map[string]*dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - old := b.connections - b.connections = make(map[string]*dbPluginInstance) - return old + schedule schedule.Scheduler } func (b *databaseBackend) DatabaseConfig(ctx context.Context, s logical.Storage, name string) (*DatabaseConfig, error) { @@ -328,8 +291,32 @@ func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage, return b.GetConnectionWithConfig(ctx, name, config) } +func (b *databaseBackend) GetConnectionSkipVerify(ctx context.Context, s logical.Storage, name string) (*dbPluginInstance, error) { + config, err := b.DatabaseConfig(ctx, s, name) + if err != nil { + return nil, err + } + + // Force the skip verifying the connection + config.VerifyConnection = false + + return b.GetConnectionWithConfig(ctx, name, config) +} + func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name string, config *DatabaseConfig) (*dbPluginInstance, error) { - dbi := b.connGet(name) + // fast path, reuse the existing connection + dbi := b.connections.Get(name) + if dbi != nil { + return 
dbi, nil + } + + // slow path, create a new connection + // if we don't lock the rest of the operation, there is a race condition for multiple callers of this function + b.createConnectionLock.Lock() + defer b.createConnectionLock.Unlock() + + // check again in case we lost the race + dbi = b.connections.Get(name) if dbi != nil { return dbi, nil } @@ -339,14 +326,24 @@ func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name stri return nil, err } - dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + // Override the configured version if there is a pinned version. + pinnedVersion, err := b.getPinnedVersion(ctx, config.PluginName) + if err != nil { + return nil, err + } + pluginVersion := config.PluginVersion + if pinnedVersion != "" { + pluginVersion = pinnedVersion + } + + dbw, err := newDatabaseWrapper(ctx, config.PluginName, pluginVersion, b.System(), b.logger) if err != nil { return nil, fmt.Errorf("unable to create database instance: %w", err) } initReq := v5.InitializeRequest{ Config: config.ConnectionDetails, - VerifyConnection: true, + VerifyConnection: config.VerifyConnection, } _, err = dbw.Initialize(ctx, initReq) if err != nil { @@ -355,24 +352,28 @@ func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name stri } dbi = &dbPluginInstance{ - database: dbw, - id: id, - name: name, + database: dbw, + id: id, + name: name, + runningPluginVersion: pluginVersion, } - oldConn := b.connPut(name, dbi) - if oldConn != nil { - err := oldConn.Close() + conn, ok := b.connections.PutIfEmpty(name, dbi) + if !ok { + // this is a bug + b.Logger().Warn("BUG: there was a race condition adding to the database connection map") + // There was already an existing connection, so we will use that and close our new one to avoid a race condition. + err := dbi.Close() if err != nil { - b.Logger().Warn("Error closing database connection", "error", err) + b.Logger().Warn("Error closing new database connection", "error", err) } } - return dbi, nil + return conn, nil } // ClearConnection closes the database connection and // removes it from the b.connections map. func (b *databaseBackend) ClearConnection(name string) error { - db := b.connPop(name) + db := b.connections.Pop(name) if db != nil { // Ignore error here since the database client is always killed db.Close() @@ -383,7 +384,7 @@ func (b *databaseBackend) ClearConnection(name string) error { // ClearConnectionId closes the database connection with a specific id and // removes it from the b.connections map. func (b *databaseBackend) ClearConnectionId(name, id string) error { - db := b.connPopIfEqual(name, id) + db := b.connections.PopIfEqual(name, id) if db != nil { // Ignore error here since the database client is always killed db.Close() @@ -402,7 +403,7 @@ func (b *databaseBackend) CloseIfShutdown(db *dbPluginInstance, err error) { db.Close() // Delete the connection if it is still active. 
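The fast path/slow path structure above is double-checked locking: probe the map lock-free, take createConnectionLock, then probe again before paying for plugin startup, so concurrent callers all receive the same instance. Stripped of the database specifics (a plain sync.Map standing in for the helper/syncmap package), the pattern is:

```go
package connpool

import "sync"

// resource stands in for *dbPluginInstance; newResource for plugin startup.
type resource struct{ name string }

func newResource(name string) (*resource, error) { return &resource{name: name}, nil }

type factory struct {
	mu    sync.Mutex // serializes creators, like createConnectionLock above
	cache sync.Map   // map[string]*resource
}

// get mirrors GetConnectionWithConfig above: a lock-free fast path, then a
// double-checked slow path so only one goroutine pays for creation.
func (f *factory) get(name string) (*resource, error) {
	if r, ok := f.cache.Load(name); ok {
		return r.(*resource), nil // fast path
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	if r, ok := f.cache.Load(name); ok {
		return r.(*resource), nil // another goroutine won while we waited
	}
	r, err := newResource(name)
	if err != nil {
		return nil, err
	}
	f.cache.Store(name, r)
	return r, nil
}
```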
- b.connPopIfEqual(db.name, db.id) + b.connections.PopIfEqual(db.name, db.id) }() } } @@ -415,7 +416,7 @@ func (b *databaseBackend) clean(_ context.Context) { b.cancelQueueCtx() } - connections := b.connClear() + connections := b.connections.Clear() for _, db := range connections { go db.Close() } @@ -427,6 +428,28 @@ func (b *databaseBackend) clean(_ context.Context) { }) } +func (b *databaseBackend) dbEvent(ctx context.Context, + operation string, + path string, + name string, + modified bool, + additionalMetadataPairs ...string, +) { + metadata := []string{ + logical.EventMetadataModified, strconv.FormatBool(modified), + logical.EventMetadataOperation, operation, + "path", path, + } + if name != "" { + metadata = append(metadata, "name", name) + } + metadata = append(metadata, additionalMetadataPairs...) + err := logical.SendEvent(ctx, b, fmt.Sprintf("database/%s", operation), metadata...) + if err != nil && !errors.Is(err, framework.ErrNoEvents) { + b.Logger().Error("Error sending event", "error", err) + } +} + const backendHelp = ` The database backend supports using many different databases as secret backends, including but not limited to: diff --git a/builtin/logical/database/backend_get_test.go b/builtin/logical/database/backend_get_test.go new file mode 100644 index 000000000000..b7799725eba8 --- /dev/null +++ b/builtin/logical/database/backend_get_test.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package database + +import ( + "context" + "sync" + "testing" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +func newSystemViewWrapper(view logical.SystemView) logical.SystemView { + return &systemViewWrapper{ + view, + } +} + +type systemViewWrapper struct { + logical.SystemView +} + +var _ logical.ExtendedSystemView = (*systemViewWrapper)(nil) + +func (s *systemViewWrapper) RequestWellKnownRedirect(ctx context.Context, src, dest string) error { + panic("nope") +} + +func (s *systemViewWrapper) DeregisterWellKnownRedirect(ctx context.Context, src string) bool { + panic("nope") +} + +func (s *systemViewWrapper) Auditor() logical.Auditor { + panic("nope") +} + +func (s *systemViewWrapper) ForwardGenericRequest(ctx context.Context, request *logical.Request) (*logical.Response, error) { + panic("nope") +} + +func (s *systemViewWrapper) APILockShouldBlockRequest() (bool, error) { + panic("nope") +} + +func (s *systemViewWrapper) GetPinnedPluginVersion(ctx context.Context, pluginType consts.PluginType, pluginName string) (*pluginutil.PinnedVersion, error) { + return nil, pluginutil.ErrPinnedVersionNotFound +} + +func (s *systemViewWrapper) LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error) { + return &pluginutil.PluginRunner{ + Name: mockv5, + Type: consts.PluginTypeDatabase, + Builtin: true, + BuiltinFactory: New, + }, nil +} + +func getDbBackend(t *testing.T) (*databaseBackend, logical.Storage) { + t.Helper() + config := logical.TestBackendConfig() + config.System = newSystemViewWrapper(config.System) + config.StorageView = &logical.InmemStorage{} + // Create and init the backend ourselves instead of using a Factory because + // the factory function kicks off threads that cause racy tests. 
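The dbEvent helper above centralizes event emission: it stamps modified/operation/path (plus an optional name) into the metadata and swallows framework.ErrNoEvents so backends running without an event bus stay quiet. Judging from the event types the tests further down assert (database/config-write, database/role-update, and so on), a representative call site would look like:

```go
// Representative call site, inferred from the test assertions below: emits a
// "database/config-write" event tagged with the config name and path.
b.dbEvent(ctx, "config-write", "config/"+name, name, true)
```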
+ b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + b.schedule = &TestSchedule{} + b.credRotationQueue = queue.New() + b.populateQueue(context.Background(), config.StorageView) + + return b, config.StorageView +} + +// TestGetConnectionRaceCondition checks that GetConnection always returns the same instance, even when asked +// by multiple goroutines in parallel. +func TestGetConnectionRaceCondition(t *testing.T) { + ctx := context.Background() + b, s := getDbBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, s) + + goroutines := 16 + + wg := sync.WaitGroup{} + wg.Add(goroutines) + dbis := make([]*dbPluginInstance, goroutines) + errs := make([]error, goroutines) + for i := 0; i < goroutines; i++ { + go func(i int) { + defer wg.Done() + dbis[i], errs[i] = b.GetConnection(ctx, s, mockv5) + }(i) + } + wg.Wait() + for i := 0; i < goroutines; i++ { + if errs[i] != nil { + t.Fatal(errs[i]) + } + if dbis[0] != dbis[i] { + t.Fatal("Error: database instances did not match") + } + } +} diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index 574bcd01af62..1573d2146ac9 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -1,28 +1,32 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database import ( "context" "database/sql" + "encoding/json" + "errors" "fmt" "log" "net/url" "os" "reflect" + "slices" "strings" + "sync" "testing" "time" "github.com/go-test/deep" "github.com/hashicorp/go-hclog" - mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/plugins/database/mongodb" "github.com/hashicorp/vault/plugins/database/postgresql" v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" @@ -34,14 +38,32 @@ import ( "github.com/hashicorp/vault/vault" _ "github.com/jackc/pgx/v4" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" ) -func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { +func getClusterPostgresDBWithFactory(t *testing.T, factory logical.Factory) (*vault.TestCluster, logical.SystemView) { + t.Helper() + cluster, sys := getClusterWithFactory(t, factory) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", + []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}) + return cluster, sys +} + +func getClusterPostgresDB(t *testing.T) (*vault.TestCluster, logical.SystemView) { + t.Helper() + cluster, sys := getClusterPostgresDBWithFactory(t, Factory) + return cluster, sys +} + +func getClusterWithFactory(t *testing.T, factory logical.Factory) (*vault.TestCluster, logical.SystemView) { + t.Helper() + pluginDir := corehelpers.MakeTestPluginDir(t) coreConfig := &vault.CoreConfig{ LogicalBackends: map[string]logical.Factory{ - "database": Factory, + "database": factory, }, BuiltinRegistry: builtinplugins.Registry, + PluginDirectory: pluginDir, } cluster := 
vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ @@ -51,30 +73,15 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { cores := cluster.Cores vault.TestWaitActive(t, cores[0].Core) - os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - sys := vault.TestDynamicSystemView(cores[0].Core, nil) - vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Postgres", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Mongo", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoMultiplexed", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlas", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlasMultiplexed", []string{}, "") return cluster, sys } -func TestBackend_PluginMain_Postgres(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - - dbType, err := postgresql.New() - if err != nil { - t.Fatalf("Failed to initialize postgres: %s", err) - } - - v5.Serve(dbType.(v5.Database)) +func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { + t.Helper() + cluster, sys := getClusterWithFactory(t, Factory) + return cluster, sys } func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) { @@ -85,48 +92,6 @@ func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) { v5.ServeMultiplex(postgresql.New) } -func TestBackend_PluginMain_Mongo(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - - dbType, err := mongodb.New() - if err != nil { - t.Fatalf("Failed to initialize mongodb: %s", err) - } - - v5.Serve(dbType.(v5.Database)) -} - -func TestBackend_PluginMain_MongoMultiplexed(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - - v5.ServeMultiplex(mongodb.New) -} - -func TestBackend_PluginMain_MongoAtlas(t *testing.T) { - if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { - return - } - - dbType, err := mongodbatlas.New() - if err != nil { - t.Fatalf("Failed to initialize mongodbatlas: %s", err) - } - - v5.Serve(dbType.(v5.Database)) -} - -func TestBackend_PluginMain_MongoAtlasMultiplexed(t *testing.T) { - if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { - return - } - - v5.ServeMultiplex(mongodbatlas.New) -} - func TestBackend_RoleUpgrade(t *testing.T) { storage := &logical.InmemStorage{} backend := &databaseBackend{} @@ -183,12 +148,14 @@ func TestBackend_config_connection(t *testing.T) { var resp *logical.Response var err error - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} config.System = sys + eventSender := logical.NewMockEventSender() + config.EventsSender = eventSender lb, err := Factory(context.Background(), config) if err != nil { t.Fatal(err) @@ -243,6 +210,7 @@ func TestBackend_config_connection(t *testing.T) { 
"root_credentials_rotate_statements": []string{}, "password_policy": "", "plugin_version": "", + "verify_connection": false, } configReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) @@ -297,6 +265,7 @@ func TestBackend_config_connection(t *testing.T) { "root_credentials_rotate_statements": []string{}, "password_policy": "", "plugin_version": "", + "verify_connection": false, } configReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) @@ -340,6 +309,7 @@ func TestBackend_config_connection(t *testing.T) { "root_credentials_rotate_statements": []string{}, "password_policy": "", "plugin_version": "", + "verify_connection": false, } configReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) @@ -367,10 +337,22 @@ func TestBackend_config_connection(t *testing.T) { if key != "plugin-test" { t.Fatalf("bad key: %q", key) } + assert.Equal(t, 3, len(eventSender.Events)) + assert.Equal(t, "database/config-write", string(eventSender.Events[0].Type)) + assert.Equal(t, "config/plugin-test", eventSender.Events[0].Event.Metadata.AsMap()["path"]) + assert.Equal(t, "plugin-test", eventSender.Events[0].Event.Metadata.AsMap()["name"]) + assert.Equal(t, "database/config-write", string(eventSender.Events[1].Type)) + assert.Equal(t, "config/plugin-test", eventSender.Events[1].Event.Metadata.AsMap()["path"]) + assert.Equal(t, "plugin-test", eventSender.Events[1].Event.Metadata.AsMap()["name"]) + assert.Equal(t, "database/config-write", string(eventSender.Events[2].Type)) + assert.Equal(t, "config/plugin-test", eventSender.Events[2].Event.Metadata.AsMap()["path"]) + assert.Equal(t, "plugin-test", eventSender.Events[2].Event.Metadata.AsMap()["name"]) } +// TestBackend_BadConnectionString tests that an error response resulting from +// a failed connection does not expose the URL. The middleware should sanitize it. 
func TestBackend_BadConnectionString(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -383,7 +365,7 @@ func TestBackend_BadConnectionString(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, _ := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, _ := postgreshelper.PrepareTestContainer(t) defer cleanup() respCheck := func(req *logical.Request) { @@ -419,12 +401,14 @@ func TestBackend_BadConnectionString(t *testing.T) { } func TestBackend_basic(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} config.System = sys + eventSender := logical.NewMockEventSender() + config.EventsSender = eventSender b, err := Factory(context.Background(), config) if err != nil { @@ -432,7 +416,7 @@ func TestBackend_basic(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -548,7 +532,7 @@ func TestBackend_basic(t *testing.T) { if credsResp.Secret.TTL != 5*time.Minute { t.Fatalf("unexpected TTL of %d", credsResp.Secret.TTL) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -568,7 +552,7 @@ func TestBackend_basic(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if testCredsExist(t, credsResp, connURL) { + if testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should not exist") } } @@ -586,7 +570,7 @@ func TestBackend_basic(t *testing.T) { if err != nil || (credsResp != nil && credsResp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -619,91 +603,135 @@ func TestBackend_basic(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if testCredsExist(t, credsResp, connURL) { + if testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should not exist") } } + assert.Equal(t, 9, len(eventSender.Events)) + + assertEvent := func(t *testing.T, typ, name, path string) { + t.Helper() + assert.Equal(t, typ, string(eventSender.Events[0].Type)) + assert.Equal(t, name, eventSender.Events[0].Event.Metadata.AsMap()["name"]) + assert.Equal(t, path, eventSender.Events[0].Event.Metadata.AsMap()["path"]) + eventSender.Events = slices.Delete(eventSender.Events, 0, 1) + } + + assertEvent(t, "database/config-write", "plugin-test", "config/plugin-test") + for i := 0; i < 3; i++ { + assertEvent(t, "database/role-update", "plugin-role-test", "roles/plugin-role-test") + assertEvent(t, "database/creds-create", "plugin-role-test", "creds/plugin-role-test") + } + assertEvent(t, "database/creds-create", "plugin-role-test", "creds/plugin-role-test") + assertEvent(t, "database/role-delete", "plugin-role-test", "roles/plugin-role-test") } -func TestBackend_connectionCrud(t *testing.T) { - cluster, sys := getCluster(t) - defer cluster.Cleanup() +// singletonDBFactory allows us to reach into the internals of a databaseBackend +// even when it's been created by a call to the sys mount. 
The factory method +// satisfies the logical.Factory type, and lazily creates the databaseBackend +// once the SystemView has been provided because the factory method itself is an +// input for creating the test cluster and its system view. +type singletonDBFactory struct { + once sync.Once + db *databaseBackend + + sys logical.SystemView +} + +// factory satisfies the logical.Factory type. +func (s *singletonDBFactory) factory(context.Context, *logical.BackendConfig) (logical.Backend, error) { + if s.sys == nil { + return nil, errors.New("sys is nil") + } config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - config.System = sys + config.System = s.sys - b, err := Factory(context.Background(), config) + var err error + s.once.Do(func() { + var b logical.Backend + b, err = Factory(context.Background(), config) + s.db = b.(*databaseBackend) + }) if err != nil { - t.Fatal(err) + return nil, err } - defer b.Cleanup(context.Background()) + if s.db == nil { + return nil, errors.New("db is nil") + } + return s.db, nil +} + +func TestBackend_connectionCrud(t *testing.T) { + t.Parallel() + dbFactory := &singletonDBFactory{} + cluster, sys := getClusterPostgresDBWithFactory(t, dbFactory.factory) + defer cluster.Cleanup() + + dbFactory.sys = sys + client := cluster.Cores[0].Client.Logical() - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() + // Mount the database plugin. + resp, err := client.Write("sys/mounts/database", map[string]interface{}{ + "type": "database", + }) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + // Configure a connection - data := map[string]interface{}{ + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ "connection_url": "test", "plugin_name": "postgresql-database-plugin", "verify_connection": false, + }) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) } - req := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err := b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + + // Configure a second connection to confirm below it doesn't get restarted. 
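singletonDBFactory above is a small lazy-singleton: sync.Once guarantees the backend is built at most once even if the mount table invokes the factory repeatedly, and the nil checks surface misuse (no SystemView yet, or a later call observing a failed first build). The same pattern in isolation, with stand-in names:

```go
package lazy

import "sync"

// Backend and newBackend are stand-ins for the expensive build-once value
// (the databaseBackend above).
type Backend struct{}

func newBackend() (*Backend, error) { return &Backend{}, nil }

type lazyBackend struct {
	once sync.Once
	b    *Backend
	err  error
}

// get builds the backend at most once; every later caller observes the cached
// result, including any error from the single build attempt.
func (l *lazyBackend) get() (*Backend, error) {
	l.once.Do(func() { l.b, l.err = newBackend() })
	return l.b, l.err
}
```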
+ resp, err = client.Write("database/config/plugin-test-hana", map[string]interface{}{ + "connection_url": "test", + "plugin_name": "hana-database-plugin", + "verify_connection": false, + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Create a role - data = map[string]interface{}{ + resp, err = client.Write("database/roles/plugin-role-test", map[string]interface{}{ "db_name": "plugin-test", "creation_statements": testRole, "revocation_statements": defaultRevocationSQL, "default_ttl": "5m", "max_ttl": "10m", - } - req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/plugin-role-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Update the connection - data = map[string]interface{}{ + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ "connection_url": connURL, "plugin_name": "postgresql-database-plugin", "allowed_roles": []string{"plugin-role-test"}, "username": "postgres", "password": "secret", - "private_key": "PRIVATE_KEY", - } - req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } if len(resp.Warnings) == 0 { t.Fatalf("expected warning about password in url %s, resp:%#v\n", connURL, resp) } - req.Operation = logical.ReadOperation - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Read("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } returnedConnectionDetails := resp.Data["connection_details"].(map[string]interface{}) @@ -714,16 +742,17 @@ func TestBackend_connectionCrud(t *testing.T) { if _, exists := returnedConnectionDetails["password"]; exists { t.Fatal("password should NOT be found in the returned config") } - if _, exists := returnedConnectionDetails["private_key"]; exists { - t.Fatal("private_key should NOT be found in the returned config") - } // Replace connection url with templated version - req.Operation = logical.UpdateOperation - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") - data["connection_url"] = connURL - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + templatedConnURL := strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ + "connection_url": templatedConnURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": "postgres", + "password": "secret", + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } @@ -732,75 +761,93 @@ func TestBackend_connectionCrud(t *testing.T) { "plugin_name": "postgresql-database-plugin", "connection_details": map[string]interface{}{ "username": "postgres", - "connection_url": connURL, + "connection_url": templatedConnURL, }, - "allowed_roles": []string{"plugin-role-test"}, - "root_credentials_rotate_statements": []string(nil), + "allowed_roles": []any{"plugin-role-test"}, + "root_credentials_rotate_statements": 
[]any{}, "password_policy": "", "plugin_version": "", + "verify_connection": false, } - req.Operation = logical.ReadOperation - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Read("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } delete(resp.Data["connection_details"].(map[string]interface{}), "name") if diff := deep.Equal(resp.Data, expected); diff != nil { - t.Fatal(diff) + t.Fatal(strings.Join(diff, "\n")) } - // Reset Connection - data = map[string]interface{}{} - req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "reset/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) + // Test endpoints for reloading plugins. + for _, reload := range []struct { + path string + data map[string]any + checkCount bool + }{ + {"database/reset/plugin-test", nil, false}, + {"database/reload/postgresql-database-plugin", nil, true}, + {"sys/plugins/reload/backend", map[string]any{ + "plugin": "postgresql-database-plugin", + }, false}, + } { + getConnectionID := func(name string) string { + t.Helper() + dbi := dbFactory.db.connections.Get(name) + if dbi == nil { + t.Fatal("no plugin-test dbi") + } + return dbi.ID() + } + initialID := getConnectionID("plugin-test") + hanaID := getConnectionID("plugin-test-hana") + resp, err = client.Write(reload.path, reload.data) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + if initialID == getConnectionID("plugin-test") { + t.Fatal("ID unchanged after connection reset") + } + if hanaID != getConnectionID("plugin-test-hana") { + t.Fatal("hana plugin got restarted but shouldn't have been") + } + if reload.checkCount { + actual, err := resp.Data["count"].(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + if expected := 1; expected != int(actual) { + t.Fatalf("expected %d but got %d", expected, actual) + } + if expected := []any{"plugin-test"}; !reflect.DeepEqual(expected, resp.Data["connections"]) { + t.Fatalf("expected %v but got %v", expected, resp.Data["connections"]) + } + } } // Get creds - data = map[string]interface{}{} - req = &logical.Request{ - Operation: logical.ReadOperation, - Path: "creds/plugin-role-test", - Storage: config.StorageView, - Data: data, - } - credsResp, err := b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (credsResp != nil && credsResp.IsError()) { + credsResp, err := client.Read("database/creds/plugin-role-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - credCheckURL := dbutil.QueryHelper(connURL, map[string]string{ + credCheckURL := dbutil.QueryHelper(templatedConnURL, map[string]string{ "username": "postgres", "password": "secret", }) - if !testCredsExist(t, credsResp, credCheckURL) { + if !testCredsExist(t, credsResp.Data, credCheckURL) { t.Fatalf("Creds should exist") } // Delete Connection - data = map[string]interface{}{} - req = &logical.Request{ - Operation: logical.DeleteOperation, - Path: "config/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Delete("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Read connection - req.Operation =
logical.ReadOperation - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Read("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } @@ -810,8 +857,59 @@ func TestBackend_connectionCrud(t *testing.T) { } } +func TestBackend_connectionSanitizePrivateKey(t *testing.T) { + t.Parallel() + dbFactory := &singletonDBFactory{} + cluster, sys := getClusterPostgresDBWithFactory(t, dbFactory.factory) + defer cluster.Cleanup() + + dbFactory.sys = sys + client := cluster.Cores[0].Client.Logical() + + cleanup, connURL := postgreshelper.PrepareTestContainer(t) + defer cleanup() + + // Mount the database plugin. + resp, err := client.Write("sys/mounts/database", map[string]interface{}{ + "type": "database", + }) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + caCert := certhelpers.NewCert(t, certhelpers.CommonName("ca"), certhelpers.IsCA(true), certhelpers.SelfSign()) + clientCert := certhelpers.NewCert(t, certhelpers.CommonName("postgres"), certhelpers.DNS("localhost"), certhelpers.Parent(caCert)) + + // Create a connection + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": "postgres", + "tls_certificate": string(clientCert.CombinedPEM()), + "private_key": string(clientCert.PrivateKeyPEM()), + "tls_ca": string(caCert.CombinedPEM()), + "verify_connection": false, + }) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + resp, err = client.Read("database/config/plugin-test") + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + returnedConnectionDetails := resp.Data["connection_details"].(map[string]interface{}) + if strings.Contains(returnedConnectionDetails["connection_url"].(string), "secret") { + t.Fatal("password should not be found in the connection url") + } + if _, exists := returnedConnectionDetails["private_key"]; exists { + t.Fatal("private_key should NOT be found in the returned config") + } +} + func TestBackend_roleCrud(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -828,7 +926,7 @@ func TestBackend_roleCrud(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -1064,7 +1162,7 @@ func TestBackend_roleCrud(t *testing.T) { } func TestBackend_allowedRoles(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1077,7 +1175,7 @@ func TestBackend_allowedRoles(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -1174,7 +1272,7 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -1208,7 +1306,7 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, 
credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -1255,13 +1353,13 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } } func TestBackend_RotateRootCredentials(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1274,7 +1372,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") @@ -1362,7 +1460,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) { } func TestBackend_ConnectionURL_redacted(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) t.Cleanup(cluster.Cleanup) config := logical.TestBackendConfig() @@ -1407,7 +1505,7 @@ func TestBackend_ConnectionURL_redacted(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - cleanup, u := postgreshelper.PrepareTestContainerWithPassword(t, "13.4-buster", tt.password) + cleanup, u := postgreshelper.PrepareTestContainerWithPassword(t, tt.password) t.Cleanup(cleanup) p, err := url.Parse(u) @@ -1513,7 +1611,7 @@ func TestBackend_AsyncClose(t *testing.T) { // Test that having a plugin that takes a LONG time to close will not cause the cleanup function to take // longer than 750ms. cluster, sys := getCluster(t) - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "hanging-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Hanging", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "hanging-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Hanging", []string{}) t.Cleanup(cluster.Cleanup) config := logical.TestBackendConfig() @@ -1565,13 +1663,13 @@ func TestNewDatabaseWrapper_IgnoresBuiltinVersion(t *testing.T) { } } -func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { +func testCredsExist(t *testing.T, data map[string]any, connURL string) bool { t.Helper() var d struct { Username string `mapstructure:"username"` Password string `mapstructure:"password"` } - if err := mapstructure.Decode(resp.Data, &d); err != nil { + if err := mapstructure.Decode(data, &d); err != nil { t.Fatal(err) } log.Printf("[TRACE] Generated credentials: %v", d) diff --git a/builtin/logical/database/credentials.go b/builtin/logical/database/credentials.go index ef33ab0fd057..790dde05a35b 100644 --- a/builtin/logical/database/credentials.go +++ b/builtin/logical/database/credentials.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
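A recurring mechanical change in the test hunks above: postgreshelper.PrepareTestContainer no longer takes an image tag, so the hard-coded "13.4-buster" argument disappears from every call site and the helper owns the Postgres version. Call sites shrink to:

```go
// After this change the helper pins its own Postgres image version.
cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
```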
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -8,12 +8,17 @@ import ( "crypto/rand" "crypto/rsa" "crypto/x509" + "crypto/x509/pkix" "encoding/pem" "fmt" "io" "strings" + "time" "github.com/hashicorp/vault/helper/random" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/template" "github.com/mitchellh/mapstructure" ) @@ -170,3 +175,217 @@ func (kg rsaKeyGenerator) configMap() (map[string]interface{}, error) { } return config, nil } + +type ClientCertificateGenerator struct { + // CommonNameTemplate is the username template to be used for the client certificate common name. + CommonNameTemplate string `mapstructure:"common_name_template,omitempty"` + + // CAPrivateKey is the PEM-encoded private key for the given ca_cert. + CAPrivateKey string `mapstructure:"ca_private_key,omitempty"` + + // CACert is the PEM-encoded CA certificate. + CACert string `mapstructure:"ca_cert,omitempty"` + + // KeyType specifies the desired key type. + // Options include: 'rsa', 'ed25519', 'ec'. + KeyType string `mapstructure:"key_type,omitempty"` + + // KeyBits is the number of bits to use for the generated keys. + // Options include: with key_type=rsa, 2048 (default), 3072, 4096; + // With key_type=ec, allowed values are: 224, 256 (default), 384, 521; + // Ignored with key_type=ed25519. + KeyBits int `mapstructure:"key_bits,omitempty"` + + // SignatureBits is the number of bits to use in the signature algorithm. + // Options include: 256 (default), 384, 512. + SignatureBits int `mapstructure:"signature_bits,omitempty"` + + parsedCABundle *certutil.ParsedCertBundle + cnProducer template.StringTemplate +} + +// newClientCertificateGenerator returns a new ClientCertificateGenerator +// using the given config. Default values will be set on the returned +// ClientCertificateGenerator if not provided in the config.
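The field comments above encode the defaulting rules that newClientCertificateGenerator (below) enforces: an unset key_bits becomes 2048 for RSA and 256 for EC, and an unset signature_bits becomes 256. A construction sketch under those rules, where caCertPEM and caKeyPEM are assumed to hold a valid CA bundle:

```go
// Usage sketch; caCertPEM/caKeyPEM are hypothetical PEM strings.
cg, err := newClientCertificateGenerator(map[string]interface{}{
	"common_name_template": "{{.DisplayName}}_{{.RoleName}}",
	"ca_cert":              caCertPEM,
	"ca_private_key":       caKeyPEM,
	"key_type":             "rsa", // key_bits omitted -> defaults to 2048
})
if err != nil {
	return err
}
// cg.KeyBits == 2048, cg.SignatureBits == 256
```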
+func newClientCertificateGenerator(config map[string]interface{}) (ClientCertificateGenerator, error) { + var cg ClientCertificateGenerator + if err := mapstructure.WeakDecode(config, &cg); err != nil { + return cg, err + } + + switch cg.KeyType { + case "rsa": + switch cg.KeyBits { + case 0: + cg.KeyBits = 2048 + case 2048, 3072, 4096: + default: + return cg, fmt.Errorf("invalid key_bits") + } + case "ec": + switch cg.KeyBits { + case 0: + cg.KeyBits = 256 + case 224, 256, 384, 521: + default: + return cg, fmt.Errorf("invalid key_bits") + } + case "ed25519": + // key_bits ignored + default: + return cg, fmt.Errorf("invalid key_type") + } + + switch cg.SignatureBits { + case 0: + cg.SignatureBits = 256 + case 256, 384, 512: + default: + return cg, fmt.Errorf("invalid signature_bits") + } + + if cg.CommonNameTemplate == "" { + return cg, fmt.Errorf("missing required common_name_template") + } + + // Validate the common name template + t, err := template.NewTemplate(template.Template(cg.CommonNameTemplate)) + if err != nil { + return cg, fmt.Errorf("failed to create template: %w", err) + } + + _, err = t.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return cg, fmt.Errorf("invalid common_name_template: %w", err) + } + cg.cnProducer = t + + if cg.CACert == "" { + return cg, fmt.Errorf("missing required ca_cert") + } + if cg.CAPrivateKey == "" { + return cg, fmt.Errorf("missing required ca_private_key") + } + parsedBundle, err := certutil.ParsePEMBundle(strings.Join([]string{cg.CACert, cg.CAPrivateKey}, "\n")) + if err != nil { + return cg, err + } + if parsedBundle.PrivateKey == nil { + return cg, fmt.Errorf("private key not found in the PEM bundle") + } + if parsedBundle.PrivateKeyType == certutil.UnknownPrivateKey { + return cg, fmt.Errorf("unknown private key found in the PEM bundle") + } + if parsedBundle.Certificate == nil { + return cg, fmt.Errorf("certificate not found in the PEM bundle") + } + if !parsedBundle.Certificate.IsCA { + return cg, fmt.Errorf("the given certificate is not marked for CA use") + } + if !parsedBundle.Certificate.BasicConstraintsValid { + return cg, fmt.Errorf("the given certificate does not meet basic constraints for CA use") + } + + certBundle, err := parsedBundle.ToCertBundle() + if err != nil { + return cg, fmt.Errorf("error converting raw values into cert bundle: %w", err) + } + + parsedCABundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return cg, fmt.Errorf("failed to parse cert bundle: %w", err) + } + cg.parsedCABundle = parsedCABundle + + return cg, nil +} + +func (cg *ClientCertificateGenerator) generate(r io.Reader, expiration time.Time, userMeta dbplugin.UsernameMetadata) (*certutil.CertBundle, string, error) { + commonName, err := cg.cnProducer.Generate(userMeta) + if err != nil { + return nil, "", err + } + + // Set defaults + keyBits := cg.KeyBits + signatureBits := cg.SignatureBits + switch cg.KeyType { + case "rsa": + if keyBits == 0 { + keyBits = 2048 + } + if signatureBits == 0 { + signatureBits = 256 + } + case "ec": + if keyBits == 0 { + keyBits = 256 + } + if signatureBits == 0 { + if keyBits == 224 { + signatureBits = 256 + } else { + signatureBits = keyBits + } + } + case "ed25519": + // key_bits ignored + if signatureBits == 0 { + signatureBits = 256 + } + } + + subject := pkix.Name{ + CommonName: commonName, + // Additional subject DN options intentionally omitted for now + } + + creation := &certutil.CreationBundle{ + Params: &certutil.CreationParameters{ + Subject: subject, + KeyType: cg.KeyType, + 
KeyBits: cg.KeyBits, + SignatureBits: cg.SignatureBits, + NotAfter: expiration, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: certutil.ClientAuthExtKeyUsage, + BasicConstraintsValidForNonCA: false, + NotBeforeDuration: 30 * time.Second, + URLs: &certutil.URLEntries{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + }, + }, + SigningBundle: &certutil.CAInfoBundle{ + ParsedCertBundle: *cg.parsedCABundle, + URLs: &certutil.URLEntries{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + }, + }, + } + + parsedClientBundle, err := certutil.CreateCertificateWithRandomSource(creation, r) + if err != nil { + return nil, "", fmt.Errorf("unable to generate client certificate: %w", err) + } + + cb, err := parsedClientBundle.ToCertBundle() + if err != nil { + return nil, "", fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + return cb, subject.String(), nil +} + +// configMap returns the configuration of the ClientCertificateGenerator +// as a map from string to string. +func (cg ClientCertificateGenerator) configMap() (map[string]interface{}, error) { + config := make(map[string]interface{}) + if err := mapstructure.WeakDecode(cg, &config); err != nil { + return nil, err + } + return config, nil +} diff --git a/builtin/logical/database/credentials_test.go b/builtin/logical/database/credentials_test.go index 9054ecdc9b1d..7f2c4eb3dbb0 100644 --- a/builtin/logical/database/credentials_test.go +++ b/builtin/logical/database/credentials_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -17,6 +17,252 @@ import ( "github.com/stretchr/testify/mock" ) +// Test_newClientCertificateGenerator tests the ClientCertificateGenerator struct based on the config +func Test_newClientCertificateGenerator(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want ClientCertificateGenerator + wantErr bool + }{ + { + name: "newClientCertificateGenerator with nil config", + args: args{ + config: nil, + }, + want: ClientCertificateGenerator{ + CommonNameTemplate: "", + CAPrivateKey: "", + CACert: "", + KeyType: "", + KeyBits: 0, + SignatureBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with zero value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "", + }, + }, + { + name: "newClientCertificateGenerator with rsa value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "rsa", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "rsa", + KeyBits: 2048, + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with ec value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ec", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "ec", + KeyBits: 256, + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with ed25519 value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ed25519", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "ed25519", + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with invalid key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ece", + }, + }, + wantErr: true, + }, + { + name: "newClientCertificateGenerator with zero value 
key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "0", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with 2048 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "2048", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 2048, + }, + }, + { + name: "newClientCertificateGenerator with 3072 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "3072", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 3072, + }, + }, + { + name: "newClientCertificateGenerator with 4096 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4096", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 4096, + }, + }, + { + name: "newClientCertificateGenerator with 224 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "224", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 224, + }, + }, + { + name: "newClientCertificateGenerator with 256 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "256", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with 384 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "384", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 384, + }, + }, + { + name: "newClientCertificateGenerator with 521 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "521", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 521, + }, + }, + { + name: "newClientCertificateGenerator with invalid key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4097", + }, + }, + wantErr: true, + }, + { + name: "newClientCertificateGenerator with zero value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "0", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with 256 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "256", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with 384 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "384", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 384, + }, + }, + { + name: "newClientCertificateGenerator with 512 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "512", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 512, + }, + }, + { + name: "newClientCertificateGenerator with invalid signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "612", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newClientCertificateGenerator(tt.args.config) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + func Test_newPasswordGenerator(t *testing.T) { type args struct { config map[string]interface{} diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index 2b5f7a981ec3..8c9737cdbff5 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package dbplugin_test @@ -13,6 +13,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/helper/consts" @@ -106,14 +107,18 @@ func (m *mockPlugin) SetCredentials(ctx context.Context, statements dbplugin.Sta } func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + t.Helper() + pluginDir := corehelpers.MakeTestPluginDir(t) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + PluginDirectory: pluginDir, + }, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) cluster.Start() cores := cluster.Cores sys := vault.TestDynamicSystemView(cores[0].Core, nil) - vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "", "TestPlugin_GRPC_Main", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "", "TestPlugin_GRPC_Main", []string{}) return cluster, sys } diff --git a/builtin/logical/database/mocks_test.go b/builtin/logical/database/mocks_test.go index afb1bbc79f68..4182affd2016 100644 --- a/builtin/logical/database/mocks_test.go +++ b/builtin/logical/database/mocks_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database diff --git a/builtin/logical/database/mockv4.go b/builtin/logical/database/mockv4.go index a85f307ec49d..f3a753511967 100644 --- a/builtin/logical/database/mockv4.go +++ b/builtin/logical/database/mockv4.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database diff --git a/builtin/logical/database/mockv5.go b/builtin/logical/database/mockv5.go index fecccfed209f..5f09c37101c3 100644 --- a/builtin/logical/database/mockv5.go +++ b/builtin/logical/database/mockv5.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -51,6 +51,9 @@ func (m MockDatabaseV5) Initialize(ctx context.Context, req v5.InitializeRequest "req", req) config := req.Config + if config == nil { + config = map[string]interface{}{} + } config["from-plugin"] = "this value is from the plugin itself" resp := v5.InitializeResponse{ diff --git a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go index db5cd9a53b8a..0f373f371d74 100644 --- a/builtin/logical/database/path_config_connection.go +++ b/builtin/logical/database/path_config_connection.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -9,11 +9,11 @@ import ( "fmt" "net/url" "sort" + "strings" "github.com/fatih/structs" "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/helper/versions" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" @@ -30,8 +30,9 @@ var ( // DatabaseConfig is used by the Factory function to configure a Database // object. 
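The `running_plugin_version` field added to `DatabaseConfig` below is surfaced on connection config reads, alongside the warning logic further down in `connectionReadHandler`. A rough sketch of checking it from a client, assuming a mount at `database/` and a connection named `my-postgres` (both illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Reads VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Read("database/config/my-postgres")
	if err != nil {
		log.Fatal(err)
	}

	configured, _ := secret.Data["plugin_version"].(string)
	running, _ := secret.Data["running_plugin_version"].(string)
	if configured != "" && configured != running {
		fmt.Printf("configured %q but running %q; a reload may be needed\n", configured, running)
	}
}
```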
type DatabaseConfig struct { - PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` - PluginVersion string `json:"plugin_version" structs:"plugin_version" mapstructure:"plugin_version"` + PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` + PluginVersion string `json:"plugin_version" structs:"plugin_version" mapstructure:"plugin_version"` + RunningPluginVersion string `json:"running_plugin_version,omitempty" structs:"running_plugin_version,omitempty" mapstructure:"running_plugin_version,omitempty"` // ConnectionDetails stores the database specific connection settings needed // by each database type. ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"` @@ -39,7 +40,8 @@ type DatabaseConfig struct { RootCredentialsRotateStatements []string `json:"root_credentials_rotate_statements" structs:"root_credentials_rotate_statements" mapstructure:"root_credentials_rotate_statements"` - PasswordPolicy string `json:"password_policy" structs:"password_policy" mapstructure:"password_policy"` + PasswordPolicy string `json:"password_policy" structs:"password_policy" mapstructure:"password_policy"` + VerifyConnection bool `json:"verify_connection" structs:"verify_connection" mapstructure:"verify_connection"` } func (c *DatabaseConfig) SupportsCredentialType(credentialType v5.CredentialType) bool { @@ -62,6 +64,13 @@ func (c *DatabaseConfig) SupportsCredentialType(credentialType v5.CredentialType func pathResetConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "reset", + OperationSuffix: "connection", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -87,17 +96,109 @@ func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { return logical.ErrorResponse(respErrEmptyName), nil } - // Close plugin and delete the entry in the connections cache. - if err := b.ClearConnection(name); err != nil { + if err := b.reloadConnection(ctx, req.Storage, name); err != nil { return nil, err } - // Execute plugin again, we don't need the object so throw away. - if _, err := b.GetConnection(ctx, req.Storage, name); err != nil { + b.dbEvent(ctx, "reset", req.Path, name, false) + return nil, nil + } +} + +func (b *databaseBackend) reloadConnection(ctx context.Context, storage logical.Storage, name string) error { + // Close plugin and delete the entry in the connections cache. + if err := b.ClearConnection(name); err != nil { + return err + } + + // Execute plugin again, we don't need the object so throw away. + if _, err := b.GetConnection(ctx, storage, name); err != nil { + return err + } + + return nil +} + +// pathReloadPlugin reloads all connections using a named plugin. 
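The handler that follows backs a new `database/reload/:plugin_name` endpoint. A sketch of driving it through the Vault API client; the `database` mount path and the helper name are assumptions, while the `connections` response key comes from the handler below:

```go
import "github.com/hashicorp/vault/api"

// reloadDatabasePlugin triggers reload/<plugin_name> on a database mount and
// returns the names of the connections that were re-instantiated.
func reloadDatabasePlugin(client *api.Client, pluginName string) ([]interface{}, error) {
	resp, err := client.Logical().Write("database/reload/"+pluginName, nil)
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, nil
	}
	conns, _ := resp.Data["connections"].([]interface{})
	return conns, nil
}
```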
+func pathReloadPlugin(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("reload/%s", framework.GenericNameRegex("plugin_name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "reload", + OperationSuffix: "plugin", + }, + + Fields: map[string]*framework.FieldSchema{ + "plugin_name": { + Type: framework.TypeString, + Description: "Name of the database plugin", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.reloadPlugin(), + }, + + HelpSynopsis: pathReloadPluginHelpSyn, + HelpDescription: pathReloadPluginHelpDesc, + } +} + +// reloadPlugin reloads all instances of a named plugin by closing the existing +// instances and creating new ones. +func (b *databaseBackend) reloadPlugin() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + pluginName := data.Get("plugin_name").(string) + if pluginName == "" { + return logical.ErrorResponse(respErrEmptyPluginName), nil + } + + connNames, err := req.Storage.List(ctx, "config/") + if err != nil { return nil, err } + reloaded := []string{} + for _, connName := range connNames { + entry, err := req.Storage.Get(ctx, fmt.Sprintf("config/%s", connName)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %w", err) + } + if entry == nil { + continue + } - return nil, nil + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + if config.PluginName == pluginName { + if err := b.reloadConnection(ctx, req.Storage, connName); err != nil { + var successfullyReloaded string + if len(reloaded) > 0 { + successfullyReloaded = fmt.Sprintf("successfully reloaded %d connection(s): %s; ", + len(reloaded), + strings.Join(reloaded, ", ")) + } + return nil, fmt.Errorf("%sfailed to reload connection %q: %w", successfullyReloaded, connName, err) + } + reloaded = append(reloaded, connName) + } + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "connections": reloaded, + "count": len(reloaded), + }, + } + + if len(reloaded) == 0 { + resp.AddWarning(fmt.Sprintf("no connections were found with plugin_name %q", pluginName)) + } + b.dbEvent(ctx, "reload", req.Path, "", true, "plugin_name", pluginName) + return resp, nil } } @@ -106,6 +207,11 @@ func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -152,11 +258,36 @@ func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { }, ExistenceCheck: b.connectionExistenceCheck(), - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.CreateOperation: b.connectionWriteHandler(), - logical.UpdateOperation: b.connectionWriteHandler(), - logical.ReadOperation: b.connectionReadHandler(), - logical.DeleteOperation: b.connectionDeleteHandler(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.connectionWriteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "connection", + }, + }, + 
logical.UpdateOperation: &framework.PathOperation{ + Callback: b.connectionWriteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "connection", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.connectionReadHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "connection-configuration", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.connectionDeleteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "connection-configuration", + }, + }, }, HelpSynopsis: pathConfigConnectionHelpSyn, @@ -184,6 +315,11 @@ func pathListPluginConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("config/?$"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "connections", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.connectionListHandler(), }, @@ -240,10 +376,24 @@ func (b *databaseBackend) connectionReadHandler() framework.OperationFunc { delete(config.ConnectionDetails, "password") delete(config.ConnectionDetails, "private_key") + delete(config.ConnectionDetails, "service_account_json") - return &logical.Response{ - Data: structs.New(config).Map(), - }, nil + resp := &logical.Response{} + if dbi, err := b.GetConnectionSkipVerify(ctx, req.Storage, name); err == nil { + config.RunningPluginVersion = dbi.runningPluginVersion + if config.PluginVersion != "" && config.PluginVersion != config.RunningPluginVersion { + warning := fmt.Sprintf("Plugin version is configured as %q, but running %q", config.PluginVersion, config.RunningPluginVersion) + if pinnedVersion, _ := b.getPinnedVersion(ctx, config.PluginName); pinnedVersion == config.RunningPluginVersion { + warning += " because that version is pinned" + } else { + warning += " either due to a pinned version or because the plugin was upgraded and not yet reloaded" + } + resp.AddWarning(warning) + } + } + + resp.Data = structs.New(config).Map() + return resp, nil } } @@ -264,6 +414,7 @@ func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc { return nil, err } + b.dbEvent(ctx, "config-delete", req.Path, name, true) return nil, nil } } @@ -272,15 +423,15 @@ func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc { // both builtin and plugin database types. 
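With `verify_connection` now persisted on `DatabaseConfig`, the write handler below reuses the stored value on later updates unless the field is sent again, rather than consuming it once from the request. A sketch of disabling it when configuring a connection; all connection details here are placeholders:

```go
import "github.com/hashicorp/vault/api"

// configureWithoutVerify writes a connection config with verification
// disabled; the flag is stored with the config rather than being
// request-scoped.
func configureWithoutVerify(client *api.Client) error {
	_, err := client.Logical().Write("database/config/my-postgres", map[string]interface{}{
		"plugin_name":       "postgresql-database-plugin",
		"connection_url":    "postgresql://{{username}}:{{password}}@localhost:5432/postgres",
		"username":          "vaultadmin",
		"password":          "example-password",
		"allowed_roles":     "*",
		"verify_connection": false,
	})
	return err
}
```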
func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - verifyConnection := data.Get("verify_connection").(bool) - name := data.Get("name").(string) if name == "" { return logical.ErrorResponse(respErrEmptyName), nil } // Baseline - config := &DatabaseConfig{} + config := &DatabaseConfig{ + VerifyConnection: true, + } entry, err := req.Storage.Get(ctx, fmt.Sprintf("config/%s", name)) if err != nil { @@ -292,6 +443,13 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { } } + // If this value was provided as part of the request we want to set it to this value + if verifyConnectionRaw, ok := data.GetOk("verify_connection"); ok { + config.VerifyConnection = verifyConnectionRaw.(bool) + } else if req.Operation == logical.CreateOperation { + config.VerifyConnection = data.Get("verify_connection").(bool) + } + if pluginNameRaw, ok := data.GetOk("plugin_name"); ok { config.PluginName = pluginNameRaw.(string) } else if req.Operation == logical.CreateOperation { @@ -301,58 +459,9 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { return logical.ErrorResponse(respErrEmptyPluginName), nil } - if pluginVersionRaw, ok := data.GetOk("plugin_version"); ok { - config.PluginVersion = pluginVersionRaw.(string) - } - - var builtinShadowed bool - if unversionedPlugin, err := b.System().LookupPlugin(ctx, config.PluginName, consts.PluginTypeDatabase); err == nil && !unversionedPlugin.Builtin { - builtinShadowed = true - } - switch { - case config.PluginVersion != "": - semanticVersion, err := version.NewVersion(config.PluginVersion) - if err != nil { - return logical.ErrorResponse("version %q is not a valid semantic version: %s", config.PluginVersion, err), nil - } - - // Canonicalize the version. - config.PluginVersion = "v" + semanticVersion.String() - - if config.PluginVersion == versions.GetBuiltinVersion(consts.PluginTypeDatabase, config.PluginName) { - if builtinShadowed { - return logical.ErrorResponse("database plugin %q, version %s not found, as it is"+ - " overridden by an unversioned plugin of the same name. Omit `plugin_version` to use the unversioned plugin", config.PluginName, config.PluginVersion), nil - } - - config.PluginVersion = "" - } - case builtinShadowed: - // We'll select the unversioned plugin that's been registered. - case req.Operation == logical.CreateOperation: - // No version provided and no unversioned plugin of that name available. - // Pin to the current latest version if any versioned plugins are registered. - plugins, err := b.System().ListVersionedPlugins(ctx, consts.PluginTypeDatabase) - if err != nil { - return nil, err - } - - var versionedCandidates []pluginutil.VersionedPlugin - for _, plugin := range plugins { - if !plugin.Builtin && plugin.Name == config.PluginName && plugin.Version != "" { - versionedCandidates = append(versionedCandidates, plugin) - } - } - - if len(versionedCandidates) != 0 { - // Sort in reverse order. 
- sort.SliceStable(versionedCandidates, func(i, j int) bool { - return versionedCandidates[i].SemanticVersion.GreaterThan(versionedCandidates[j].SemanticVersion) - }) - - config.PluginVersion = "v" + versionedCandidates[0].SemanticVersion.String() - b.logger.Debug(fmt.Sprintf("pinning %q database plugin version %q from candidates %v", config.PluginName, config.PluginVersion, versionedCandidates)) - } + pluginVersion, respErr, err := b.selectPluginVersion(ctx, config, data, req.Operation) + if respErr != nil || err != nil { + return respErr, err } if allowedRolesRaw, ok := data.GetOk("allowed_roles"); ok { @@ -401,14 +510,14 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { } // Create a database plugin and initialize it. - dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + dbw, err := newDatabaseWrapper(ctx, config.PluginName, pluginVersion, b.System(), b.logger) if err != nil { return logical.ErrorResponse("error creating database object: %s", err), nil } initReq := v5.InitializeRequest{ Config: config.ConnectionDetails, - VerifyConnection: verifyConnection, + VerifyConnection: config.VerifyConnection, } initResp, err := dbw.Initialize(ctx, initReq) if err != nil { @@ -420,10 +529,11 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { b.Logger().Debug("created database object", "name", name, "plugin_name", config.PluginName) // Close and remove the old connection - oldConn := b.connPut(name, &dbPluginInstance{ - database: dbw, - name: name, - id: id, + oldConn := b.connections.Put(name, &dbPluginInstance{ + database: dbw, + name: name, + id: id, + runningPluginVersion: pluginVersion, }) if oldConn != nil { oldConn.Close() @@ -458,6 +568,7 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { "Vault (or the sdk if using a custom plugin) to gain password policy support", config.PluginName)) } + b.dbEvent(ctx, "config-write", req.Path, name, true) if len(resp.Warnings) == 0 { return nil, nil } @@ -478,6 +589,92 @@ func storeConfig(ctx context.Context, storage logical.Storage, name string, conf return nil } +func (b *databaseBackend) getPinnedVersion(ctx context.Context, pluginName string) (string, error) { + extendedSys, ok := b.System().(logical.ExtendedSystemView) + if !ok { + return "", fmt.Errorf("database backend does not support running as an external plugin") + } + + pin, err := extendedSys.GetPinnedPluginVersion(ctx, consts.PluginTypeDatabase, pluginName) + if errors.Is(err, pluginutil.ErrPinnedVersionNotFound) { + return "", nil + } + if err != nil { + return "", err + } + + return pin.Version, nil +} + +func (b *databaseBackend) selectPluginVersion(ctx context.Context, config *DatabaseConfig, data *framework.FieldData, op logical.Operation) (string, *logical.Response, error) { + pinnedVersion, err := b.getPinnedVersion(ctx, config.PluginName) + if err != nil { + return "", nil, err + } + pluginVersionRaw, ok := data.GetOk("plugin_version") + + switch { + case ok && pinnedVersion != "": + return "", logical.ErrorResponse("cannot specify plugin_version for plugin %q as it is pinned (v%s)", config.PluginName, pinnedVersion), nil + case pinnedVersion != "": + return pinnedVersion, nil, nil + case ok: + config.PluginVersion = pluginVersionRaw.(string) + } + + var builtinShadowed bool + if unversionedPlugin, err := b.System().LookupPlugin(ctx, config.PluginName, consts.PluginTypeDatabase); err == nil && !unversionedPlugin.Builtin { + builtinShadowed = true 
+ } + switch { + case config.PluginVersion != "": + semanticVersion, err := version.NewVersion(config.PluginVersion) + if err != nil { + return "", logical.ErrorResponse("version %q is not a valid semantic version: %s", config.PluginVersion, err), nil + } + + // Canonicalize the version. + config.PluginVersion = "v" + semanticVersion.String() + + if config.PluginVersion == versions.GetBuiltinVersion(consts.PluginTypeDatabase, config.PluginName) { + if builtinShadowed { + return "", logical.ErrorResponse("database plugin %q, version %s not found, as it is"+ + " overridden by an unversioned plugin of the same name. Omit `plugin_version` to use the unversioned plugin", config.PluginName, config.PluginVersion), nil + } + + config.PluginVersion = "" + } + case builtinShadowed: + // We'll select the unversioned plugin that's been registered. + case op == logical.CreateOperation: + // No version provided and no unversioned plugin of that name available. + // Pin to the current latest version if any versioned plugins are registered. + plugins, err := b.System().ListVersionedPlugins(ctx, consts.PluginTypeDatabase) + if err != nil { + return "", nil, err + } + + var versionedCandidates []pluginutil.VersionedPlugin + for _, plugin := range plugins { + if !plugin.Builtin && plugin.Name == config.PluginName && plugin.Version != "" { + versionedCandidates = append(versionedCandidates, plugin) + } + } + + if len(versionedCandidates) != 0 { + // Sort in reverse order. + sort.SliceStable(versionedCandidates, func(i, j int) bool { + return versionedCandidates[i].SemanticVersion.GreaterThan(versionedCandidates[j].SemanticVersion) + }) + + config.PluginVersion = "v" + versionedCandidates[0].SemanticVersion.String() + b.logger.Debug(fmt.Sprintf("pinning %q database plugin version %q from candidates %v", config.PluginName, config.PluginVersion, versionedCandidates)) + } + } + + return config.PluginVersion, nil, nil +} + const pathConfigConnectionHelpSyn = ` Configure connection details to a database plugin. ` @@ -508,3 +705,12 @@ const pathResetConnectionHelpDesc = ` This path resets the database connection by closing the existing database plugin instance and running a new one. ` + +const pathReloadPluginHelpSyn = ` +Reloads all connections using a named database plugin. +` + +const pathReloadPluginHelpDesc = ` +This path resets each database connection using a named plugin by closing each +existing database plugin instance and running a new one. +` diff --git a/builtin/logical/database/path_config_connection_test.go b/builtin/logical/database/path_config_connection_test.go index 8cf06062890f..3741d82dc431 100644 --- a/builtin/logical/database/path_config_connection_test.go +++ b/builtin/logical/database/path_config_connection_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -127,7 +127,7 @@ func TestWriteConfig_PluginVersionInStorage(t *testing.T) { } func TestWriteConfig_HelpfulErrorMessageWhenBuiltinOverridden(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) t.Cleanup(cluster.Cleanup) config := logical.TestBackendConfig() diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go index ce7d118794c8..53ca3b7bdb40 100644 --- a/builtin/logical/database/path_creds_create.go +++ b/builtin/logical/database/path_creds_create.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" @@ -18,6 +19,13 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -34,6 +42,13 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { }, { Pattern: "static-creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "read", + OperationSuffix: "static-role-credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -52,8 +67,16 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { } func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (resp *logical.Response, err error) { name := data.Get("name").(string) + modified := false + defer func() { + if err == nil && (resp == nil || !resp.IsError()) { + b.dbEvent(ctx, "creds-create", req.Path, name, modified) + } else { + b.dbEvent(ctx, "creds-create-fail", req.Path, name, modified) + } + }() // Get the role role, err := b.Role(ctx, req.Storage, name) @@ -157,6 +180,27 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { // Set output credential respData["rsa_private_key"] = string(private) + case v5.CredentialTypeClientCertificate: + generator, err := newClientCertificateGenerator(role.CredentialConfig) + if err != nil { + return nil, fmt.Errorf("failed to construct credential generator: %s", err) + } + + // Generate the client certificate + cb, subject, err := generator.generate(b.GetRandomReader(), expiration, + newUserReq.UsernameConfig) + if err != nil { + return nil, fmt.Errorf("failed to generate client certificate: %w", err) + } + + // Set input credential + newUserReq.CredentialType = dbplugin.CredentialTypeClientCertificate + newUserReq.Subject = subject + + // Set output credential + respData["client_certificate"] = cb.Certificate + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType } // Overwriting the password in the event this is a legacy database @@ -166,6 +210,7 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { b.CloseIfShutdown(dbi, err) return nil, err } + modified = true respData["username"] = newUserResp.Username // Database plugins using the v4 interface generate and return the password. 
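For roles whose `credential_type` is `client_certificate`, the new case above returns the generated certificate material in the credential response. A sketch of consuming it, with an illustrative role name; the response keys mirror the `respData` fields set in the handler:

```go
import "github.com/hashicorp/vault/api"

// readClientCertCreds fetches dynamic client-certificate credentials.
func readClientCertCreds(client *api.Client, role string) (cert, key, keyType string, err error) {
	secret, err := client.Logical().Read("database/creds/" + role)
	if err != nil {
		return "", "", "", err
	}
	cert, _ = secret.Data["client_certificate"].(string)
	key, _ = secret.Data["private_key"].(string)
	keyType, _ = secret.Data["private_key_type"].(string)
	return cert, key, keyType, nil
}
```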
@@ -180,7 +225,7 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { "db_name": role.DBName, "revocation_statements": role.Statements.Revocation, } - resp := b.Secret(SecretCredsType).Response(respData, internal) + resp = b.Secret(SecretCredsType).Response(respData, internal) resp.Secret.TTL = role.DefaultTTL resp.Secret.MaxTTL = role.MaxTTL return resp, nil @@ -213,10 +258,22 @@ func (b *databaseBackend) pathStaticCredsRead() framework.OperationFunc { respData := map[string]interface{}{ "username": role.StaticAccount.Username, "ttl": role.StaticAccount.CredentialTTL().Seconds(), - "rotation_period": role.StaticAccount.RotationPeriod.Seconds(), "last_vault_rotation": role.StaticAccount.LastVaultRotation, } + if role.StaticAccount.UsesRotationPeriod() { + respData["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() + } else if role.StaticAccount.UsesRotationSchedule() { + respData["rotation_schedule"] = role.StaticAccount.RotationSchedule + if role.StaticAccount.RotationWindow.Seconds() != 0 { + respData["rotation_window"] = role.StaticAccount.RotationWindow.Seconds() + } + + // The schedule is in UTC, but we want to convert it to the local time + role.StaticAccount.Schedule.Location = time.Local + respData["ttl"] = role.StaticAccount.CredentialTTL().Seconds() + } + switch role.CredentialType { case v5.CredentialTypePassword: respData["password"] = role.StaticAccount.Password diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go index 2de4d8d2580e..a53988498000 100644 --- a/builtin/logical/database/path_roles.go +++ b/builtin/logical/database/path_roles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/queue" + "github.com/robfig/cron/v3" ) func pathListRoles(b *databaseBackend) []*framework.Path { @@ -24,6 +25,12 @@ func pathListRoles(b *databaseBackend) []*framework.Path { { Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "list", + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -34,6 +41,12 @@ func pathListRoles(b *databaseBackend) []*framework.Path { { Pattern: "static-roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "list", + OperationSuffix: "static-roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -47,7 +60,11 @@ func pathListRoles(b *databaseBackend) []*framework.Path { func pathRoles(b *databaseBackend) []*framework.Path { return []*framework.Path{ { - Pattern: "roles/" + framework.GenericNameRegex("name"), + Pattern: "roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "role", + }, Fields: fieldsForType(databaseRolePath), ExistenceCheck: b.pathRoleExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -62,7 +79,11 @@ func pathRoles(b *databaseBackend) []*framework.Path { }, { - Pattern: "static-roles/" + framework.GenericNameRegex("name"), + Pattern: "static-roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: 
&framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "static-role", + }, Fields: fieldsForType(databaseStaticRolePath), ExistenceCheck: b.pathStaticRoleExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -176,7 +197,18 @@ func staticFields() map[string]*framework.FieldSchema { Type: framework.TypeDurationSecond, Description: `Period for automatic credential rotation of the given username. Not valid unless used with - "username".`, + "username". Mutually exclusive with "rotation_schedule."`, + }, + "rotation_schedule": { + Type: framework.TypeString, + Description: `Schedule for automatic credential rotation of the + given username. Mutually exclusive with "rotation_period."`, + }, + "rotation_window": { + Type: framework.TypeDurationSecond, + Description: `The window of time in which rotations are allowed to + occur starting from a given "rotation_schedule". Requires "rotation_schedule" + to be specified`, }, "rotation_statements": { Type: framework.TypeStringSlice, @@ -185,6 +217,11 @@ func staticFields() map[string]*framework.FieldSchema { this functionality. See the plugin's API page for more information on support and formatting for this parameter.`, }, + "self_managed_password": { + Type: framework.TypeString, + Description: `Used to connect to a self-managed static account. Must + be provided by the user when root credentials are not provided.`, + }, } return fields } @@ -206,11 +243,12 @@ func (b *databaseBackend) pathStaticRoleExistenceCheck(ctx context.Context, req } func (b *databaseBackend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, databaseRolePath+data.Get("name").(string)) + name := data.Get("name").(string) + err := req.Storage.Delete(ctx, databaseRolePath+name) if err != nil { return nil, err } - + b.dbEvent(ctx, "role-delete", req.Path, name, true) return nil, nil } @@ -251,6 +289,7 @@ func (b *databaseBackend) pathStaticRoleDelete(ctx context.Context, req *logical } } + b.dbEvent(ctx, "static-role-delete", req.Path, name, true) return nil, merr.ErrorOrNil() } @@ -273,10 +312,20 @@ func (b *databaseBackend) pathStaticRoleRead(ctx context.Context, req *logical.R if role.StaticAccount != nil { data["username"] = role.StaticAccount.Username data["rotation_statements"] = role.Statements.Rotation - data["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() if !role.StaticAccount.LastVaultRotation.IsZero() { data["last_vault_rotation"] = role.StaticAccount.LastVaultRotation } + + // only return one of the mutually exclusive fields in the response + if role.StaticAccount.UsesRotationPeriod() { + data["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() + } else if role.StaticAccount.UsesRotationSchedule() { + data["rotation_schedule"] = role.StaticAccount.RotationSchedule + // rotation_window is only valid with rotation_schedule + if role.StaticAccount.RotationWindow != 0 { + data["rotation_window"] = role.StaticAccount.RotationWindow.Seconds() + } + } } if len(role.CredentialConfig) > 0 { @@ -456,10 +505,12 @@ func (b *databaseBackend) pathRoleCreateUpdate(ctx context.Context, req *logical return nil, err } + b.dbEvent(ctx, fmt.Sprintf("role-%s", req.Operation), req.Path, name, true) return nil, nil } func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + response := &logical.Response{} name 
:= data.Get("name").(string) if name == "" { return logical.ErrorResponse("empty role name attribute given"), nil } @@ -516,12 +567,17 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l } role.StaticAccount.Username = username - // If it's a Create operation, both username and rotation_period must be included - rotationPeriodSecondsRaw, ok := data.GetOk("rotation_period") - if !ok && createRole { - return logical.ErrorResponse("rotation_period is required to create static accounts"), nil + rotationPeriodSecondsRaw, rotationPeriodOk := data.GetOk("rotation_period") + rotationScheduleRaw, rotationScheduleOk := data.GetOk("rotation_schedule") + rotationWindowSecondsRaw, rotationWindowOk := data.GetOk("rotation_window") + + if rotationScheduleOk && rotationPeriodOk { + return logical.ErrorResponse("mutually exclusive fields rotation_period and rotation_schedule were both specified; only one of them can be provided"), nil + } else if createRole && (!rotationScheduleOk && !rotationPeriodOk) { + return logical.ErrorResponse("one of rotation_schedule or rotation_period must be provided to create a static account"), nil } - if ok { + + if rotationPeriodOk { rotationPeriodSeconds := rotationPeriodSecondsRaw.(int) if rotationPeriodSeconds < defaultQueueTickSeconds { // If rotation frequency is specified, and this is an update, the value @@ -530,6 +586,38 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l return logical.ErrorResponse(fmt.Sprintf("rotation_period must be %d seconds or more", defaultQueueTickSeconds)), nil } role.StaticAccount.RotationPeriod = time.Duration(rotationPeriodSeconds) * time.Second + + if rotationWindowOk { + return logical.ErrorResponse("rotation_window is invalid with use of rotation_period"), nil + } + + // Unset rotation schedule and window if rotation period is set since + // these are mutually exclusive + role.StaticAccount.RotationSchedule = "" + role.StaticAccount.RotationWindow = 0 + } + + if rotationScheduleOk { + rotationSchedule := rotationScheduleRaw.(string) + parsedSchedule, err := b.schedule.Parse(rotationSchedule) + if err != nil { + return logical.ErrorResponse("could not parse rotation_schedule: %s", err), nil + } + role.StaticAccount.RotationSchedule = rotationSchedule + role.StaticAccount.Schedule = *parsedSchedule + + if rotationWindowOk { + rotationWindowSeconds := rotationWindowSecondsRaw.(int) + err := b.schedule.ValidateRotationWindow(rotationWindowSeconds) + if err != nil { + return logical.ErrorResponse("rotation_window is invalid: %s", err), nil + } + role.StaticAccount.RotationWindow = time.Duration(rotationWindowSeconds) * time.Second + } + + // Unset rotation period if rotation schedule is set since these are + // mutually exclusive + role.StaticAccount.RotationPeriod = 0 } if rotationStmtsRaw, ok := data.GetOk("rotation_statements"); ok { @@ -545,6 +633,10 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l } } + if smPasswordRaw, ok := data.GetOk("self_managed_password"); ok && createRole { + role.StaticAccount.SelfManagedPassword = smPasswordRaw.(string) + } + var credentialConfig map[string]string if raw, ok := data.GetOk("credential_config"); ok { credentialConfig = raw.(map[string]string) @@ -603,14 +695,21 @@ } } - item.Priority = lvr.Add(role.StaticAccount.RotationPeriod).Unix() + if rotationPeriodOk { + b.logger.Debug("init priority for 
RotationPeriod", "lvr", lvr, "next", lvr.Add(role.StaticAccount.RotationPeriod)) + item.Priority = lvr.Add(role.StaticAccount.RotationPeriod).Unix() + } else if rotationScheduleOk { + next := role.StaticAccount.Schedule.Next(lvr) + b.logger.Debug("init priority for Schedule", "lvr", lvr, "next", next) + item.Priority = next.Unix() + } // Add their rotation to the queue if err := b.pushItem(item); err != nil { return nil, err } - - return nil, nil + b.dbEvent(ctx, fmt.Sprintf("static-role-%s", req.Operation), req.Path, name, true) + return response, nil } type roleEntry struct { @@ -631,6 +730,8 @@ func (r *roleEntry) setCredentialType(credentialType string) error { r.CredentialType = v5.CredentialTypePassword case v5.CredentialTypeRSAPrivateKey.String(): r.CredentialType = v5.CredentialTypeRSAPrivateKey + case v5.CredentialTypeClientCertificate.String(): + r.CredentialType = v5.CredentialTypeClientCertificate default: return fmt.Errorf("invalid credential_type %q", credentialType) } @@ -672,6 +773,18 @@ func (r *roleEntry) setCredentialConfig(config map[string]string) error { if len(cm) > 0 { r.CredentialConfig = cm } + case v5.CredentialTypeClientCertificate: + generator, err := newClientCertificateGenerator(c) + if err != nil { + return err + } + cm, err := generator.configMap() + if err != nil { + return err + } + if len(cm) > 0 { + r.CredentialConfig = cm + } } return nil @@ -681,6 +794,12 @@ type staticAccount struct { // Username to create or assume management for static accounts Username string `json:"username"` + // SelfManagedPassword is used to make a dedicated connection to the DB + // user specified by Username. The credentials will leverage the existing + // static role mechanisms to handle password rotations. Required when root + // credentials are not provided. + SelfManagedPassword string `json:"self_managed_password"` + // Password is the current password credential for static accounts. As an input, // this is used/required when trying to assume management of an existing static // account. Returned on credential request if the role's credential type is @@ -696,24 +815,102 @@ type staticAccount struct { // LastVaultRotation represents the last time Vault rotated the password LastVaultRotation time.Time `json:"last_vault_rotation"` + // NextVaultRotation represents the next time Vault is expected to rotate + // the password + NextVaultRotation time.Time `json:"next_vault_rotation"` + // RotationPeriod is the number of seconds between each rotation, effectively a // "time to live". This value is compared to the LastVaultRotation to // determine if a password needs to be rotated RotationPeriod time.Duration `json:"rotation_period"` + // RotationSchedule is a "cron style" string representing the allowed + // schedule for each rotation. + // e.g. "1 0 * * *" would rotate at one minute past midnight (00:01) every + // day. + RotationSchedule string `json:"rotation_schedule"` + + // RotationWindow is the number of seconds in which rotations are allowed to + // occur starting from a given rotation_schedule. + RotationWindow time.Duration `json:"rotation_window"` + + // Schedule holds the parsed "cron style" string representing the allowed + // schedule for each rotation. 
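The `rotation_schedule` string described above is a standard five-field cron spec; this backend imports `github.com/robfig/cron/v3` for the parsed representation, and the tests later in this diff show that six-field (seconds-granularity) specs are rejected. A standalone sketch of the semantics, using `cron.ParseStandard` directly as a rough stand-in for the backend's `b.schedule.Parse` wrapper (an assumption, not the exact internal code path):

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// Fields: minute hour day-of-month month day-of-week.
	// "1 0 * * *" fires at 00:01 every day; a spec with a seconds field,
	// such as "*/10 * * * * *", fails to parse here as in the backend.
	sched, err := cron.ParseStandard("1 0 * * *")
	if err != nil {
		panic(err)
	}
	fmt.Println("next rotation:", sched.Next(time.Now()))
}
```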
+ Schedule cron.SpecSchedule `json:"schedule"` + // RevokeUser is a boolean flag to indicate if Vault should revoke the // database user when the role is deleted RevokeUserOnDelete bool `json:"revoke_user_on_delete"` } -// NextRotationTime calculates the next rotation by adding the Rotation Period -// to the last known vault rotation +// NextRotationTime calculates the next rotation for period and schedule-based +// rotations. +// +// Period-based expiries are calculated by adding the Rotation Period to the +// last known vault rotation. Schedule-based expiries are calculated by +// querying the schedule for the next rotation time after the current time. func (s *staticAccount) NextRotationTime() time.Time { - return s.LastVaultRotation.Add(s.RotationPeriod) + if s.UsesRotationPeriod() { + return s.LastVaultRotation.Add(s.RotationPeriod) + } + return s.Schedule.Next(time.Now()) +} + +// NextRotationTimeFromInput calculates the next rotation time for period and +// schedule-based roles based on the input. +func (s *staticAccount) NextRotationTimeFromInput(input time.Time) time.Time { + if s.UsesRotationPeriod() { + return input.Add(s.RotationPeriod) + } + return s.Schedule.Next(input) +} + +// UsesRotationSchedule returns true if the given static account has been +// configured to rotate credentials on a schedule (i.e. NOT on a rotation period). +func (s *staticAccount) UsesRotationSchedule() bool { + return s.RotationSchedule != "" && s.RotationPeriod == 0 +} + +// UsesRotationPeriod returns true if the given static account has been +// configured to rotate credentials on a period (i.e. NOT on a rotation schedule). +func (s *staticAccount) UsesRotationPeriod() bool { + return s.RotationPeriod != 0 && s.RotationSchedule == "" +} + +// IsInsideRotationWindow returns true if the current time t is within a given +// static account's rotation window. +// +// Returns true if the rotation window is not set. In this case, the rotation +// window is effectively the span of time between two consecutive rotation +// schedules and we should not prevent rotation. +func (s *staticAccount) IsInsideRotationWindow(t time.Time) bool { + if s.UsesRotationSchedule() && s.RotationWindow != 0 { + return t.Before(s.NextVaultRotation.Add(s.RotationWindow)) + } + return true +} + +// ShouldRotate returns true if a given static account should have its +// credentials rotated. +// +// This will return true when the priority <= the current Unix time. If this +// static account is schedule-based with a rotation window, this method will +// return false if t is outside the rotation window. +func (s *staticAccount) ShouldRotate(priority int64, t time.Time) bool { + return priority <= t.Unix() && s.IsInsideRotationWindow(t) +} + +// SetNextVaultRotation sets NextVaultRotation to the next rotation time after t, +// based on either the account's rotation period or its rotation schedule. +func (s *staticAccount) SetNextVaultRotation(t time.Time) { + if s.UsesRotationPeriod() { + s.NextVaultRotation = t.Add(s.RotationPeriod) + } else { + s.NextVaultRotation = s.Schedule.Next(t) + } } // CredentialTTL calculates the approximate time remaining until the credential is -// no longer valid. This is approximate because the periodic rotation is only +// no longer valid. This is approximate because the rotation expiry is only // checked approximately every 5 seconds, and each rotation can take a small // amount of time to process. This can result in a negative TTL time while the // rotation function processes the Static Role and performs the rotation. 
If the diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go index dc2eddeb1a04..41a2e99758aa 100644 --- a/builtin/logical/database/path_roles_test.go +++ b/builtin/logical/database/path_roles_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -205,7 +205,7 @@ func TestBackend_Roles_CredentialTypes(t *testing.T) { } func TestBackend_StaticRole_Config(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -222,7 +222,7 @@ func TestBackend_StaticRole_Config(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -254,6 +254,8 @@ func TestBackend_StaticRole_Config(t *testing.T) { path string expected map[string]interface{} err error + // use this field to check partial error strings, otherwise use err + errContains string }{ "basic": { account: map[string]interface{}{ @@ -266,12 +268,71 @@ func TestBackend_StaticRole_Config(t *testing.T) { "rotation_period": float64(5400), }, }, - "missing rotation period": { + "missing required fields": { account: map[string]interface{}{ "username": dbUser, }, path: "plugin-role-test", - err: errors.New("rotation_period is required to create static accounts"), + err: errors.New("one of rotation_schedule or rotation_period must be provided to create a static account"), + }, + "rotation_period with rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + "rotation_schedule": "* * * * *", + }, + path: "plugin-role-test", + err: errors.New("mutually exclusive fields rotation_period and rotation_schedule were both specified; only one of them can be provided"), + }, + "rotation window invalid with rotation_period": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + "rotation_window": "3600s", + }, + path: "disallowed-role", + err: errors.New("rotation_window is invalid with use of rotation_period"), + }, + "happy path for rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + }, + "happy path for rotation_schedule and rotation_window": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": "3600s", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": float64(3600), + }, + }, + "error parsing rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "foo", + }, + path: "plugin-role-test", + errContains: "could not parse rotation_schedule", + }, + "rotation_window invalid": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": "59s", + }, + path: "plugin-role-test", + errContains: "rotation_window is invalid", }, "disallowed role config": { account: map[string]interface{}{ @@ -281,6 +342,14 @@ func TestBackend_StaticRole_Config(t *testing.T) { path: "disallowed-role", err: errors.New("\"disallowed-role\" is not 
an allowed role"), }, + "fails to parse cronSpec with seconds": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "*/10 * * * * *", + }, + path: "plugin-role-test-1", + errContains: "could not parse rotation_schedule", + }, } for name, tc := range testCases { @@ -305,7 +374,12 @@ func TestBackend_StaticRole_Config(t *testing.T) { } resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + if tc.errContains != "" { + if !strings.Contains(resp.Error().Error(), tc.errContains) { + t.Fatalf("expected err message: (%s), got (%s), response error: (%s)", tc.err, err, resp.Error()) + } + return + } else if err != nil || (resp != nil && resp.IsError()) { if tc.err == nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } @@ -341,7 +415,14 @@ func TestBackend_StaticRole_Config(t *testing.T) { expected := tc.expected actual := make(map[string]interface{}) - dataKeys := []string{"username", "password", "last_vault_rotation", "rotation_period"} + dataKeys := []string{ + "username", + "password", + "last_vault_rotation", + "rotation_period", + "rotation_schedule", + "rotation_window", + } for _, key := range dataKeys { if v, ok := resp.Data[key]; ok { actual[key] = v @@ -388,8 +469,188 @@ func TestBackend_StaticRole_Config(t *testing.T) { } } +func TestBackend_StaticRole_ReadCreds(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t) + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + + verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + testCases := map[string]struct { + account map[string]interface{} + path string + expected map[string]interface{} + }{ + "happy path for rotation_period": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_period": float64(5400), + }, + }, + "happy path for rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + }, + "happy path for rotation_schedule and rotation_window": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": "3600s", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", 
+ "rotation_window": float64(3600), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + } + + for k, v := range tc.account { + data[k] = v + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := tc.expected + actual := make(map[string]interface{}) + dataKeys := []string{ + "username", + "password", + "last_vault_rotation", + "rotation_period", + "rotation_schedule", + "rotation_window", + "ttl", + } + for _, key := range dataKeys { + if v, ok := resp.Data[key]; ok { + actual[key] = v + } + } + + if len(tc.expected) > 0 { + // verify a password is returned, but we don't care what it's value is + if actual["password"] == "" { + t.Fatalf("expected result to contain password, but none found") + } + if actual["ttl"] == "" { + t.Fatalf("expected result to contain ttl, but none found") + } + if v, ok := actual["last_vault_rotation"].(time.Time); !ok { + t.Fatalf("expected last_vault_rotation to be set to time.Time type, got: %#v", v) + } + + // delete these values before the comparison, since we can't know them in + // advance + delete(actual, "password") + delete(actual, "ttl") + delete(actual, "last_vault_rotation") + if diff := deep.Equal(expected, actual); diff != nil { + t.Fatal(diff) + } + } + + // Delete role for next run + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + }) + } +} + func TestBackend_StaticRole_Updates(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -406,7 +667,7 @@ func TestBackend_StaticRole_Updates(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -581,8 +842,114 @@ func TestBackend_StaticRole_Updates(t *testing.T) { } } +func TestBackend_StaticRole_Updates_RotationSchedule(t *testing.T) { + ctx := context.Background() + b, storage, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, storage) + + data := map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "mockv5", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_schedule": "0 0 */2 * * *", + "rotation_window": "1h", + } + + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). 
+ Once() + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + rotation := resp.Data["rotation_schedule"].(string) + window := resp.Data["rotation_window"].(float64) + + // update rotation_schedule and window + updateData := map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "mockv5", + "username": dbUser, + "rotation_schedule": "0 0 */1 * * *", + "rotation_window": "2h", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: updateData, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // re-read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newRotation := resp.Data["rotation_schedule"].(string) + if newRotation == rotation { + t.Fatalf("expected change in rotation, but got old value: %#v", newRotation) + } + newWindow := resp.Data["rotation_window"].(float64) + if newWindow == window { + t.Fatalf("expected change in rotation_window, but got old value: %#v", newWindow) + } + + // verify that rotation_schedule is only required when creating + updateData = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "mockv5", + "username": dbUser, + "rotation_statements": testRoleStaticUpdateRotation, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } +} + func TestBackend_StaticRole_Role_name_check(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -599,7 +966,7 @@ func TestBackend_StaticRole_Role_name_check(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -828,6 +1195,93 @@ func TestWALsDeletedOnRoleDeletion(t *testing.T) { requireWALs(t, storage, 1) } +func TestIsInsideRotationWindow(t *testing.T) { + for _, tc := range []struct { + name string + expected bool + data map[string]interface{} + now time.Time + timeModifier func(t time.Time) time.Time + }{ + { + "always returns true for rotation_period type", + true, + map[string]interface{}{ + "rotation_period": 
"86400s", + }, + time.Now(), + nil, + }, + { + "always returns true for rotation_schedule when no rotation_window set", + true, + map[string]interface{}{ + "rotation_schedule": "0 0 */2 * * *", + }, + time.Now(), + nil, + }, + { + "returns true for rotation_schedule when inside rotation_window", + true, + map[string]interface{}{ + "rotation_schedule": "0 0 */2 * * *", + "rotation_window": "3600s", + }, + time.Now(), + func(t time.Time) time.Time { + // set current time just inside window + return t.Add(-3640 * time.Second) + }, + }, + { + "returns false for rotation_schedule when outside rotation_window", + false, + map[string]interface{}{ + "rotation_schedule": "0 0 */2 * * *", + "rotation_window": "3600s", + }, + time.Now(), + func(t time.Time) time.Time { + // set current time just outside window + return t.Add(-3560 * time.Second) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + b, s, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, s) + + testTime := tc.now + if tc.data["rotation_schedule"] != nil && tc.timeModifier != nil { + rotationSchedule := tc.data["rotation_schedule"].(string) + schedule, err := b.schedule.Parse(rotationSchedule) + if err != nil { + t.Fatalf("could not parse rotation_schedule: %s", err) + } + next1 := schedule.Next(tc.now) // the next rotation time we expect + next2 := schedule.Next(next1) // the next rotation time after that + testTime = tc.timeModifier(next2) + } + + tc.data["username"] = "hashicorp" + tc.data["db_name"] = "mockv5" + createRoleWithData(t, b, s, mockDB, "test-role", tc.data) + role, err := b.StaticRole(ctx, s, "test-role") + if err != nil { + t.Fatal(err) + } + + isInsideWindow := role.StaticAccount.IsInsideRotationWindow(testTime) + if tc.expected != isInsideWindow { + t.Fatalf("expected %t, got %t", tc.expected, isInsideWindow) + } + }) + } +} + func createRole(t *testing.T, b *databaseBackend, storage logical.Storage, mockDB *mockNewDatabase, roleName string) { t.Helper() mockDB.On("UpdateUser", mock.Anything, mock.Anything). @@ -848,6 +1302,22 @@ func createRole(t *testing.T, b *databaseBackend, storage logical.Storage, mockD } } +func createRoleWithData(t *testing.T, b *databaseBackend, s logical.Storage, mockDB *mockNewDatabase, roleName string, data map[string]interface{}) { + t.Helper() + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). + Once() + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + roleName, + Storage: s, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(resp, err) + } +} + const testRoleStaticCreate = ` CREATE ROLE "{{name}}" WITH LOGIN diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index f71db7e14b4d..f2f7fa321ea9 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -19,6 +19,13 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "rotate-root/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "rotate", + OperationSuffix: "root-credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -39,6 +46,13 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { }, { Pattern: "rotate-role/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "rotate", + OperationSuffix: "static-role-credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -61,8 +75,17 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { } func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (resp *logical.Response, err error) { name := data.Get("name").(string) + modified := false + defer func() { + if err == nil { + b.dbEvent(ctx, "rotate-root", req.Path, name, modified) + } else { + b.dbEvent(ctx, "rotate-root-fail", req.Path, name, modified) + } + }() + if name == "" { return logical.ErrorResponse(respErrEmptyName), nil } @@ -145,6 +168,7 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF if newConfigDetails != nil { config.ConnectionDetails = newConfigDetails } + modified = true // 1.12.0 and 1.12.1 stored builtin plugins in storage, but 1.12.2 reverted // that, so clean up any pre-existing stored builtin versions on write. @@ -165,8 +189,16 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF } func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (_ *logical.Response, err error) { name := data.Get("name").(string) + modified := false + defer func() { + if err == nil { + b.dbEvent(ctx, "rotate", req.Path, name, modified) + } else { + b.dbEvent(ctx, "rotate-fail", req.Path, name, modified) + } + }() if name == "" { return logical.ErrorResponse("empty role name attribute given"), nil } @@ -210,9 +242,10 @@ func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationF item.Value = resp.WALID } } else { - item.Priority = resp.RotationTime.Add(role.StaticAccount.RotationPeriod).Unix() + item.Priority = role.StaticAccount.NextRotationTimeFromInput(resp.RotationTime).Unix() // Clear any stored WAL ID as we must have successfully deleted our WAL to get here. item.Value = "" + modified = true } // Add their rotation to the queue diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go index 22ce6168663e..6e1b1dc48400 100644 --- a/builtin/logical/database/rollback.go +++ b/builtin/logical/database/rollback.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
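
The rotate handlers in path_rotate_credentials.go above switch to named return values so a single deferred closure can emit a success or failure event on every exit path. A stripped-down sketch of that pattern; emit is a hypothetical stand-in for the backend's dbEvent helper:

```
package main

import "fmt"

// emit is a hypothetical stand-in for the backend's dbEvent helper.
func emit(eventType, name string, modified bool) {
	fmt.Printf("event=%s name=%s modified=%t\n", eventType, name, modified)
}

// rotate demonstrates the named-return pattern: the deferred closure sees
// the final value of err, so every return emits exactly one event.
func rotate(name string) (err error) {
	modified := false
	defer func() {
		if err == nil {
			emit("rotate-root", name, modified)
		} else {
			emit("rotate-root-fail", name, modified)
		}
	}()

	// ... perform the rotation; flip modified once state actually changes ...
	modified = true
	return nil
}

func main() { _ = rotate("db") }
```
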
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go index 8f36fe26a795..47c768374296 100644 --- a/builtin/logical/database/rollback_test.go +++ b/builtin/logical/database/rollback_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -27,7 +27,7 @@ const ( // - Password has been altered on the database // - Password has not been updated in storage func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -44,7 +44,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { } defer lb.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") @@ -170,7 +170,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { // - Password has not been altered on the database // - Password has not been updated in storage func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -183,7 +183,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { } defer lb.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") @@ -274,7 +274,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { // - Password has been altered on the database // - Password has been updated in storage func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -291,7 +291,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { } defer lb.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") diff --git a/builtin/logical/database/rotation.go b/builtin/logical/database/rotation.go index 1ef54aecac32..d4d41cf570b5 100644 --- a/builtin/logical/database/rotation.go +++ b/builtin/logical/database/rotation.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
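
The rotation.go changes that follow lean on one convention: each static role sits in a priority queue, and its Priority is the Unix timestamp of its next rotation, whether that comes from a fixed rotation_period or a cron rotation_schedule. A small sketch of that convention using the sdk/queue package the file already imports:

```
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/queue"
)

func main() {
	q := queue.New()

	// Priority is "when should this role rotate next", as a Unix timestamp.
	// A period-based role adds a fixed duration; a schedule-based role would
	// ask its cron schedule for the next fire time instead.
	_ = q.Push(&queue.Item{
		Key:      "plugin-role-test",
		Priority: time.Now().Add(time.Hour).Unix(),
	})

	item, err := q.Pop() // pops the lowest-priority (soonest-due) item
	if err != nil {
		panic(err)
	}
	fmt.Println(item.Key, time.Unix(item.Priority, 0))
}
```
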
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

package database
@@ -13,7 +13,6 @@ import (
"github.com/hashicorp/go-secure-stdlib/strutil"
v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/framework"
- "github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/locksutil"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/sdk/queue"
@@ -92,9 +91,9 @@ func (b *databaseBackend) populateQueue(ctx context.Context, s logical.Storage)
log.Warn("unable to delete WAL", "error", err, "WAL ID", walEntry.walID)
}
} else {
- log.Info("found WAL for role",
- "role", item.Key,
- "WAL ID", walEntry.walID)
+ // previous rotation attempt was interrupted, so we set the
+ // Priority as highest to be processed immediately
+ log.Info("found WAL for role", "role", item.Key, "WAL ID", walEntry.walID)
item.Value = walEntry.walID
item.Priority = time.Now().Unix()
}
@@ -191,41 +190,77 @@ func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storag
return false
}

+ roleName := item.Key
+ logger := b.Logger().With("role", roleName)
+
// Grab the exclusive lock for this Role, to make sure we don't incur any
// writes during the rotation process
- lock := locksutil.LockForKey(b.roleLocks, item.Key)
+ lock := locksutil.LockForKey(b.roleLocks, roleName)
lock.Lock()
defer lock.Unlock()

// Validate the role still exists
- role, err := b.StaticRole(ctx, s, item.Key)
+ role, err := b.StaticRole(ctx, s, roleName)
if err != nil {
- b.logger.Error("unable to load role", "role", item.Key, "error", err)
+ logger.Error("unable to load role", "error", err)
+
+ item.Priority = time.Now().Add(10 * time.Second).Unix()
if err := b.pushItem(item); err != nil {
- b.logger.Error("unable to push item on to queue", "error", err)
+ logger.Error("unable to push item on to queue", "error", err)
}
return true
}
if role == nil {
- b.logger.Warn("role not found", "role", item.Key, "error", err)
+ logger.Warn("role not found", "error", err)
return true
}

- // If "now" is less than the Item priority, then this item does not need to
- // be rotated
- if time.Now().Unix() < item.Priority {
+ logger = logger.With("database", role.DBName)
+
+ input := &setStaticAccountInput{
+ RoleName: roleName,
+ Role: role,
+ }
+
+ now := time.Now()
+ if !role.StaticAccount.ShouldRotate(item.Priority, now) {
+ if !role.StaticAccount.IsInsideRotationWindow(now) {
+ // This is a schedule-based rotation and we are outside the rotation
+ // window, so update the priority and NextVaultRotation
+ item.Priority = role.StaticAccount.NextRotationTimeFromInput(now).Unix()
+ role.StaticAccount.SetNextVaultRotation(now)
+ b.logger.Trace("outside schedule-based rotation window, update priority", "next", role.StaticAccount.NextRotationTime())
+
+ // write to storage after updating NextVaultRotation so the next
+ // time this item is checked for rotation our role that we retrieve
+ // from storage reflects that change
+ entry, err := logical.StorageEntryJSON(databaseStaticRolePath+input.RoleName, input.Role)
+ if err != nil {
+ logger.Error("unable to encode entry for storage", "error", err)
+ return false
+ }
+ if err := s.Put(ctx, entry); err != nil {
+ logger.Error("unable to write to storage", "error", err)
+ return false
+ }
+ }
+ // do not rotate now, push item back onto queue to be rotated later
if err := b.pushItem(item); err != nil {
- b.logger.Error("unable to push item on to queue", "error", err)
+ logger.Error("unable to push item on to queue",
"error", err) } // Break out of the for loop return false } - input := &setStaticAccountInput{ - RoleName: item.Key, - Role: role, - } + // send an event indicating if the rotation was a success or failure + rotated := false + defer func() { + if rotated { + b.dbEvent(ctx, "rotate", "", roleName, true) + } else { + b.dbEvent(ctx, "rotate-fail", "", roleName, false) + } + }() // If there is a WAL entry related to this Role, the corresponding WAL ID // should be stored in the Item's Value field. @@ -235,7 +270,8 @@ func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storag resp, err := b.setStaticAccount(ctx, s, input) if err != nil { - b.logger.Error("unable to rotate credentials in periodic function", "error", err) + logger.Error("unable to rotate credentials in periodic function", "error", err) + // Increment the priority enough so that the next call to this method // likely will not attempt to rotate it, as a back-off of sorts item.Priority = time.Now().Add(10 * time.Second).Unix() @@ -246,7 +282,7 @@ func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storag } if err := b.pushItem(item); err != nil { - b.logger.Error("unable to push item on to queue", "error", err) + logger.Error("unable to push item on to queue", "error", err) } // Go to next item return true @@ -260,11 +296,12 @@ func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storag } // Update priority and push updated Item to the queue - nextRotation := lvr.Add(role.StaticAccount.RotationPeriod) - item.Priority = nextRotation.Unix() + item.Priority = role.StaticAccount.NextRotationTimeFromInput(lvr).Unix() + if err := b.pushItem(item); err != nil { - b.logger.Warn("unable to push item on to queue", "error", err) + logger.Warn("unable to push item on to queue", "error", err) } + rotated = true return true } @@ -324,10 +361,19 @@ type setStaticAccountOutput struct { // // This method does not perform any operations on the priority queue. Those // tasks must be handled outside of this method. -func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storage, input *setStaticAccountInput) (*setStaticAccountOutput, error) { +func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storage, input *setStaticAccountInput) (_ *setStaticAccountOutput, err error) { if input == nil || input.Role == nil || input.RoleName == "" { return nil, errors.New("input was empty when attempting to set credentials for static account") } + modified := false + defer func() { + if err == nil { + b.dbEvent(ctx, "static-creds-create", "", input.RoleName, modified) + } else { + b.dbEvent(ctx, "static-creds-create-fail", "", input.RoleName, modified) + } + }() + // Re-use WAL ID if present, otherwise PUT a new WAL output := &setStaticAccountOutput{WALID: input.WALID} @@ -367,6 +413,11 @@ func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storag Commands: input.Role.Statements.Rotation, } + // Add external password to request so we can use static account connection + if input.Role.StaticAccount.SelfManagedPassword != "" { + updateReq.SelfManagedPassword = input.Role.StaticAccount.SelfManagedPassword + } + // Use credential from input if available. 
This happens if we're restoring from
// a WAL item or processing the rotation queue with an item that has a WAL
// associated with it
@@ -481,11 +532,19 @@ func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storag
b.CloseIfShutdown(dbi, err)
return output, fmt.Errorf("error setting credentials: %w", err)
}
+ modified = true
+
+ // static user password successfully updated in external system
+ // update self-managed password if available for future connections
+ if input.Role.StaticAccount.SelfManagedPassword != "" {
+ input.Role.StaticAccount.SelfManagedPassword = input.Role.StaticAccount.Password
+ }

// Store updated role information
// lvr is the known LastVaultRotation
lvr := time.Now()
input.Role.StaticAccount.LastVaultRotation = lvr
+ input.Role.StaticAccount.SetNextVaultRotation(lvr)
output.RotationTime = lvr

entry, err := logical.StorageEntryJSON(databaseStaticRolePath+input.RoleName, input.Role)
@@ -517,14 +576,12 @@ func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storag
// not wait for success or failure of its tasks before continuing. This is to
// avoid blocking the mount process while loading and evaluating existing roles,
// etc.
-func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendConfig, replicationState consts.ReplicationState) {
+func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendConfig) {
// Verify this mount is on the primary server, or is a local mount. If not, do
// not create a queue or launch a ticker. Both processing the WAL list and
// populating the queue are done sequentially and before launching a
// go-routine to run the periodic ticker.
- if (conf.System.LocalMount() || !replicationState.HasState(consts.ReplicationPerformanceSecondary)) &&
- !replicationState.HasState(consts.ReplicationDRSecondary) &&
- !replicationState.HasState(consts.ReplicationPerformanceStandby) {
+ if b.WriteSafeReplicationState() {
b.Logger().Info("initializing database rotation queue")

// Poll for a PutWAL call that does not return a "read-only storage" error.
@@ -561,10 +618,10 @@ func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendCo
queueTickerInterval := defaultQueueTickSeconds * time.Second
if strVal, ok := conf.Config[queueTickIntervalKey]; ok {
newVal, err := strconv.Atoi(strVal)
- if err == nil {
+ if err == nil && newVal > 0 {
queueTickerInterval = time.Duration(newVal) * time.Second
} else {
- b.Logger().Error("bad value for %q option: %q", queueTickIntervalKey, strVal)
+ b.Logger().Error("bad value for %q option: %q, default value of %d being used instead", queueTickIntervalKey, strVal, defaultQueueTickSeconds)
}
}
go b.runTicker(ctx, queueTickerInterval, conf.StorageView)
diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go
index e0cb96dd67cf..99fc3ddf004b 100644
--- a/builtin/logical/database/rotation_test.go
+++ b/builtin/logical/database/rotation_test.go
@@ -1,5 +1,5 @@
// Copyright (c) HashiCorp, Inc.
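
The initQueue guard above now honors the tick-interval override only when it parses as a positive integer, which is exactly what TestQueueTickIntervalKeyConfig later exercises with "1", "0", and "-1". A condensed sketch of the guard; the literal config key string and the five-second default are assumptions for illustration, since the diff only shows the queueTickIntervalKey and defaultQueueTickSeconds identifiers:

```
package main

import (
	"fmt"
	"strconv"
	"time"
)

const defaultQueueTickSeconds = 5 // assumed default, for illustration

// tickInterval mirrors the guard in initQueue: only a positive integer
// override replaces the default; "0", "-1", or junk fall back safely.
func tickInterval(conf map[string]string) time.Duration {
	interval := defaultQueueTickSeconds * time.Second
	if raw, ok := conf["rotation_queue_tick_interval"]; ok { // key name assumed
		if n, err := strconv.Atoi(raw); err == nil && n > 0 {
			interval = time.Duration(n) * time.Second
		}
	}
	return interval
}

func main() {
	fmt.Println(tickInterval(map[string]string{"rotation_queue_tick_interval": "1"}))  // 1s
	fmt.Println(tickInterval(map[string]string{"rotation_queue_tick_interval": "0"}))  // 5s, default kept
	fmt.Println(tickInterval(map[string]string{"rotation_queue_tick_interval": "-1"})) // 5s, default kept
}
```
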
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -15,30 +15,36 @@ import ( "time" "github.com/Sectorbob/mlab-ns2/gae/ns/digest" + "github.com/hashicorp/vault/builtin/logical/database/schedule" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/testhelpers/mongodb" postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/dbtxn" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/queue" _ "github.com/jackc/pgx/v4/stdlib" + "github.com/robfig/cron/v3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" mongodbatlasapi "go.mongodb.org/atlas/mongodbatlas" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" ) const ( - dbUser = "vaultstatictest" - dbUserDefaultPassword = "password" + mockv5 = "mockv5" + dbUser = "vaultstatictest" + dbUserDefaultPassword = "password" + testMinRotationWindowSeconds = 5 + testScheduleParseOptions = cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow ) -func TestBackend_StaticRole_Rotate_basic(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_basic(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -55,7 +61,9 @@ func TestBackend_StaticRole_Rotate_basic(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + b.schedule = &TestSchedule{} + + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -83,110 +91,357 @@ func TestBackend_StaticRole_Rotate_basic(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - data = map[string]interface{}{ + testCases := map[string]struct { + account map[string]interface{} + path string + expected map[string]interface{} + waitTime time.Duration + }{ + "basic with rotation_period": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + }, + path: "plugin-role-test-1", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_period": float64(5400), + }, + }, + "rotation_schedule is set and expires": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "*/10 * * * * *", + }, + path: "plugin-role-test-2", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "*/10 * * * * *", + }, + waitTime: 20 * time.Second, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + } + + for k, v := range tc.account { + data[k] = v + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + tc.path, + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + 
tc.path, + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("empty username (%s) or password (%s)", username, password) + } + + // Verify username/password + verifyPgConn(t, dbUser, password, connURL) + + // Re-read the creds, verifying they aren't changing on read + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if username != resp.Data["username"].(string) || password != resp.Data["password"].(string) { + t.Fatal("expected re-read username/password to match, but didn't") + } + + // Trigger rotation + data = map[string]interface{}{"name": "plugin-role-test"} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if resp != nil { + t.Fatalf("Expected empty response from rotate-role: (%#v)", resp) + } + + // Re-Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newPassword := resp.Data["password"].(string) + if password == newPassword { + t.Fatalf("expected passwords to differ, got (%s)", newPassword) + } + + // Verify new username/password + verifyPgConn(t, username, newPassword, connURL) + + if tc.waitTime > 0 { + time.Sleep(tc.waitTime) + // Re-Read the creds after schedule expiration + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + checkPassword := resp.Data["password"].(string) + if newPassword == checkPassword { + t.Fatalf("expected passwords to differ, got (%s)", checkPassword) + } + } + }) + } +} + +// TestBackend_StaticRole_Rotation_Schedule_ErrorRecover tests that failed +// rotations can successfully recover and that they do not occur outside of a +// rotation window. 
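
The schedules these tests use, such as "*/10 * * * * *", carry a leading seconds field so rotations can fire every few seconds; a stock five-field cron parser rejects them, which is why this file defines the TestSchedule helper further down. A sketch of the difference using robfig/cron/v3:

```
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// Test parser: six fields, with the leading seconds column enabled.
	testParser := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
	sched, err := testParser.Parse("*/10 * * * * *") // every 10 seconds
	if err != nil {
		panic(err)
	}
	fmt.Println(sched.Next(time.Now()))

	// Default parser: five fields, so the same spec is rejected.
	prodParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
	_, err = prodParser.Parse("*/10 * * * * *")
	fmt.Println(err != nil) // true: six-field spec fails to parse
}
```
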
+func TestBackend_StaticRole_Rotation_Schedule_ErrorRecover(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) + t.Cleanup(cluster.Cleanup) + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + eventSender := logical.NewMockEventSender() + config.EventsSender = eventSender + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + b.schedule = &TestSchedule{} + + cleanup, connURL := postgreshelper.PrepareTestContainer(t) + t.Cleanup(cleanup) + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL) + + // Configure a connection + connectionData := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + configureConnection(t, b, config.StorageView, connectionData) + + // create the role that will rotate every 10th second + // rotations will not be allowed after 5s + data := map[string]interface{}{ "name": "plugin-role-test", "db_name": "plugin-test", "rotation_statements": testRoleStaticUpdate, + "rotation_schedule": "*/10 * * * * *", + "rotation_window": "5s", "username": dbUser, - "rotation_period": "5400s", } - - req = &logical.Request{ + req := &logical.Request{ Operation: logical.CreateOperation, Path: "static-roles/plugin-role-test", Storage: config.StorageView, Data: data, } - - resp, err = b.HandleRequest(namespace.RootContext(nil), req) + resp, err := b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Read the creds - data = map[string]interface{}{} req = &logical.Request{ Operation: logical.ReadOperation, Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } username := resp.Data["username"].(string) - password := resp.Data["password"].(string) - if username == "" || password == "" { - t.Fatalf("empty username (%s) or password (%s)", username, password) + originalPassword := resp.Data["password"].(string) + if username == "" || originalPassword == "" { + t.Fatalf("empty username (%s) or password (%s)", username, originalPassword) } // Verify username/password - verifyPgConn(t, dbUser, password, connURL) + verifyPgConn(t, dbUser, originalPassword, connURL) - // Re-read the creds, verifying they aren't changing on read - data = map[string]interface{}{} + // Set invalid connection URL so we fail to rotate + connectionData["connection_url"] = strings.Replace(connURL, "postgres:secret", "postgres:foo", 1) + configureConnection(t, b, config.StorageView, connectionData) + + // determine next rotation schedules based on current test time + rotationSchedule := data["rotation_schedule"].(string) + schedule, err := b.schedule.Parse(rotationSchedule) + if err != nil { + t.Fatalf("could not parse rotation_schedule: %s", err) + } + next := schedule.Next(time.Now()) // the next rotation time we expect + time.Sleep(next.Sub(time.Now())) + + // Re-Read the creds after schedule expiration req = &logical.Request{ Operation: 
logical.ReadOperation, Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if username != resp.Data["username"].(string) || password != resp.Data["password"].(string) { - t.Fatal("expected re-read username/password to match, but didn't") + checkPassword := resp.Data["password"].(string) + if originalPassword != checkPassword { + // should match because rotations should be failing + t.Fatalf("expected passwords to match, got (%s)", checkPassword) } - // Trigger rotation - data = map[string]interface{}{"name": "plugin-role-test"} + // wait until we are outside the rotation window so that rotations will not occur + next = schedule.Next(time.Now()) // the next rotation time after now + time.Sleep(next.Add(time.Second * 6).Sub(time.Now())) + + // reset to valid connection URL so we do not fail to rotate anymore + connectionData["connection_url"] = connURL + configureConnection(t, b, config.StorageView, connectionData) + + // we are outside a rotation window, Re-Read the creds req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "rotate-role/plugin-role-test", + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if resp != nil { - t.Fatalf("Expected empty response from rotate-role: (%#v)", resp) + checkPassword = resp.Data["password"].(string) + if originalPassword != checkPassword { + // should match because rotations should not occur outside the rotation window + t.Fatalf("expected passwords to match, got (%s)", checkPassword) } + // Verify new username/password + verifyPgConn(t, username, checkPassword, connURL) + + // sleep until the next rotation time with a buffer to ensure we had time to rotate + next = schedule.Next(time.Now()) // the next rotation time we expect + time.Sleep(next.Add(time.Second * 5).Sub(time.Now())) // Re-Read the creds - data = map[string]interface{}{} req = &logical.Request{ Operation: logical.ReadOperation, Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - newPassword := resp.Data["password"].(string) - if password == newPassword { - t.Fatalf("expected passwords to differ, got (%s)", newPassword) + checkPassword = resp.Data["password"].(string) + if originalPassword == checkPassword { + // should differ because we slept until the next rotation time + t.Fatalf("expected passwords to differ, got (%s)", checkPassword) } // Verify new username/password - verifyPgConn(t, username, newPassword, connURL) + verifyPgConn(t, username, checkPassword, connURL) + + eventSender.Stop() // avoid race detector + // check that we got a successful rotation event + if len(eventSender.Events) == 0 { + t.Fatal("Expected to have some events but got none") + } + // check that we got a rotate-fail event + found := false + for _, event := range eventSender.Events { + if string(event.Type) == "database/rotate-fail" { + found = true + break + } + } + assert.True(t, found) + found = false + for _, event := range eventSender.Events { + if string(event.Type) == "database/rotate" { + 
found = true + break + } + } + assert.True(t, found) } // Sanity check to make sure we don't allow an attempt of rotating credentials // for non-static accounts, which doesn't make sense anyway, but doesn't hurt to // verify we return an error -func TestBackend_StaticRole_Rotate_NonStaticError(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_NonStaticError(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -203,7 +458,7 @@ func TestBackend_StaticRole_Rotate_NonStaticError(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -289,8 +544,8 @@ func TestBackend_StaticRole_Rotate_NonStaticError(t *testing.T) { } } -func TestBackend_StaticRole_Revoke_user(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_Revoke_user(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -307,7 +562,7 @@ func TestBackend_StaticRole_Revoke_user(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -467,8 +722,8 @@ func verifyPgConn(t *testing.T, username, password, connURL string) { // WAL testing // // First scenario, WAL contains a role name that does not exist. -func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_not_found(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() ctx := context.Background() @@ -508,8 +763,8 @@ func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) { // Second scenario, WAL contains a role name that does exist, but the role's // LastVaultRotation is greater than the WAL has -func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() ctx := context.Background() @@ -528,7 +783,7 @@ func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T) t.Fatal("could not convert to db backend") } - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -696,8 +951,8 @@ func assertWALCount(t *testing.T, s logical.Storage, expected int, key string) { type userCreator func(t *testing.T, username, password string) -func TestBackend_StaticRole_Rotations_PostgreSQL(t *testing.T) { - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") +func TestBackend_StaticRole_Rotation_PostgreSQL(t *testing.T) { + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() uc := userCreator(func(t *testing.T, username, password string) { createTestPGUser(t, connURL, username, password, testRoleStaticCreate) @@ -708,7 +963,7 @@ func TestBackend_StaticRole_Rotations_PostgreSQL(t *testing.T) { }) } -func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) { +func TestBackend_StaticRole_Rotation_MongoDB(t *testing.T) { cleanup, 
connURL := mongodb.PrepareTestContainerWithDatabase(t, "5.0.10", "vaulttestdb") defer cleanup() @@ -721,7 +976,7 @@ func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) { }) } -func TestBackend_StaticRole_Rotations_MongoDBAtlas(t *testing.T) { +func TestBackend_StaticRole_Rotation_MongoDBAtlas(t *testing.T) { // To get the project ID, connect to cloud.mongodb.com, go to the vault-test project and // look at Project Settings. projID := os.Getenv("VAULT_MONGODBATLAS_PROJECT_ID") @@ -770,6 +1025,34 @@ func TestBackend_StaticRole_Rotations_MongoDBAtlas(t *testing.T) { }) } +// TestQueueTickIntervalKeyConfig tests the configuration of queueTickIntervalKey +// does not break on invalid values. +func TestQueueTickIntervalKeyConfig(t *testing.T) { + t.Parallel() + cluster, sys := getClusterPostgresDB(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + config.Config[queueTickIntervalKey] = "1" + + // Rotation ticker starts running in Factory call + b, err := Factory(context.Background(), config) + require.Nil(t, err) + b.Cleanup(context.Background()) + + config.Config[queueTickIntervalKey] = "0" + b, err = Factory(context.Background(), config) + require.Nil(t, err) + b.Cleanup(context.Background()) + + config.Config[queueTickIntervalKey] = "-1" + b, err = Factory(context.Background(), config) + require.Nil(t, err) + b.Cleanup(context.Background()) +} + func testBackend_StaticRole_Rotations(t *testing.T, createUser userCreator, opts map[string]interface{}) { // We need to set this value for the plugin to run, but it doesn't matter what we set it to. oldToken := os.Getenv(pluginutil.PluginUnwrapTokenEnv) @@ -782,7 +1065,7 @@ func testBackend_StaticRole_Rotations(t *testing.T, createUser userCreator, opts } }() - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -945,8 +1228,8 @@ type createUserCommand struct { } // Demonstrates a bug fix for the credential rotation not releasing locks -func TestBackend_StaticRole_LockRegression(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_LockRegression(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -963,7 +1246,7 @@ func TestBackend_StaticRole_LockRegression(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -1024,8 +1307,8 @@ func TestBackend_StaticRole_LockRegression(t *testing.T) { } } -func TestBackend_StaticRole_Rotate_Invalid_Role(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_Invalid_Role(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1042,7 +1325,7 @@ func TestBackend_StaticRole_Rotate_Invalid_Role(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -1161,10 +1444,18 @@ func TestRollsPasswordForwardsUsingWAL(t *testing.T) { func TestStoredWALsCorrectlyProcessed(t *testing.T) { const walNewPassword = "new-password-from-wal" + + rotationPeriodData := map[string]interface{}{ + "username": "hashicorp", + 
"db_name": mockv5, + "rotation_period": "86400s", + } + for _, tc := range []struct { name string shouldRotate bool wal *setCredentialsWAL + data map[string]interface{} }{ { "WAL is kept and used for roll forward", @@ -1175,6 +1466,7 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { NewPassword: walNewPassword, LastVaultRotation: time.Now().Add(time.Hour), }, + rotationPeriodData, }, { "zero-time WAL is discarded on load", @@ -1185,9 +1477,10 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { NewPassword: walNewPassword, LastVaultRotation: time.Time{}, }, + rotationPeriodData, }, { - "empty-password WAL is kept but a new password is generated", + "rotation_period empty-password WAL is kept but a new password is generated", true, &setCredentialsWAL{ RoleName: "hashicorp", @@ -1195,6 +1488,22 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { NewPassword: "", LastVaultRotation: time.Now().Add(time.Hour), }, + rotationPeriodData, + }, + { + "rotation_schedule empty-password WAL is kept but a new password is generated", + true, + &setCredentialsWAL{ + RoleName: "hashicorp", + Username: "hashicorp", + NewPassword: "", + LastVaultRotation: time.Now().Add(time.Hour), + }, + map[string]interface{}{ + "username": "hashicorp", + "db_name": mockv5, + "rotation_schedule": "*/10 * * * * *", + }, }, } { t.Run(tc.name, func(t *testing.T) { @@ -1209,8 +1518,9 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { t.Fatal(err) } b.credRotationQueue = queue.New() + b.schedule = &TestSchedule{} configureDBMount(t, config.StorageView) - createRole(t, b, config.StorageView, mockDB, "hashicorp") + createRoleWithData(t, b, config.StorageView, mockDB, tc.wal.RoleName, tc.data) role, err := b.StaticRole(ctx, config.StorageView, "hashicorp") if err != nil { t.Fatal(err) @@ -1224,7 +1534,7 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { b.credRotationQueue = queue.New() // Now finish the startup process by populating the queue, which should discard the WAL - b.initQueue(ctx, config, consts.ReplicationUnknown) + b.initQueue(ctx, config) if tc.shouldRotate { requireWALs(t, storage, 1) @@ -1248,6 +1558,7 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { t.Fatal(err) } + nextRotationTime := role.StaticAccount.NextRotationTime() if tc.shouldRotate { if tc.wal.NewPassword != "" { // Should use WAL's new_password field @@ -1263,11 +1574,11 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { t.Fatal() } } + // Ensure the role was not promoted for early rotation + assertPriorityUnchanged(t, item.Priority, nextRotationTime) } else { // Ensure the role was not promoted for early rotation - if item.Priority < time.Now().Add(time.Hour).Unix() { - t.Fatal("priority should be for about a week away, but was", item.Priority) - } + assertPriorityUnchanged(t, item.Priority, nextRotationTime) if role.StaticAccount.Password != initialPassword { t.Fatal("password should not have been rotated yet") } @@ -1368,6 +1679,7 @@ func getBackend(t *testing.T) (*databaseBackend, logical.Storage, *mockNewDataba if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } + b.schedule = &TestSchedule{} b.credRotationQueue = queue.New() b.populateQueue(context.Background(), config.StorageView) @@ -1388,9 +1700,9 @@ func setupMockDB(b *databaseBackend) *mockNewDatabase { dbi := &dbPluginInstance{ database: dbw, id: "foo-id", - name: "mockV5", + name: mockv5, } - b.connections["mockv5"] = dbi + b.connections.Put(mockv5, dbi) return mockDB } @@ -1399,7 +1711,7 @@ func setupMockDB(b 
*databaseBackend) *mockNewDatabase { // plugin init code paths, allowing us to use a manually populated mock DB object. func configureDBMount(t *testing.T, storage logical.Storage) { t.Helper() - entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/mockv5"), &DatabaseConfig{ + entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/"+mockv5), &DatabaseConfig{ AllowedRoles: []string{"*"}, }) if err != nil { @@ -1444,7 +1756,53 @@ func capturePasswords(t *testing.T, b logical.Backend, config *logical.BackendCo return pws } +func configureConnection(t *testing.T, b *databaseBackend, s logical.Storage, data map[string]interface{}) { + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/" + data["name"].(string), + Storage: s, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } +} + func newBoolPtr(b bool) *bool { v := b return &v } + +// assertPriorityUnchanged is a helper to verify that the priority is the +// expected value for a given rotation time +func assertPriorityUnchanged(t *testing.T, priority int64, nextRotationTime time.Time) { + t.Helper() + if priority != nextRotationTime.Unix() { + t.Fatalf("expected next rotation at %s, but got %s", nextRotationTime, time.Unix(priority, 0).String()) + } +} + +var _ schedule.Scheduler = &TestSchedule{} + +type TestSchedule struct{} + +func (d *TestSchedule) Parse(rotationSchedule string) (*cron.SpecSchedule, error) { + parser := cron.NewParser(testScheduleParseOptions) + schedule, err := parser.Parse(rotationSchedule) + if err != nil { + return nil, err + } + sched, ok := schedule.(*cron.SpecSchedule) + if !ok { + return nil, fmt.Errorf("invalid rotation schedule") + } + return sched, nil +} + +func (d *TestSchedule) ValidateRotationWindow(s int) error { + if s < testMinRotationWindowSeconds { + return fmt.Errorf("rotation_window must be %d seconds or more", testMinRotationWindowSeconds) + } + return nil +} diff --git a/builtin/logical/database/schedule/schedule.go b/builtin/logical/database/schedule/schedule.go new file mode 100644 index 000000000000..8f30717ec131 --- /dev/null +++ b/builtin/logical/database/schedule/schedule.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
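
The new schedule package that follows hides this parsing behind a small Scheduler interface. A hypothetical caller, showing the five-field production format and the 3600-second minimum rotation window the package enforces:

```
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/builtin/logical/database/schedule"
)

func main() {
	var s schedule.Scheduler = &schedule.DefaultSchedule{}

	// Production specs are five-field (no seconds column): every two hours here.
	sched, err := s.Parse("0 */2 * * *")
	if err != nil {
		panic(err)
	}
	fmt.Println("next rotation:", sched.Next(time.Now()))

	// rotation_window must be at least 3600 seconds.
	fmt.Println(s.ValidateRotationWindow(1800)) // error: below the minimum
	fmt.Println(s.ValidateRotationWindow(7200)) // nil
}
```
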
+// SPDX-License-Identifier: BUSL-1.1 + +package schedule + +import ( + "fmt" + + "github.com/robfig/cron/v3" +) + +const ( + // Minimum allowed value for rotation_window + minRotationWindowSeconds = 3600 + parseOptions = cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow +) + +type Scheduler interface { + Parse(string) (*cron.SpecSchedule, error) + ValidateRotationWindow(int) error +} + +var _ Scheduler = &DefaultSchedule{} + +type DefaultSchedule struct{} + +func (d *DefaultSchedule) Parse(rotationSchedule string) (*cron.SpecSchedule, error) { + parser := cron.NewParser(parseOptions) + schedule, err := parser.Parse(rotationSchedule) + if err != nil { + return nil, err + } + sched, ok := schedule.(*cron.SpecSchedule) + if !ok { + return nil, fmt.Errorf("invalid rotation schedule") + } + return sched, nil +} + +func (d *DefaultSchedule) ValidateRotationWindow(s int) error { + if s < minRotationWindowSeconds { + return fmt.Errorf("rotation_window must be %d seconds or more", minRotationWindowSeconds) + } + return nil +} diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go index fefa452a5d35..b485f7ca46bb 100644 --- a/builtin/logical/database/secret_creds.go +++ b/builtin/logical/database/secret_creds.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -34,6 +34,9 @@ func (b *databaseBackend) secretCredsRenew() framework.OperationFunc { return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("username not a string") + } roleNameRaw, ok := req.Secret.InternalData["role"] if !ok { @@ -98,6 +101,9 @@ func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc { return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("username not a string") + } var resp *logical.Response diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go index daab17964d3e..f5280307c953 100644 --- a/builtin/logical/database/version_wrapper.go +++ b/builtin/logical/database/version_wrapper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go index 95a5f7b6fa95..47840385966e 100644 --- a/builtin/logical/database/version_wrapper_test.go +++ b/builtin/logical/database/version_wrapper_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database diff --git a/builtin/logical/database/versioning_large_test.go b/builtin/logical/database/versioning_large_test.go index b39ddb7e1ce4..be936c760336 100644 --- a/builtin/logical/database/versioning_large_test.go +++ b/builtin/logical/database/versioning_large_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package database @@ -25,9 +25,10 @@ func TestPlugin_lifecycle(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", []string{}, "") - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", []string{}, "") + env := []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)} + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", env) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", env) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", env) config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} @@ -226,7 +227,7 @@ func TestPlugin_VersionSelection(t *testing.T) { defer cluster.Cleanup() for _, version := range []string{"v11.0.0", "v11.0.1-rc1", "v2.0.0"} { - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, version, "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, version, "TestBackend_PluginMain_MockV5", []string{}) } config := logical.TestBackendConfig() @@ -312,11 +313,11 @@ func TestPlugin_VersionSelection(t *testing.T) { } // Register a newer version of the plugin, and ensure that's the new default version selected. - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "v11.0.1", "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "v11.0.1", "TestBackend_PluginMain_MockV5", []string{}) t.Run("no version specified, new latest version selected", test(t, "", "v11.0.1")) // Register an unversioned plugin and ensure that is now selected when no version is specified. - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}) for name, tc := range map[string]struct { selectVersion string expectedVersion string @@ -397,7 +398,7 @@ func TestPlugin_VersionMustBeExplicitlyUpgraded(t *testing.T) { } // Register versioned plugin, and check that a new write to existing config doesn't upgrade the plugin implicitly. 
- vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mysql-database-plugin", consts.PluginTypeDatabase, "v1.0.0", "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mysql-database-plugin", consts.PluginTypeDatabase, "v1.0.0", "TestBackend_PluginMain_MockV5", []string{}) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Path: "config/db", diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index ec89dd449cc8..4c7e149185e8 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad @@ -11,6 +11,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixNomad = "nomad" + // Factory returns a Nomad backend that satisfies the logical.Backend interface func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 535eeb2a05eb..e4e3fcded07a 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad @@ -8,13 +8,14 @@ import ( "fmt" "os" "reflect" + "runtime" "strings" "testing" "time" nomadapi "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -38,6 +39,11 @@ func (c *Config) Client() (*nomadapi.Client, error) { } func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if retAddress := os.Getenv("NOMAD_ADDR"); retAddress != "" { s, err := docker.NewServiceURLParse(retAddress) if err != nil { @@ -47,7 +53,7 @@ func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "multani/nomad", + ImageRepo: "docker.mirror.hashicorp.services/multani/nomad", ImageTag: "1.1.6", ContainerName: "nomad", Ports: []string{"4646/tcp"}, diff --git a/builtin/logical/nomad/cmd/nomad/main.go b/builtin/logical/nomad/cmd/nomad/main.go index 10f45aabb483..493e1be2d5da 100644 --- a/builtin/logical/nomad/cmd/nomad/main.go +++ b/builtin/logical/nomad/cmd/nomad/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index 7fb32f50039c..cde6f97edb8a 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad @@ -16,6 +16,11 @@ const configAccessKey = "config/access" func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + }, + Fields: map[string]*framework.FieldSchema{ "address": { Type: framework.TypeString, @@ -48,11 +53,35 @@ must be x509 PEM encoded and if this is set you need to also set client_cert.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigAccessRead, - logical.CreateOperation: b.pathConfigAccessWrite, - logical.UpdateOperation: b.pathConfigAccessWrite, - logical.DeleteOperation: b.pathConfigAccessDelete, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "access-configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "access-configuration", + }, + }, }, ExistenceCheck: b.configExistenceCheck, diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index 25df513e82c0..2569a07ade01 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad @@ -16,6 +16,11 @@ const leaseConfigKey = "config/lease" func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + }, + Fields: map[string]*framework.FieldSchema{ "ttl": { Type: framework.TypeDurationSecond, @@ -27,10 +32,28 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseUpdate, - logical.DeleteOperation: b.pathLeaseDelete, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathLeaseDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "lease-configuration", + }, + }, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index 150e5d617d06..9c25bed07dd7 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad @@ -20,6 +20,13 @@ const maxTokenNameLength = 256 func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 0fd527547e21..4732dec5b8a4 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad @@ -16,6 +16,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -25,6 +30,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 3c6b920681d3..2eaf19be2795 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package nomad diff --git a/builtin/logical/pki/acme_authorizations.go b/builtin/logical/pki/acme_authorizations.go new file mode 100644 index 000000000000..64548ffed99e --- /dev/null +++ b/builtin/logical/pki/acme_authorizations.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "time" +) + +type ACMEIdentifierType string + +const ( + ACMEDNSIdentifier ACMEIdentifierType = "dns" + ACMEIPIdentifier ACMEIdentifierType = "ip" +) + +type ACMEIdentifier struct { + Type ACMEIdentifierType `json:"type"` + Value string `json:"value"` + OriginalValue string `json:"original_value"` + IsWildcard bool `json:"is_wildcard"` +} + +func (ai *ACMEIdentifier) MaybeParseWildcard() (bool, string, error) { + if ai.Type != ACMEDNSIdentifier || !isWildcardDomain(ai.Value) { + return false, ai.Value, nil + } + + // Here on out, technically it is a wildcard. + ai.IsWildcard = true + + wildcardLabel, reducedName, err := validateWildcardDomain(ai.Value) + if err != nil { + return true, "", err + } + + if wildcardLabel != "*" { + // Per RFC 8555 Section. 7.1.3. Order Objects: + // + // > Any identifier of type "dns" in a newOrder request MAY have a + // > wildcard domain name as its value. A wildcard domain name consists + // > of a single asterisk character followed by a single full stop + // > character ("*.") followed by a domain name as defined for use in the + // > Subject Alternate Name Extension by [RFC5280]. + return true, "", fmt.Errorf("wildcard must be entire left-most label") + } + + if reducedName == "" { + return true, "", fmt.Errorf("wildcard must not be entire domain name; need at least two domain labels") + } + + // Parsing was indeed successful, so update our reduced name. 
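+	// Illustrative outcomes of the checks above: "*.example.com" yields
+	// wildcardLabel "*" and reducedName "example.com", so IsWildcard is set
+	// and Value is reduced below; "foo*.example.com" fails the whole-label
+	// check, and a bare "*." is rejected because no domain labels remain.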
+ ai.Value = reducedName + + return true, reducedName, nil +} + +func (ai *ACMEIdentifier) NetworkMarshal(useOriginalValue bool) map[string]interface{} { + value := ai.OriginalValue + if !useOriginalValue { + value = ai.Value + } + return map[string]interface{}{ + "type": ai.Type, + "value": value, + } +} + +type ACMEAuthorizationStatusType string + +const ( + ACMEAuthorizationPending ACMEAuthorizationStatusType = "pending" + ACMEAuthorizationValid ACMEAuthorizationStatusType = "valid" + ACMEAuthorizationInvalid ACMEAuthorizationStatusType = "invalid" + ACMEAuthorizationDeactivated ACMEAuthorizationStatusType = "deactivated" + ACMEAuthorizationExpired ACMEAuthorizationStatusType = "expired" + ACMEAuthorizationRevoked ACMEAuthorizationStatusType = "revoked" +) + +type ACMEOrderStatusType string + +const ( + ACMEOrderPending ACMEOrderStatusType = "pending" + ACMEOrderProcessing ACMEOrderStatusType = "processing" + ACMEOrderValid ACMEOrderStatusType = "valid" + ACMEOrderInvalid ACMEOrderStatusType = "invalid" + ACMEOrderReady ACMEOrderStatusType = "ready" +) + +type ACMEChallengeType string + +const ( + ACMEHTTPChallenge ACMEChallengeType = "http-01" + ACMEDNSChallenge ACMEChallengeType = "dns-01" + ACMEALPNChallenge ACMEChallengeType = "tls-alpn-01" +) + +type ACMEChallengeStatusType string + +const ( + ACMEChallengePending ACMEChallengeStatusType = "pending" + ACMEChallengeProcessing ACMEChallengeStatusType = "processing" + ACMEChallengeValid ACMEChallengeStatusType = "valid" + ACMEChallengeInvalid ACMEChallengeStatusType = "invalid" +) + +type ACMEChallenge struct { + Type ACMEChallengeType `json:"type"` + Status ACMEChallengeStatusType `json:"status"` + Validated string `json:"validated,optional"` + Error map[string]interface{} `json:"error,optional"` + ChallengeFields map[string]interface{} `json:"challenge_fields"` +} + +func (ac *ACMEChallenge) NetworkMarshal(acmeCtx *acmeContext, authId string) map[string]interface{} { + resp := map[string]interface{}{ + "type": ac.Type, + "url": buildChallengeUrl(acmeCtx, authId, string(ac.Type)), + "status": ac.Status, + } + + if ac.Validated != "" { + resp["validated"] = ac.Validated + } + + if len(ac.Error) > 0 { + resp["error"] = ac.Error + } + + for field, value := range ac.ChallengeFields { + resp[field] = value + } + + return resp +} + +func buildChallengeUrl(acmeCtx *acmeContext, authId, challengeType string) string { + return acmeCtx.baseUrl.JoinPath("/challenge/", authId, challengeType).String() +} + +type ACMEAuthorization struct { + Id string `json:"id"` + AccountId string `json:"account_id"` + + Identifier *ACMEIdentifier `json:"identifier"` + Status ACMEAuthorizationStatusType `json:"status"` + + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > This field is REQUIRED for objects with "valid" in the "status" + // > field. 
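+	//
+	// Stored as an RFC 3339 timestamp (illustrative value:
+	// "2016-01-02T14:09:30Z"); GetExpires below parses it via time.RFC3339.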
+ Expires string `json:"expires,optional"` + + Challenges []*ACMEChallenge `json:"challenges"` + Wildcard bool `json:"wildcard"` +} + +func (aa *ACMEAuthorization) GetExpires() (time.Time, error) { + if aa.Expires == "" { + return time.Time{}, nil + } + + return time.Parse(time.RFC3339, aa.Expires) +} + +func (aa *ACMEAuthorization) NetworkMarshal(acmeCtx *acmeContext) map[string]interface{} { + resp := map[string]interface{}{ + "identifier": aa.Identifier.NetworkMarshal( /* use value, not original value */ false), + "status": aa.Status, + "wildcard": aa.Wildcard, + } + + if aa.Expires != "" { + resp["expires"] = aa.Expires + } + + if len(aa.Challenges) > 0 { + challenges := []map[string]interface{}{} + for _, challenge := range aa.Challenges { + challenges = append(challenges, challenge.NetworkMarshal(acmeCtx, aa.Id)) + } + resp["challenges"] = challenges + } + + return resp +} diff --git a/builtin/logical/pki/acme_billing.go b/builtin/logical/pki/acme_billing.go new file mode 100644 index 000000000000..6c66ad447cf0 --- /dev/null +++ b/builtin/logical/pki/acme_billing.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) doTrackBilling(ctx context.Context, identifiers []*ACMEIdentifier) error { + billingView, ok := b.System().(logical.ACMEBillingSystemView) + if !ok { + return fmt.Errorf("failed to perform cast to ACME billing system view interface") + } + + var realized []string + for _, identifier := range identifiers { + realized = append(realized, fmt.Sprintf("%s/%s", identifier.Type, identifier.OriginalValue)) + } + + return billingView.CreateActivityCountEventForIdentifiers(ctx, realized) +} diff --git a/builtin/logical/pki/acme_billing_test.go b/builtin/logical/pki/acme_billing_test.go new file mode 100644 index 000000000000..b1948d7be29c --- /dev/null +++ b/builtin/logical/pki/acme_billing_test.go @@ -0,0 +1,320 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/timeutil" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/activity" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/acme" +) + +// TestACMEBilling is a basic test that will validate client counts created via ACME workflows. +func TestACMEBilling(t *testing.T) { + t.Parallel() + timeutil.SkipAtEndOfMonth(t) + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + + // Enable additional mounts. + setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki2") + setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns1/pki") + setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns2/pki") + + // Enable custom DNS resolver for testing. + for _, mount := range []string{"pki", "pki2", "ns1/pki", "ns2/pki"} { + _, err := client.Logical().Write(mount+"/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to set local dns resolver address for testing on mount: "+mount) + } + + // Enable client counting. 
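+	// (For reference, this is the same setting as
+	// `vault write sys/internal/counters/config enabled=enable`.)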
+	_, err := client.Logical().Write("/sys/internal/counters/config", map[string]interface{}{
+		"enabled": "enable",
+	})
+	require.NoError(t, err, "failed to enable client counting")
+
+	// Set up ACME clients. We refresh account keys each time for consistency.
+	acmeClientPKI := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", nil)
+	acmeClientPKI2 := getAcmeClientForCluster(t, cluster, "/v1/pki2/acme/", nil)
+	acmeClientPKINS1 := getAcmeClientForCluster(t, cluster, "/v1/ns1/pki/acme/", nil)
+	acmeClientPKINS2 := getAcmeClientForCluster(t, cluster, "/v1/ns2/pki/acme/", nil)
+
+	// Get our initial count.
+	expectedCount := validateClientCount(t, client, "", -1, "initial fetch")
+
+	// Unique identifier: should increase by one.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate")
+
+	// Different identifier; should increase by one.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate")
+
+	// The same identifiers used together form a new, unique set; increase by one.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com", "dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate")
+
+	// Same identifiers in different order are not unique; keep the same.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com", "example.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount, "different order; same identifiers")
+
+	// Using a different mount shouldn't affect counts.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "", expectedCount, "different mount; same identifiers")
+
+	// But using a different identifier should.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"pki2.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki2", expectedCount+1, "different mount with different identifiers")
+
+	// A new identifier in a unique namespace will affect results.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKINS1, []string{"unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "ns1/pki", expectedCount+1, "unique identifier in a namespace")
+
+	// But reusing the existing identifier in a different namespace will not.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier in a namespace")
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier outside of a namespace")
+
+	// Creating a previously unseen identifier in a namespace whose mount
+	// shares its name with another namespace's mount should increase counts
+	// as well.
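+	// (This last case, like the earlier ones, illustrates the counting rule:
+	// one client per unique, order-insensitive set of identifiers,
+	// deduplicated across mounts and namespaces.)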
+	doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"very-unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "ns2/pki", expectedCount+1, "unique identifier in a different namespace")
+
+	// Check the current fragment.
+	fragment := cluster.Cores[0].Core.ResetActivityLog()[0]
+	if fragment == nil {
+		t.Fatal("no fragment created")
+	}
+	validateAcmeClientTypes(t, fragment, expectedCount)
+}
+
+func validateAcmeClientTypes(t *testing.T, fragment *activity.LogFragment, expectedCount int64) {
+	t.Helper()
+	if int64(len(fragment.Clients)) != expectedCount {
+		t.Fatalf("bad number of clients: expected %v, got %v (clients: %v)", expectedCount, len(fragment.Clients), fragment.Clients)
+	}
+
+	for _, ac := range fragment.Clients {
+		if ac.ClientType != vault.ACMEActivityType {
+			t.Fatalf("Couldn't find expected '%v' client_type in %v", vault.ACMEActivityType, fragment.Clients)
+		}
+	}
+}
+
+func validateClientCount(t *testing.T, client *api.Client, mount string, expected int64, message string) int64 {
+	resp, err := client.Logical().Read("/sys/internal/counters/activity/monthly")
+	require.NoError(t, err, "failed to fetch client count values")
+	t.Logf("got client count numbers: %v", resp)
+
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Data)
+	require.Contains(t, resp.Data, "acme_clients")
+	require.Contains(t, resp.Data, "months")
+
+	rawCount := resp.Data["acme_clients"].(json.Number)
+	count, err := rawCount.Int64()
+	require.NoError(t, err, "failed to parse number as int64: "+rawCount.String())
+
+	if expected != -1 {
+		require.Equal(t, expected, count, "value of client counts did not match expectations: "+message)
+	}
+
+	if mount == "" {
+		return count
+	}
+
+	months := resp.Data["months"].([]interface{})
+	if len(months) > 1 {
+		t.Fatalf("running across a month boundary despite using SkipAtEndOfMonth(...); rerun test from start fully in the next month instead")
+	}
+
+	require.Equal(t, 1, len(months), "expected only a single month when running this test")
+
+	monthlyInfo := months[0].(map[string]interface{})
+
+	// Validate this month's aggregate counts match the overall value.
+	require.Contains(t, monthlyInfo, "counts", "expected monthly info to contain a counts key")
+	monthlyCounts := monthlyInfo["counts"].(map[string]interface{})
+	require.Contains(t, monthlyCounts, "acme_clients", "expected month[0].counts to contain an acme_clients key")
+	monthlyCountNonEntityRaw := monthlyCounts["acme_clients"].(json.Number)
+	monthlyCountNonEntity, err := monthlyCountNonEntityRaw.Int64()
+	require.NoError(t, err, "failed to parse number as int64: "+monthlyCountNonEntityRaw.String())
+	require.Equal(t, count, monthlyCountNonEntity, "expected equal values for acme client counts")
+
+	// Validate this mount's namespace is included in the namespaces list,
+	// if this is enterprise. Otherwise, if it's OSS or we don't have a
+	// namespace, we default to the value root.
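+	// For example: on enterprise, mount "ns1/pki" splits into namespace
+	// "ns1/" and mount path "pki/"; on OSS, or with no namespace, the
+	// namespace stays "" (root) and the path is simply mount + "/".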
+ mountNamespace := "" + mountPath := mount + "/" + if constants.IsEnterprise && strings.Contains(mount, "/") { + pieces := strings.Split(mount, "/") + require.Equal(t, 2, len(pieces), "we do not support nested namespaces in this test") + mountNamespace = pieces[0] + "/" + mountPath = pieces[1] + "/" + } + + require.Contains(t, monthlyInfo, "namespaces", "expected monthly info to contain a namespaces key") + monthlyNamespaces := monthlyInfo["namespaces"].([]interface{}) + foundNamespace := false + for index, namespaceRaw := range monthlyNamespaces { + namespace := namespaceRaw.(map[string]interface{}) + require.Contains(t, namespace, "namespace_path", "expected monthly.namespaces[%v] to contain a namespace_path key", index) + namespacePath := namespace["namespace_path"].(string) + + if namespacePath != mountNamespace { + t.Logf("skipping non-matching namespace %v: %v != %v / %v", index, namespacePath, mountNamespace, namespace) + continue + } + + foundNamespace = true + + // This namespace must have a non-empty aggregate non-entity count. + require.Contains(t, namespace, "counts", "expected monthly.namespaces[%v] to contain a counts key", index) + namespaceCounts := namespace["counts"].(map[string]interface{}) + require.Contains(t, namespaceCounts, "acme_clients", "expected namespace counts to contain a non_entity_clients key") + namespaceCountNonEntityRaw := namespaceCounts["acme_clients"].(json.Number) + namespaceCountNonEntity, err := namespaceCountNonEntityRaw.Int64() + require.NoError(t, err, "failed to parse number as int64: "+namespaceCountNonEntityRaw.String()) + require.Greater(t, namespaceCountNonEntity, int64(0), "expected at least one non-entity client count value in the namespace") + + require.Contains(t, namespace, "mounts", "expected monthly.namespaces[%v] to contain a mounts key", index) + namespaceMounts := namespace["mounts"].([]interface{}) + foundMount := false + for mountIndex, mountRaw := range namespaceMounts { + mountInfo := mountRaw.(map[string]interface{}) + require.Contains(t, mountInfo, "mount_path", "expected monthly.namespaces[%v].mounts[%v] to contain a mount_path key", index, mountIndex) + mountInfoPath := mountInfo["mount_path"].(string) + if mountPath != mountInfoPath { + t.Logf("skipping non-matching mount path %v in namespace %v: %v != %v / %v of %v", mountIndex, index, mountPath, mountInfoPath, mountInfo, namespace) + continue + } + + foundMount = true + + // This mount must also have a non-empty non-entity client count. 
+ require.Contains(t, mountInfo, "counts", "expected monthly.namespaces[%v].mounts[%v] to contain a counts key", index, mountIndex) + mountCounts := mountInfo["counts"].(map[string]interface{}) + require.Contains(t, mountCounts, "acme_clients", "expected mount counts to contain a non_entity_clients key") + mountCountNonEntityRaw := mountCounts["acme_clients"].(json.Number) + mountCountNonEntity, err := mountCountNonEntityRaw.Int64() + require.NoError(t, err, "failed to parse number as int64: "+mountCountNonEntityRaw.String()) + require.Greater(t, mountCountNonEntity, int64(0), "expected at least one non-entity client count value in the mount") + } + + require.True(t, foundMount, "expected to find the mount "+mountPath+" in the list of mounts for namespace, but did not") + } + + require.True(t, foundNamespace, "expected to find the namespace "+mountNamespace+" in the list of namespaces, but did not") + + return count +} + +func doACMEForDomainWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string) *x509.Certificate { + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0]}, + DNSNames: domains, + } + + return doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) +} + +func doACMEForCSRWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string, cr *x509.CertificateRequest) *x509.Certificate { + accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed to generate account key") + acmeClient.Key = accountKey + + testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancelFunc() + + // Register the client. + _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create the Order + var orderIdentifiers []acme.AuthzID + for _, domain := range domains { + orderIdentifiers = append(orderIdentifiers, acme.AuthzID{Type: "dns", Value: domain}) + } + order, err := acmeClient.AuthorizeOrder(testCtx, orderIdentifiers) + require.NoError(t, err, "failed creating ACME order") + + // Fetch its authorizations. + var auths []*acme.Authorization + for _, authUrl := range order.AuthzURLs { + authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + auths = append(auths, authorization) + } + + // For each dns-01 challenge, place the record in the associated DNS resolver. + var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + t.Logf("ignoring challenge not in status pending: %v", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + dns.AddRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + defer dns.RemoveRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + dns.PushConfig() + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + + // Tell the ACME server, that they can now validate those challenges. 
+	for _, challenge := range challengesToAccept {
+		_, err = acmeClient.Accept(testCtx, challenge)
+		require.NoError(t, err, "failed to accept challenge: %v", challenge)
+	}
+
+	// Wait for the order/challenges to be validated.
+	_, err = acmeClient.WaitOrder(testCtx, order.URI)
+	require.NoError(t, err, "failed waiting for order to be ready")
+
+	// Create the CSR and ask the ACME server to sign it, returning the final certificate.
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating csr key")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false)
+	require.NoError(t, err, "failed to get a certificate back from ACME")
+
+	acmeCert, err := x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert bytes")
+
+	return acmeCert
+}
diff --git a/builtin/logical/pki/acme_challenge_engine.go b/builtin/logical/pki/acme_challenge_engine.go
new file mode 100644
index 000000000000..96663f2e1cae
--- /dev/null
+++ b/builtin/logical/pki/acme_challenge_engine.go
@@ -0,0 +1,563 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"container/list"
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var MaxChallengeTimeout = 1 * time.Minute
+
+const MaxRetryAttempts = 5
+
+const ChallengeAttemptFailedMsg = "this may occur if the validation target was misconfigured: check that challenge responses are available at the required locations and retry."
+
+type ChallengeValidation struct {
+	// Account KID that this validation attempt is recorded under.
+	Account string `json:"account"`
+
+	// The authorization ID that this validation attempt is for.
+	Authorization string            `json:"authorization"`
+	ChallengeType ACMEChallengeType `json:"challenge_type"`
+
+	// The token of this challenge and the JWS thumbprint of the account
+	// we're validating against.
+	Token      string `json:"token"`
+	Thumbprint string `json:"thumbprint"`
+
+	Initiated       time.Time `json:"initiated"`
+	FirstValidation time.Time `json:"first_validation,omitempty"`
+	RetryCount      int       `json:"retry_count,omitempty"`
+	LastRetry       time.Time `json:"last_retry,omitempty"`
+	RetryAfter      time.Time `json:"retry_after,omitempty"`
+}
+
+type ChallengeQueueEntry struct {
+	Identifier string
+	RetryAfter time.Time
+	NumRetries int // Track if we are spinning on a corrupted challenge
+}
+
+type ACMEChallengeEngine struct {
+	NumWorkers int
+
+	ValidationLock sync.Mutex
+	NewValidation  chan string
+	Closing        chan struct{}
+	Validations    *list.List
+}
+
+func NewACMEChallengeEngine() *ACMEChallengeEngine {
+	ace := &ACMEChallengeEngine{}
+	ace.NewValidation = make(chan string, 1)
+	ace.Closing = make(chan struct{}, 1)
+	ace.Validations = list.New()
+	ace.NumWorkers = 5
+
+	return ace
+}
+
+func (ace *ACMEChallengeEngine) LoadFromStorage(b *backend, sc *storageContext) error {
+	items, err := sc.Storage.List(sc.Context, acmeValidationPrefix)
+	if err != nil {
+		return fmt.Errorf("failed loading list of validations from disk: %w", err)
+	}
+
+	ace.ValidationLock.Lock()
+	defer ace.ValidationLock.Unlock()
+
+	// Add them to our queue of validations to work through later.
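+	// A single buffered send on NewValidation (below) is enough to wake the
+	// runner; each wakeup drains as much of the queue as worker capacity
+	// allows, so we don't need one send per entry.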
+	foundExistingValidations := false
+	for _, item := range items {
+		ace.Validations.PushBack(&ChallengeQueueEntry{
+			Identifier: item,
+		})
+		foundExistingValidations = true
+	}
+
+	if foundExistingValidations {
+		ace.NewValidation <- "existing"
+	}
+
+	return nil
+}
+
+func (ace *ACMEChallengeEngine) Run(b *backend, state *acmeState, sc *storageContext) {
+	// We load the existing ACME challenges within the Run thread to avoid
+	// delaying the PKI mount initialization.
+	b.Logger().Debug("Loading existing challenge validations on disk")
+	err := ace.LoadFromStorage(b, sc)
+	if err != nil {
+		b.Logger().Error("failed loading existing ACME challenge validations", "err", err)
+	}
+
+	for {
+		// err == nil on shutdown.
+		b.Logger().Debug("Starting ACME challenge validation engine")
+		err := ace._run(b, state)
+		if err != nil {
+			b.Logger().Error("Got unexpected error from ACME challenge validation engine", "err", err)
+			time.Sleep(1 * time.Second)
+			continue
+		}
+		break
+	}
+}
+
+func (ace *ACMEChallengeEngine) _run(b *backend, state *acmeState) error {
+	// This runner uses a background context for storage operations: we don't
+	// want to tie it to an inbound request and we don't want to set a time
+	// limit, so create a fresh background context.
+	runnerSC := b.makeStorageContext(context.Background(), b.storage)
+
+	// We want at most a certain number of workers operating to verify
+	// challenges.
+	var finishedWorkersChannels []chan bool
+	for {
+		// Wait until we've got more work to do.
+		select {
+		case <-ace.Closing:
+			b.Logger().Debug("shutting down ACME challenge validation engine")
+			return nil
+		case <-ace.NewValidation:
+		}
+
+		// First try to reap any finished workers. Read from their channels
+		// and, for any worker that has not finished yet, keep its channel in
+		// a fresh slice.
+		var newFinishedWorkersChannels []chan bool
+		for _, channel := range finishedWorkersChannels {
+			select {
+			case <-channel:
+			default:
+				// This channel had not been written to, indicating that the
+				// worker had not yet finished.
+				newFinishedWorkersChannels = append(newFinishedWorkersChannels, channel)
+			}
+		}
+		finishedWorkersChannels = newFinishedWorkersChannels
+
+		// If we have space to take on another work item, do so.
+		firstIdentifier := ""
+		startedWork := false
+		now := time.Now()
+		for len(finishedWorkersChannels) < ace.NumWorkers {
+			var task *ChallengeQueueEntry
+
+			// Find our next work item. We do all of these operations
+			// while holding the queue lock, hence some repeated checks
+			// afterwards. Out of this, we get a candidate task, using
+			// element == nil as a sentinel for breaking our parent
+			// loop.
+			ace.ValidationLock.Lock()
+			element := ace.Validations.Front()
+			if element != nil {
+				ace.Validations.Remove(element)
+				task = element.Value.(*ChallengeQueueEntry)
+				if !task.RetryAfter.IsZero() && now.Before(task.RetryAfter) {
+					// We cannot work on this element yet; move it to
+					// the back of the queue. This allows us to potentially
+					// select the next item in the next iteration.
+					ace.Validations.PushBack(task)
+				}
+
+				if firstIdentifier != "" && task.Identifier == firstIdentifier {
+					// We found and rejected this element before; exit the
+					// loop by "claiming" we didn't find any work.
+					element = nil
+				} else if firstIdentifier == "" {
+					firstIdentifier = task.Identifier
+				}
+			}
+			ace.ValidationLock.Unlock()
+			if element == nil {
+				// There was no more work to do to fill up the queue; exit
+				// this loop.
+ break + } + if now.Before(task.RetryAfter) { + // Here, while we found an element, we didn't want to + // completely exit the loop (perhaps it was our first time + // finding a work order), so retry without modifying + // firstIdentifier. + continue + } + + config, err := state.getConfigWithUpdate(runnerSC) + if err != nil { + return fmt.Errorf("failed fetching ACME configuration: %w", err) + } + + // Since this work item was valid, we won't expect to see it in + // the validation queue again until it is executed. Here, we + // want to avoid infinite looping above (if we removed the one + // valid item and the remainder are all not immediately + // actionable). At the worst, we'll spend a little more time + // looping through the queue until we hit a repeat. + firstIdentifier = "" + + // If we are no longer the active node, break out + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + break + } + + // Here, we got a piece of work that is ready to check; create a + // channel and a new go routine and run it. Note that this still + // could have a RetryAfter date we're not aware of (e.g., if the + // cluster restarted as we do not read the entries there). + channel := make(chan bool, 1) + go ace.VerifyChallenge(runnerSC, task.Identifier, task.NumRetries, channel, config) + finishedWorkersChannels = append(finishedWorkersChannels, channel) + startedWork = true + } + + // If we have no more capacity for work, we should pause a little to + // let the system catch up. Additionally, if we only had + // non-actionable work items, we should pause until some time has + // elapsed: not too much that we potentially starve any new incoming + // items from validation, but not too short that we cause a busy loop. + if len(finishedWorkersChannels) == ace.NumWorkers || !startedWork { + time.Sleep(100 * time.Millisecond) + } + + // Lastly, if we have more work to do, re-trigger ourselves. + ace.ValidationLock.Lock() + if ace.Validations.Front() != nil { + select { + case ace.NewValidation <- "retry": + default: + } + } + ace.ValidationLock.Unlock() + } + + return fmt.Errorf("unexpectedly exited from ACMEChallengeEngine._run()") +} + +func (ace *ACMEChallengeEngine) AcceptChallenge(sc *storageContext, account string, authz *ACMEAuthorization, challenge *ACMEChallenge, thumbprint string) error { + name := authz.Id + "-" + string(challenge.Type) + path := acmeValidationPrefix + name + + entry, err := sc.Storage.Get(sc.Context, path) + if err == nil && entry != nil { + // Challenge already in the queue; exit without re-adding it. + return nil + } + + if authz.Status != ACMEAuthorizationPending { + return fmt.Errorf("%w: cannot accept already validated authorization %v (%v)", ErrMalformed, authz.Id, authz.Status) + } + + for _, otherChallenge := range authz.Challenges { + // We assume within an authorization we won't have multiple challenges of the same challenge type + // and we want to limit a single challenge being in a processing state to avoid race conditions + // failing one challenge and passing another. 
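+		// Concretely: any *other* challenge type must still be pending, while
+		// the requested type may be pending or already processing (a client
+		// re-accepting a processing challenge simply nudges us to retry it).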
+		if otherChallenge.Type != challenge.Type && otherChallenge.Status != ACMEChallengePending {
+			return fmt.Errorf("%w: only a single challenge within an authorization can be accepted (%v) in status %v", ErrMalformed, otherChallenge.Type, otherChallenge.Status)
+		}
+
+		// The requested challenge can ping us to wake us up, so allow pending and currently processing statuses.
+		if otherChallenge.Status != ACMEChallengePending && otherChallenge.Status != ACMEChallengeProcessing {
+			return fmt.Errorf("%w: challenge is in invalid state (%v) in authorization %v", ErrMalformed, challenge.Status, authz.Id)
+		}
+	}
+
+	token := challenge.ChallengeFields["token"].(string)
+
+	cv := &ChallengeValidation{
+		Account:       account,
+		Authorization: authz.Id,
+		ChallengeType: challenge.Type,
+		Token:         token,
+		Thumbprint:    thumbprint,
+		Initiated:     time.Now(),
+	}
+
+	json, err := logical.StorageEntryJSON(path, &cv)
+	if err != nil {
+		return fmt.Errorf("error creating challenge validation queue entry: %w", err)
+	}
+
+	if err := sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("error writing challenge validation entry: %w", err)
+	}
+
+	if challenge.Status == ACMEChallengePending {
+		challenge.Status = ACMEChallengeProcessing
+
+		authzPath := getAuthorizationPath(account, authz.Id)
+		if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil {
+			return fmt.Errorf("error saving updated authorization %v: %w", authz.Id, err)
+		}
+	}
+
+	ace.ValidationLock.Lock()
+	defer ace.ValidationLock.Unlock()
+	ace.Validations.PushBack(&ChallengeQueueEntry{
+		Identifier: name,
+	})
+
+	select {
+	case ace.NewValidation <- name:
+	default:
+	}
+
+	return nil
+}
+
+func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id string, validationQueueRetries int, finished chan bool, config *acmeConfigEntry) {
+	sc, cancel := runnerSc.WithFreshTimeout(MaxChallengeTimeout)
+	defer cancel()
+	runnerSc.Logger().Debug("Starting verification of challenge", "id", id)
+
+	if retry, retryAfter, err := ace._verifyChallenge(sc, id, config); err != nil {
+		// Because verification of this challenge failed, we need to retry
+		// it in the future. Log the error and re-add the item to the queue
+		// to try again later.
+		sc.Logger().Error(fmt.Sprintf("ACME validation failed for %v: %v", id, err))
+
+		if retry {
+			validationQueueRetries++
+
+			// The retry logic within _verifyChallenge depends on being able
+			// to read and decode the ACME challenge entries. If those reads
+			// fail we would retry forever, so this secondary check catches
+			// entries that have looped through the validation queue more
+			// times than the normal retry limit would allow.
+			if validationQueueRetries > MaxRetryAttempts*2 {
+				sc.Logger().Warn("reached max error attempts within challenge queue, giving up", "id", id)
+				_, _, err = ace._verifyChallengeCleanup(sc, nil, id)
+				if err != nil {
+					sc.Logger().Warn("failed cleaning up challenge entry", "err", err)
+				}
+				finished <- true
+				return
+			}
+
+			ace.ValidationLock.Lock()
+			defer ace.ValidationLock.Unlock()
+			ace.Validations.PushBack(&ChallengeQueueEntry{
+				Identifier: id,
+				RetryAfter: retryAfter,
+				NumRetries: validationQueueRetries,
+			})
+
+			// Let the validator know there's a pending challenge.
+			select {
+			case ace.NewValidation <- id:
+			default:
+			}
+		}
+
+		// We're the only producer on this channel and it has a buffer size
+		// of one element, so it is safe to directly write here.
+ finished <- true + return + } + + // We're the only producer on this channel and it has a buffer size of one + // element, so it is safe to directly write here. + finished <- false +} + +func (ace *ACMEChallengeEngine) _verifyChallenge(sc *storageContext, id string, config *acmeConfigEntry) (bool, time.Time, error) { + now := time.Now() + backoffTime := now.Add(1 * time.Second) + path := acmeValidationPrefix + id + challengeEntry, err := sc.Storage.Get(sc.Context, path) + if err != nil { + return true, backoffTime, fmt.Errorf("error loading challenge %v: %w", id, err) + } + + if challengeEntry == nil { + // Something must've successfully cleaned up our storage entry from + // under us. Assume we don't need to rerun, else the client will + // trigger us to re-run. + return ace._verifyChallengeCleanup(sc, nil, id) + } + + var cv *ChallengeValidation + if err := challengeEntry.DecodeJSON(&cv); err != nil { + return true, backoffTime, fmt.Errorf("error decoding challenge %v: %w", id, err) + } + + if now.Before(cv.RetryAfter) { + return true, cv.RetryAfter, fmt.Errorf("retrying challenge %v too soon", id) + } + + authzPath := getAuthorizationPath(cv.Account, cv.Authorization) + authz, err := loadAuthorizationAtPath(sc, authzPath) + if err != nil { + return true, backoffTime, fmt.Errorf("error loading authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) + } + + if authz.Status != ACMEAuthorizationPending { + // Something must've finished up this challenge for us. Assume we + // don't need to rerun and exit instead. + err = nil + return ace._verifyChallengeCleanup(sc, err, id) + } + + var challenge *ACMEChallenge + for _, authzChallenge := range authz.Challenges { + if authzChallenge.Type == cv.ChallengeType { + challenge = authzChallenge + break + } + } + + if challenge == nil { + err = fmt.Errorf("no challenge of type %v in authorization %v/%v for challenge %v", cv.ChallengeType, cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if challenge.Status != ACMEChallengePending && challenge.Status != ACMEChallengeProcessing { + err = fmt.Errorf("challenge is in invalid state %v in authorization %v/%v for challenge %v", challenge.Status, cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + var valid bool + switch challenge.Type { + case ACMEHTTPChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier && authz.Identifier.Type != ACMEIPIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if authz.Wildcard { + err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via http-01 challenge", cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = ValidateHTTP01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating http-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + case ACMEDNSChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = 
ValidateDNS01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating dns-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + case ACMEALPNChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if authz.Wildcard { + err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via tls-alpn-01 challenge", cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = ValidateTLSALPN01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating tls-alpn-01 challenge %v: %s", ErrIncorrectResponse, id, err.Error()) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + default: + err = fmt.Errorf("unsupported ACME challenge type %v for challenge %v", cv.ChallengeType, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if !valid { + err = fmt.Errorf("%w: challenge failed with no additional information", ErrIncorrectResponse) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + + // If we got here, the challenge verification was successful. Update + // the authorization appropriately. + expires := now.Add(15 * 24 * time.Hour) + challenge.Status = ACMEChallengeValid + challenge.Validated = now.Format(time.RFC3339) + challenge.Error = nil + authz.Status = ACMEAuthorizationValid + authz.Expires = expires.Format(time.RFC3339) + + if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil { + err = fmt.Errorf("error saving updated (validated) authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + + return ace._verifyChallengeCleanup(sc, nil, id) +} + +func (ace *ACMEChallengeEngine) _verifyChallengeRetry(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error, id string) (bool, time.Time, error) { + now := time.Now() + path := acmeValidationPrefix + id + + if err := updateChallengeStatus(sc, cv, authzPath, auth, challenge, verificationErr); err != nil { + return true, now, err + } + + if cv.RetryCount > MaxRetryAttempts { + err := fmt.Errorf("reached max error attempts for challenge %v: %w", id, verificationErr) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if cv.FirstValidation.IsZero() { + cv.FirstValidation = now + } + cv.RetryCount += 1 + cv.LastRetry = now + cv.RetryAfter = now.Add(time.Duration(cv.RetryCount*5) * time.Second) + + json, jsonErr := logical.StorageEntryJSON(path, cv) + if jsonErr != nil { + return true, now, fmt.Errorf("error persisting updated challenge validation queue entry (error prior to retry, if any: %v): %w", verificationErr, jsonErr) + } + + if putErr := sc.Storage.Put(sc.Context, json); putErr != nil { + return true, now, fmt.Errorf("error writing updated challenge validation entry (error prior to retry, if any: %v): %w", verificationErr, putErr) + } + + if verificationErr != nil { + verificationErr = fmt.Errorf("retrying validation: %w", verificationErr) + } + + 
return true, cv.RetryAfter, verificationErr +} + +func updateChallengeStatus(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error) error { + if verificationErr != nil { + challengeError := TranslateErrorToErrorResponse(verificationErr) + challenge.Error = challengeError.MarshalForStorage() + } + + if cv.RetryCount > MaxRetryAttempts { + challenge.Status = ACMEChallengeInvalid + auth.Status = ACMEAuthorizationInvalid + } + + if err := saveAuthorizationAtPath(sc, authzPath, auth); err != nil { + return fmt.Errorf("error persisting authorization/challenge update: %w", err) + } + return nil +} + +func (ace *ACMEChallengeEngine) _verifyChallengeCleanup(sc *storageContext, err error, id string) (bool, time.Time, error) { + now := time.Now() + + // Remove our ChallengeValidation entry only. + if deleteErr := sc.Storage.Delete(sc.Context, acmeValidationPrefix+id); deleteErr != nil { + return true, now.Add(1 * time.Second), fmt.Errorf("error deleting challenge %v (error prior to cleanup, if any: %v): %w", id, err, deleteErr) + } + + if err != nil { + err = fmt.Errorf("removing challenge validation attempt and not retrying %v; previous error: %w", id, err) + } + + return false, now, err +} diff --git a/builtin/logical/pki/acme_challenges.go b/builtin/logical/pki/acme_challenges.go new file mode 100644 index 000000000000..85c051c86e0e --- /dev/null +++ b/builtin/logical/pki/acme_challenges.go @@ -0,0 +1,502 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "strings" + "time" +) + +const ( + DNSChallengePrefix = "_acme-challenge." + ALPNProtocol = "acme-tls/1" +) + +// While this should be a constant, there's no way to do a low-level test of +// ValidateTLSALPN01Challenge without spinning up a complicated Docker +// instance to build a custom responder. Because we already have a local +// toolchain, it is far easier to drive this through Go tests with a custom +// (high) port, rather than requiring permission to bind to port 443 (root-run +// tests are even worse). +var ALPNPort = "443" + +// OID of the acmeIdentifier X.509 Certificate Extension. +var OIDACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} + +// ValidateKeyAuthorization validates that the given keyAuthz from a challenge +// matches our expectation, returning (true, nil) if so, or (false, err) if +// not. +func ValidateKeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + parts := strings.Split(keyAuthz, ".") + if len(parts) != 2 { + return false, fmt.Errorf("invalid authorization: got %v parts, expected 2", len(parts)) + } + + tokenPart := parts[0] + thumbprintPart := parts[1] + + if token != tokenPart || thumbprint != thumbprintPart { + return false, fmt.Errorf("key authorization was invalid") + } + + return true, nil +} + +// ValidateSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with DNS challenges, which require base64 encoding. +func ValidateSHA256KeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + authzContents := token + "." 
+ thumbprint + checksum := sha256.Sum256([]byte(authzContents)) + expectedAuthz := base64.RawURLEncoding.EncodeToString(checksum[:]) + + if keyAuthz != expectedAuthz { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +// ValidateRawSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with TLS challenges, which require the raw hash output. +func ValidateRawSHA256KeyAuthorization(keyAuthz []byte, token string, thumbprint string) (bool, error) { + authzContents := token + "." + thumbprint + expectedAuthz := sha256.Sum256([]byte(authzContents)) + + if len(keyAuthz) != len(expectedAuthz) || subtle.ConstantTimeCompare(expectedAuthz[:], keyAuthz) != 1 { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +func buildResolver(config *acmeConfigEntry) (*net.Resolver, error) { + if len(config.DNSResolver) == 0 { + return net.DefaultResolver, nil + } + + return &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, config.DNSResolver) + }, + }, nil +} + +func buildDialerConfig(config *acmeConfigEntry) (*net.Dialer, error) { + resolver, err := buildResolver(config) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %w", err) + } + + return &net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: -1 * time.Second, + Resolver: resolver, + }, nil +} + +// Validates a given ACME http-01 challenge against the specified domain, +// per RFC 8555. +// +// We attempt to be defensive here against timeouts, extra redirects, &c. +func ValidateHTTP01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { + path := "http://" + domain + "/.well-known/acme-challenge/" + token + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + transport := &http.Transport{ + // Only a single request is sent to this server as we do not do any + // batching of validation attempts. There is no need to do an HTTP + // KeepAlive as a result. + DisableKeepAlives: true, + MaxIdleConns: 1, + MaxIdleConnsPerHost: 1, + MaxConnsPerHost: 1, + IdleConnTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + + // We'd rather timeout and re-attempt validation later than hang + // too many validators waiting for slow hosts. + DialContext: dialer.DialContext, + ResponseHeaderTimeout: 10 * time.Second, + } + + maxRedirects := 10 + urlLength := 2000 + + client := &http.Client{ + Transport: transport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + if len(via)+1 >= maxRedirects { + return fmt.Errorf("http-01: too many redirects: %v", len(via)+1) + } + + reqUrlLen := len(req.URL.String()) + if reqUrlLen > urlLength { + return fmt.Errorf("http-01: redirect url length too long: %v", reqUrlLen) + } + + return nil + }, + } + + resp, err := client.Get(path) + if err != nil { + return false, fmt.Errorf("http-01: failed to fetch path %v: %w", path, err) + } + + // We provision a buffer which allows for a variable size challenge, some + // whitespace, and a detection gap for too long of a message. 
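+	// Illustrative bound: a compliant responder serves "<token>.<thumbprint>"
+	// (plus optional trailing whitespace), so anything shorter than
+	// len(token)+1+len(thumbprint) bytes cannot match; the read is capped at
+	// maxExpected+1 bytes and longer responses are rejected.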
+	minExpected := len(token) + 1 + len(thumbprint)
+	maxExpected := 512
+
+	defer resp.Body.Close()
+
+	// Attempt to read the body, but don't do so infinitely.
+	body, err := io.ReadAll(io.LimitReader(resp.Body, int64(maxExpected+1)))
+	if err != nil {
+		return false, fmt.Errorf("http-01: unexpected error while reading body: %w", err)
+	}
+
+	if len(body) > maxExpected {
+		return false, fmt.Errorf("http-01: response too large: received %v > %v bytes", len(body), maxExpected)
+	}
+
+	if len(body) < minExpected {
+		return false, fmt.Errorf("http-01: response too small: received %v < %v bytes", len(body), minExpected)
+	}
+
+	// Per RFC 8555 Section 8.3. HTTP Challenge:
+	//
+	// > The server SHOULD ignore whitespace characters at the end of the body.
+	keyAuthz := string(body)
+	keyAuthz = strings.TrimSpace(keyAuthz)
+
+	// If we got here, we got no non-EOF error while reading. Try to validate
+	// the token because we're bounded by a reasonable amount of length.
+	return ValidateKeyAuthorization(keyAuthz, token, thumbprint)
+}
+
+func ValidateDNS01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) {
+	// Here, domain is the value from the post-wildcard-processed identifier.
+	// Per RFC 8555, no difference in validation occurs if a wildcard entry
+	// is requested or if a non-wildcard entry is requested.
+	//
+	// XXX: In this case the DNS server is operator controlled and is assumed
+	// to be less malicious. The resolver already honors the ACME
+	// configuration (see buildResolver), and the lookup below is bounded by
+	// a context timeout.
+	resolver, err := buildResolver(config)
+	if err != nil {
+		return false, fmt.Errorf("failed to build resolver: %w", err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	name := DNSChallengePrefix + domain
+	results, err := resolver.LookupTXT(ctx, name)
+	if err != nil {
+		return false, fmt.Errorf("dns-01: failed to lookup TXT records for domain (%v) via resolver %v: %w", name, config.DNSResolver, err)
+	}
+
+	for _, keyAuthz := range results {
+		ok, _ := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint)
+		if ok {
+			return true, nil
+		}
+	}
+
+	return false, fmt.Errorf("dns-01: challenge failed against %v records", len(results))
+}
+
+func ValidateTLSALPN01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) {
+	// This challenge is defined in RFC 8737, Automated Certificate Management
+	// Environment (ACME) TLS Application-Layer Protocol Negotiation
+	// (ALPN) Challenge Extension.
+	//
+	// This is conceptually similar to ValidateHTTP01Challenge, but
+	// uses a TLS connection on port 443 with the specified ALPN
+	// protocol.
+
+	cfg := &tls.Config{
+		// Per RFC 8737 Section 3. TLS with Application-Layer Protocol
+		// Negotiation (TLS ALPN) Challenge, the name of the negotiated
+		// protocol is "acme-tls/1".
+		NextProtos: []string{ALPNProtocol},
+
+		// Per RFC 8737 Section 3. TLS with Application-Layer Protocol
+		// Negotiation (TLS ALPN) Challenge:
+		//
+		// > ... and an SNI extension containing only the domain name
+		// > being validated during the TLS handshake.
+		//
+		// According to the Go docs, setting this option (even though
+		// InsecureSkipVerify=true is also specified) allows us to
+		// set the SNI extension to this value.
+ ServerName: domain, + + VerifyConnection: func(connState tls.ConnectionState) error { + // We initiated a fresh connection with no session tickets; + // even if we did have a session ticket, we do not wish to + // use it. Verify that the server has not inadvertently + // reused connections between validation attempts or something. + if connState.DidResume { + return fmt.Errorf("server under test incorrectly reported that handshake was resumed when no session cache was provided; refusing to continue") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that during the TLS handshake the + // > application-layer protocol "acme-tls/1" was successfully + // > negotiated (and that the ALPN extension contained only the + // > value "acme-tls/1"). + if connState.NegotiatedProtocol != ALPNProtocol { + return fmt.Errorf("server under test negotiated unexpected ALPN protocol %v", connState.NegotiatedProtocol) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > and that the certificate returned + // + // Because this certificate MUST be self-signed (per earlier + // statement in RFC 8737 Section 3), there is no point in sending + // more than one certificate, and so we will err early here if + // we got more than one. + if len(connState.PeerCertificates) > 1 { + return fmt.Errorf("server under test returned multiple (%v) certificates when we expected only one", len(connState.PeerCertificates)) + } + cert := connState.PeerCertificates[0] + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The client prepares for validation by constructing a + // > self-signed certificate that MUST contain an acmeIdentifier + // > extension and a subjectAlternativeName extension [RFC5280]. + // + // Verify that this is a self-signed certificate that isn't signed + // by another certificate (i.e., with the same key material but + // different issuer). + // NOTE: Do not use cert.CheckSignatureFrom(cert) as we need to bypass the + // checks for the parent certificate having the IsCA basic constraint set. + err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature) + if err != nil { + return fmt.Errorf("server under test returned a non-self-signed certificate: %w", err) + } + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + return fmt.Errorf("server under test returned a non-self-signed certificate: invalid subject (%v) <-> issuer (%v) match", cert.Subject.String(), cert.Issuer.String()) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The subjectAlternativeName extension MUST contain a single + // > dNSName entry where the value is the domain name being + // > validated. + // + // TODO: this does not validate that there are not other SANs + // with unknown (to Go) OIDs. + if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) > 0 || len(cert.IPAddresses) > 0 || len(cert.URIs) > 0 { + return fmt.Errorf("server under test returned a certificate with incorrect SANs") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The comparison of dNSNames MUST be case insensitive + // > [RFC4343]. Note that as ACME doesn't support Unicode + // > identifiers, all dNSNames MUST be encoded using the rules + // > of [RFC3492]. 
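+			// strings.EqualFold gives us the required case-insensitive
+			// comparison; by this point identifiers are assumed to already be
+			// in ASCII (A-label/punycode) form, so no Unicode folding is
+			// needed.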
+ if !strings.EqualFold(cert.DNSNames[0], domain) { + return fmt.Errorf("server under test returned a certificate with unexpected identifier: %v", cert.DNSNames[0]) + } + + // Per above, verify that the acmeIdentifier extension is present + // exactly once and has the correct value. + var foundACMEId bool + for _, ext := range cert.Extensions { + if !ext.Id.Equal(OIDACMEIdentifier) { + continue + } + + // There must be only a single ACME extension. + if foundACMEId { + return fmt.Errorf("server under test returned a certificate with multiple acmeIdentifier extensions") + } + foundACMEId = true + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > a critical acmeIdentifier extension + if !ext.Critical { + return fmt.Errorf("server under test returned a certificate with an acmeIdentifier extension marked non-Critical") + } + + var keyAuthz []byte + remainder, err := asn1.Unmarshal(ext.Value, &keyAuthz) + if err != nil { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value: %w", err) + } + if len(remainder) > 0 { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value with additional trailing data") + } + + ok, err := ValidateRawSHA256KeyAuthorization(keyAuthz, token, thumbprint) + if !ok || err != nil { + return fmt.Errorf("server under test returned a certificate with an invalid key authorization (%w)", err) + } + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that ... the certificate returned + // > contains: ... a critical acmeIdentifier extension containing + // > the expected SHA-256 digest computed in step 1. + if !foundACMEId { + return fmt.Errorf("server under test returned a certificate without the required acmeIdentifier extension") + } + + // Remove the handled critical extension and validate that we + // have no additional critical extensions left unhandled. + var index int = -1 + for oidIndex, oid := range cert.UnhandledCriticalExtensions { + if oid.Equal(OIDACMEIdentifier) { + index = oidIndex + break + } + } + if index != -1 { + // Unlike the foundACMEId case, this is not a failure; if Go + // updates to "understand" this critical extension, we do not + // wish to fail. + cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[0:index], cert.UnhandledCriticalExtensions[index+1:]...) + } + if len(cert.UnhandledCriticalExtensions) > 0 { + return fmt.Errorf("server under test returned a certificate with additional unknown critical extensions (%v)", cert.UnhandledCriticalExtensions) + } + + // All good! + return nil + }, + + // We never want to resume a connection; do not provide session + // cache storage. + ClientSessionCache: nil, + + // Do not trust any system trusted certificates; we're going to be + // manually validating the chain, so specifying a non-empty pool + // here could only cause additional, unnecessary work. + RootCAs: x509.NewCertPool(), + + // Do not bother validating the client's chain; we know it should be + // self-signed. This also disables hostname verification, but we do + // this verification as part of VerifyConnection(...) ourselves. + // + // Per Go docs, this option is only safe in conjunction with + // VerifyConnection which we define above. + InsecureSkipVerify: true, + + // RFC 8737 Section 4. 
acme-tls/1 Protocol Definition: + // + // > ACME servers that implement "acme-tls/1" MUST only negotiate + // > TLS 1.2 [RFC5246] or higher when connecting to clients for + // > validation. + MinVersion: tls.VersionTLS12, + + // While RFC 8737 does not place restrictions around allowed cipher + // suites, we wish to restrict ourselves to secure defaults. Specify + // the Intermediate guideline from Mozilla's TLS config generator to + // disable obviously weak ciphers. + // + // See also: https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.7 + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + }, + } + + // Build a dialer using our custom DNS resolver, to ensure domains get + // resolved according to configuration. + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > 2. The ACME server resolves the domain name being validated and + // > chooses one of the IP addresses returned for validation (the + // > server MAY validate against multiple addresses if more than + // > one is returned). + // > 3. The ACME server initiates a TLS connection to the chosen IP + // > address. This connection MUST use TCP port 443. + address := fmt.Sprintf("%v:"+ALPNPort, domain) + conn, err := dialer.Dial("tcp", address) + if err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to dial host: %w", err) + } + + // Initiate the connection to the remote peer. + client := tls.Client(conn, cfg) + + // We intentionally swallow this error as it isn't useful to the + // underlying protocol we perform here. Notably, per RFC 8737 + // Section 4. acme-tls/1 Protocol Definition: + // + // > Once the handshake is completed, the client MUST NOT exchange + // > any further data with the server and MUST immediately close the + // > connection. ... Because of this, an ACME server MAY choose to + // > withhold authorization if either the certificate signature is + // > invalid or the handshake doesn't fully complete. + defer client.Close() + + // We wish to put time bounds on the total time the handshake can + // stall for, so build a connection context here. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // See note above about why we can allow Handshake to complete + // successfully. + if err := client.HandshakeContext(ctx); err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to perform handshake: %w", err) + } + return true, nil +} diff --git a/builtin/logical/pki/acme_challenges_test.go b/builtin/logical/pki/acme_challenges_test.go new file mode 100644 index 000000000000..c1b919be757c --- /dev/null +++ b/builtin/logical/pki/acme_challenges_test.go @@ -0,0 +1,758 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + "github.com/stretchr/testify/require" +) + +type keyAuthorizationTestCase struct { + keyAuthz string + token string + thumbprint string + shouldFail bool +} + +var keyAuthorizationTestCases = []keyAuthorizationTestCase{ + { + // Entirely empty + "", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Both empty + ".", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Not equal + "non-.non-", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Empty thumbprint + "non-.", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Empty token + ".non-", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Wrong order + "non-empty-thumbprint.non-empty-token", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Too many pieces + "one.two.three", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Valid + "non-empty-token.non-empty-thumbprint", + "non-empty-token", + "non-empty-thumbprint", + false, + }, +} + +func TestAcmeValidateKeyAuthorization(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + t.Run("subtest-"+strconv.Itoa(index), func(st *testing.T) { + isValid, err := ValidateKeyAuthorization(tc.keyAuthz, tc.token, tc.thumbprint) + if !isValid && err == nil { + st.Fatalf("[%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + st.Fatalf("[%d] got ret=%v, expected ret=%v (shouldFail=%v)", index, isValid, expectedValid, tc.shouldFail) + } + }) + } +} + +func TestAcmeValidateHTTP01Challenge(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + validFunc := func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(tc.keyAuthz)) + } + withPadding := func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(" " + tc.keyAuthz + " ")) + } + withRedirect := func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/.well-known/") { + http.Redirect(w, r, "/my-http-01-challenge-response", 301) + return + } + + w.Write([]byte(tc.keyAuthz)) + } + withSleep := func(w http.ResponseWriter, r *http.Request) { + // Long enough to ensure any excessively short timeouts are hit, + // not long enough to trigger a failure (hopefully). 
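+			// (The validator's ResponseHeaderTimeout is 10 seconds, so a
+			// 5-second delay should still pass.)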
+ time.Sleep(5 * time.Second) + w.Write([]byte(tc.keyAuthz)) + } + + validHandlers := []http.HandlerFunc{ + http.HandlerFunc(validFunc), http.HandlerFunc(withPadding), + http.HandlerFunc(withRedirect), http.HandlerFunc(withSleep), + } + + for handlerIndex, handler := range validHandlers { + func() { + ts := httptest.NewServer(handler) + defer ts.Close() + + host := ts.URL[7:] + isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{}) + if !isValid && err == nil { + t.Fatalf("[tc=%d/handler=%d] expected failure to give reason via err (%v / %v)", index, handlerIndex, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d/handler=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, handlerIndex, isValid, err, expectedValid, tc.shouldFail) + } + }() + } + } + + // Negative test cases for various HTTP-specific scenarios. + redirectLoop := func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/my-http-01-challenge-response", 301) + } + publicRedirect := func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "http://hashicorp.com/", 301) + } + noData := func(w http.ResponseWriter, r *http.Request) {} + noContent := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + } + notFound := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + } + simulateHang := func(w http.ResponseWriter, r *http.Request) { + time.Sleep(30 * time.Second) + w.Write([]byte("my-token.my-thumbprint")) + } + tooLarge := func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < 512; i++ { + w.Write([]byte("my-token.my-thumbprint\n")) + } + } + + validHandlers := []http.HandlerFunc{ + http.HandlerFunc(redirectLoop), http.HandlerFunc(publicRedirect), + http.HandlerFunc(noData), http.HandlerFunc(noContent), + http.HandlerFunc(notFound), http.HandlerFunc(simulateHang), + http.HandlerFunc(tooLarge), + } + for handlerIndex, handler := range validHandlers { + func() { + ts := httptest.NewServer(handler) + defer ts.Close() + + host := ts.URL[7:] + isValid, err := ValidateHTTP01Challenge(host, "my-token", "my-thumbprint", &acmeConfigEntry{}) + if isValid || err == nil { + t.Fatalf("[handler=%d] expected failure validating challenge (%v / %v)", handlerIndex, isValid, err) + } + }() + } +} + +func TestAcmeValidateDNS01Challenge(t *testing.T) { + t.Parallel() + + host := "dadgarcorp.com" + resolver := dnstest.SetupResolver(t, host) + defer resolver.Cleanup() + + t.Logf("DNS Server Address: %v", resolver.GetLocalAddr()) + + config := &acmeConfigEntry{ + DNSResolver: resolver.GetLocalAddr(), + } + + for index, tc := range keyAuthorizationTestCases { + checksum := sha256.Sum256([]byte(tc.keyAuthz)) + authz := base64.RawURLEncoding.EncodeToString(checksum[:]) + resolver.AddRecord(DNSChallengePrefix+host, "TXT", authz) + resolver.PushConfig() + + isValid, err := ValidateDNS01Challenge(host, tc.token, tc.thumbprint, config) + if !isValid && err == nil { + t.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail) + } + + resolver.RemoveAllRecords() + } +} + +func TestAcmeValidateTLSALPN01Challenge(t *testing.T) { + // This test is not parallel because we modify ALPNPort to use a custom + // non-standard port _just for 
testing purposes_.
+	host := "localhost"
+	config := &acmeConfigEntry{}
+
+	log := hclog.L()
+
+	returnedProtocols := []string{ALPNProtocol}
+	var certificates []*x509.Certificate
+	var privateKey crypto.PrivateKey
+
+	tlsCfg := &tls.Config{}
+	tlsCfg.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) {
+		var retCfg tls.Config = *tlsCfg
+		retCfg.NextProtos = returnedProtocols
+		log.Info(fmt.Sprintf("[alpn-server] returned protocol: %v", returnedProtocols))
+		return &retCfg, nil
+	}
+	tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+		var ret tls.Certificate
+		for index, cert := range certificates {
+			ret.Certificate = append(ret.Certificate, cert.Raw)
+			if index == 0 {
+				ret.Leaf = cert
+			}
+		}
+		ret.PrivateKey = privateKey
+		log.Info(fmt.Sprintf("[alpn-server] returned certificates: %v", ret))
+		return &ret, nil
+	}
+
+	ln, err := tls.Listen("tcp", host+":0", tlsCfg)
+	require.NoError(t, err, "failed to listen with TLS config")
+
+	doOneAccept := func() {
+		log.Info("[alpn-server] starting accept...")
+		connRaw, err := ln.Accept()
+		require.NoError(t, err, "failed to accept TLS connection")
+
+		log.Info("[alpn-server] got connection...")
+		conn := tls.Server(connRaw.(*tls.Conn), tlsCfg)
+
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+		defer func() {
+			log.Info("[alpn-server] canceling listener connection...")
+			cancel()
+		}()
+
+		log.Info("[alpn-server] starting handshake...")
+		if err := conn.HandshakeContext(ctx); err != nil {
+			log.Info(fmt.Sprintf("[alpn-server] got non-fatal error while handshaking connection: %v", err))
+		}
+
+		log.Info("[alpn-server] closing connection...")
+		if err := conn.Close(); err != nil {
+			log.Info(fmt.Sprintf("[alpn-server] got non-fatal error while closing connection: %v", err))
+		}
+	}
+
+	ALPNPort = strings.Split(ln.Addr().String(), ":")[1]
+
+	type alpnTestCase struct {
+		name         string
+		certificates []*x509.Certificate
+		privateKey   crypto.PrivateKey
+		protocols    []string
+		token        string
+		thumbprint   string
+		shouldFail   bool
+	}
+
+	var alpnTestCases []alpnTestCase
+	// Add all of our keyAuthorizationTestCases into alpnTestCases
+	for index, tc := range keyAuthorizationTestCases {
+		log.Info(fmt.Sprintf("using keyAuthorizationTestCase [tc=%d] as alpnTestCase [tc=%d]...", index, len(alpnTestCases)))
+		// Properly encode the authorization.
+		checksum := sha256.Sum256([]byte(tc.keyAuthz))
+		authz, err := asn1.Marshal(checksum[:])
+		require.NoError(t, err, "failed asn.1 marshalling authz")
+
+		// Build a self-signed certificate.
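+		// Per RFC 8737 Section 3, the challenge certificate is self-signed,
+		// carries exactly one dNSName SAN (the host under validation), and a
+		// critical acmeIdentifier extension holding the digest built above.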
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(1), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated certificate") + + newTc := alpnTestCase{ + name: fmt.Sprintf("keyAuthorizationTestCase[%d]", index), + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: tc.token, + thumbprint: tc.thumbprint, + shouldFail: tc.shouldFail, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: Longer chain + // Build a self-signed certificate. + rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating root private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Root CA", + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: rootKey.Public(), + SerialNumber: big.NewInt(1), + BasicConstraintsValid: true, + IsCA: true, + } + rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey) + require.NoError(t, err, "failed to create root certificate") + rootCert, err := x509.ParseCertificate(rootCertBytes) + require.NoError(t, err, "failed to parse newly generated root certificate") + + // Compute our authorization. 
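+		// The acmeIdentifier extension value is SHA-256(token "." thumbprint),
+		// here SHA-256("valid.valid"), wrapped by asn1.Marshal as an ASN.1
+		// OCTET STRING.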
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which _could_ pass validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl = &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, rootCert, key.Public(), rootKey) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "longer chain with valid leaf", + certificates: []*x509.Certificate{cert, rootCert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without DNSSan + // Compute our authorization. + checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate without a DNSSan + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + // NO DNSNames + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz without valid dnsname", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without matching DNSSan + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which fails validation due to bad DNSName + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host + ".dadgarcorp.com" /* not matching host! */}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz without matching dnsname", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert with additional SAN + // Compute our authorization. + checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which has an invalid additional SAN + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + EmailAddresses: []string{"webmaster@" + host}, /* unexpected */ + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz with additional email SANs", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without CN + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which should pass validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{}, + Issuer: pkix.Name{}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid certificate; no Subject/Issuer (missing CN)", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: false, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without the extension + // Build a leaf certificate which should fail validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{}, + Issuer: pkix.Name{}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(1), + DNSNames: []string{host}, + BasicConstraintsValid: true, + IsCA: true, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "missing required acmeIdentifier extension", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: root without a leaf + // Build a self-signed certificate. 
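+		// This is a CA certificate presented with no accompanying leaf; it
+		// lacks both a SAN for the host and the acmeIdentifier extension, so
+		// validation must fail.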
+		rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+		require.NoError(t, err, "failed generating root private key")
+		tmpl := &x509.Certificate{
+			Subject: pkix.Name{
+				CommonName: "Root CA",
+			},
+			Issuer: pkix.Name{
+				CommonName: "Root CA",
+			},
+			KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+			ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+			PublicKey:             rootKey.Public(),
+			SerialNumber:          big.NewInt(1),
+			BasicConstraintsValid: true,
+			IsCA:                  true,
+		}
+		rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey)
+		require.NoError(t, err, "failed to create root certificate")
+		rootCert, err := x509.ParseCertificate(rootCertBytes)
+		require.NoError(t, err, "failed to parse newly generated root certificate")
+
+		newTc := alpnTestCase{
+			name:         "root without leaf",
+			certificates: []*x509.Certificate{rootCert},
+			privateKey:   rootKey,
+			protocols:    []string{ALPNProtocol},
+			token:        "valid",
+			thumbprint:   "valid",
+			shouldFail:   true,
+		}
+		alpnTestCases = append(alpnTestCases, newTc)
+	}
+
+	for index, tc := range alpnTestCases {
+		log.Info(fmt.Sprintf("\n\n[tc=%d/name=%s] starting validation", index, tc.name))
+		certificates = tc.certificates
+		privateKey = tc.privateKey
+		returnedProtocols = tc.protocols
+
+		// Attempt to validate the challenge.
+		go doOneAccept()
+		isValid, err := ValidateTLSALPN01Challenge(host, tc.token, tc.thumbprint, config)
+		if !isValid && err == nil {
+			t.Fatalf("[tc=%d/name=%s] expected failure to give reason via err (%v / %v)", index, tc.name, isValid, err)
+		}
+
+		expectedValid := !tc.shouldFail
+		if expectedValid != isValid {
+			t.Fatalf("[tc=%d/name=%s] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, tc.name, isValid, err, expectedValid, tc.shouldFail)
+		} else if err != nil {
+			log.Info(fmt.Sprintf("[tc=%d/name=%s] got expected failure: err=%v", index, tc.name, err))
+		}
+	}
+}
+
+// TestAcmeValidateHttp01TLSRedirect verifies that we allow an http-01 challenge
+// to redirect to a TLS server without validating the certificate chain. We skip
+// TLS chain validation because we would have accepted the authorization over a
+// non-secured channel anyway, had the original request not redirected us.
+func TestAcmeValidateHttp01TLSRedirect(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + t.Run("subtest-"+strconv.Itoa(index), func(st *testing.T) { + validFunc := func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/.well-known/") { + w.Write([]byte(tc.keyAuthz)) + return + } + http.Error(w, "status not found", http.StatusNotFound) + } + + tlsTs := httptest.NewTLSServer(http.HandlerFunc(validFunc)) + defer tlsTs.Close() + + // Set up a http server that will redirect to our TLS server + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, tlsTs.URL+r.URL.Path, 301) + })) + defer ts.Close() + + host := ts.URL[len("http://"):] + isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{}) + if !isValid && err == nil { + st.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + st.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail) + } + }) + } +} diff --git a/builtin/logical/pki/acme_eab_policy.go b/builtin/logical/pki/acme_eab_policy.go new file mode 100644 index 000000000000..43af5d330055 --- /dev/null +++ b/builtin/logical/pki/acme_eab_policy.go @@ -0,0 +1,69 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "strings" +) + +type EabPolicyName string + +const ( + eabPolicyNotRequired EabPolicyName = "not-required" + eabPolicyNewAccountRequired EabPolicyName = "new-account-required" + eabPolicyAlwaysRequired EabPolicyName = "always-required" +) + +func getEabPolicyByString(name string) (EabPolicy, error) { + lcName := strings.TrimSpace(strings.ToLower(name)) + switch lcName { + case string(eabPolicyNotRequired): + return getEabPolicyByName(eabPolicyNotRequired), nil + case string(eabPolicyNewAccountRequired): + return getEabPolicyByName(eabPolicyNewAccountRequired), nil + case string(eabPolicyAlwaysRequired): + return getEabPolicyByName(eabPolicyAlwaysRequired), nil + default: + return getEabPolicyByName(eabPolicyAlwaysRequired), fmt.Errorf("unknown eab policy name: %s", name) + } +} + +func getEabPolicyByName(name EabPolicyName) EabPolicy { + return EabPolicy{Name: name} +} + +type EabPolicy struct { + Name EabPolicyName +} + +// EnforceForNewAccount for new account creations, should we require an EAB. +func (ep EabPolicy) EnforceForNewAccount(eabData *eabType) error { + if (ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired) && eabData == nil { + return ErrExternalAccountRequired + } + + return nil +} + +// EnforceForExistingAccount for all operations within ACME, does the account being used require an EAB attached to it. 
+func (ep EabPolicy) EnforceForExistingAccount(account *acmeAccount) error {
+	if ep.Name == eabPolicyAlwaysRequired && account.Eab == nil {
+		return ErrExternalAccountRequired
+	}
+
+	return nil
+}
+
+// IsExternalAccountRequired reports whether incoming new accounts must supply an EAB.
+func (ep EabPolicy) IsExternalAccountRequired() bool {
+	return ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired
+}
+
+// OverrideEnvDisablingPublicAcme reports whether this policy may override the OS
+// environment variable that disables public ACME support, turning ACME support
+// on even when that variable is set.
+func (ep EabPolicy) OverrideEnvDisablingPublicAcme() bool {
+	return ep.Name == eabPolicyAlwaysRequired
+}
diff --git a/builtin/logical/pki/acme_errors.go b/builtin/logical/pki/acme_errors.go
new file mode 100644
index 000000000000..3c9c059f7d22
--- /dev/null
+++ b/builtin/logical/pki/acme_errors.go
@@ -0,0 +1,212 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// Error prefix; see RFC 8555 Section 6.7. Errors.
+const (
+	ErrorPrefix      = "urn:ietf:params:acme:error:"
+	ErrorContentType = "application/problem+json"
+)
+
+// See RFC 8555 Section 6.7. Errors.
+var ErrAccountDoesNotExist = errors.New("The request specified an account that does not exist")
+
+var ErrAcmeDisabled = errors.New("ACME feature is disabled")
+
+var (
+	ErrAlreadyRevoked          = errors.New("The request specified a certificate to be revoked that has already been revoked")
+	ErrBadCSR                  = errors.New("The CSR is unacceptable")
+	ErrBadNonce                = errors.New("The client sent an unacceptable anti-replay nonce")
+	ErrBadPublicKey            = errors.New("The JWS was signed by a public key the server does not support")
+	ErrBadRevocationReason     = errors.New("The revocation reason provided is not allowed by the server")
+	ErrBadSignatureAlgorithm   = errors.New("The JWS was signed with an algorithm the server does not support")
+	ErrCAA                     = errors.New("Certification Authority Authorization (CAA) records forbid the CA from issuing a certificate")
+	ErrCompound                = errors.New("Specific error conditions are indicated in the 'subproblems' array")
+	ErrConnection              = errors.New("The server could not connect to validation target")
+	ErrDNS                     = errors.New("There was a problem with a DNS query during identifier validation")
+	ErrExternalAccountRequired = errors.New("The request must include a value for the 'externalAccountBinding' field")
+	ErrIncorrectResponse       = errors.New("Response received didn't match the challenge's requirements")
+	ErrInvalidContact          = errors.New("A contact URL for an account was invalid")
+	ErrMalformed               = errors.New("The request message was malformed")
+	ErrOrderNotReady           = errors.New("The request attempted to finalize an order that is not ready to be finalized")
+	ErrRateLimited             = errors.New("The request exceeds a rate limit")
+	ErrRejectedIdentifier      = errors.New("The server will not issue certificates for the identifier")
+	ErrServerInternal          = errors.New("The server experienced an internal error")
+	ErrTLS                     = errors.New("The server received a TLS error during validation")
+	ErrUnauthorized            = errors.New("The client lacks sufficient authorization")
+	ErrUnsupportedContact      = errors.New("A contact URL for an account used an unsupported protocol scheme")
+	ErrUnsupportedIdentifier   = errors.New("An identifier is of an unsupported
type") + ErrUserActionRequired = errors.New("Visit the 'instance' URL and take actions specified there") +) + +// Mapping of err->name; see table in RFC 8555 Section 6.7. Errors. +var errIdMappings = map[error]string{ + ErrAccountDoesNotExist: "accountDoesNotExist", + ErrAlreadyRevoked: "alreadyRevoked", + ErrBadCSR: "badCSR", + ErrBadNonce: "badNonce", + ErrBadPublicKey: "badPublicKey", + ErrBadRevocationReason: "badRevocationReason", + ErrBadSignatureAlgorithm: "badSignatureAlgorithm", + ErrCAA: "caa", + ErrCompound: "compound", + ErrConnection: "connection", + ErrDNS: "dns", + ErrExternalAccountRequired: "externalAccountRequired", + ErrIncorrectResponse: "incorrectResponse", + ErrInvalidContact: "invalidContact", + ErrMalformed: "malformed", + ErrOrderNotReady: "orderNotReady", + ErrRateLimited: "rateLimited", + ErrRejectedIdentifier: "rejectedIdentifier", + ErrServerInternal: "serverInternal", + ErrTLS: "tls", + ErrUnauthorized: "unauthorized", + ErrUnsupportedContact: "unsupportedContact", + ErrUnsupportedIdentifier: "unsupportedIdentifier", + ErrUserActionRequired: "userActionRequired", +} + +// Mapping of err->status codes; see table in RFC 8555 Section 6.7. Errors. +var errCodeMappings = map[error]int{ + ErrAccountDoesNotExist: http.StatusBadRequest, // See RFC 8555 Section 7.3.1. Finding an Account URL Given a Key. + ErrAlreadyRevoked: http.StatusBadRequest, + ErrBadCSR: http.StatusBadRequest, + ErrBadNonce: http.StatusBadRequest, + ErrBadPublicKey: http.StatusBadRequest, + ErrBadRevocationReason: http.StatusBadRequest, + ErrBadSignatureAlgorithm: http.StatusBadRequest, + ErrCAA: http.StatusForbidden, + ErrCompound: http.StatusBadRequest, + ErrConnection: http.StatusInternalServerError, + ErrDNS: http.StatusInternalServerError, + ErrExternalAccountRequired: http.StatusUnauthorized, + ErrIncorrectResponse: http.StatusBadRequest, + ErrInvalidContact: http.StatusBadRequest, + ErrMalformed: http.StatusBadRequest, + ErrOrderNotReady: http.StatusForbidden, // See RFC 8555 Section 7.4. Applying for Certificate Issuance. 
+	ErrRateLimited:             http.StatusTooManyRequests,
+	ErrRejectedIdentifier:      http.StatusBadRequest,
+	ErrServerInternal:          http.StatusInternalServerError,
+	ErrTLS:                     http.StatusInternalServerError,
+	ErrUnauthorized:            http.StatusUnauthorized,
+	ErrUnsupportedContact:      http.StatusBadRequest,
+	ErrUnsupportedIdentifier:   http.StatusBadRequest,
+	ErrUserActionRequired:      http.StatusUnauthorized,
+}
+
+type ErrorResponse struct {
+	StatusCode  int              `json:"-"`
+	Type        string           `json:"type"`
+	Detail      string           `json:"detail"`
+	Subproblems []*ErrorResponse `json:"subproblems"`
+}
+
+func (e *ErrorResponse) MarshalForStorage() map[string]interface{} {
+	subProblems := []map[string]interface{}{}
+	for _, subProblem := range e.Subproblems {
+		subProblems = append(subProblems, subProblem.MarshalForStorage())
+	}
+	return map[string]interface{}{
+		"status":      e.StatusCode,
+		"type":        e.Type,
+		"detail":      e.Detail,
+		"subproblems": subProblems,
+	}
+}
+
+func (e *ErrorResponse) Marshal() (*logical.Response, error) {
+	body, err := json.Marshal(e)
+	if err != nil {
+		return nil, fmt.Errorf("failed marshalling of error response: %w", err)
+	}
+
+	var resp logical.Response
+	resp.Data = map[string]interface{}{
+		logical.HTTPContentType: ErrorContentType,
+		logical.HTTPRawBody:     body,
+		logical.HTTPStatusCode:  e.StatusCode,
+	}
+
+	return &resp, nil
+}
+
+func FindType(given error) (err error, id string, code int, found bool) {
+	for err, id = range errIdMappings {
+		if errors.Is(given, err) {
+			found = true
+			break
+		}
+	}
+
+	// If the given error did not match one of the standard ACME errors,
+	// fall back to ErrServerInternal.
+	if !found {
+		err = ErrServerInternal
+		id = errIdMappings[err]
+	}
+
+	code = errCodeMappings[err]
+
+	return
+}
+
+func TranslateError(given error) (*logical.Response, error) {
+	if errors.Is(given, logical.ErrReadOnly) {
+		return nil, given
+	}
+
+	if errors.Is(given, ErrAcmeDisabled) {
+		return logical.RespondWithStatusCode(nil, nil, http.StatusNotFound)
+	}
+
+	body := TranslateErrorToErrorResponse(given)
+
+	return body.Marshal()
+}
+
+func TranslateErrorToErrorResponse(given error) ErrorResponse {
+	// We're multierror aware here: if we're given a list of errors, assume
+	// they're structured so the first error is the outer error and the inner
+	// subproblems are subsequent in the multierror.
+	var remaining []error
+	if unwrapped, ok := given.(*multierror.Error); ok {
+		remaining = unwrapped.Errors[1:]
+		given = unwrapped.Errors[0]
+	}
+
+	_, id, code, found := FindType(given)
+	if !found && len(remaining) > 0 {
+		// Translate multierrors into a generic error code.
+		id = errIdMappings[ErrCompound]
+		code = errCodeMappings[ErrCompound]
+	}
+
+	var body ErrorResponse
+	body.Type = ErrorPrefix + id
+	body.Detail = given.Error()
+	body.StatusCode = code
+
+	for _, subgiven := range remaining {
+		_, subid, _, _ := FindType(subgiven)
+
+		var sub ErrorResponse
+		sub.Type = ErrorPrefix + subid
+		sub.Detail = subgiven.Error()
+
+		body.Subproblems = append(body.Subproblems, &sub)
+	}
+	return body
+}
diff --git a/builtin/logical/pki/acme_jws.go b/builtin/logical/pki/acme_jws.go
new file mode 100644
index 000000000000..cc096c55c250
--- /dev/null
+++ b/builtin/logical/pki/acme_jws.go
@@ -0,0 +1,278 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "crypto" + "encoding/base64" + "encoding/json" + "fmt" + "strings" + + "github.com/go-jose/go-jose/v3" +) + +var AllowedOuterJWSTypes = map[string]interface{}{ + "RS256": true, + "RS384": true, + "RS512": true, + "PS256": true, + "PS384": true, + "PS512": true, + "ES256": true, + "ES384": true, + "ES512": true, + "EdDSA2": true, +} + +var AllowedEabJWSTypes = map[string]interface{}{ + "HS256": true, + "HS384": true, + "HS512": true, +} + +// This wraps a JWS message structure. +type jwsCtx struct { + Algo string `json:"alg"` + Kid string `json:"kid"` + Jwk json.RawMessage `json:"jwk"` + Nonce string `json:"nonce"` + Url string `json:"url"` + Key jose.JSONWebKey `json:"-"` + Existing bool `json:"-"` +} + +func (c *jwsCtx) GetKeyThumbprint() (string, error) { + keyThumbprint, err := c.Key.Thumbprint(crypto.SHA256) + if err != nil { + return "", fmt.Errorf("failed creating thumbprint: %w", err) + } + return base64.RawURLEncoding.EncodeToString(keyThumbprint), nil +} + +func UnmarshalEabJwsJson(eabBytes []byte) (*jwsCtx, error) { + var eabJws jwsCtx + var err error + if err = json.Unmarshal(eabBytes, &eabJws); err != nil { + return nil, err + } + + if eabJws.Kid == "" { + return nil, fmt.Errorf("invalid header: got missing required field 'kid': %w", ErrMalformed) + } + + if _, present := AllowedEabJWSTypes[eabJws.Algo]; !present { + return nil, fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) + } + + return &eabJws, nil +} + +func (c *jwsCtx) UnmarshalOuterJwsJson(a *acmeState, ac *acmeContext, jws []byte) error { + var err error + if err = json.Unmarshal(jws, c); err != nil { + return err + } + + if c.Kid != "" && len(c.Jwk) > 0 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The "jwk" and "kid" fields are mutually exclusive. Servers MUST + // > reject requests that contain both. + return fmt.Errorf("invalid header: got both account 'kid' and 'jwk' in the same message; expected only one: %w", ErrMalformed) + } + + if c.Kid == "" && len(c.Jwk) == 0 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > Either "jwk" (JSON Web Key) or "kid" (Key ID) as specified + // > below + return fmt.Errorf("invalid header: got neither required fields of 'kid' nor 'jwk': %w", ErrMalformed) + } + + if _, present := AllowedOuterJWSTypes[c.Algo]; !present { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Protected Header MUST include the following fields: + // > + // > - "alg" (Algorithm) + // > + // > * This field MUST NOT contain "none" or a Message + // > Authentication Code (MAC) algorithm (e.g. one in which the + // > algorithm registry description mentions MAC/HMAC). + return fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) + } + + if c.Kid != "" { + // Load KID from storage first. + kid := getKeyIdFromAccountUrl(c.Kid) + c.Jwk, err = a.LoadJWK(ac, kid) + if err != nil { + return err + } + c.Kid = kid // Use the uuid itself, not the full account url that was originally provided to us. 
+ c.Existing = true + } + + if err = c.Key.UnmarshalJSON(c.Jwk); err != nil { + return err + } + + if !c.Key.Valid() { + return fmt.Errorf("received invalid jwk: %w", ErrMalformed) + } + + if c.Kid == "" { + c.Kid = genUuid() + c.Existing = false + } + + return nil +} + +func getKeyIdFromAccountUrl(accountUrl string) string { + pieces := strings.Split(accountUrl, "/") + return pieces[len(pieces)-1] +} + +func hasValues(h jose.Header) bool { + return h.KeyID != "" || h.JSONWebKey != nil || h.Algorithm != "" || h.Nonce != "" || len(h.ExtraHeaders) > 0 +} + +func (c *jwsCtx) VerifyJWS(signature string) (map[string]interface{}, error) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unencoded Payload Option [RFC7797] MUST NOT be used + // + // This is validated by go-jose. + sig, err := jose.ParseSigned(signature) + if err != nil { + return nil, fmt.Errorf("error parsing signature: %s: %w", err, ErrMalformed) + } + + if len(sig.Signatures) > 1 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS MUST NOT have multiple signatures + return nil, fmt.Errorf("request had multiple signatures: %w", ErrMalformed) + } + + if hasValues(sig.Signatures[0].Unprotected) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unprotected Header [RFC7515] MUST NOT be used + return nil, fmt.Errorf("request had unprotected headers: %w", ErrMalformed) + } + + payload, err := sig.Verify(c.Key) + if err != nil { + return nil, err + } + + if len(payload) == 0 { + // Distinguish POST-AS-GET from POST-with-an-empty-body. + return nil, nil + } + + var m map[string]interface{} + if err := json.Unmarshal(payload, &m); err != nil { + return nil, fmt.Errorf("failed to json unmarshal 'payload': %s: %w", err, ErrMalformed) + } + + return m, nil +} + +func verifyEabPayload(acmeState *acmeState, ac *acmeContext, outer *jwsCtx, expectedPath string, payload map[string]interface{}) (*eabType, error) { + // Parse the key out. 
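+	// The EAB is itself a JWS in flattened JSON form: a base64url "protected"
+	// header, a "payload" carrying the account's public JWK, and a "signature"
+	// (RFC 8555 Section 7.3.4); we pull those three fields out here.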
+	rawProtectedBase64, ok := payload["protected"]
+	if !ok {
+		return nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed)
+	}
+	jwkBase64, ok := rawProtectedBase64.(string)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse 'protected' field: %w", ErrMalformed)
+	}
+
+	jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to base64 parse eab 'protected': %s: %w", err, ErrMalformed)
+	}
+
+	eabJws, err := UnmarshalEabJwsJson(jwkBytes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to json unmarshal eab 'protected': %w", err)
+	}
+
+	if len(eabJws.Url) == 0 {
+		return nil, fmt.Errorf("missing required parameter 'url' in eab 'protected': %w", ErrMalformed)
+	}
+	expectedUrl := ac.clusterUrl.JoinPath(expectedPath).String()
+	if expectedUrl != eabJws.Url {
+		return nil, fmt.Errorf("invalid value for 'url' in eab 'protected': got '%v' expected '%v': %w", eabJws.Url, expectedUrl, ErrUnauthorized)
+	}
+
+	if len(eabJws.Nonce) != 0 {
+		return nil, fmt.Errorf("nonce should not be provided in eab 'protected': %w", ErrMalformed)
+	}
+
+	rawPayloadBase64, ok := payload["payload"]
+	if !ok {
+		return nil, fmt.Errorf("missing required field eab 'payload': %w", ErrMalformed)
+	}
+	payloadBase64, ok := rawPayloadBase64.(string)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse 'payload' field: %w", ErrMalformed)
+	}
+
+	rawSignatureBase64, ok := payload["signature"]
+	if !ok {
+		return nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed)
+	}
+	signatureBase64, ok := rawSignatureBase64.(string)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse 'signature' field: %w", ErrMalformed)
+	}
+
+	// go-jose only seems to support compact signature encodings.
+	compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64)
+	sig, err := jose.ParseSigned(compactSig)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing eab signature: %s: %w", err, ErrMalformed)
+	}
+
+	if len(sig.Signatures) > 1 {
+		// See RFC 8555 Section 6.2. Request Authentication:
+		//
+		// > The JWS MUST NOT have multiple signatures
+		return nil, fmt.Errorf("eab had multiple signatures: %w", ErrMalformed)
+	}
+
+	if hasValues(sig.Signatures[0].Unprotected) {
+		// See RFC 8555 Section 6.2. Request Authentication:
+		//
+		// > The JWS Unprotected Header [RFC7515] MUST NOT be used
+		return nil, fmt.Errorf("eab had unprotected headers: %w", ErrMalformed)
+	}
+
+	// Load the EAB to validate the signature against
+	eabEntry, err := acmeState.LoadEab(ac.sc, eabJws.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized)
+	}
+
+	verifiedPayload, err := sig.Verify(eabEntry.PrivateBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Make sure the eab payload matches the outer JWK key value.
+	if !bytes.Equal(outer.Jwk, verifiedPayload) {
+		return nil, fmt.Errorf("eab payload does not match outer JWK key: %w", ErrMalformed)
+	}
+
+	if eabEntry.AcmeDirectory != ac.acmeDirectory {
+		// This EAB was not created for this specific ACME directory, reject it
+		return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized)
+	}
+
+	return eabEntry, nil
+}
diff --git a/builtin/logical/pki/acme_state.go b/builtin/logical/pki/acme_state.go
new file mode 100644
index 000000000000..63962d933b99
--- /dev/null
+++ b/builtin/logical/pki/acme_state.go
@@ -0,0 +1,700 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "path" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-secure-stdlib/nonceutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // How many bytes are in a token. Per RFC 8555 Section + // 8.3. HTTP Challenge and Section 11.3 Token Entropy: + // + // > token (required, string): A random value that uniquely identifies + // > the challenge. This value MUST have at least 128 bits of entropy. + tokenBytes = 128 / 8 + + // Path Prefixes + acmePathPrefix = "acme/" + acmeAccountPrefix = acmePathPrefix + "accounts/" + acmeThumbprintPrefix = acmePathPrefix + "account-thumbprints/" + acmeValidationPrefix = acmePathPrefix + "validations/" + acmeEabPrefix = acmePathPrefix + "eab/" +) + +type acmeState struct { + nonces nonceutil.NonceService + + validator *ACMEChallengeEngine + + configDirty *atomic.Bool + _config sync.RWMutex + config acmeConfigEntry +} + +type acmeThumbprint struct { + Kid string `json:"kid"` + Thumbprint string `json:"-"` +} + +func NewACMEState() *acmeState { + state := &acmeState{ + nonces: nonceutil.NewNonceService(), + validator: NewACMEChallengeEngine(), + configDirty: new(atomic.Bool), + } + // Config hasn't been loaded yet; mark dirty. + state.configDirty.Store(true) + + return state +} + +func (a *acmeState) Initialize(b *backend, sc *storageContext) error { + // Initialize the nonce service. + if err := a.nonces.Initialize(); err != nil { + return fmt.Errorf("failed to initialize the ACME nonce service: %w", err) + } + + // Load the ACME config. + _, err := a.getConfigWithUpdate(sc) + if err != nil { + return fmt.Errorf("error initializing ACME engine: %w", err) + } + + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + // It is assumed, that if the node does become the active node later + // the plugin is re-initialized, so this is safe. It also spares the node + // from loading the existing queue into memory for no reason. + b.Logger().Debug("Not on an active node, skipping starting ACME challenge validation engine") + return nil + } + // Kick off our ACME challenge validation engine. + go a.validator.Run(b, a, sc) + + // All good. + return nil +} + +func (a *acmeState) Shutdown(b *backend) { + // If we aren't the active node, nothing to shutdown + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + return + } + + a.validator.Closing <- struct{}{} +} + +func (a *acmeState) markConfigDirty() { + a.configDirty.Store(true) +} + +func (a *acmeState) reloadConfigIfRequired(sc *storageContext) error { + if !a.configDirty.Load() { + return nil + } + + a._config.Lock() + defer a._config.Unlock() + + if !a.configDirty.Load() { + // Someone beat us to grabbing the above write lock and already + // updated the config. 
+		return nil
+	}
+
+	config, err := getAcmeConfig(sc)
+	if err != nil {
+		return fmt.Errorf("failed reading ACME config: %w", err)
+	}
+
+	a.config = *config
+	a.configDirty.Store(false)
+
+	return nil
+}
+
+func (a *acmeState) getConfigWithUpdate(sc *storageContext) (*acmeConfigEntry, error) {
+	if err := a.reloadConfigIfRequired(sc); err != nil {
+		return nil, err
+	}
+
+	a._config.RLock()
+	defer a._config.RUnlock()
+
+	configCopy := a.config
+	return &configCopy, nil
+}
+
+func (a *acmeState) getConfigWithForcedUpdate(sc *storageContext) (*acmeConfigEntry, error) {
+	a.markConfigDirty()
+	return a.getConfigWithUpdate(sc)
+}
+
+func (a *acmeState) writeConfig(sc *storageContext, config *acmeConfigEntry) (*acmeConfigEntry, error) {
+	a._config.Lock()
+	defer a._config.Unlock()
+
+	if err := sc.setAcmeConfig(config); err != nil {
+		a.markConfigDirty()
+		return nil, fmt.Errorf("failed writing ACME config: %w", err)
+	}
+
+	if config != nil {
+		a.config = *config
+	} else {
+		a.config = defaultAcmeConfig
+	}
+
+	return config, nil
+}
+
+func generateRandomBase64(srcBytes int) (string, error) {
+	data := make([]byte, srcBytes)
+	if _, err := io.ReadFull(rand.Reader, data); err != nil {
+		return "", err
+	}
+
+	return base64.RawURLEncoding.EncodeToString(data), nil
+}
+
+func (a *acmeState) GetNonce() (string, time.Time, error) {
+	return a.nonces.Get()
+}
+
+func (a *acmeState) RedeemNonce(nonce string) bool {
+	return a.nonces.Redeem(nonce)
+}
+
+func (a *acmeState) DoTidyNonces() {
+	a.nonces.Tidy()
+}
+
+type ACMEAccountStatus string
+
+func (aas ACMEAccountStatus) String() string {
+	return string(aas)
+}
+
+const (
+	AccountStatusValid       ACMEAccountStatus = "valid"
+	AccountStatusDeactivated ACMEAccountStatus = "deactivated"
+	AccountStatusRevoked     ACMEAccountStatus = "revoked"
+)
+
+type acmeAccount struct {
+	KeyId                string            `json:"-"`
+	Status               ACMEAccountStatus `json:"status"`
+	Contact              []string          `json:"contact"`
+	TermsOfServiceAgreed bool              `json:"terms-of-service-agreed"`
+	Jwk                  []byte            `json:"jwk"`
+	AcmeDirectory        string            `json:"acme-directory"`
+	AccountCreatedDate   time.Time         `json:"account-created-date"`
+	MaxCertExpiry        time.Time         `json:"account-max-cert-expiry"`
+	AccountRevokedDate   time.Time         `json:"account-revoked-date"`
+	Eab                  *eabType          `json:"eab"`
+}
+
+type acmeOrder struct {
+	OrderId                 string              `json:"-"`
+	AccountId               string              `json:"account-id"`
+	Status                  ACMEOrderStatusType `json:"status"`
+	Expires                 time.Time           `json:"expires"`
+	Identifiers             []*ACMEIdentifier   `json:"identifiers"`
+	AuthorizationIds        []string            `json:"authorization-ids"`
+	CertificateSerialNumber string              `json:"cert-serial-number"`
+	CertificateExpiry       time.Time           `json:"cert-expiry"`
+	// The actual issuer UUID that issued the certificate, blank if an order exists but no certificate was issued.
+	IssuerId issuing.IssuerID `json:"issuer-id"`
+}
+
+func (o acmeOrder) getIdentifierDNSValues() []string {
+	var identifiers []string
+	for _, value := range o.Identifiers {
+		if value.Type == ACMEDNSIdentifier {
+			// Here, because of wildcard processing, we need to use the
+			// original value provided by the caller rather than the
+			// post-modification (trimmed '*.' prefix) value.
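+			// e.g. keep "*.dadgarcorp.com" rather than the trimmed
+			// "dadgarcorp.com".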
+			identifiers = append(identifiers, value.OriginalValue)
+		}
+	}
+	return identifiers
+}
+
+func (o acmeOrder) getIdentifierIPValues() []net.IP {
+	var identifiers []net.IP
+	for _, value := range o.Identifiers {
+		if value.Type == ACMEIPIdentifier {
+			identifiers = append(identifiers, net.ParseIP(value.Value))
+		}
+	}
+	return identifiers
+}
+
+func (a *acmeState) CreateAccount(ac *acmeContext, c *jwsCtx, contact []string, termsOfServiceAgreed bool, eab *eabType) (*acmeAccount, error) {
+	// Write the thumbprint value/entry out first; if we get an error mid-way through,
+	// this is easier to recover from. The new kid with the same existing public key
+	// will rewrite the thumbprint entry. This goes hand in hand with LoadAccountByKey,
+	// which will return a nil, nil value if the referenced kid in a loaded thumbprint does not
+	// exist. This effectively makes this self-healing IF the end-user re-attempts the
+	// account creation with the same public key.
+	thumbprint, err := c.GetKeyThumbprint()
+	if err != nil {
+		return nil, fmt.Errorf("failed generating thumbprint: %w", err)
+	}
+
+	thumbPrint := &acmeThumbprint{
+		Kid:        c.Kid,
+		Thumbprint: thumbprint,
+	}
+	thumbPrintEntry, err := logical.StorageEntryJSON(acmeThumbprintPrefix+thumbprint, thumbPrint)
+	if err != nil {
+		return nil, fmt.Errorf("error generating account thumbprint entry: %w", err)
+	}
+
+	if err = ac.sc.Storage.Put(ac.sc.Context, thumbPrintEntry); err != nil {
+		return nil, fmt.Errorf("error writing account thumbprint entry: %w", err)
+	}
+
+	// Now write out the main value that the thumbprint points to.
+	acct := &acmeAccount{
+		KeyId:                c.Kid,
+		Contact:              contact,
+		TermsOfServiceAgreed: termsOfServiceAgreed,
+		Jwk:                  c.Jwk,
+		Status:               AccountStatusValid,
+		AcmeDirectory:        ac.acmeDirectory,
+		AccountCreatedDate:   time.Now(),
+		Eab:                  eab,
+	}
+	json, err := logical.StorageEntryJSON(acmeAccountPrefix+c.Kid, acct)
+	if err != nil {
+		return nil, fmt.Errorf("error creating account entry: %w", err)
+	}
+
+	if err := ac.sc.Storage.Put(ac.sc.Context, json); err != nil {
+		return nil, fmt.Errorf("error writing account entry: %w", err)
+	}
+
+	return acct, nil
+}
+
+func (a *acmeState) UpdateAccount(sc *storageContext, acct *acmeAccount) error {
+	json, err := logical.StorageEntryJSON(acmeAccountPrefix+acct.KeyId, acct)
+	if err != nil {
+		return fmt.Errorf("error creating account entry: %w", err)
+	}
+
+	if err := sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("error writing account entry: %w", err)
+	}
+
+	return nil
+}
+
+// LoadAccount will load the account object based on the passed in keyId field value,
+// otherwise will return an error if the account does not exist.
+func (a *acmeState) LoadAccount(ac *acmeContext, keyId string) (*acmeAccount, error) {
+	entry, err := ac.sc.Storage.Get(ac.sc.Context, acmeAccountPrefix+keyId)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+	if entry == nil {
+		return nil, fmt.Errorf("account not found: %w", ErrAccountDoesNotExist)
+	}
+
+	var acct acmeAccount
+	err = entry.DecodeJSON(&acct)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding account: %w", err)
+	}
+
+	if acct.AcmeDirectory != ac.acmeDirectory {
+		return nil, fmt.Errorf("%w: account part of different ACME directory path", ErrMalformed)
+	}
+
+	acct.KeyId = keyId
+
+	return &acct, nil
+}
+
+// LoadAccountByKey will attempt to load the account based on a key thumbprint. If the thumbprint
+// or kid is unknown, a nil, nil will be returned.
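+//
+// A minimal caller sketch (assumed names; hedged, not the canonical call site),
+// mirroring the self-healing contract CreateAccount describes above:
+//
+//	acct, err := a.LoadAccountByKey(ac, thumbprint)
+//	if err == nil && acct == nil {
+//		// Unknown key: safe to treat as a brand-new registration.
+//	}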
+func (a *acmeState) LoadAccountByKey(ac *acmeContext, keyThumbprint string) (*acmeAccount, error) {
+	thumbprintEntry, err := ac.sc.Storage.Get(ac.sc.Context, acmeThumbprintPrefix+keyThumbprint)
+	if err != nil {
+		return nil, fmt.Errorf("failed loading acme thumbprintEntry for key: %w", err)
+	}
+	if thumbprintEntry == nil {
+		return nil, nil
+	}
+
+	var thumbprint acmeThumbprint
+	err = thumbprintEntry.DecodeJSON(&thumbprint)
+	if err != nil {
+		return nil, fmt.Errorf("failed decoding thumbprint entry: %s: %w", keyThumbprint, err)
+	}
+
+	if len(thumbprint.Kid) == 0 {
+		return nil, fmt.Errorf("empty kid within thumbprint entry: %s", keyThumbprint)
+	}
+
+	acct, err := a.LoadAccount(ac, thumbprint.Kid)
+	if err != nil {
+		// If we fail to look up the account that the thumbprint entry references, assume a bad
+		// write previously occurred in which we managed to write out the thumbprint but failed
+		// writing out the main account information.
+		if errors.Is(err, ErrAccountDoesNotExist) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return acct, nil
+}
+
+func (a *acmeState) LoadJWK(ac *acmeContext, keyId string) ([]byte, error) {
+	key, err := a.LoadAccount(ac, keyId)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(key.Jwk) == 0 {
+		return nil, fmt.Errorf("malformed key entry lacks JWK")
+	}
+
+	return key.Jwk, nil
+}
+
+func (a *acmeState) LoadAuthorization(ac *acmeContext, userCtx *jwsCtx, authId string) (*ACMEAuthorization, error) {
+	if authId == "" {
+		return nil, fmt.Errorf("malformed authorization identifier")
+	}
+
+	authorizationPath := getAuthorizationPath(userCtx.Kid, authId)
+
+	authz, err := loadAuthorizationAtPath(ac.sc, authorizationPath)
+	if err != nil {
+		return nil, err
+	}
+
+	if userCtx.Kid != authz.AccountId {
+		return nil, ErrUnauthorized
+	}
+
+	return authz, nil
+}
+
+func loadAuthorizationAtPath(sc *storageContext, authorizationPath string) (*ACMEAuthorization, error) {
+	entry, err := sc.Storage.Get(sc.Context, authorizationPath)
+	if err != nil {
+		return nil, fmt.Errorf("error loading authorization: %w", err)
+	}
+
+	if entry == nil {
+		return nil, fmt.Errorf("authorization does not exist: %w", ErrMalformed)
+	}
+
+	var authz ACMEAuthorization
+	err = entry.DecodeJSON(&authz)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding authorization: %w", err)
+	}
+
+	return &authz, nil
+}
+
+func (a *acmeState) SaveAuthorization(ac *acmeContext, authz *ACMEAuthorization) error {
+	path := getAuthorizationPath(authz.AccountId, authz.Id)
+	return saveAuthorizationAtPath(ac.sc, path, authz)
+}
+
+func saveAuthorizationAtPath(sc *storageContext, path string, authz *ACMEAuthorization) error {
+	if authz.Id == "" {
+		return fmt.Errorf("invalid authorization, missing id")
+	}
+
+	if authz.AccountId == "" {
+		return fmt.Errorf("invalid authorization, missing account id")
+	}
+
+	json, err := logical.StorageEntryJSON(path, authz)
+	if err != nil {
+		return fmt.Errorf("error creating authorization entry: %w", err)
+	}
+
+	if err = sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("error writing authorization entry: %w", err)
+	}
+
+	return nil
+}
+
+func (a *acmeState) ParseRequestParams(ac *acmeContext, req *logical.Request, data *framework.FieldData) (*jwsCtx, map[string]interface{}, error) {
+	var c jwsCtx
+	var m map[string]interface{}
+
+	// Parse the key out.
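+	// ACME requests arrive as JWS objects in the flattened JSON
+	// serialization (RFC 8555 Section 6.2), roughly (illustrative only):
+	//
+	//	{"protected": "<b64url>", "payload": "<b64url>", "signature": "<b64url>"}
+	//
+	// so the three fields are read individually below.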
+ rawJWKBase64, ok := data.GetOk("protected") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed) + } + jwkBase64 := rawJWKBase64.(string) + + jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64) + if err != nil { + return nil, nil, fmt.Errorf("failed to base64 parse 'protected': %s: %w", err, ErrMalformed) + } + if err = c.UnmarshalOuterJwsJson(a, ac, jwkBytes); err != nil { + return nil, nil, fmt.Errorf("failed to json unmarshal 'protected': %w", err) + } + + // Since we already parsed the header to verify the JWS context, we + // should read and redeem the nonce here too, to avoid doing any extra + // work if it is invalid. + if !a.RedeemNonce(c.Nonce) { + return nil, nil, fmt.Errorf("invalid or reused nonce: %w", ErrBadNonce) + } + + // If the path is incorrect, reject the request. + // + // See RFC 8555 Section 6.4. Request URL Integrity: + // + // > As noted in Section 6.2, all ACME request objects carry a "url" + // > header parameter in their protected header. ... On receiving such + // > an object in an HTTP request, the server MUST compare the "url" + // > header parameter to the request URL. If the two do not match, + // > then the server MUST reject the request as unauthorized. + if len(c.Url) == 0 { + return nil, nil, fmt.Errorf("missing required parameter 'url' in 'protected': %w", ErrMalformed) + } + if ac.clusterUrl.JoinPath(req.Path).String() != c.Url { + return nil, nil, fmt.Errorf("invalid value for 'url' in 'protected': got '%v' expected '%v': %w", c.Url, ac.clusterUrl.JoinPath(req.Path).String(), ErrUnauthorized) + } + + rawPayloadBase64, ok := data.GetOk("payload") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'payload': %w", ErrMalformed) + } + payloadBase64 := rawPayloadBase64.(string) + + rawSignatureBase64, ok := data.GetOk("signature") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed) + } + signatureBase64 := rawSignatureBase64.(string) + + // go-jose only seems to support compact signature encodings. 
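+	// Reassemble the compact JWS serialization from the flattened fields:
+	// BASE64URL(protected) '.' BASE64URL(payload) '.' BASE64URL(signature),
+	// per RFC 7515 Section 7.1.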
+ compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64) + m, err = c.VerifyJWS(compactSig) + if err != nil { + return nil, nil, fmt.Errorf("failed to verify signature: %w", err) + } + + return &c, m, nil +} + +func (a *acmeState) LoadOrder(ac *acmeContext, userCtx *jwsCtx, orderId string) (*acmeOrder, error) { + path := getOrderPath(userCtx.Kid, orderId) + entry, err := ac.sc.Storage.Get(ac.sc.Context, path) + if err != nil { + return nil, fmt.Errorf("error loading order: %w", err) + } + + if entry == nil { + return nil, fmt.Errorf("order does not exist: %w", ErrMalformed) + } + + var order acmeOrder + err = entry.DecodeJSON(&order) + if err != nil { + return nil, fmt.Errorf("error decoding order: %w", err) + } + + if userCtx.Kid != order.AccountId { + return nil, ErrUnauthorized + } + + order.OrderId = orderId + + return &order, nil +} + +func (a *acmeState) SaveOrder(ac *acmeContext, order *acmeOrder) error { + if order.OrderId == "" { + return fmt.Errorf("invalid order, missing order id") + } + + if order.AccountId == "" { + return fmt.Errorf("invalid order, missing account id") + } + path := getOrderPath(order.AccountId, order.OrderId) + json, err := logical.StorageEntryJSON(path, order) + if err != nil { + return fmt.Errorf("error serializing order entry: %w", err) + } + + if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil { + return fmt.Errorf("error writing order entry: %w", err) + } + + return nil +} + +func (a *acmeState) ListOrderIds(sc *storageContext, accountId string) ([]string, error) { + accountOrderPrefixPath := acmeAccountPrefix + accountId + "/orders/" + + rawOrderIds, err := sc.Storage.List(sc.Context, accountOrderPrefixPath) + if err != nil { + return nil, fmt.Errorf("failed listing order ids for account %s: %w", accountId, err) + } + + orderIds := []string{} + for _, order := range rawOrderIds { + if strings.HasSuffix(order, "/") { + // skip any folders we might have for some reason + continue + } + orderIds = append(orderIds, order) + } + return orderIds, nil +} + +type acmeCertEntry struct { + Serial string `json:"-"` + Account string `json:"-"` + Order string `json:"order"` +} + +func (a *acmeState) TrackIssuedCert(ac *acmeContext, accountId string, serial string, orderId string) error { + path := getAcmeSerialToAccountTrackerPath(accountId, serial) + entry := acmeCertEntry{ + Order: orderId, + } + + json, err := logical.StorageEntryJSON(path, &entry) + if err != nil { + return fmt.Errorf("error serializing acme cert entry: %w", err) + } + + if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil { + return fmt.Errorf("error writing acme cert entry: %w", err) + } + + return nil +} + +func (a *acmeState) GetIssuedCert(ac *acmeContext, accountId string, serial string) (*acmeCertEntry, error) { + path := acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial) + + entry, err := ac.sc.Storage.Get(ac.sc.Context, path) + if err != nil { + return nil, fmt.Errorf("error loading acme cert entry: %w", err) + } + + if entry == nil { + return nil, fmt.Errorf("no certificate with this serial was issued for this account") + } + + var cert acmeCertEntry + err = entry.DecodeJSON(&cert) + if err != nil { + return nil, fmt.Errorf("error decoding acme cert entry: %w", err) + } + + cert.Serial = denormalizeSerial(serial) + cert.Account = accountId + + return &cert, nil +} + +func (a *acmeState) SaveEab(sc *storageContext, eab *eabType) error { + json, err := logical.StorageEntryJSON(path.Join(acmeEabPrefix, eab.KeyID), eab) + if err != 
nil {
+		return err
+	}
+	return sc.Storage.Put(sc.Context, json)
+}
+
+func (a *acmeState) LoadEab(sc *storageContext, eabKid string) (*eabType, error) {
+	rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid))
+	if err != nil {
+		return nil, err
+	}
+	if rawEntry == nil {
+		return nil, fmt.Errorf("%w: no eab found for kid %s", ErrStorageItemNotFound, eabKid)
+	}
+
+	var eab eabType
+	err = rawEntry.DecodeJSON(&eab)
+	if err != nil {
+		return nil, err
+	}
+
+	eab.KeyID = eabKid
+	return &eab, nil
+}
+
+func (a *acmeState) DeleteEab(sc *storageContext, eabKid string) (bool, error) {
+	rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid))
+	if err != nil {
+		return false, err
+	}
+	if rawEntry == nil {
+		return false, nil
+	}
+
+	err = sc.Storage.Delete(sc.Context, path.Join(acmeEabPrefix, eabKid))
+	if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+func (a *acmeState) ListEabIds(sc *storageContext) ([]string, error) {
+	entries, err := sc.Storage.List(sc.Context, acmeEabPrefix)
+	if err != nil {
+		return nil, err
+	}
+	var ids []string
+	for _, entry := range entries {
+		if strings.HasSuffix(entry, "/") {
+			continue
+		}
+		ids = append(ids, entry)
+	}
+
+	return ids, nil
+}
+
+func getAcmeSerialToAccountTrackerPath(accountId string, serial string) string {
+	return acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial)
+}
+
+func getAuthorizationPath(accountId string, authId string) string {
+	return acmeAccountPrefix + accountId + "/authorizations/" + authId
+}
+
+func getOrderPath(accountId string, orderId string) string {
+	return acmeAccountPrefix + accountId + "/orders/" + orderId
+}
+
+func getACMEToken() (string, error) {
+	return generateRandomBase64(tokenBytes)
+}
diff --git a/builtin/logical/pki/acme_state_test.go b/builtin/logical/pki/acme_state_test.go
new file mode 100644
index 000000000000..ed9586e834e2
--- /dev/null
+++ b/builtin/logical/pki/acme_state_test.go
@@ -0,0 +1,43 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestAcmeNonces(t *testing.T) {
+	t.Parallel()
+
+	a := NewACMEState()
+	a.nonces.Initialize()
+
+	// Simple operation should succeed.
+	nonce, _, err := a.GetNonce()
+	require.NoError(t, err)
+	require.NotEmpty(t, nonce)
+
+	require.True(t, a.RedeemNonce(nonce))
+	require.False(t, a.RedeemNonce(nonce))
+
+	// Redeeming in opposite order should work.
+	var nonces []string
+	for i := 0; i < len(nonce); i++ {
+		nonce, _, err = a.GetNonce()
+		require.NoError(t, err)
+		require.NotEmpty(t, nonce)
+
+		// Collect each nonce; without this append the loops below would
+		// iterate over an empty slice and verify nothing.
+		nonces = append(nonces, nonce)
+	}
+
+	for i := len(nonces) - 1; i >= 0; i-- {
+		nonce = nonces[i]
+		require.True(t, a.RedeemNonce(nonce))
+	}
+
+	for i := 0; i < len(nonces); i++ {
+		nonce = nonces[i]
+		require.False(t, a.RedeemNonce(nonce))
+	}
+}
diff --git a/builtin/logical/pki/acme_wrappers.go b/builtin/logical/pki/acme_wrappers.go
new file mode 100644
index 000000000000..88a1cee1d075
--- /dev/null
+++ b/builtin/logical/pki/acme_wrappers.go
@@ -0,0 +1,512 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +type acmeContext struct { + issuing.IssuerRoleContext + + // baseUrl is the combination of the configured cluster local URL and the acmePath up to /acme/ + baseUrl *url.URL + clusterUrl *url.URL + sc *storageContext + acmeState *acmeState + // acmeDirectory is a string that can distinguish the various acme directories we have configured + // if something needs to remain locked into a directory path structure. + acmeDirectory string + eabPolicy EabPolicy + ciepsPolicy string + runtimeOpts acmeWrapperOpts +} + +func (c acmeContext) getAcmeState() *acmeState { + return c.acmeState +} + +type ( + acmeOperation func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) + acmeParsedOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) + acmeAccountRequiredOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, acct *acmeAccount) (*logical.Response, error) +) + +// setupAcmeDirectory will populate a prefix'd URL with all the paths required +// for a given ACME directory. +func setupAcmeDirectory(b *backend, acmePrefix string, unauthPrefix string, opts acmeWrapperOpts) { + acmePrefix = strings.TrimRight(acmePrefix, "/") + unauthPrefix = strings.TrimRight(unauthPrefix, "/") + + b.Backend.Paths = append(b.Backend.Paths, pathAcmeDirectory(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNonce(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNewAccount(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeUpdateAccount(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeGetOrder(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeListOrders(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNewOrder(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeFinalizeOrder(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeFetchOrderCert(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeChallenge(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeAuthorization(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeRevoke(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNewEab(b, acmePrefix)) // auth'd API that lives underneath the various /acme paths + + // Add specific un-auth'd paths for ACME APIs + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/directory") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/new-nonce") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/new-account") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/new-order") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/revoke-cert") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/key-change") + b.PathsSpecial.Unauthenticated = 
append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/account/+")
+	b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/authorization/+")
+	b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/challenge/+/+")
+	b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/orders")
+	b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/order/+")
+	b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/order/+/finalize")
+	b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/order/+/cert")
+	// We specifically do NOT add acme/new-eab to this as it should be auth'd
+}
+
+// acmeErrorWrapper is the lowest-level wrapper; it translates errors into proper ACME error responses
+func acmeErrorWrapper(op framework.OperationFunc) framework.OperationFunc {
+	return func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		resp, err := op(ctx, r, data)
+		if err != nil {
+			return TranslateError(err)
+		}
+
+		return resp, nil
+	}
+}
+
+type acmeWrapperOpts struct {
+	isDefault      bool
+	isCiepsEnabled bool
+}
+
+func (o acmeWrapperOpts) Clone() acmeWrapperOpts {
+	return acmeWrapperOpts{
+		isDefault:      o.isDefault,
+		isCiepsEnabled: o.isCiepsEnabled,
+	}
+}
+
+// acmeWrapper is a basic wrapper that all ACME handlers should leverage as the basis.
+// It creates a basic ACME context and validates that basic ACME configuration is set up
+// for operations. It pulls in acmeErrorWrapper to translate error messages for users,
+// but does not enforce any sort of ACME authentication.
+func (b *backend) acmeWrapper(opts acmeWrapperOpts, op acmeOperation) framework.OperationFunc {
+	return acmeErrorWrapper(func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		sc := b.makeStorageContext(ctx, r.Storage)
+
+		config, err := b.GetAcmeState().getConfigWithUpdate(sc)
+		if err != nil {
+			return nil, fmt.Errorf("failed to fetch ACME configuration: %w", err)
+		}
+
+		// use string form in case someone messes up our config from raw storage.
+		eabPolicy, err := getEabPolicyByString(string(config.EabPolicyName))
+		if err != nil {
+			return nil, err
+		}
+
+		if isAcmeDisabled(sc, config, eabPolicy) {
+			return nil, ErrAcmeDisabled
+		}
+
+		if b.UseLegacyBundleCaStorage() {
+			return nil, fmt.Errorf("%w: Can not perform ACME operations until migration has completed", ErrServerInternal)
+		}
+
+		acmeBaseUrl, clusterBase, err := getAcmeBaseUrl(sc, r)
+		if err != nil {
+			return nil, err
+		}
+
+		role, issuer, err := getAcmeRoleAndIssuer(sc, data, config)
+		if err != nil {
+			return nil, err
+		}
+
+		acmeDirectory, err := getAcmeDirectory(r)
+		if err != nil {
+			return nil, err
+		}
+
+		isCiepsEnabled, ciepsPolicy, err := getCiepsAcmeSettings(b, sc, opts, config, data)
+		if err != nil {
+			return nil, err
+		}
+		runtimeOpts := opts.Clone()
+
+		if isCiepsEnabled {
+			// We need to possibly reset the isCiepsEnabled option to true if we are in
+			// the default folder with the external-policy set, as it would have been
+			// normally disabled.
+			if runtimeOpts.isDefault {
+				runtimeOpts.isCiepsEnabled = true
+			}
+		}
+
+		acmeCtx := &acmeContext{
+			IssuerRoleContext: issuing.NewIssuerRoleContext(ctx, issuer, role),
+			baseUrl:           acmeBaseUrl,
+			clusterUrl:        clusterBase,
+			sc:                sc,
+			acmeState:         b.acmeState,
+			acmeDirectory:     acmeDirectory,
+			eabPolicy:         eabPolicy,
+			ciepsPolicy:       ciepsPolicy,
+			runtimeOpts:       runtimeOpts,
+		}
+
+		return op(acmeCtx, r, data)
+	})
+}
+
+// acmeParsedWrapper is an ACME wrapper that parses out the ACME request parameters, validates
+// that we have a proper signature, and passes the operation a decoded map of the arguments received.
+// This wrapper builds on top of acmeWrapper. Note that while this does perform signature
+// verification, it does not enforce that the account exists or is in a valid state.
+func (b *backend) acmeParsedWrapper(opt acmeWrapperOpts, op acmeParsedOperation) framework.OperationFunc {
+	return b.acmeWrapper(opt, func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData) (*logical.Response, error) {
+		user, data, err := b.GetAcmeState().ParseRequestParams(acmeCtx, r, fields)
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := op(acmeCtx, r, fields, user, data)
+
+		// Our response handlers might not add the necessary headers.
+		if resp != nil {
+			if resp.Headers == nil {
+				resp.Headers = map[string][]string{}
+			}
+
+			if _, ok := resp.Headers["Replay-Nonce"]; !ok {
+				nonce, _, err := b.GetAcmeState().GetNonce()
+				if err != nil {
+					return nil, err
+				}
+
+				resp.Headers["Replay-Nonce"] = []string{nonce}
+			}
+
+			if _, ok := resp.Headers["Link"]; !ok {
+				resp.Headers["Link"] = genAcmeLinkHeader(acmeCtx)
+			} else {
+				directory := genAcmeLinkHeader(acmeCtx)[0]
+				addDirectory := true
+				for _, item := range resp.Headers["Link"] {
+					if item == directory {
+						addDirectory = false
+						break
+					}
+				}
+				if addDirectory {
+					resp.Headers["Link"] = append(resp.Headers["Link"], directory)
+				}
+			}
+
+			// ACME responses don't understand Vault's default encoding
+			// format. Rather than expecting everything to handle creating
+			// ACME-formatted responses, do the marshaling in one place.
+			if _, ok := resp.Data[logical.HTTPRawBody]; !ok {
+				ignored_values := map[string]bool{logical.HTTPContentType: true, logical.HTTPStatusCode: true}
+				fields := map[string]interface{}{}
+				body := map[string]interface{}{
+					logical.HTTPContentType: "application/json",
+					logical.HTTPStatusCode:  http.StatusOK,
+				}
+
+				for key, value := range resp.Data {
+					if _, present := ignored_values[key]; !present {
+						fields[key] = value
+					} else {
+						body[key] = value
+					}
+				}
+
+				rawBody, err := json.Marshal(fields)
+				if err != nil {
+					return nil, fmt.Errorf("error marshaling JSON body: %w", err)
+				}
+
+				body[logical.HTTPRawBody] = rawBody
+				resp.Data = body
+			}
+		}
+
+		return resp, err
+	})
+}
+
+// acmeAccountRequiredWrapper builds on top of acmeParsedWrapper, enforcing that the
+// request has a proper signature for an existing account, and that the account is
+// in a valid status. It passes to the operation a decoded form of the request
+// parameters as well as the ACME account the request is for.
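+//
+// A hedged usage sketch (inline handler; argument names assumed):
+//
+//	op := b.acmeAccountRequiredWrapper(opts, func(ctx *acmeContext,
+//		r *logical.Request, fields *framework.FieldData, uc *jwsCtx,
+//		data map[string]interface{}, acct *acmeAccount) (*logical.Response, error) {
+//		// acct is non-nil and in "valid" status by the time this runs.
+//		return nil, nil
+//	})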
+func (b *backend) acmeAccountRequiredWrapper(opt acmeWrapperOpts, op acmeAccountRequiredOperation) framework.OperationFunc {
+	return b.acmeParsedWrapper(opt, func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}) (*logical.Response, error) {
+		if !uc.Existing {
+			return nil, fmt.Errorf("cannot process request without a 'kid': %w", ErrMalformed)
+		}
+
+		account, err := requireValidAcmeAccount(acmeCtx, uc)
+		if err != nil {
+			return nil, err
+		}
+
+		return op(acmeCtx, r, fields, uc, data, account)
+	})
+}
+
+func requireValidAcmeAccount(acmeCtx *acmeContext, uc *jwsCtx) (*acmeAccount, error) {
+	account, err := acmeCtx.getAcmeState().LoadAccount(acmeCtx, uc.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+
+	if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil {
+		return nil, err
+	}
+
+	if account.Status != AccountStatusValid {
+		// Treating "revoked" and "deactivated" as the same here.
+		return nil, fmt.Errorf("%w: account in status: %s", ErrUnauthorized, account.Status)
+	}
+	return account, nil
+}
+
+func getAcmeBaseUrl(sc *storageContext, r *logical.Request) (*url.URL, *url.URL, error) {
+	baseUrl, err := getBasePathFromClusterConfig(sc)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	directoryPrefix, err := getAcmeDirectory(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return baseUrl.JoinPath(directoryPrefix), baseUrl, nil
+}
+
+func getBasePathFromClusterConfig(sc *storageContext) (*url.URL, error) {
+	cfg, err := sc.getClusterConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed loading cluster config: %w", err)
+	}
+
+	if cfg.Path == "" {
+		return nil, fmt.Errorf("ACME feature requires local cluster 'path' field configuration to be set")
+	}
+
+	baseUrl, err := url.Parse(cfg.Path)
+	if err != nil {
+		return nil, fmt.Errorf("failed parsing URL configured in local cluster 'path' configuration: %s: %s",
+			cfg.Path, err.Error())
+	}
+	return baseUrl, nil
+}
+
+func getAcmeIssuer(sc *storageContext, issuerName string) (*issuing.IssuerEntry, error) {
+	if issuerName == "" {
+		issuerName = defaultRef
+	}
+	issuerId, err := sc.resolveIssuerReference(issuerName)
+	if err != nil {
+		return nil, fmt.Errorf("%w: issuer does not exist", ErrMalformed)
+	}
+
+	issuer, err := sc.fetchIssuerById(issuerId)
+	if err != nil {
+		return nil, fmt.Errorf("issuer failed to load: %w", err)
+	}
+
+	if issuer.Usage.HasUsage(issuing.IssuanceUsage) && len(issuer.KeyID) > 0 {
+		return issuer, nil
+	}
+
+	return nil, fmt.Errorf("%w: issuer missing proper issuance usage or key", ErrServerInternal)
+}
+
+// getAcmeDirectory returns the base acme directory path, without a leading '/' and including
+// the trailing /acme/ folder, which is the root of all our various directories
+func getAcmeDirectory(r *logical.Request) (string, error) {
+	acmePath := r.Path
+	if !strings.HasPrefix(acmePath, "/") {
+		acmePath = "/" + acmePath
+	}
+
+	lastIndex := strings.LastIndex(acmePath, "/acme/")
+	if lastIndex == -1 {
+		return "", fmt.Errorf("%w: unable to determine acme base folder path: %s", ErrServerInternal, acmePath)
+	}
+
+	// Skip the leading '/' and return our base path with the /acme/
+	return strings.TrimLeft(acmePath[0:lastIndex]+"/acme/", "/"), nil
+}
+
+func getAcmeRoleAndIssuer(sc *storageContext, data *framework.FieldData, config *acmeConfigEntry) (*issuing.RoleEntry, *issuing.IssuerEntry, error) {
+	requestedIssuer := getRequestedAcmeIssuerFromPath(data)
+	requestedRole :=
getRequestedAcmeRoleFromPath(data) + issuerToLoad := requestedIssuer + + var role *issuing.RoleEntry + var err error + + if len(requestedRole) == 0 { // Default Directory + policyType, extraInfo, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy) + if err != nil { + return nil, nil, err + } + switch policyType { + case Forbid: + return nil, nil, fmt.Errorf("%w: default directory not allowed by ACME policy", ErrServerInternal) + case SignVerbatim, ExternalPolicy: + role = issuing.SignVerbatimRoleWithOpts( + issuing.WithIssuer(requestedIssuer), + issuing.WithNoStore(false)) + case Role: + role, err = getAndValidateAcmeRole(sc, extraInfo) + if err != nil { + return nil, nil, err + } + } + } else { // Requested Role + role, err = getAndValidateAcmeRole(sc, requestedRole) + if err != nil { + return nil, nil, err + } + + // Check the Requested Role is Allowed + allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*" + if !allowAnyRole { + + var foundRole bool + for _, name := range config.AllowedRoles { + if name == role.Name { + foundRole = true + break + } + } + + if !foundRole { + return nil, nil, fmt.Errorf("%w: specified role not allowed by ACME policy", ErrServerInternal) + } + } + } + + // If we haven't loaded an issuer directly from our path and the specified (or default) + // role does specify an issuer prefer the role's issuer rather than the default issuer. + if len(role.Issuer) > 0 && len(requestedIssuer) == 0 { + issuerToLoad = role.Issuer + } + + issuer, err := getAcmeIssuer(sc, issuerToLoad) + if err != nil { + return nil, nil, err + } + + allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*" + if !allowAnyIssuer { + var foundIssuer bool + for index, name := range config.AllowedIssuers { + candidateId, err := sc.resolveIssuerReference(name) + if err != nil { + return nil, nil, fmt.Errorf("failed to resolve reference for allowed_issuer entry %d: %w", index, err) + } + + if candidateId == issuer.ID { + foundIssuer = true + break + } + } + + if !foundIssuer { + return nil, nil, fmt.Errorf("%w: specified issuer not allowed by ACME policy", ErrServerInternal) + } + } + + // If not allowed in configuration, override ExtKeyUsage behavior to force it to only be + // ServerAuth within ACME issued certs + if !config.AllowRoleExtKeyUsage { + role.ExtKeyUsage = []string{"serverauth"} + role.ExtKeyUsageOIDs = []string{} + role.ServerFlag = true + role.ClientFlag = false + role.CodeSigningFlag = false + role.EmailProtectionFlag = false + } + + return role, issuer, nil +} + +func getAndValidateAcmeRole(sc *storageContext, requestedRole string) (*issuing.RoleEntry, error) { + var err error + role, err := sc.GetRole(requestedRole) + if err != nil { + return nil, fmt.Errorf("%w: err loading role", ErrServerInternal) + } + + if role == nil { + return nil, fmt.Errorf("%w: role does not exist", ErrMalformed) + } + + if role.NoStore { + return nil, fmt.Errorf("%w: role can not be used as NoStore is set to true", ErrServerInternal) + } + + return role, nil +} + +func getRequestedAcmeRoleFromPath(data *framework.FieldData) string { + requestedRole := "" + roleNameRaw, present := data.GetOk("role") + if present { + requestedRole = roleNameRaw.(string) + } + return requestedRole +} + +func getRequestedAcmeIssuerFromPath(data *framework.FieldData) string { + requestedIssuer := "" + requestedIssuerRaw, present := data.GetOk(issuerRefParam) + if present { + requestedIssuer = requestedIssuerRaw.(string) + } + return requestedIssuer +} + 
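+// Illustrative path shapes (mount-relative; names hypothetical) and what the
+// two helpers above extract from them:
+//
+//	acme/...                                  -> role "", issuer ""  (default directory policy)
+//	roles/my-role/acme/...                    -> role "my-role"
+//	issuer/my-issuer/acme/...                 -> issuer "my-issuer"
+//	issuer/my-issuer/roles/my-role/acme/...   -> both set
+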
+func isAcmeDisabled(sc *storageContext, config *acmeConfigEntry, policy EabPolicy) bool {
+	if !config.Enabled {
+		return true
+	}
+
+	disableAcme, nonFatalErr := isPublicACMEDisabledByEnv()
+	if nonFatalErr != nil {
+		sc.Logger().Warn(fmt.Sprintf("could not parse env var '%s'", disableAcmeEnvVar), "error", nonFatalErr)
+	}
+
+	// The OS environment if true will override any configuration option.
+	if disableAcme {
+		if policy.OverrideEnvDisablingPublicAcme() {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
diff --git a/builtin/logical/pki/acme_wrappers_test.go b/builtin/logical/pki/acme_wrappers_test.go
new file mode 100644
index 000000000000..569036deae6c
--- /dev/null
+++ b/builtin/logical/pki/acme_wrappers_test.go
@@ -0,0 +1,125 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/stretchr/testify/require"
+)
+
+// TestACMEIssuerRoleLoading validates the role and issuer loading logic within the base
+// ACME wrapper is correct.
+func TestACMEIssuerRoleLoading(t *testing.T) {
+	b, s := CreateBackendWithStorage(t)
+
+	_, err := CBWrite(b, s, "config/cluster", map[string]interface{}{
+		"path":     "http://localhost:8200/v1/pki",
+		"aia_path": "http://localhost:8200/cdn/pki",
+	})
+	require.NoError(t, err)
+
+	_, err = CBWrite(b, s, "config/acme", map[string]interface{}{
+		"enabled": true,
+	})
+	require.NoError(t, err)
+
+	_, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "myvault1.com",
+		"issuer_name": "issuer-1",
+		"key_type":    "ec",
+	})
+	require.NoError(t, err, "failed creating issuer issuer-1")
+
+	_, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "myvault2.com",
+		"issuer_name": "issuer-2",
+		"key_type":    "ec",
+	})
+	require.NoError(t, err, "failed creating issuer issuer-2")
+
+	_, err = CBWrite(b, s, "roles/role-bad-issuer", map[string]interface{}{
+		issuerRefParam: "non-existant",
+		"no_store":     "false",
+	})
+	require.NoError(t, err, "failed creating role role-bad-issuer")
+
+	_, err = CBWrite(b, s, "roles/role-no-store-enabled", map[string]interface{}{
+		issuerRefParam: "issuer-2",
+		"no_store":     "true",
+	})
+	require.NoError(t, err, "failed creating role role-no-store-enabled")
+
+	_, err = CBWrite(b, s, "roles/role-issuer-2", map[string]interface{}{
+		issuerRefParam: "issuer-2",
+		"no_store":     "false",
+	})
+	require.NoError(t, err, "failed creating role role-issuer-2")
+
+	tc := []struct {
+		name               string
+		roleName           string
+		issuerName         string
+		expectedIssuerName string
+		expectErr          bool
+	}{
+		{name: "pass-default-use-default", roleName: "", issuerName: "", expectedIssuerName: "issuer-1", expectErr: false},
+		{name: "pass-role-issuer-2", roleName: "role-issuer-2", issuerName: "", expectedIssuerName: "issuer-2", expectErr: false},
+		{name: "pass-issuer-1-no-role", roleName: "", issuerName: "issuer-1", expectedIssuerName: "issuer-1", expectErr: false},
+		{name: "fail-role-has-bad-issuer", roleName: "role-bad-issuer", issuerName: "", expectedIssuerName: "", expectErr: true},
+		{name: "fail-role-no-store-enabled", roleName: "role-no-store-enabled", issuerName: "", expectedIssuerName: "", expectErr: true},
+		{name: "fail-role-does-not-exist", roleName: "non-existant",
issuerName: "", expectedIssuerName: "", expectErr: true}, + {name: "fail-issuer-does-not-exist", roleName: "", issuerName: "non-existant", expectedIssuerName: "", expectErr: true}, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + f := b.acmeWrapper(acmeWrapperOpts{}, func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if tt.roleName != acmeCtx.Role.Name { + return nil, fmt.Errorf("expected role %s but got %s", tt.roleName, acmeCtx.Role.Name) + } + + if tt.expectedIssuerName != acmeCtx.Issuer.Name { + return nil, fmt.Errorf("expected issuer %s but got %s", tt.expectedIssuerName, acmeCtx.Issuer.Name) + } + + return nil, nil + }) + + var acmePath string + fieldRaw := map[string]interface{}{} + if tt.issuerName != "" { + fieldRaw[issuerRefParam] = tt.issuerName + acmePath = "issuer/" + tt.issuerName + "/" + } + if tt.roleName != "" { + fieldRaw["role"] = tt.roleName + acmePath = acmePath + "roles/" + tt.roleName + "/" + } + + acmePath = strings.TrimLeft(acmePath+"/acme/directory", "/") + + resp, err := f(context.Background(), &logical.Request{Path: acmePath, Storage: s}, &framework.FieldData{ + Raw: fieldRaw, + Schema: getCsrSignVerbatimSchemaFields(), + }) + require.NoError(t, err, "all errors should be re-encoded") + + if tt.expectErr { + require.NotEqual(t, 200, resp.Data[logical.HTTPStatusCode]) + require.Equal(t, ErrorContentType, resp.Data[logical.HTTPContentType]) + } else { + if resp != nil { + t.Fatalf("expected no error got %s", string(resp.Data[logical.HTTPRawBody].([]uint8))) + } + } + }) + } +} diff --git a/builtin/logical/pki/backend.go b/builtin/logical/pki/backend.go index db45f2d49ce1..615380a826fa 100644 --- a/builtin/logical/pki/backend.go +++ b/builtin/logical/pki/backend.go @@ -1,33 +1,38 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "context" + "crypto/x509" "fmt" - "sort" "strings" "sync" "sync/atomic" "time" - atomic2 "go.uber.org/atomic" - - "github.com/hashicorp/vault/helper/constants" - - "github.com/hashicorp/go-multierror" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/armon/go-metrics" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) const ( + operationPrefixPKI = "pki" + operationPrefixPKIIssuer = "pki-issuer" + operationPrefixPKIIssuers = "pki-issuers" + operationPrefixPKIRoot = "pki-root" + noRole = 0 roleOptional = 1 roleRequired = 2 @@ -46,9 +51,9 @@ const ( * will be forwarded to that cluster's active node and not go all the way up to the performance primary's * active node. * - * If a certificate issue request has a role in which no_store is set to true, that node itself - * will issue the certificate and not forward the request to the active node, as this does not - * need to write to storage. 
+ * If a certificate issue request has a role in which no_store and no_store_metadata is set to + * true, that node itself will issue the certificate and not forward the request to the active + * node, as this does not need to write to storage. * * Following the same pattern, if a managed key is involved to sign an issued certificate request * and the local node does not have access for some reason to it, the request will be forwarded to @@ -95,6 +100,12 @@ func Backend(conf *logical.BackendConfig) *backend { "issuer/+/crl/delta/der", "issuer/+/crl/delta/pem", "issuer/+/crl/delta", + "issuer/+/unified-crl/der", + "issuer/+/unified-crl/pem", + "issuer/+/unified-crl", + "issuer/+/unified-crl/delta/der", + "issuer/+/unified-crl/delta/pem", + "issuer/+/unified-crl/delta", "issuer/+/pem", "issuer/+/der", "issuer/+/json", @@ -107,6 +118,8 @@ func Backend(conf *logical.BackendConfig) *backend { "unified-crl", "unified-ocsp", // Unified OCSP POST "unified-ocsp/*", // Unified OCSP GET + + // ACME paths are added below }, LocalStorage: []string{ @@ -114,8 +127,11 @@ func Backend(conf *logical.BackendConfig) *backend { localDeltaWALPath, legacyCRLPath, clusterConfigPath, - "crls/", - "certs/", + issuing.PathCrls, + issuing.PathCerts, + issuing.PathCertMetadata, + acmePathPrefix, + autoTidyLastRunPath, }, Root: []string{ @@ -131,9 +147,21 @@ func Backend(conf *logical.BackendConfig) *backend { WriteForwardedStorage: []string{ crossRevocationPath, - unifiedRevocationWritePathPrefix, + revocation.UnifiedRevocationWritePathPrefix, unifiedDeltaWALPath, }, + + Limited: []string{ + "issue", + "issue/*", + }, + + Binary: []string{ + "ocsp", // OCSP POST + "ocsp/*", // OCSP GET + "unified-ocsp", // Unified OCSP POST + "unified-ocsp/*", // Unified OCSP GET + }, }, Paths: []*framework.Path{ @@ -165,6 +193,7 @@ func Backend(conf *logical.BackendConfig) *backend { // Issuer APIs pathListIssuers(&b), pathGetIssuer(&b), + pathGetUnauthedIssuer(&b), pathGetIssuerCRL(&b), pathImportIssuer(&b), pathIssuerIssue(&b), @@ -203,6 +232,11 @@ func Backend(conf *logical.BackendConfig) *backend { // CRL Signing pathResignCrls(&b), pathSignRevocationList(&b), + + // ACME + pathAcmeConfig(&b), + pathAcmeEabList(&b), + pathAcmeEabDelete(&b), }, Secrets: []*framework.Secret{ @@ -213,19 +247,37 @@ func Backend(conf *logical.BackendConfig) *backend { InitializeFunc: b.initialize, Invalidate: b.invalidate, PeriodicFunc: b.periodicFunc, - } - - if constants.IsEnterprise { - // Unified CRL/OCSP paths are ENT only - entOnly := []*framework.Path{ - pathGetIssuerUnifiedCRL(&b), - pathListCertsRevocationQueue(&b), - pathListUnifiedRevoked(&b), - pathFetchUnifiedCRL(&b), - buildPathUnifiedOcspGet(&b), - buildPathUnifiedOcspPost(&b), - } - b.Backend.Paths = append(b.Backend.Paths, entOnly...) 
+ Clean: b.cleanup, + } + + // Add ACME paths to backend + for _, prefix := range []struct { + acmePrefix string + unauthPrefix string + opts acmeWrapperOpts + }{ + { + "acme", + "acme", + acmeWrapperOpts{true, false}, + }, + { + "roles/" + framework.GenericNameRegex("role") + "/acme", + "roles/+/acme", + acmeWrapperOpts{}, + }, + { + "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/acme", + "issuer/+/acme", + acmeWrapperOpts{}, + }, + { + "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/roles/" + framework.GenericNameRegex("role") + "/acme", + "issuer/+/roles/+/acme", + acmeWrapperOpts{}, + }, + } { + setupAcmeDirectory(&b, prefix.acmePrefix, prefix.unauthPrefix, prefix.opts) } b.tidyCASGuard = new(uint32) @@ -243,26 +295,29 @@ func Backend(conf *logical.BackendConfig) *backend { conf.System.ReplicationState().HasState(consts.ReplicationDRSecondary) b.crlBuilder = newCRLBuilder(!cannotRebuildCRLs) - // Delay the first tidy until after we've started up. - b.lastTidy = time.Now() + // Delay the first tidy until after we've started up, this will be reset within the initialize function + now := time.Now() + b.tidyStatusLock.Lock() + b.lastAutoTidy = now + b.tidyStatusLock.Unlock() - // Metrics initialization for count of certificates in storage - b.certCountEnabled = atomic2.NewBool(false) - b.publishCertCountMetrics = atomic2.NewBool(false) - b.certsCounted = atomic2.NewBool(false) - b.certCountError = "Initialize Not Yet Run, Cert Counts Unavailable" - b.certCount = &atomic.Uint32{} - b.revokedCertCount = &atomic.Uint32{} - b.possibleDoubleCountedSerials = make([]string, 0, 250) - b.possibleDoubleCountedRevokedSerials = make([]string, 0, 250) + // Keep track of when this mount was started up. + b.mountStartup = now b.unifiedTransferStatus = newUnifiedTransferStatus() + b.acmeState = NewACMEState() + b.certificateCounter = NewCertificateCounter(b.backendUUID) + + // It is important that we call SetupEnt at the very end as + // some ENT backends need access to the member vars initialized above. + b.SetupEnt() return &b } type backend struct { *framework.Backend + entBackend backendUUID string storage logical.Storage @@ -272,27 +327,54 @@ type backend struct { tidyStatusLock sync.RWMutex tidyStatus *tidyStatus - lastTidy time.Time + // lastAutoTidy should be accessed through the tidyStatusLock, + // use getAutoTidyLastRun and writeAutoTidyLastRun instead of direct access + lastAutoTidy time.Time + + // autoTidyBackoff a random time in the future in which auto-tidy can't start + // for after the system starts up to avoid a thundering herd of tidy operations + // at startup. + autoTidyBackoff time.Time - unifiedTransferStatus *unifiedTransferStatus + unifiedTransferStatus *UnifiedTransferStatus - certCountEnabled *atomic2.Bool - publishCertCountMetrics *atomic2.Bool - certCount *atomic.Uint32 - revokedCertCount *atomic.Uint32 - certsCounted *atomic2.Bool - certCountError string - possibleDoubleCountedSerials []string - possibleDoubleCountedRevokedSerials []string + certificateCounter *CertificateCounter pkiStorageVersion atomic.Value - crlBuilder *crlBuilder + crlBuilder *CrlBuilder // Write lock around issuers and keys. issuersLock sync.RWMutex + + // Context around ACME operations + acmeState *acmeState + acmeAccountLock sync.RWMutex // (Write) Locked on Tidy, (Read) Locked on Account Creation + + // Track when this mount was started. 
+ mountStartup time.Time } -type roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) +// BackendOps a bridge/legacy interface until we can further +// separate out backend things into distinct packages. +type BackendOps interface { + managed_key.PkiManagedKeyView + pki_backend.SystemViewGetter + pki_backend.MountInfo + pki_backend.Logger + revocation.RevokerFactory + + UseLegacyBundleCaStorage() bool + CrlBuilder() *CrlBuilder + GetRevokeStorageLock() *sync.RWMutex + GetUnifiedTransferStatus() *UnifiedTransferStatus + GetAcmeState() *acmeState + GetRole(ctx context.Context, s logical.Storage, n string) (*issuing.RoleEntry, error) + GetCertificateCounter() *CertificateCounter +} + +var _ BackendOps = &backend{} + +type roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) const backendHelp = ` The PKI backend dynamically generates X509 server and client certificates. @@ -314,7 +396,7 @@ func metricsKey(req *logical.Request, extra ...string) []string { func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation) framework.OperationFunc { return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { key := metricsKey(req, callType) - var role *roleEntry + var role *issuing.RoleEntry var labels []metrics.Label var err error @@ -330,7 +412,7 @@ func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation } if roleMode > noRole { // Get the role - role, err = b.getRole(ctx, req.Storage, roleName) + role, err = b.GetRole(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -359,9 +441,9 @@ func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation } // initialize is used to perform a possible PKI storage migration if needed -func (b *backend) initialize(ctx context.Context, _ *logical.InitializationRequest) error { +func (b *backend) initialize(ctx context.Context, ir *logical.InitializationRequest) error { sc := b.makeStorageContext(ctx, b.storage) - if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { + if err := b.CrlBuilder().reloadConfigIfRequired(sc); err != nil { return err } @@ -370,14 +452,57 @@ func (b *backend) initialize(ctx context.Context, _ *logical.InitializationReque return err } + err = b.GetAcmeState().Initialize(b, sc) + if err != nil { + return err + } + // Initialize also needs to populate our certificate and revoked certificate count err = b.initializeStoredCertificateCounts(ctx) if err != nil { // Don't block/err initialize/startup for metrics. Context on this call can time out due to number of certificates. - b.Logger().Error("Could not initialize stored certificate counts", err) - b.certCountError = err.Error() + b.Logger().Error("Could not initialize stored certificate counts", "error", err) + b.GetCertificateCounter().SetError(err) } - return nil + + // Initialize lastAutoTidy from disk + b.initializeLastTidyFromStorage(sc) + + return b.initializeEnt(sc, ir) +} + +// initializeLastTidyFromStorage reads the time we last ran auto tidy from storage and initializes +// b.lastAutoTidy with the value. If no previous value existed, we persist time.Now() and initialize +// b.lastAutoTidy with that value. 
+func (b *backend) initializeLastTidyFromStorage(sc *storageContext) { + now := time.Now() + + lastTidyTime, err := sc.getAutoTidyLastRun() + if err != nil { + lastTidyTime = now + b.Logger().Error("failed loading previous tidy last run time, using now", "error", err.Error()) + } + if lastTidyTime.IsZero() { + // No previous time was set, persist now so we can track a starting point across Vault restarts + lastTidyTime = now + if err = b.updateLastAutoTidyTime(sc, now); err != nil { + b.Logger().Error("failed persisting tidy last run time", "error", err.Error()) + } + } + + // We bypass using updateLastAutoTidyTime here to avoid the storage write on init + // that normally isn't required + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + b.lastAutoTidy = lastTidyTime +} + +func (b *backend) cleanup(ctx context.Context) { + sc := b.makeStorageContext(ctx, b.storage) + + b.GetAcmeState().Shutdown(b) + + b.cleanupEnt(sc) } func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { @@ -406,7 +531,31 @@ func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { return nil } -func (b *backend) useLegacyBundleCaStorage() bool { +func (b *backend) BackendUUID() string { + return b.backendUUID +} + +func (b *backend) CrlBuilder() *CrlBuilder { + return b.crlBuilder +} + +func (b *backend) GetRevokeStorageLock() *sync.RWMutex { + return &b.revokeStorageLock +} + +func (b *backend) GetUnifiedTransferStatus() *UnifiedTransferStatus { + return b.unifiedTransferStatus +} + +func (b *backend) GetAcmeState() *acmeState { + return b.acmeState +} + +func (b *backend) GetCertificateCounter() *CertificateCounter { + return b.certificateCounter +} + +func (b *backend) UseLegacyBundleCaStorage() bool { // This helper function is here to choose whether or not we use the newer // issuer/key storage format or the older legacy ca bundle format. // @@ -419,6 +568,18 @@ func (b *backend) useLegacyBundleCaStorage() bool { return version == nil || version == 0 } +func (b *backend) IsSecondaryNode() bool { + return b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) +} + +func (b *backend) GetManagedKeyView() (logical.ManagedKeySystemView, error) { + managedKeyView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported system view")} + } + return managedKeyView, nil +} + func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock bool) { info, err := getMigrationInfo(ctx, b.storage) if err != nil { @@ -457,34 +618,36 @@ func (b *backend) invalidate(ctx context.Context, key string) { go func() { b.Logger().Info("Detected a migration completed, resetting pki storage version") b.updatePkiStorageVersion(ctx, true) - b.crlBuilder.requestRebuildIfActiveNode(b) + b.CrlBuilder().requestRebuildIfActiveNode(b) }() case strings.HasPrefix(key, issuerPrefix): - if !b.useLegacyBundleCaStorage() { + if !b.UseLegacyBundleCaStorage() { // See note in updateDefaultIssuerId about why this is necessary. // We do this ahead of CRL rebuilding just so we know that things // are stale. - b.crlBuilder.invalidateCRLBuildTime() + b.CrlBuilder().invalidateCRLBuildTime() // If an issuer has changed on the primary, we need to schedule an update of our CRL, // the primary cluster would have done it already, but the CRL is cluster specific so // force a rebuild of ours. 
- b.crlBuilder.requestRebuildIfActiveNode(b) + b.CrlBuilder().requestRebuildIfActiveNode(b) } else { b.Logger().Debug("Ignoring invalidation updates for issuer as the PKI migration has yet to complete.") } case key == "config/crl": // We may need to reload our OCSP status flag - b.crlBuilder.markConfigDirty() + b.CrlBuilder().markConfigDirty() + case key == storageAcmeConfig: + b.GetAcmeState().markConfigDirty() case key == storageIssuerConfig: - b.crlBuilder.invalidateCRLBuildTime() + b.CrlBuilder().invalidateCRLBuildTime() case strings.HasPrefix(key, crossRevocationPrefix): split := strings.Split(key, "/") if !strings.HasSuffix(key, "/confirmed") { cluster := split[len(split)-2] serial := split[len(split)-1] - b.crlBuilder.addCertForRevocationCheck(cluster, serial) + b.CrlBuilder().addCertForRevocationCheck(cluster, serial) } else { if len(split) >= 3 { cluster := split[len(split)-3] @@ -495,7 +658,7 @@ func (b *backend) invalidate(ctx context.Context, key string) { // ignore them). On performance primary nodes though, // we do want to track them to remove them. if !isNotPerfPrimary { - b.crlBuilder.addCertForRevocationRemoval(cluster, serial) + b.CrlBuilder().addCertForRevocationRemoval(cluster, serial) } } } @@ -504,8 +667,10 @@ func (b *backend) invalidate(ctx context.Context, key string) { split := strings.Split(key, "/") cluster := split[len(split)-2] serial := split[len(split)-1] - b.crlBuilder.addCertFromCrossRevocation(cluster, serial) + b.CrlBuilder().addCertFromCrossRevocation(cluster, serial) } + + b.invalidateEnt(ctx, key) } func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) error { @@ -513,7 +678,7 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er doCRL := func() error { // First attempt to reload the CRL configuration. - if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { + if err := b.CrlBuilder().reloadConfigIfRequired(sc); err != nil { return err } @@ -525,31 +690,47 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er } // First handle any global revocation queue entries. - if err := b.crlBuilder.processRevocationQueue(sc); err != nil { + if err := b.CrlBuilder().processRevocationQueue(sc); err != nil { return err } // Then handle any unified cross-cluster revocations. - if err := b.crlBuilder.processCrossClusterRevocations(sc); err != nil { + if err := b.CrlBuilder().processCrossClusterRevocations(sc); err != nil { return err } // Check if we're set to auto rebuild and a CRL is set to expire. - if err := b.crlBuilder.checkForAutoRebuild(sc); err != nil { + if err := b.CrlBuilder().checkForAutoRebuild(sc); err != nil { return err } // Then attempt to rebuild the CRLs if required. - if err := b.crlBuilder.rebuildIfForced(sc); err != nil { + warnings, err := b.CrlBuilder().RebuildIfForced(sc) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of complete CRL, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } // If a delta CRL was rebuilt above as part of the complete CRL rebuild, // this will be a no-op. However, if we do need to rebuild delta CRLs, // this would cause us to do so. 
- if err := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, false); err != nil { + warnings, err = b.CrlBuilder().rebuildDeltaCRLsIfForced(sc, false) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of delta CRL, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } return nil } @@ -573,13 +754,20 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er // Check if we should run another tidy... now := time.Now() - b.tidyStatusLock.RLock() - nextOp := b.lastTidy.Add(config.Interval) - b.tidyStatusLock.RUnlock() + nextOp := b.getLastAutoTidyTime().Add(config.Interval) if now.Before(nextOp) { return nil } + if b.autoTidyBackoff.IsZero() { + b.autoTidyBackoff = config.CalculateStartupBackoff(b.mountStartup) + } + + if b.autoTidyBackoff.After(now) { + b.Logger().Info("Auto tidy will not run as we are still within the random backoff ending at", "backoff_until", b.autoTidyBackoff) + return nil + } + // Ensure a tidy isn't already running... If it is, we'll trigger // again when the running one finishes. if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { @@ -589,9 +777,11 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er // Prevent ourselves from starting another tidy operation while // this one is still running. This operation runs in the background // and has a separate error reporting mechanism. - b.tidyStatusLock.Lock() - b.lastTidy = now - b.tidyStatusLock.Unlock() + err = b.updateLastAutoTidyTime(sc, now) + if err != nil { + // We don't really mind if this write fails, we'll re-run in the future + b.Logger().Warn("failed to persist auto tidy last run time", "error", err.Error()) + } // Because the request from the parent storage will be cleared at // some point (and potentially reused) -- due to tidy executing in @@ -605,18 +795,19 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er return nil } + // First tidy any ACME nonces to free memory. + b.GetAcmeState().DoTidyNonces() + + // Then run unified transfer. backgroundSc := b.makeStorageContext(context.Background(), b.storage) go runUnifiedTransfer(backgroundSc) + // Then run the CRL rebuild and tidy operation. crlErr := doCRL() tidyErr := doAutoTidy() // Periodically re-emit gauges so that they don't disappear/go stale - tidyConfig, err := sc.getAutoTidyConfig() - if err != nil { - return err - } - b.emitCertStoreMetrics(tidyConfig) + b.GetCertificateCounter().EmitCertStoreMetrics() var errors error if crlErr != nil { @@ -633,12 +824,12 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er // Check if the CRL was invalidated due to issuer swap and update // accordingly. - if err := b.crlBuilder.flushCRLBuildTimeInvalidation(sc); err != nil { + if err := b.CrlBuilder().flushCRLBuildTimeInvalidation(sc); err != nil { return err } // All good! 
- return nil + return b.periodicFuncEnt(backgroundSc, request) } func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error { @@ -654,211 +845,96 @@ func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error { return err } - b.certCountEnabled.Store(config.MaintainCount) - b.publishCertCountMetrics.Store(config.PublishMetrics) - - if config.MaintainCount == false { - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - b.certsCounted.Store(true) - b.certCount.Store(0) - b.revokedCertCount.Store(0) - b.certCountError = "Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts" + certCounter := b.GetCertificateCounter() + isEnabled := certCounter.ReconfigureWithTidyConfig(config) + if !isEnabled { return nil } - // Ideally these three things would be set in one transaction, since that isn't possible, set the counts to "0", - // first, so count will over-count (and miss putting things in deduplicate queue), rather than under-count. - b.certCount.Store(0) - b.revokedCertCount.Store(0) - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - // A cert issued or revoked here will be double-counted. That's okay, this is "best effort" metrics. - b.certsCounted.Store(false) - - entries, err := b.storage.List(ctx, "certs/") + entries, err := b.storage.List(ctx, issuing.PathCerts) if err != nil { return err } - b.certCount.Add(uint32(len(entries))) revokedEntries, err := b.storage.List(ctx, "revoked/") if err != nil { return err } - b.revokedCertCount.Add(uint32(len(revokedEntries))) - - b.certsCounted.Store(true) - // Now that the metrics are set, we can switch from appending newly-stored certificates to the possible double-count - // list, and instead have them update the counter directly. We need to do this so that we are looking at a static - // slice of possibly double counted serials. Note that certsCounted is computed before the storage operation, so - // there may be some delay here. - - // Sort the listed-entries first, to accommodate that delay. - sort.Slice(entries, func(i, j int) bool { - return entries[i] < entries[j] - }) - - sort.Slice(revokedEntries, func(i, j int) bool { - return revokedEntries[i] < revokedEntries[j] - }) - - // We assume here that these lists are now complete. 
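The removed reconciliation that follows walks a sorted list of stored serials and a sorted list of possibly-double-counted serials in lockstep, decrementing the counter once per match; all of that is now encapsulated behind certCounter.InitializeCountsFromStorage. A compact sketch of that merge-style intersection, with the counter plumbing left out:

```go
package main

import (
	"fmt"
	"sort"
)

// countDuplicates performs the two-pointer walk the removed code used: both
// slices are sorted, equal values advance both cursors and count as one
// double-counted entry, and the smaller value otherwise advances its cursor.
func countDuplicates(stored, possible []string) int {
	sort.Strings(stored)
	sort.Strings(possible)
	duplicates, i, j := 0, 0, 0
	for i < len(stored) && j < len(possible) {
		switch {
		case stored[i] == possible[j]:
			duplicates++
			i++
			j++
		case stored[i] < possible[j]:
			i++
		default:
			j++
		}
	}
	return duplicates
}

func main() {
	fmt.Println(countDuplicates(
		[]string{"aa", "bb", "cc", "dd"},
		[]string{"bb", "dd", "ee"},
	)) // prints 2
}
```

The deleted loops below perform exactly this walk twice, once for issued and once for revoked serials.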
- sort.Slice(b.possibleDoubleCountedSerials, func(i, j int) bool { - return b.possibleDoubleCountedSerials[i] < b.possibleDoubleCountedSerials[j] - }) - - listEntriesIndex := 0 - possibleDoubleCountIndex := 0 - for { - if listEntriesIndex >= len(entries) { - break - } - if possibleDoubleCountIndex >= len(b.possibleDoubleCountedSerials) { - break - } - if entries[listEntriesIndex] == b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - // This represents a double-counted entry - b.decrementTotalCertificatesCountNoReport() - listEntriesIndex = listEntriesIndex + 1 - possibleDoubleCountIndex = possibleDoubleCountIndex + 1 - continue - } - if entries[listEntriesIndex] < b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - listEntriesIndex = listEntriesIndex + 1 - continue - } - if entries[listEntriesIndex] > b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - possibleDoubleCountIndex = possibleDoubleCountIndex + 1 - continue - } - } - - sort.Slice(b.possibleDoubleCountedRevokedSerials, func(i, j int) bool { - return b.possibleDoubleCountedRevokedSerials[i] < b.possibleDoubleCountedRevokedSerials[j] - }) - - listRevokedEntriesIndex := 0 - possibleRevokedDoubleCountIndex := 0 - for { - if listRevokedEntriesIndex >= len(revokedEntries) { - break - } - if possibleRevokedDoubleCountIndex >= len(b.possibleDoubleCountedRevokedSerials) { - break - } - if revokedEntries[listRevokedEntriesIndex] == b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - // This represents a double-counted revoked entry - b.decrementTotalRevokedCertificatesCountNoReport() - listRevokedEntriesIndex = listRevokedEntriesIndex + 1 - possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 - continue - } - if revokedEntries[listRevokedEntriesIndex] < b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - listRevokedEntriesIndex = listRevokedEntriesIndex + 1 - continue - } - if revokedEntries[listRevokedEntriesIndex] > b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 - continue - } - } - - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - - b.emitCertStoreMetrics(config) - - b.certCountError = "" + certCounter.InitializeCountsFromStorage(entries, revokedEntries) return nil } -func (b *backend) emitCertStoreMetrics(config *tidyConfig) { - if config.PublishMetrics == true { - certCount := b.certCount.Load() - b.emitTotalCertCountMetric(certCount) - revokedCertCount := b.revokedCertCount.Load() - b.emitTotalRevokedCountMetric(revokedCertCount) - } -} +var _ revocation.Revoker = &revoker{} -// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: -// eg. 
certsCounted := b.certsCounted.Load() -func (b *backend) ifCountEnabledIncrementTotalCertificatesCount(certsCounted bool, newSerial string) { - if b.certCountEnabled.Load() { - certCount := b.certCount.Add(1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "certs/") { - newSerial = newSerial[6:] - } - b.possibleDoubleCountedSerials = append(b.possibleDoubleCountedSerials, newSerial) - default: - if b.publishCertCountMetrics.Load() { - b.emitTotalCertCountMetric(certCount) - } - } - } +type revoker struct { + backend *backend + storageContext *storageContext + crlConfig *pki_backend.CrlConfig } -func (b *backend) ifCountEnabledDecrementTotalCertificatesCountReport() { - if b.certCountEnabled.Load() { - certCount := b.decrementTotalCertificatesCountNoReport() - if b.publishCertCountMetrics.Load() { - b.emitTotalCertCountMetric(certCount) - } - } +func (r *revoker) RevokeCert(cert *x509.Certificate) (revocation.RevokeCertInfo, error) { + r.backend.GetRevokeStorageLock().Lock() + defer r.backend.GetRevokeStorageLock().Unlock() + resp, err := revokeCert(r.storageContext, r.crlConfig, cert) + return parseRevokeCertOutput(resp, err) } -func (b *backend) emitTotalCertCountMetric(certCount uint32) { - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) +func (r *revoker) RevokeCertBySerial(serial string) (revocation.RevokeCertInfo, error) { + // NOTE: tryRevokeCertBySerial grabs the revoke storage lock for us + resp, err := tryRevokeCertBySerial(r.storageContext, r.crlConfig, serial) + return parseRevokeCertOutput(resp, err) } -// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -// Does not respect whether-we-are-counting backend information. -func (b *backend) decrementTotalCertificatesCountNoReport() uint32 { - newCount := b.certCount.Add(^uint32(0)) - return newCount -} +// There are a number of reasons a certificate will or won't be revoked. Sadly this will need further +// refactoring, but for now translate the basic reasons/response objects into a usable object +// that doesn't directly reply to the API request +func parseRevokeCertOutput(resp *logical.Response, err error) (revocation.RevokeCertInfo, error) { + if err != nil { + return revocation.RevokeCertInfo{}, err + } -// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: -// eg. 
certsCounted := b.certsCounted.Load() -func (b *backend) ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { - if b.certCountEnabled.Load() { - newRevokedCertCount := b.revokedCertCount.Add(1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial - newSerial = newSerial[8:] - } - b.possibleDoubleCountedRevokedSerials = append(b.possibleDoubleCountedRevokedSerials, newSerial) - default: - if b.publishCertCountMetrics.Load() { - b.emitTotalRevokedCountMetric(newRevokedCertCount) - } - } + if resp == nil { + // nil, nil response, most likely means the certificate was missing, + // but *might* be other things such as a tainted mount + return revocation.RevokeCertInfo{}, nil + } + + if resp.IsError() { + // There are a few reasons we return a response error but not an error, + // such as UserErrors or an attempt to revoke the CA + return revocation.RevokeCertInfo{}, resp.Error() } -} -func (b *backend) ifCountEnabledDecrementTotalRevokedCertificatesCountReport() { - if b.certCountEnabled.Load() { - revokedCertCount := b.decrementTotalRevokedCertificatesCountNoReport() - if b.publishCertCountMetrics.Load() { - b.emitTotalRevokedCountMetric(revokedCertCount) + // It is possible we don't return the field for various reasons, e.g. if only warnings are set. + if revTimeRaw, ok := resp.Data["revocation_time"]; ok { + revTimeInt, err := parseutil.ParseInt(revTimeRaw) + if err != nil { + // Let's be lenient for now + revTimeInt = 0 + } + revTime := time.Unix(revTimeInt, 0) + return revocation.RevokeCertInfo{ + RevocationTime: revTime, + }, nil } -} -func (b *backend) emitTotalRevokedCountMetric(revokedCertCount uint32) { - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) + // Since we don't really know what went wrong, if anything (for example the certificate might + // have been expired or close to expiry), let's punt on it for now + return revocation.RevokeCertInfo{ + Warnings: resp.Warnings, + }, nil } -// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -// Does not respect whether-we-are-counting backend information. -func (b *backend) decrementTotalRevokedCertificatesCountNoReport() uint32 { - newRevokedCertCount := b.revokedCertCount.Add(^uint32(0)) - return newRevokedCertCount +func (b *backend) GetRevoker(ctx context.Context, s logical.Storage) (revocation.Revoker, error) { + sc := b.makeStorageContext(ctx, s) + crlConfig, err := b.CrlBuilder().GetConfigWithUpdate(sc) + if err != nil { + return nil, err + } + return &revoker{ + backend: b, + crlConfig: crlConfig, + storageContext: sc, + }, nil } diff --git a/builtin/logical/pki/backend_oss.go b/builtin/logical/pki/backend_oss.go new file mode 100644 index 000000000000..aa8c413e767f --- /dev/null +++ b/builtin/logical/pki/backend_oss.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" +) + +type entBackend struct{} + +func (b *backend) initializeEnt(_ *storageContext, _ *logical.InitializationRequest) error { + return nil +} + +func (b *backend) invalidateEnt(_ context.Context, _ string) {} + +func (b *backend) periodicFuncEnt(_ *storageContext, _ *logical.Request) error { + return nil +} + +func (b *backend) cleanupEnt(_ *storageContext) {} + +func (b *backend) SetupEnt() {} diff --git a/builtin/logical/pki/backend_oss_test.go b/builtin/logical/pki/backend_oss_test.go new file mode 100644 index 000000000000..fb4648293342 --- /dev/null +++ b/builtin/logical/pki/backend_oss_test.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +func getEntProperAuthingPaths(_ string) map[string]pathAuthChecker { + return map[string]pathAuthChecker{} +} + +func getEntAcmePrefixes() []string { + return []string{} +} + +func entProperAuthingPathReplacer(rawPath string) string { + return rawPath +} diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index aafa932b6829..af079b13f781 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "bytes" + "cmp" "context" "crypto" "crypto/ecdsa" @@ -26,6 +27,7 @@ import ( "net/url" "os" "reflect" + "slices" "sort" "strconv" "strings" @@ -33,14 +35,6 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" - - "github.com/hashicorp/vault/helper/testhelpers" - - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - - "github.com/stretchr/testify/require" - "github.com/armon/go-metrics" "github.com/fatih/structs" "github.com/go-test/deep" @@ -48,12 +42,20 @@ import ( "github.com/hashicorp/vault/api" auth "github.com/hashicorp/vault/api/auth/userpass" "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/helper/testhelpers" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" "golang.org/x/net/idna" ) @@ -719,6 +721,10 @@ func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType str t.Fatalf("Got error generating private key for CSR: %v", err) } + return generateCSRWithKey(t, csrTemplate, priv) +} + +func generateCSRWithKey(t *testing.T, csrTemplate *x509.CertificateRequest, priv interface{}) (interface{}, []byte, string) { csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, priv) if err != nil { t.Fatalf("Got error generating CSR: %v", err) @@ -855,7 +861,7 @@ func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) // Generates steps to test out various role permutations func generateRoleSteps(t 
*testing.T, useCSRs bool) []logicaltest.TestStep { - roleVals := roleEntry{ + roleVals := issuing.RoleEntry{ MaxTTL: 12 * time.Hour, KeyType: "rsa", KeyBits: 2048, @@ -937,7 +943,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { ret = append(ret, issueTestStep) } - getCountryCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getCountryCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -958,7 +964,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getOuCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -979,7 +985,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getOrganizationCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1000,7 +1006,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getLocalityCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getLocalityCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1021,7 +1027,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getProvinceCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getProvinceCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1042,7 +1048,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getStreetAddressCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getStreetAddressCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1063,7 +1069,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getPostalCodeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getPostalCodeCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1084,7 +1090,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getNotBeforeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getNotBeforeCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1109,7 +1115,9 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { // Returns a TestCheckFunc that performs various validity checks on the // returned certificate information, mostly within checkCertsAndPrivateKey - getCnCheck := func(name string, role roleEntry, key 
crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc { + getCnCheck := func(name string, role issuing.RoleEntry, key crypto.Signer, usage x509.KeyUsage, + extUsage x509.ExtKeyUsage, validity time.Duration, + ) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1219,7 +1227,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } getRandCsr := func(keyType string, errorOk bool, csrTemplate *x509.CertificateRequest) csrPlan { - rsaKeyBits := []int{2048, 3072, 4096} + rsaKeyBits := []int{2048, 3072, 4096, 8192} ecKeyBits := []int{224, 256, 384, 521} plan := csrPlan{errorOk: errorOk} @@ -1332,7 +1340,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } roleVals.KeyUsage = usage - parsedKeyUsage := parseKeyUsages(roleVals.KeyUsage) + parsedKeyUsage := parsing.ParseKeyUsages(roleVals.KeyUsage) if parsedKeyUsage == 0 && len(usage) != 0 { panic("parsed key usages was zero") } @@ -1591,7 +1599,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } { - getOtherCheck := func(expectedOthers ...otherNameUtf8) logicaltest.TestCheckFunc { + getOtherCheck := func(expectedOthers ...certutil.OtherNameUtf8) logicaltest.TestCheckFunc { return func(resp *logical.Response) error { var certBundle certutil.CertBundle err := mapstructure.Decode(resp.Data, &certBundle) @@ -1607,7 +1615,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { if err != nil { return err } - var expected []otherNameUtf8 + var expected []certutil.OtherNameUtf8 expected = append(expected, expectedOthers...) if diff := deep.Equal(foundOthers, expected); len(diff) > 0 { return fmt.Errorf("wrong SAN IPs, diff: %v", diff) @@ -1616,11 +1624,11 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []otherNameUtf8, check logicaltest.TestCheckFunc) { - otherSansMap := func(os []otherNameUtf8) map[string][]string { + addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []certutil.OtherNameUtf8, check logicaltest.TestCheckFunc) { + otherSansMap := func(os []certutil.OtherNameUtf8) map[string][]string { ret := make(map[string][]string) for _, o := range os { - ret[o.oid] = append(ret[o.oid], o.value) + ret[o.Oid] = append(ret[o.Oid], o.Value) } return ret } @@ -1651,14 +1659,14 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { roleVals.UseCSRCommonName = true commonNames.Localhost = true - newOtherNameUtf8 := func(s string) (ret otherNameUtf8) { + newOtherNameUtf8 := func(s string) (ret certutil.OtherNameUtf8) { pieces := strings.Split(s, ";") if len(pieces) == 2 { piecesRest := strings.Split(pieces[1], ":") if len(piecesRest) == 2 { switch strings.ToUpper(piecesRest[0]) { case "UTF-8", "UTF8": - return otherNameUtf8{oid: pieces[0], value: piecesRest[1]} + return certutil.OtherNameUtf8{Oid: pieces[0], Value: piecesRest[1]} } } } @@ -1668,7 +1676,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { oid1 := "1.3.6.1.4.1.311.20.2.3" oth1str := oid1 + ";utf8:devops@nope.com" oth1 := newOtherNameUtf8(oth1str) - oth2 := otherNameUtf8{oid1, "me@example.com"} + oth2 := certutil.OtherNameUtf8{oid1, 
"me@example.com"} // allowNone, allowAll := []string{}, []string{oid1 + ";UTF-8:*"} allowNone, allowAll := []string{}, []string{"*"} @@ -1683,15 +1691,15 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { // Given OtherSANs as API argument and useCSRSANs false, CSR arg ignored. addOtherSANTests(useCSRs, false, allowAll, false, []string{oth1str}, - []otherNameUtf8{oth2}, getOtherCheck(oth1)) + []certutil.OtherNameUtf8{oth2}, getOtherCheck(oth1)) if useCSRs { // OtherSANs not allowed, valid OtherSANs provided via CSR, should be an error. - addOtherSANTests(useCSRs, true, allowNone, true, nil, []otherNameUtf8{oth1}, nil) + addOtherSANTests(useCSRs, true, allowNone, true, nil, []certutil.OtherNameUtf8{oth1}, nil) // Given OtherSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. addOtherSANTests(useCSRs, false, allowAll, false, []string{oth2.String()}, - []otherNameUtf8{oth1}, getOtherCheck(oth2)) + []certutil.OtherNameUtf8{oth1}, getOtherCheck(oth2)) } } @@ -2096,7 +2104,7 @@ func TestBackend_PathFetchCertList(t *testing.T) { // list certs/ resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.ListOperation, - Path: "certs/", + Path: issuing.PathCerts, Storage: storage, MountPoint: "pki/", }) @@ -2169,7 +2177,7 @@ func runTestSignVerbatim(t *testing.T, keyType string) { // On older versions of Go this test will fail due to an explicit check for duplicate otherNames later in this test. ExtraExtensions: []pkix.Extension{ { - Id: oidExtensionSubjectAltName, + Id: certutil.OidExtensionSubjectAltName, Critical: false, Value: []byte{0x30, 0x26, 0xA0, 0x24, 0x06, 0x0A, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, 0xA0, 0x16, 0x0C, 0x14, 0x75, 0x73, 0x65, 0x72, 0x6E, 0x61, 0x6D, 0x65, 0x40, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x2E, 0x63, 0x6F, 0x6D}, }, @@ -2331,7 +2339,7 @@ func runTestSignVerbatim(t *testing.T, keyType string) { // We assume that there is only one SAN in the original CSR and that it is an otherName. 
san_count := 0 for _, ext := range cert.Extensions { - if ext.Id.Equal(oidExtensionSubjectAltName) { + if ext.Id.Equal(certutil.OidExtensionSubjectAltName) { san_count += 1 } } @@ -2400,6 +2408,14 @@ func TestBackend_Root_Idempotency(t *testing.T) { require.NotNil(t, resp, "expected ca info") keyId1 := resp.Data["key_id"] issuerId1 := resp.Data["issuer_id"] + cert := parseCert(t, resp.Data["certificate"].(string)) + certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId1.(issuing.KeyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) resp, err = CBRead(b, s, "cert/ca_chain") require.NoError(t, err, "error reading ca_chain: %v", err) @@ -2414,6 +2430,14 @@ func TestBackend_Root_Idempotency(t *testing.T) { require.NotNil(t, resp, "expected ca info") keyId2 := resp.Data["key_id"] issuerId2 := resp.Data["issuer_id"] + cert = parseCert(t, resp.Data["certificate"].(string)) + certSkid = certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId2.(issuing.KeyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) // Make sure that we actually generated different issuer and key values require.NotEqual(t, keyId1, keyId2) @@ -2438,13 +2462,27 @@ func TestBackend_Root_Idempotency(t *testing.T) { require.NoError(t, err) require.NotNil(t, resp, "expected ca info") + firstMapping := resp.Data["mapping"].(map[string]string) firstImportedKeys := resp.Data["imported_keys"].([]string) firstImportedIssuers := resp.Data["imported_issuers"].([]string) + firstExistingKeys := resp.Data["existing_keys"].([]string) + firstExistingIssuers := resp.Data["existing_issuers"].([]string) require.NotContains(t, firstImportedKeys, keyId1) require.NotContains(t, firstImportedKeys, keyId2) require.NotContains(t, firstImportedIssuers, issuerId1) require.NotContains(t, firstImportedIssuers, issuerId2) + require.Empty(t, firstExistingKeys) + require.Empty(t, firstExistingIssuers) + require.NotEmpty(t, firstMapping) + require.Equal(t, 1, len(firstMapping)) + + var issuerId3 string + var keyId3 string + for i, k := range firstMapping { + issuerId3 = i + keyId3 = k + } // Performing this again should result in no key/issuer ids being imported/generated. 
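The expanded config/ca assertions above distinguish a fresh import (imported_keys/imported_issuers populated) from a re-import of material already present (existing_keys/existing_issuers populated), with mapping tying each issuer to its backing key. A small sketch of how a caller might classify a response using those fields; the helper and the sample data are illustrative, not part of the diff:

```go
package main

import "fmt"

// importOutcome classifies a config/ca response using the fields the test
// inspects: anything listed as imported is new to the mount, anything listed
// as existing was already present.
func importOutcome(data map[string]interface{}) string {
	imported, _ := data["imported_issuers"].([]string)
	existing, _ := data["existing_issuers"].([]string)
	switch {
	case len(imported) > 0:
		return "new issuers imported"
	case len(existing) > 0:
		return "bundle already present; nothing imported"
	default:
		return "no issuers in bundle"
	}
}

func main() {
	fmt.Println(importOutcome(map[string]interface{}{
		"imported_issuers": []string{},
		// Illustrative issuer ID, not a real one from the test.
		"existing_issuers": []string{"00000000-0000-0000-0000-000000000000"},
	}))
}
```

The second config/ca write below confirms the no-op case.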
resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ @@ -2452,11 +2490,17 @@ func TestBackend_Root_Idempotency(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, resp, "expected ca info") + secondMapping := resp.Data["mapping"].(map[string]string) secondImportedKeys := resp.Data["imported_keys"] secondImportedIssuers := resp.Data["imported_issuers"] + secondExistingKeys := resp.Data["existing_keys"] + secondExistingIssuers := resp.Data["existing_issuers"] - require.Nil(t, secondImportedKeys) - require.Nil(t, secondImportedIssuers) + require.Empty(t, secondImportedKeys) + require.Empty(t, secondImportedIssuers) + require.Contains(t, secondExistingKeys, keyId3) + require.Contains(t, secondExistingIssuers, issuerId3) + require.Equal(t, 1, len(secondMapping)) resp, err = CBDelete(b, s, "root") require.NoError(t, err) @@ -2496,14 +2540,67 @@ func TestBackend_Root_Idempotency(t *testing.T) { } } -func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { +// TestBackend_SignIntermediate_EnforceLeafFlag verifies if the flag is true +// that we will leverage the issuer's configured behavior +func TestBackend_SignIntermediate_EnforceLeafFlag(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + require.NoError(t, err, "failed generating root cert") + rootCert := parseCert(t, resp.Data["certificate"].(string)) + + _, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ + "leaf_not_after_behavior": "err", + }) + require.NoError(t, err, "failed updating root issuer cert behavior") + + resp, err = CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "myint.com", + }) + require.NoError(t, err, "failed generating intermediary CSR") + csr := resp.Data["csr"] + + _, err = CBWrite(b, s, "root/sign-intermediate", map[string]interface{}{ + "common_name": "myint.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "csr": csr, + "ttl": "60h", + "enforce_leaf_not_after_behavior": true, + }) + require.Error(t, err, "sign-intermediate should have failed as root issuer leaf behavior is set to err") + + // Now test with permit, the old default behavior + _, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ + "leaf_not_after_behavior": "permit", + }) + require.NoError(t, err, "failed updating root issuer cert behavior to permit") + + resp, err = CBWrite(b, s, "root/sign-intermediate", map[string]interface{}{ + "common_name": "myint.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "csr": csr, + "ttl": "60h", + "enforce_leaf_not_after_behavior": true, + }) + require.NoError(t, err, "failed to sign intermediary CA with permit as issuer") + intCert := parseCert(t, resp.Data["certificate"].(string)) + + require.Truef(t, rootCert.NotAfter.Before(intCert.NotAfter), + "root cert notAfter %v was not before ca cert's notAfter %v", rootCert.NotAfter, intCert.NotAfter) +} + +func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { t.Parallel() b_root, s_root := CreateBackendWithStorage(t) b_int, s_int := CreateBackendWithStorage(t) var err error // Direct issuing from root - _, err = CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{ + resp, err := CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{ "ttl": "40h", "common_name": "myvault.com", }) @@ -2511,33 +2608,40 @@ func 
TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { t.Fatal(err) } + rootCert := parseCert(t, resp.Data["certificate"].(string)) + _, err = CBWrite(b_root, s_root, "roles/test", map[string]interface{}{ "allow_bare_domains": true, "allow_subdomains": true, + "allow_any_name": true, }) if err != nil { t.Fatal(err) } - resp, err := CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ + resp, err = CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ "common_name": "myint.com", }) schema.ValidateResponse(t, schema.GetResponseSchema(t, b_root.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true) + require.Contains(t, resp.Data, "key_id") + intKeyId := resp.Data["key_id"].(issuing.KeyID) + csr := resp.Data["csr"] + + resp, err = CBRead(b_int, s_int, "key/"+intKeyId.String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + intSkid := resp.Data["subject_key_id"].(string) if err != nil { t.Fatal(err) } - csr := resp.Data["csr"] - _, err = CBWrite(b_root, s_root, "sign/test", map[string]interface{}{ "common_name": "myint.com", "csr": csr, "ttl": "60h", }) - if err == nil { - t.Fatal("expected error") - } + require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") _, err = CBWrite(b_root, s_root, "sign-verbatim/test", map[string]interface{}{ "common_name": "myint.com", @@ -2545,9 +2649,7 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { "csr": csr, "ttl": "60h", }) - if err == nil { - t.Fatal("expected error") - } + require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ "common_name": "myint.com", @@ -2564,6 +2666,13 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { if len(resp.Warnings) == 0 { t.Fatalf("expected warnings, got %#v", *resp) } + + cert := parseCert(t, resp.Data["certificate"].(string)) + certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") + require.Equal(t, intSkid, certSkid) + + require.Equal(t, rootCert.NotAfter, cert.NotAfter, "intermediary cert's NotAfter did not match root cert's NotAfter") + require.Contains(t, resp.Warnings, intCaTruncatationWarning, "missing warning about intermediary CA notAfter truncation") } func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { @@ -2720,7 +2829,7 @@ func TestBackend_SignSelfIssued(t *testing.T) { } sc := b.makeStorageContext(context.Background(), storage) - signingBundle, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + signingBundle, err := sc.fetchCAInfo(defaultRef, issuing.ReadOnlyUsage) if err != nil { t.Fatal(err) } @@ -3079,11 +3188,14 @@ func TestBackend_OID_SANs(t *testing.T) { cert.DNSNames[2] != "foobar.com" { t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) } - expectedOtherNames := []otherNameUtf8{{oid1, val1}, {oid2, val2}} + expectedOtherNames := []certutil.OtherNameUtf8{{oid1, val1}, {oid2, val2}} foundOtherNames, err := getOtherSANsFromX509Extensions(cert.Extensions) if err != nil { t.Fatal(err) } + // Sort our returned list as SANS are built internally with a map so ordering can be inconsistent + slices.SortFunc(foundOtherNames, func(a, b certutil.OtherNameUtf8) int { return cmp.Compare(a.Oid, b.Oid) }) + if diff := deep.Equal(expectedOtherNames, foundOtherNames); len(diff) != 0 { t.Errorf("unexpected otherNames: %v", diff) } @@ -3656,6 +3768,10 @@ func TestReadWriteDeleteRoles(t *testing.T) { "allowed_user_ids": 
[]interface{}{}, } + if issuing.MetadataPermitted { + expectedData["no_store_metadata"] = false + } + if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { t.Fatalf("pki role default values have changed, diff: %v", diff) } @@ -3830,9 +3946,11 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { "maintain_stored_certificate_counts": true, "publish_stored_certificate_count_metrics": true, }) + require.NoError(t, err, "failed calling auto-tidy") _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ "mounts": "pki/", }) + require.NoError(t, err, "failed calling backend reload") // Check the metrics initialized in order to calculate backendUUID for /pki // BackendUUID not consistent during tests with UUID from /sys/mounts/pki @@ -3987,6 +4105,7 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { expectedData := map[string]interface{}{ "safety_buffer": json.Number("1"), "issuer_safety_buffer": json.Number("31536000"), + "revocation_queue_safety_buffer": json.Number("172800"), "tidy_cert_store": true, "tidy_revoked_certs": true, "tidy_revoked_cert_issuer_associations": false, @@ -3994,11 +4113,14 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { "tidy_move_legacy_ca_bundle": false, "tidy_revocation_queue": false, "tidy_cross_cluster_revoked_certs": false, + "tidy_cert_metadata": false, + "tidy_cmpv2_nonce_store": false, "pause_duration": "0s", "state": "Finished", "error": nil, "time_started": nil, "time_finished": nil, + "last_auto_tidy_finished": nil, "message": nil, "cert_store_deleted_count": json.Number("1"), "revoked_cert_deleted_count": json.Number("1"), @@ -4008,6 +4130,14 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { "revocation_queue_deleted_count": json.Number("0"), "cross_revoked_cert_deleted_count": json.Number("0"), "internal_backend_uuid": backendUUID, + "tidy_acme": false, + "acme_account_safety_buffer": json.Number("2592000"), + "acme_orders_deleted_count": json.Number("0"), + "acme_account_revoked_count": json.Number("0"), + "acme_account_deleted_count": json.Number("0"), + "total_acme_account_count": json.Number("0"), + "cert_metadata_deleted_count": json.Number("0"), + "cmpv2_nonce_deleted_count": json.Number("0"), } // Let's copy the times from the response so that we can use deep.Equal() timeStarted, ok := tidyStatus.Data["time_started"] @@ -4020,6 +4150,7 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal("Expected tidy status response to include a value for time_finished") } expectedData["time_finished"] = timeFinished + expectedData["last_auto_tidy_finished"] = tidyStatus.Data["last_auto_tidy_finished"] if diff := deep.Equal(expectedData, tidyStatus.Data); diff != nil { t.Fatal(diff) @@ -4881,9 +5012,9 @@ func TestRootWithExistingKey(t *testing.T) { resp, err = CBList(b, s, "issuers") require.NoError(t, err) require.Equal(t, 3, len(resp.Data["keys"].([]string))) - require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuerID))) - require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuerID))) - require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuing.IssuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuing.IssuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuing.IssuerID))) } func TestIntermediateWithExistingKey(t *testing.T) { @@ -4984,12 +5115,13 @@ func TestIssuanceTTLs(t *testing.T) { }) require.Error(t, err, 
"expected issuance to fail due to longer default ttl than cert ttl") - resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ - "issuer_name": "root", + resp, err = CBPatch(b, s, "issuer/root", map[string]interface{}{ "leaf_not_after_behavior": "permit", }) require.NoError(t, err) require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "permit") _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ "common_name": "testing", @@ -5002,6 +5134,8 @@ func TestIssuanceTTLs(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "truncate") _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ "common_name": "testing", @@ -5662,17 +5796,18 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { } } - if b.certCount.Load() != 6 { - t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", b.certCount.Load()) + certCounter := b.GetCertificateCounter() + if certCounter.CertificateCount() != 6 { + t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", certCounter.CertificateCount()) } - if b.revokedCertCount.Load() != 2 { - t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", b.revokedCertCount.Load()) + if certCounter.RevokedCount() != 2 { + t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", certCounter.RevokedCount()) } // Simulates listing while initialize in progress, by "restarting it" - b.certCount.Store(0) - b.revokedCertCount.Store(0) - b.certsCounted.Store(false) + certCounter.certCount.Store(0) + certCounter.revokedCertCount.Store(0) + certCounter.certsCounted.Store(false) // Revoke certificates C, D dirtyRevocations := serials[2:4] @@ -5697,15 +5832,16 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { } // Run initialize - b.initializeStoredCertificateCounts(ctx) + err = b.initializeStoredCertificateCounts(ctx) + require.NoError(t, err, "failed initializing certificate counts") // Test certificate count - if b.certCount.Load() != 8 { - t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", b.certCount.Load()) + if certCounter.CertificateCount() != 8 { + t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", certCounter.CertificateCount()) } - if b.revokedCertCount.Load() != 4 { - t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", b.revokedCertCount.Load()) + if certCounter.RevokedCount() != 4 { + t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", certCounter.RevokedCount()) } return @@ -5951,7 +6087,7 @@ func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) { b, s := CreateBackendWithStorage(t) // Write an empty CRLConfig into storage. 
- crlConfigEntry, err := logical.StorageEntryJSON("config/crl", &crlConfig{}) + crlConfigEntry, err := logical.StorageEntryJSON("config/crl", &pki_backend.CrlConfig{}) require.NoError(t, err) err = s.Put(ctx, crlConfigEntry) require.NoError(t, err) @@ -5960,13 +6096,13 @@ func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) { require.NoError(t, err) require.NotNil(t, resp) require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry) - require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable) - require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable) - require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild) - require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod) - require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta) - require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) + require.Equal(t, resp.Data["expiry"], pki_backend.DefaultCrlConfig.Expiry) + require.Equal(t, resp.Data["disable"], pki_backend.DefaultCrlConfig.Disable) + require.Equal(t, resp.Data["ocsp_disable"], pki_backend.DefaultCrlConfig.OcspDisable) + require.Equal(t, resp.Data["auto_rebuild"], pki_backend.DefaultCrlConfig.AutoRebuild) + require.Equal(t, resp.Data["auto_rebuild_grace_period"], pki_backend.DefaultCrlConfig.AutoRebuildGracePeriod) + require.Equal(t, resp.Data["enable_delta"], pki_backend.DefaultCrlConfig.EnableDelta) + require.Equal(t, resp.Data["delta_rebuild_interval"], pki_backend.DefaultCrlConfig.DeltaRebuildInterval) } func TestPKI_ListRevokedCerts(t *testing.T) { @@ -6070,27 +6206,28 @@ func TestPKI_TemplatedAIAs(t *testing.T) { _, err = CBWrite(b, s, "config/urls", aiaData) require.NoError(t, err) - // But root generation will fail. + // Root generation should succeed, but without AIA info. rootData := map[string]interface{}{ "common_name": "Long-Lived Root X1", "issuer_name": "long-root-x1", "key_type": "ec", } - _, err = CBWrite(b, s, "root/generate/internal", rootData) - require.Error(t, err) - require.Contains(t, err.Error(), "unable to parse AIA URL") + resp, err = CBWrite(b, s, "root/generate/internal", rootData) + require.NoError(t, err) + _, err = CBDelete(b, s, "root") + require.NoError(t, err) - // Clearing the config and regenerating the root should succeed. + // Clearing the config and regenerating the root should still succeed. _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "crl_distribution_points": "", - "issuing_certificates": "", - "ocsp_servers": "", - "enable_templating": false, + "crl_distribution_points": "{{cluster_path}}/issuer/my-root-id/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/my-root-id/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, }) require.NoError(t, err) resp, err = CBWrite(b, s, "root/generate/internal", rootData) requireSuccessNonNilResponse(t, resp, err) - issuerId := string(resp.Data["issuer_id"].(issuerID)) + issuerId := string(resp.Data["issuer_id"].(issuing.IssuerID)) // Now write the original AIA config and sign a leaf. 
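The reworked templated-AIA test above configures config/urls with {{cluster_path}} and {{cluster_aia_path}} templates, which the backend resolves against the mount's configured cluster paths when building AIA extensions. A rough sketch of that substitution, assuming it amounts to replacing those two variables (the real resolver also validates the resulting URLs and supports issuer-ID templating):

```go
package main

import (
	"fmt"
	"strings"
)

// resolveAIATemplate substitutes the two cluster variables the test uses.
// This only illustrates the shape of the feature; the backend's resolver
// does more than plain string replacement.
func resolveAIATemplate(tmpl, clusterPath, clusterAIAPath string) string {
	out := strings.ReplaceAll(tmpl, "{{cluster_path}}", clusterPath)
	return strings.ReplaceAll(out, "{{cluster_aia_path}}", clusterAIAPath)
}

func main() {
	fmt.Println(resolveAIATemplate(
		"{{cluster_path}}/issuer/my-root-id/crl/der",
		"https://localhost/v1/pki",
		"http://localhost/v1/pki",
	))
}
```

The test then restores the original AIA data, as the next hunk shows, before signing a leaf.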
_, err = CBWrite(b, s, "config/urls", aiaData) @@ -6461,6 +6598,632 @@ func TestStandby_Operations(t *testing.T) { require.NotNil(t, resp, "got nil response from revoke request") } +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return err != nil && strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return err != nil && (strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation")) +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. 
+ resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. + client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. However, on OSS, we might end up with + // a regular 404, which looks like err == resp == nil; hence we only + // fail when there's a non-nil response and/or a non-nil err. 
+ resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow writing, but nothing else. + client.SetToken(token) + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. + resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + if resp != nil || err != nil { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly + shouldBeUnauthedReadWriteOnly +) + +var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, + shouldBeUnauthedReadWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + ctx := context.Background() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount PKI. + err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Setup basic configuration. 
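TestProperAuthing is table-driven: each mount-relative path maps to a pathAuthChecker expectation, and pathAuthChckerMap (defined above) resolves that expectation to the probe function that exercises the path. A stripped-down sketch of the same dispatch, with print statements standing in for the API probes:

```go
package main

import "fmt"

type pathAuthChecker int

const (
	shouldBeAuthed pathAuthChecker = iota
	shouldBeUnauthedReadList
)

// checkers maps each expectation to the probe that enforces it, mirroring
// the role pathAuthChckerMap plays; these probes just print instead of
// hitting a Vault API.
var checkers = map[pathAuthChecker]func(path string){
	shouldBeAuthed:           func(path string) { fmt.Println("expect auth required:", path) },
	shouldBeUnauthedReadList: func(path string) { fmt.Println("expect unauthed read/list:", path) },
}

func main() {
	paths := map[string]pathAuthChecker{
		"roles/test": shouldBeAuthed,
		"ca/pem":     shouldBeUnauthedReadList,
	}
	for path, expectation := range paths {
		checkers[expectation]("pki/" + path)
	}
}
```

The setup below provisions a root, a role, and one issued certificate whose serial number is substituted into the real table.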
+ _, err = client.Logical().WriteWithContext(ctx, "pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{ + "allow_localhost": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "pki/issue/test", map[string]interface{}{ + "common_name": "localhost", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + serial := resp.Data["serial_number"].(string) + eabKid := "13b80844-e60d-42d2-b7e9-152a8e834b90" + paths := map[string]pathAuthChecker{ + "ca_chain": shouldBeUnauthedReadList, + "cert/ca_chain": shouldBeUnauthedReadList, + "ca": shouldBeUnauthedReadList, + "ca/pem": shouldBeUnauthedReadList, + "cert/" + serial: shouldBeUnauthedReadList, + "cert/" + serial + "/raw": shouldBeUnauthedReadList, + "cert/" + serial + "/raw/pem": shouldBeUnauthedReadList, + "cert/crl": shouldBeUnauthedReadList, + "cert/crl/raw": shouldBeUnauthedReadList, + "cert/crl/raw/pem": shouldBeUnauthedReadList, + "cert/delta-crl": shouldBeUnauthedReadList, + "cert/delta-crl/raw": shouldBeUnauthedReadList, + "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-crl": shouldBeUnauthedReadList, + "cert/unified-crl/raw": shouldBeUnauthedReadList, + "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-delta-crl": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, + issuing.PathCerts: shouldBeAuthed, + "certs/revoked/": shouldBeAuthed, + "certs/revocation-queue/": shouldBeAuthed, + "certs/unified-revoked/": shouldBeAuthed, + "config/acme": shouldBeAuthed, + "config/auto-tidy": shouldBeAuthed, + "config/ca": shouldBeAuthed, + "config/cluster": shouldBeAuthed, + "config/crl": shouldBeAuthed, + "config/issuers": shouldBeAuthed, + "config/keys": shouldBeAuthed, + "config/urls": shouldBeAuthed, + "crl": shouldBeUnauthedReadList, + "crl/pem": shouldBeUnauthedReadList, + "crl/delta": shouldBeUnauthedReadList, + "crl/delta/pem": shouldBeUnauthedReadList, + "crl/rotate": shouldBeAuthed, + "crl/rotate-delta": shouldBeAuthed, + "intermediate/cross-sign": shouldBeAuthed, + "intermediate/generate/exported": shouldBeAuthed, + "intermediate/generate/internal": shouldBeAuthed, + "intermediate/generate/existing": shouldBeAuthed, + "intermediate/generate/kms": shouldBeAuthed, + "intermediate/set-signed": shouldBeAuthed, + "issue/test": shouldBeAuthed, + "issuer/default": shouldBeAuthed, + "issuer/default/der": shouldBeUnauthedReadList, + "issuer/default/json": shouldBeUnauthedReadList, + "issuer/default/pem": shouldBeUnauthedReadList, + "issuer/default/crl": shouldBeUnauthedReadList, + "issuer/default/crl/pem": shouldBeUnauthedReadList, + "issuer/default/crl/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta": shouldBeUnauthedReadList, + "issuer/default/crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl": shouldBeUnauthedReadList, + "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/issue/test": shouldBeAuthed, + 
"issuer/default/resign-crls": shouldBeAuthed, + "issuer/default/revoke": shouldBeAuthed, + "issuer/default/sign-intermediate": shouldBeAuthed, + "issuer/default/sign-revocation-list": shouldBeAuthed, + "issuer/default/sign-self-issued": shouldBeAuthed, + "issuer/default/sign-verbatim": shouldBeAuthed, + "issuer/default/sign-verbatim/test": shouldBeAuthed, + "issuer/default/sign/test": shouldBeAuthed, + "issuers/": shouldBeUnauthedReadList, + "issuers/generate/intermediate/exported": shouldBeAuthed, + "issuers/generate/intermediate/internal": shouldBeAuthed, + "issuers/generate/intermediate/existing": shouldBeAuthed, + "issuers/generate/intermediate/kms": shouldBeAuthed, + "issuers/generate/root/exported": shouldBeAuthed, + "issuers/generate/root/internal": shouldBeAuthed, + "issuers/generate/root/existing": shouldBeAuthed, + "issuers/generate/root/kms": shouldBeAuthed, + "issuers/import/cert": shouldBeAuthed, + "issuers/import/bundle": shouldBeAuthed, + "key/default": shouldBeAuthed, + "keys/": shouldBeAuthed, + "keys/generate/internal": shouldBeAuthed, + "keys/generate/exported": shouldBeAuthed, + "keys/generate/kms": shouldBeAuthed, + "keys/import": shouldBeAuthed, + "ocsp": shouldBeUnauthedWriteOnly, + "ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "revoke": shouldBeAuthed, + "revoke-with-key": shouldBeAuthed, + "roles/test": shouldBeAuthed, + "roles/": shouldBeAuthed, + "root": shouldBeAuthed, + "root/generate/exported": shouldBeAuthed, + "root/generate/internal": shouldBeAuthed, + "root/generate/existing": shouldBeAuthed, + "root/generate/kms": shouldBeAuthed, + "root/replace": shouldBeAuthed, + "root/rotate/internal": shouldBeAuthed, + "root/rotate/exported": shouldBeAuthed, + "root/rotate/existing": shouldBeAuthed, + "root/rotate/kms": shouldBeAuthed, + "root/sign-intermediate": shouldBeAuthed, + "root/sign-self-issued": shouldBeAuthed, + "sign-verbatim": shouldBeAuthed, + "sign-verbatim/test": shouldBeAuthed, + "sign/test": shouldBeAuthed, + "tidy": shouldBeAuthed, + "tidy-cancel": shouldBeAuthed, + "tidy-status": shouldBeAuthed, + "unified-crl": shouldBeUnauthedReadList, + "unified-crl/pem": shouldBeUnauthedReadList, + "unified-crl/delta": shouldBeUnauthedReadList, + "unified-crl/delta/pem": shouldBeUnauthedReadList, + "unified-ocsp": shouldBeUnauthedWriteOnly, + "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "eab/": shouldBeAuthed, + "eab/" + eabKid: shouldBeAuthed, + } + + entPaths := getEntProperAuthingPaths(serial) + maps.Copy(paths, entPaths) + + // Add ACME based paths to the test suite + ossAcmePrefixes := []string{"acme/", "issuer/default/acme/", "roles/test/acme/", "issuer/default/roles/test/acme/"} + entAcmePrefixes := getEntAcmePrefixes() + for _, acmePrefix := range append(ossAcmePrefixes, entAcmePrefixes...) 
{ + paths[acmePrefix+"directory"] = shouldBeUnauthedReadList + paths[acmePrefix+"new-nonce"] = shouldBeUnauthedReadList + paths[acmePrefix+"new-account"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"revoke-cert"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"new-order"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"orders"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"account/hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo="] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"authorization/29da8c38-7a09-465e-b9a6-3d76802b1afd"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"challenge/29da8c38-7a09-465e-b9a6-3d76802b1afd/http-01"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/finalize"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/cert"] = shouldBeUnauthedWriteOnly + + // Make sure this new-eab path is auth'd + paths[acmePrefix+"new-eab"] = shouldBeAuthed + } + + for path, checkerType := range paths { + checker := pathAuthChckerMap[checkerType] + checker(t, client, "pki/"+path, token) + } + + client.SetToken(token) + openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") + if err != nil { + t.Fatalf("failed to get openapi data: %v", err) + } + + validatedPath := false + for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { + if !strings.HasPrefix(openapi_path, "/pki/") { + t.Logf("Skipping path: %v", openapi_path) + continue + } + + t.Logf("Validating path: %v", openapi_path) + validatedPath = true + // Substitute values in from our testing map. + raw_path := openapi_path[5:] + if strings.Contains(raw_path, "roles/") && strings.Contains(raw_path, "{name}") { + raw_path = strings.ReplaceAll(raw_path, "{name}", "test") + } + if strings.Contains(raw_path, "{role}") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test") + } + if strings.Contains(raw_path, "ocsp/") && strings.Contains(raw_path, "{req}") { + raw_path = strings.ReplaceAll(raw_path, "{req}", "dGVzdAo=") + } + if strings.Contains(raw_path, "{issuer_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{issuer_ref}", "default") + } + if strings.Contains(raw_path, "{key_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{key_ref}", "default") + } + if strings.Contains(raw_path, "{exported}") { + raw_path = strings.ReplaceAll(raw_path, "{exported}", "internal") + } + if strings.Contains(raw_path, "{serial}") { + raw_path = strings.ReplaceAll(raw_path, "{serial}", serial) + } + if strings.Contains(raw_path, "acme/account/") && strings.Contains(raw_path, "{kid}") { + raw_path = strings.ReplaceAll(raw_path, "{kid}", "hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo=") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{auth_id}") { + raw_path = strings.ReplaceAll(raw_path, "{auth_id}", "29da8c38-7a09-465e-b9a6-3d76802b1afd") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{challenge_type}") { + raw_path = strings.ReplaceAll(raw_path, "{challenge_type}", "http-01") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{order_id}") { + raw_path = strings.ReplaceAll(raw_path, "{order_id}", "13b80844-e60d-42d2-b7e9-152a8e834b90") + } + if strings.Contains(raw_path, "eab") && strings.Contains(raw_path, "{key_id}") { + raw_path = strings.ReplaceAll(raw_path, "{key_id}", eabKid) + } + if strings.Contains(raw_path, 
"external-policy/") && strings.Contains(raw_path, "{policy}") { + raw_path = strings.ReplaceAll(raw_path, "{policy}", "a-policy") + } + + raw_path = entProperAuthingPathReplacer(raw_path) + + handler, present := paths[raw_path] + if !present { + t.Fatalf("OpenAPI reports PKI mount contains %v -> %v but was not tested to be authed or not authed.", + openapi_path, raw_path) + } + + openapi_data := raw_data.(map[string]interface{}) + hasList := false + rawGetData, hasGet := openapi_data["get"] + if hasGet { + getData := rawGetData.(map[string]interface{}) + getParams, paramsPresent := getData["parameters"].(map[string]interface{}) + if getParams != nil && paramsPresent { + if _, hasList = getParams["list"]; hasList { + // LIST is exclusive from GET on the same endpoint usually. + hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities: %v->%v", openapi_path, raw_path) + } + } else if handler == shouldBeUnauthedWriteOnly { + if hasGet || hasList { + t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities: %v->%v", openapi_path, raw_path) + } + } else if handler == shouldBeUnauthedReadWriteOnly { + if hasDelete || hasList { + t.Fatalf("Unauthed read-write-only endpoints should not have DELETE/LIST capabilities: %v->%v", openapi_path, raw_path) + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} + +func TestPatchIssuer(t *testing.T) { + t.Parallel() + + type TestCase struct { + Field string + Before interface{} + Patched interface{} + } + testCases := []TestCase{ + { + Field: "issuer_name", + Before: "root", + Patched: "root-new", + }, + { + Field: "leaf_not_after_behavior", + Before: "err", + Patched: "permit", + }, + { + Field: "usage", + Before: "crl-signing,issuing-certificates,ocsp-signing,read-only", + Patched: "issuing-certificates,read-only", + }, + { + Field: "revocation_signature_algorithm", + Before: "ECDSAWithSHA256", + Patched: "ECDSAWithSHA384", + }, + { + Field: "issuing_certificates", + Before: []string{"http://localhost/v1/pki-1/ca"}, + Patched: []string{"http://localhost/v1/pki/ca"}, + }, + { + Field: "crl_distribution_points", + Before: []string{"http://localhost/v1/pki-1/crl"}, + Patched: []string{"http://localhost/v1/pki/crl"}, + }, + { + Field: "ocsp_servers", + Before: []string{"http://localhost/v1/pki-1/ocsp"}, + Patched: []string{"http://localhost/v1/pki/ocsp"}, + }, + { + Field: "enable_aia_url_templating", + Before: false, + Patched: true, + }, + { + Field: "manual_chain", + Before: []string(nil), + Patched: []string{"self"}, + }, + } + + for index, testCase := range testCases { + t.Logf("index: %v / tc: %v", index, testCase) + + b, s := CreateBackendWithStorage(t) + + // 1. Setup root issuer. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Vault Root CA", + "key_type": "ec", + "ttl": "7200h", + "issuer_name": "root", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + id := string(resp.Data["issuer_id"].(issuing.IssuerID)) + + // 2. Enable Cluster paths + resp, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "path": "https://localhost/v1/pki", + "aia_path": "http://localhost/v1/pki", + }) + requireSuccessNonNilResponse(t, resp, err, "failed updating AIA config") + + // 3. 
Add AIA information + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": "http://localhost/v1/pki-1/ca", + "crl_distribution_points": "http://localhost/v1/pki-1/crl", + "ocsp_servers": "http://localhost/v1/pki-1/ocsp", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up issuer") + + // 4. Read the issuer before. + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer before") + require.Equal(t, testCase.Before, resp.Data[testCase.Field], "bad expectations") + + // 5. Perform modification. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + testCase.Field: testCase.Patched, + }) + requireSuccessNonNilResponse(t, resp, err, "failed patching root issuer") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field], "failed persisting value") + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + + // 6. Ensure it stuck + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer after") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field]) + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + } +} + +func TestGenerateRootCAWithAIA(t *testing.T) { + // Generate a root CA at /pki-root + b_root, s_root := CreateBackendWithStorage(t) + + // Setup templated AIA information + _, err := CBWrite(b_root, s_root, "config/cluster", map[string]interface{}{ + "path": "https://localhost:8200", + "aia_path": "https://localhost:8200", + }) + require.NoError(t, err, "failed to write AIA settings") + + _, err = CBWrite(b_root, s_root, "config/urls", map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + }) + require.NoError(t, err, "failed to write AIA settings") + + // Write a root issuer, this should succeed. + resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "expected root generation to succeed") +} + var ( initTest sync.Once rsaCAKey string diff --git a/builtin/logical/pki/ca_test.go b/builtin/logical/pki/ca_test.go index 7dbffef24774..4517604f8a0d 100644 --- a/builtin/logical/pki/ca_test.go +++ b/builtin/logical/pki/ca_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go index 129c6a923d1b..2d2478ab8e46 100644 --- a/builtin/logical/pki/ca_util.go +++ b/builtin/logical/pki/ca_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -13,14 +13,15 @@ import ( "io" "time" - "golang.org/x/crypto/ed25519" - + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ed25519" ) -func getGenerationParams(sc *storageContext, data *framework.FieldData) (exported bool, format string, role *roleEntry, errorResp *logical.Response) { +func getGenerationParams(sc *storageContext, data *framework.FieldData) (exported bool, format string, role *issuing.RoleEntry, errorResp *logical.Response) { exportedStr := data.Get("exported").(string) switch exportedStr { case "exported": @@ -47,7 +48,7 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte return } - role = &roleEntry{ + role = &issuing.RoleEntry{ TTL: time.Duration(data.Get("ttl").(int)) * time.Second, KeyType: keyType, KeyBits: keyBits, @@ -71,6 +72,7 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte PostalCode: data.Get("postal_code").([]string), NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, CNValidations: []string{"disabled"}, + KeyUsage: data.Get("key_usage").([]string), } *role.AllowWildcardCertificates = true @@ -83,14 +85,13 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.CreationBundle, randomSource io.Reader) (*certutil.ParsedCertBundle, error) { ctx := sc.Context - b := sc.Backend if kmsRequested(input) { keyId, err := getManagedKeyId(input.apiData) if err != nil { return nil, err } - return generateManagedKeyCABundle(ctx, b, keyId, data, randomSource) + return managed_key.GenerateManagedKeyCABundle(ctx, sc.GetPkiManagedView(), keyId, data, randomSource) } if existingKeyRequested(input) { @@ -104,12 +105,12 @@ func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.Cre return nil, err } - if keyEntry.isManagedPrivateKey() { - keyId, err := keyEntry.getManagedKeyUUID() + if keyEntry.IsManagedPrivateKey() { + keyId, err := issuing.GetManagedKeyUUID(keyEntry) if err != nil { return nil, err } - return generateManagedKeyCABundle(ctx, b, keyId, data, randomSource) + return managed_key.GenerateManagedKeyCABundle(ctx, sc.GetPkiManagedView(), keyId, data, randomSource) } return certutil.CreateCertificateWithKeyGenerator(data, randomSource, existingKeyGeneratorFromBytes(keyEntry)) @@ -120,7 +121,6 @@ func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.Cre func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (*certutil.ParsedCSRBundle, error) { ctx := sc.Context - b := sc.Backend if kmsRequested(input) { keyId, err := getManagedKeyId(input.apiData) @@ -128,7 +128,7 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr return nil, err } - return generateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) + return managed_key.GenerateManagedKeyCSRBundle(ctx, sc.GetPkiManagedView(), keyId, data, addBasicConstraints, randomSource) } if existingKeyRequested(input) { @@ -142,12 +142,12 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr return nil, 
err } - if key.isManagedPrivateKey() { - keyId, err := key.getManagedKeyUUID() + if key.IsManagedPrivateKey() { + keyId, err := issuing.GetManagedKeyUUID(key) if err != nil { return nil, err } - return generateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) + return managed_key.GenerateManagedKeyCSRBundle(ctx, sc.GetPkiManagedView(), keyId, data, addBasicConstraints, randomSource) } return certutil.CreateCSRWithKeyGenerator(data, addBasicConstraints, randomSource, existingKeyGeneratorFromBytes(key)) @@ -156,11 +156,8 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr return certutil.CreateCSRWithRandomSource(data, addBasicConstraints, randomSource) } -func parseCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { - if bundle.PrivateKeyType == certutil.ManagedPrivateKey { - return parseManagedKeyCABundle(ctx, b, bundle) - } - return bundle.ToParsedCertBundle() +func parseCABundle(ctx context.Context, mkv managed_key.PkiManagedKeyView, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + return issuing.ParseCABundle(ctx, mkv, bundle) } func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (string, int, error) { @@ -192,7 +189,7 @@ func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (s return "", 0, errors.New("unable to determine managed key id: " + err.Error()) } - pubKeyManagedKey, err := getManagedKeyPublicKey(sc.Context, sc.Backend, keyId) + pubKeyManagedKey, err := managed_key.GetManagedKeyPublicKey(sc.Context, sc.GetPkiManagedView(), keyId) if err != nil { return "", 0, errors.New("failed to lookup public key from managed key: " + err.Error()) } @@ -224,7 +221,7 @@ func (sc *storageContext) getExistingPublicKey(data *framework.FieldData) (crypt if err != nil { return nil, err } - return getPublicKey(sc.Context, sc.Backend, key) + return getPublicKey(sc.Context, sc.GetPkiManagedView(), key) } func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.PrivateKeyType, int, error) { @@ -237,7 +234,7 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr keyBits = certutil.GetPublicKeySize(pubKey) case *ecdsa.PublicKey: keyType = certutil.ECPrivateKey - case *ed25519.PublicKey: + case ed25519.PublicKey: keyType = certutil.Ed25519PrivateKey default: return certutil.UnknownPrivateKey, 0, fmt.Errorf("unsupported public key: %#v", pubKey) @@ -245,7 +242,7 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr return keyType, keyBits, nil } -func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*keyEntry, error) { +func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*issuing.KeyEntry, error) { keyId, err := sc.resolveKeyReference(keyRef) if err != nil { return nil, err @@ -253,7 +250,7 @@ func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*keyEntry, error return sc.fetchKeyById(keyId) } -func existingKeyGeneratorFromBytes(key *keyEntry) certutil.KeyGenerator { +func existingKeyGeneratorFromBytes(key *issuing.KeyEntry) certutil.KeyGenerator { return func(_ string, _ int, container certutil.ParsedPrivateKeyContainer, _ io.Reader) error { signer, _, pemBytes, err := getSignerFromKeyEntryBytes(key) if err != nil { diff --git a/builtin/logical/pki/ca_util_test.go b/builtin/logical/pki/ca_util_test.go new file mode 100644 index 000000000000..d4ef64e68fe1 --- /dev/null +++ b/builtin/logical/pki/ca_util_test.go @@ 
-0,0 +1,82 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "testing" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +func TestGetKeyTypeAndBitsFromPublicKeyForRole(t *testing.T) { + rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("error generating rsa key: %s", err) + } + + ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + t.Fatalf("error generating ecdsa key: %s", err) + } + + publicKey, _, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatalf("error generating ed25519 key: %s", err) + } + + testCases := map[string]struct { + publicKey crypto.PublicKey + expectedKeyType certutil.PrivateKeyType + expectedKeyBits int + expectError bool + }{ + "rsa": { + publicKey: rsaKey.Public(), + expectedKeyType: certutil.RSAPrivateKey, + expectedKeyBits: 2048, + }, + "ecdsa": { + publicKey: ecdsaKey.Public(), + expectedKeyType: certutil.ECPrivateKey, + expectedKeyBits: 0, + }, + "ed25519": { + publicKey: publicKey, + expectedKeyType: certutil.Ed25519PrivateKey, + expectedKeyBits: 0, + }, + "bad key type": { + publicKey: []byte{}, + expectedKeyType: certutil.UnknownPrivateKey, + expectedKeyBits: 0, + expectError: true, + }, + } + + for name, tt := range testCases { + t.Run(name, func(t *testing.T) { + keyType, keyBits, err := getKeyTypeAndBitsFromPublicKeyForRole(tt.publicKey) + if err != nil && !tt.expectError { + t.Fatalf("unexpected error: %s", err) + } + if err == nil && tt.expectError { + t.Fatal("expected error, got nil") + } + + if keyType != tt.expectedKeyType { + t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType) + } + + if keyBits != tt.expectedKeyBits { + t.Fatalf("key bits mismatch: expected %d, got %d", tt.expectedKeyBits, keyBits) + } + }) + } +} diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index 435a89dc70fa..5111699ab8c5 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -1,20 +1,15 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "encoding/base64" - "encoding/hex" "encoding/pem" - "errors" "fmt" "io" "math/big" @@ -25,19 +20,19 @@ import ( "strings" "time" - "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/ryanuber/go-glob" "golang.org/x/crypto/cryptobyte" cbbasn1 "golang.org/x/crypto/cryptobyte/asn1" - "golang.org/x/net/idna" ) type inputBundle struct { - role *roleEntry + role *issuing.RoleEntry req *logical.Request apiData *framework.FieldData } @@ -68,10 +63,60 @@ var ( middleWildRegex = labelRegex + `\*` + labelRegex leftWildLabelRegex = regexp.MustCompile(`^(` + allWildRegex + `|` + startWildRegex + `|` + endWildRegex + `|` + middleWildRegex + `)$`) - // OIDs for X.509 certificate extensions used below. 
- oidExtensionSubjectAltName = []int{2, 5, 29, 17} + // Cloned from https://github.com/golang/go/blob/82c713feb05da594567631972082af2fcba0ee4f/src/crypto/x509/x509.go#L327-L379 + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} + + signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash + }{ + {x509.MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, + {x509.PureEd25519, "Ed25519", oidSignatureEd25519, x509.Ed25519, crypto.Hash(0) /* no pre-hashing */}, + } ) +func doesPublicKeyAlgoMatchSignatureAlgo(pubKey x509.PublicKeyAlgorithm, algo x509.SignatureAlgorithm) bool { + for _, detail := range signatureAlgorithmDetails { + if detail.algo == algo { + return pubKey == detail.pubKeyAlgo + } + } + + return false +} + func getFormat(data *framework.FieldData) string { format := data.Get("format").(string) switch format { @@ -87,87 +132,56 @@ func getFormat(data *framework.FieldData) string { // fetchCAInfo will fetch the CA info, will return 
an error if no ca info exists, this does NOT support
// loading using the legacyBundleShimID and should be used with care. This should be called only once
// within the request path otherwise you run the risk of a race condition with the issuer migration on perf-secondaries.
-func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, error) {
-	var issuerId issuerID
+func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuing.IssuerUsage) (*certutil.CAInfoBundle, error) {
+	bundle, _, err := sc.fetchCAInfoWithIssuer(issuerRef, usage)
+	return bundle, err
+}
+
+func (sc *storageContext) fetchCAInfoWithIssuer(issuerRef string, usage issuing.IssuerUsage) (*certutil.CAInfoBundle, issuing.IssuerID, error) {
+	var issuerId issuing.IssuerID

-	if sc.Backend.useLegacyBundleCaStorage() {
+	if sc.UseLegacyBundleCaStorage() {
 		// We have not completed the migration so attempt to load the bundle from the legacy location
-		sc.Backend.Logger().Info("Using legacy CA bundle as PKI migration has not completed.")
+		sc.Logger().Info("Using legacy CA bundle as PKI migration has not completed.")
 		issuerId = legacyBundleShimID
 	} else {
 		var err error
 		issuerId, err = sc.resolveIssuerReference(issuerRef)
 		if err != nil {
 			// Usually a bad label from the user or mis-configured default.
-			return nil, errutil.UserError{Err: err.Error()}
-		}
-	}
-
-	return sc.fetchCAInfoByIssuerId(issuerId, usage)
-}
-
-// fetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId.
-// This does support the loading using the legacyBundleShimID
-func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerUsage) (*certutil.CAInfoBundle, error) {
-	entry, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true)
-	if err != nil {
-		switch err.(type) {
-		case errutil.UserError:
-			return nil, err
-		case errutil.InternalError:
-			return nil, err
-		default:
-			return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA info: %v", err)}
+			return nil, issuing.IssuerRefNotFound, errutil.UserError{Err: err.Error()}
 		}
 	}

-	if err := entry.EnsureUsage(usage); err != nil {
-		return nil, errutil.InternalError{Err: fmt.Sprintf("error while attempting to use issuer %v: %v", issuerId, err)}
-	}
-
-	parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle)
+	bundle, err := sc.fetchCAInfoByIssuerId(issuerId, usage)
 	if err != nil {
-		return nil, errutil.InternalError{Err: err.Error()}
+		return nil, issuing.IssuerRefNotFound, err
 	}

-	if parsedBundle.Certificate == nil {
-		return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"}
-	}
-	if parsedBundle.PrivateKey == nil {
-		return nil, errutil.UserError{Err: fmt.Sprintf("unable to fetch corresponding key for issuer %v; unable to use this issuer for signing", issuerId)}
-	}
-
-	caInfo := &certutil.CAInfoBundle{
-		ParsedCertBundle:     *parsedBundle,
-		URLs:                 nil,
-		LeafNotAfterBehavior: entry.LeafNotAfterBehavior,
-		RevocationSigAlg:     entry.RevocationSigAlg,
-	}
-
-	entries, err := entry.GetAIAURLs(sc)
-	if err != nil {
-		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)}
-	}
-	caInfo.URLs = entries
+	return bundle, issuerId, nil
+}

-	return caInfo, nil
+// fetchCAInfoByIssuerId fetches the CA info, returning an error if no CA info exists for the given issuerId.
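+// (legacyBundleShimID is the synthetic issuer ID that refers to the pre-migration legacy CA bundle.)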
+// Unlike fetchCAInfo, this function does support loading via the legacyBundleShimID.
+func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuing.IssuerID, usage issuing.IssuerUsage) (*certutil.CAInfoBundle, error) {
+	return issuing.FetchCAInfoByIssuerId(sc.Context, sc.Storage, sc.GetPkiManagedView(), issuerId, usage)
 }

 func fetchCertBySerialBigInt(sc *storageContext, prefix string, serial *big.Int) (*logical.StorageEntry, error) {
 	return fetchCertBySerial(sc, prefix, serialFromBigInt(serial))
 }

-// Allows fetching certificates from the backend; it handles the slightly
+// fetchCertBySerial allows fetching certificates from the backend; it handles the slightly
 // separate pathing for CRL, and revoked certificates.
 //
 // Support for fetching CA certificates was removed, due to the new issuers
 // changes.
-func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.StorageEntry, error) {
+func fetchCertBySerial(sc pki_backend.StorageContext, prefix, serial string) (*logical.StorageEntry, error) {
 	var path, legacyPath string
 	var err error
 	var certEntry *logical.StorageEntry

-	hyphenSerial := normalizeSerial(serial)
+	hyphenSerial := parsing.NormalizeSerialForStorage(serial)
 	colonSerial := strings.ReplaceAll(strings.ToLower(serial), "-", ":")

 	switch {
@@ -176,30 +190,38 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor
 	case strings.HasPrefix(prefix, "revoked/"):
 		legacyPath = "revoked/" + colonSerial
 		path = "revoked/" + hyphenSerial
-	case serial == legacyCRLPath || serial == deltaCRLPath || serial == unifiedCRLPath || serial == unifiedDeltaCRLPath:
-		if err = sc.Backend.crlBuilder.rebuildIfForced(sc); err != nil {
+	case serial == issuing.LegacyCRLPath || serial == issuing.DeltaCRLPath || serial == issuing.UnifiedCRLPath || serial == issuing.UnifiedDeltaCRLPath:
+		warnings, err := sc.CrlBuilder().RebuildIfForced(sc)
+		if err != nil {
 			return nil, err
 		}
+		if len(warnings) > 0 {
+			msg := "During rebuild of CRL for cert fetch, got the following warnings:"
+			for index, warning := range warnings {
+				msg = fmt.Sprintf("%v\n %d. 
%v", msg, index+1, warning) + } + sc.Logger().Warn(msg) + } - unified := serial == unifiedCRLPath || serial == unifiedDeltaCRLPath - path, err = sc.resolveIssuerCRLPath(defaultRef, unified) + unified := serial == issuing.UnifiedCRLPath || serial == issuing.UnifiedDeltaCRLPath + path, err = issuing.ResolveIssuerCRLPath(sc.GetContext(), sc.GetStorage(), sc.UseLegacyBundleCaStorage(), issuing.DefaultRef, unified) if err != nil { return nil, err } - if serial == deltaCRLPath || serial == unifiedDeltaCRLPath { - if sc.Backend.useLegacyBundleCaStorage() { + if serial == issuing.DeltaCRLPath || serial == issuing.UnifiedDeltaCRLPath { + if sc.UseLegacyBundleCaStorage() { return nil, fmt.Errorf("refusing to serve delta CRL with legacy CA bundle") } - path += deltaCRLPathSuffix + path += issuing.DeltaCRLPathSuffix } default: - legacyPath = "certs/" + colonSerial - path = "certs/" + hyphenSerial + legacyPath = issuing.PathCerts + colonSerial + path = issuing.PathCerts + hyphenSerial } - certEntry, err = sc.Storage.Get(sc.Context, path) + certEntry, err = sc.GetStorage().Get(sc.GetContext(), path) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)} } @@ -219,7 +241,7 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // always manifest on Windows, and thus the initial check for a revoked // cert fails would return an error when the cert isn't revoked, preventing // the happy path from working. - certEntry, _ = sc.Storage.Get(sc.Context, legacyPath) + certEntry, _ = sc.GetStorage().Get(sc.GetContext(), legacyPath) if certEntry == nil { return nil, nil } @@ -229,17 +251,18 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // Update old-style paths to new-style paths certEntry.Key = path - certsCounted := sc.Backend.certsCounted.Load() - if err = sc.Storage.Put(sc.Context, certEntry); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)} + certCounter := sc.GetCertificateCounter() + certsCounted := certCounter.IsInitialized() + if err = sc.GetStorage().Put(sc.GetContext(), certEntry); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location: %s", serial, err)} } - if err = sc.Storage.Delete(sc.Context, legacyPath); err != nil { + if err = sc.GetStorage().Delete(sc.GetContext(), legacyPath); err != nil { // If we fail here, we have an extra (copy) of a cert in storage, add to metrics: switch { case strings.HasPrefix(prefix, "revoked/"): - sc.Backend.ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted, path) + certCounter.IncrementTotalRevokedCertificatesCount(certsCounted, path) default: - sc.Backend.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, path) + certCounter.IncrementTotalCertificatesCount(certsCounted, path) } return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)} } @@ -249,339 +272,32 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // Given a URI SAN, verify that it is allowed. 
func validateURISAN(b *backend, data *inputBundle, uri string) bool { - valid := false - for _, allowed := range data.role.AllowedURISANs { - if data.role.AllowedURISANsTemplate { - isTemplate, _ := framework.ValidateIdentityTemplate(allowed) - if isTemplate && data.req.EntityID != "" { - tmpAllowed, err := framework.PopulateIdentityTemplate(allowed, data.req.EntityID, b.System()) - if err != nil { - continue - } - allowed = tmpAllowed - } - } - validURI := glob.Glob(allowed, uri) - if validURI { - valid = true - break - } - } - return valid + entityInfo := issuing.NewEntityInfoFromReq(data.req) + return issuing.ValidateURISAN(b.System(), data.role, entityInfo, uri) } // Validates a given common name, ensuring it's either an email or a hostname // after validating it according to the role parameters, or disables // validation altogether. func validateCommonName(b *backend, data *inputBundle, name string) string { - isDisabled := len(data.role.CNValidations) == 1 && data.role.CNValidations[0] == "disabled" - if isDisabled { - return "" - } - - if validateNames(b, data, []string{name}) != "" { - return name - } - - // Validations weren't disabled, but the role lacked CN Validations, so - // don't restrict types. This case is hit in certain existing tests. - if len(data.role.CNValidations) == 0 { - return "" - } + entityInfo := issuing.NewEntityInfoFromReq(data.req) + return issuing.ValidateCommonName(b.System(), data.role, entityInfo, name) +} - // If there's an at in the data, ensure email type validation is allowed. - // Otherwise, ensure hostname is allowed. - if strings.Contains(name, "@") { - var allowsEmails bool - for _, validation := range data.role.CNValidations { - if validation == "email" { - allowsEmails = true - break - } - } - if !allowsEmails { - return name - } - } else { - var allowsHostnames bool - for _, validation := range data.role.CNValidations { - if validation == "hostname" { - allowsHostnames = true - break - } - } - if !allowsHostnames { - return name - } - } +func isWildcardDomain(name string) bool { + return issuing.IsWildcardDomain(name) +} - return "" +func validateWildcardDomain(name string) (string, string, error) { + return issuing.ValidateWildcardDomain(name) } // Given a set of requested names for a certificate, verifies that all of them // match the various toggles set in the role for controlling issuance. // If one does not pass, it is returned in the string argument. func validateNames(b *backend, data *inputBundle, names []string) string { - for _, name := range names { - // Previously, reducedName was called sanitizedName but this made - // little sense under the previous interpretation of wildcards, - // leading to two bugs in this implementation. We presently call it - // "reduced" to indicate that it is still untrusted input (potentially - // different from the bare Common Name entry we're validating), it - // might have been modified such as by the removal of wildcard labels - // or the email prefix. - reducedName := name - emailDomain := reducedName - wildcardLabel := "" - isEmail := false - isWildcard := false - - // If it has an @, assume it is an email address and separate out the - // user from the hostname portion so that we can act on the hostname. - // Note that this matches behavior from the alt_names parameter. If it - // ends up being problematic for users, I guess that could be separated - // into dns_names and email_names in the future to be explicit, but I - // don't think this is likely. 
- if strings.Contains(reducedName, "@") { - splitEmail := strings.Split(reducedName, "@") - if len(splitEmail) != 2 { - return name - } - reducedName = splitEmail[1] - emailDomain = splitEmail[1] - isEmail = true - } - - // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier - // RFC 2818 which no modern client will validate against, there are two - // main types of wildcards, each with a single wildcard specifier (`*`, - // functionally different from the `*` used as a glob from the - // AllowGlobDomains parsing path) in the left-most label: - // - // 1. Entire label is a single wildcard character (most common and - // well-supported), - // 2. Part of the label contains a single wildcard character (e.g. per - /// RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). - // - // We permit issuance of both but not the older RFC 2818 style under - // the new AllowWildcardCertificates option. However, anything with a - // glob character is technically a wildcard. - if strings.Contains(reducedName, "*") { - // Regardless of later rejections below, this common name contains - // a wildcard character and is thus technically a wildcard name. - isWildcard = true - - // Additionally, if AllowWildcardCertificates is explicitly - // forbidden, it takes precedence over AllowAnyName, thus we should - // reject the name now. - // - // We expect the role to have been correctly migrated but guard for - // safety. - if data.role.AllowWildcardCertificates != nil && !*data.role.AllowWildcardCertificates { - return name - } - - if strings.Count(reducedName, "*") > 1 { - // As mentioned above, only one wildcard character is permitted - // under RFC 6125 semantics. - return name - } - - // Split the Common Name into two parts: a left-most label and the - // remaining segments (if present). - splitLabels := strings.SplitN(reducedName, ".", 2) - if len(splitLabels) != 2 { - // We've been given a single-part domain name that consists - // entirely of a wildcard. This is a little tricky to handle, - // but EnforceHostnames validates both the wildcard-containing - // label and the reduced name, but _only_ the latter if it is - // non-empty. This allows us to still validate the only label - // component matches hostname expectations still. - wildcardLabel = splitLabels[0] - reducedName = "" - } else { - // We have a (at least) two label domain name. But before we can - // update our names, we need to validate the wildcard ended up - // in the segment we expected it to. While this is (kinda) - // validated under EnforceHostnames's leftWildLabelRegex, we - // still need to validate it in the non-enforced mode. - // - // By validated assumption above, we know there's strictly one - // wildcard in this domain so we only need to check the wildcard - // label or the reduced name (as one is equivalent to the other). - // Because we later assume reducedName _lacks_ wildcard segments, - // we validate that. - wildcardLabel = splitLabels[0] - reducedName = splitLabels[1] - if strings.Contains(reducedName, "*") { - return name - } - } - } - - // Email addresses using wildcard domain names do not make sense - // in a Common Name field. - if isEmail && isWildcard { - return name - } - - // AllowAnyName is checked after this because EnforceHostnames still - // applies when allowing any name. Also, we check the reduced name to - // ensure that we are not either checking a full email address or a - // wildcard prefix. 
- if data.role.EnforceHostnames { - if reducedName != "" { - // See note above about splitLabels having only one segment - // and setting reducedName to the empty string. - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := p.ToASCII(reducedName) - if err != nil { - return name - } - if !hostnameRegex.MatchString(converted) { - return name - } - } - - // When a wildcard is specified, we additionally need to validate - // the label with the wildcard is correctly formed. - if isWildcard && !leftWildLabelRegex.MatchString(wildcardLabel) { - return name - } - } - - // Self-explanatory, but validations from EnforceHostnames and - // AllowWildcardCertificates take precedence. - if data.role.AllowAnyName { - continue - } - - // The following blocks all work the same basic way: - // 1) If a role allows a certain class of base (localhost, token - // display name, role-configured domains), perform further tests - // - // 2) If there is a perfect match on either the sanitized name or it's an - // email address with a perfect match on the hostname portion, allow it - // - // 3) If subdomains are allowed, we check based on the sanitized name; - // note that if not a wildcard, will be equivalent to the email domain - // for email checks, and we already checked above for both a wildcard - // and email address being present in the same name - // 3a) First we check for a non-wildcard subdomain, as in . - // 3b) Then we check if it's a wildcard and the base domain is a match - // - // Variances are noted in-line - - if data.role.AllowLocalhost { - if reducedName == "localhost" || - reducedName == "localdomain" || - (isEmail && emailDomain == "localhost") || - (isEmail && emailDomain == "localdomain") { - continue - } - - if data.role.AllowSubdomains { - // It is possible, if unlikely, to have a subdomain of "localhost" - if strings.HasSuffix(reducedName, ".localhost") || - (isWildcard && reducedName == "localhost") { - continue - } - - // A subdomain of "localdomain" is also not entirely uncommon - if strings.HasSuffix(reducedName, ".localdomain") || - (isWildcard && reducedName == "localdomain") { - continue - } - } - } - - if data.role.AllowTokenDisplayName { - if name == data.req.DisplayName { - continue - } - - if data.role.AllowSubdomains { - if isEmail { - // If it's an email address, we need to parse the token - // display name in order to do a proper comparison of the - // subdomain - if strings.Contains(data.req.DisplayName, "@") { - splitDisplay := strings.Split(data.req.DisplayName, "@") - if len(splitDisplay) == 2 { - // Compare the sanitized name against the hostname - // portion of the email address in the broken - // display name - if strings.HasSuffix(reducedName, "."+splitDisplay[1]) { - continue - } - } - } - } - - if strings.HasSuffix(reducedName, "."+data.req.DisplayName) || - (isWildcard && reducedName == data.req.DisplayName) { - continue - } - } - } - - if len(data.role.AllowedDomains) > 0 { - valid := false - for _, currDomain := range data.role.AllowedDomains { - // If there is, say, a trailing comma, ignore it - if currDomain == "" { - continue - } - - if data.role.AllowedDomainsTemplate { - isTemplate, _ := framework.ValidateIdentityTemplate(currDomain) - if isTemplate && data.req.EntityID != "" { - tmpCurrDomain, err := framework.PopulateIdentityTemplate(currDomain, data.req.EntityID, b.System()) - if err != nil { - continue - } - - currDomain = tmpCurrDomain - } - } - - // First, allow an exact match of the base domain if that 
role flag - // is enabled - if data.role.AllowBareDomains && - (strings.EqualFold(name, currDomain) || - (isEmail && strings.EqualFold(emailDomain, currDomain))) { - valid = true - break - } - - if data.role.AllowSubdomains { - if strings.HasSuffix(reducedName, "."+currDomain) || - (isWildcard && strings.EqualFold(reducedName, currDomain)) { - valid = true - break - } - } - - if data.role.AllowGlobDomains && - strings.Contains(currDomain, "*") && - glob.Glob(currDomain, name) { - valid = true - break - } - } - - if valid { - continue - } - } - - return name - } - - return "" + entityInfo := issuing.NewEntityInfoFromReq(data.req) + return issuing.ValidateNames(b.System(), data.role, entityInfo, names) } // validateOtherSANs checks if the values requested are allowed. If an OID @@ -589,110 +305,20 @@ func validateNames(b *backend, data *inputBundle, names []string) string { // allowed, it will be returned as the second string. Empty strings + error // means everything is okay. func validateOtherSANs(data *inputBundle, requested map[string][]string) (string, string, error) { - if len(data.role.AllowedOtherSANs) == 1 && data.role.AllowedOtherSANs[0] == "*" { - // Anything is allowed - return "", "", nil - } - - allowed, err := parseOtherSANs(data.role.AllowedOtherSANs) - if err != nil { - return "", "", fmt.Errorf("error parsing role's allowed SANs: %w", err) - } - for oid, names := range requested { - for _, name := range names { - allowedNames, ok := allowed[oid] - if !ok { - return oid, "", nil - } - - valid := false - for _, allowedName := range allowedNames { - if glob.Glob(allowedName, name) { - valid = true - break - } - } - - if !valid { - return oid, name, nil - } - } - } - - return "", "", nil + return issuing.ValidateOtherSANs(data.role, requested) } func parseOtherSANs(others []string) (map[string][]string, error) { - result := map[string][]string{} - for _, other := range others { - splitOther := strings.SplitN(other, ";", 2) - if len(splitOther) != 2 { - return nil, fmt.Errorf("expected a semicolon in other SAN %q", other) - } - splitType := strings.SplitN(splitOther[1], ":", 2) - if len(splitType) != 2 { - return nil, fmt.Errorf("expected a colon in other SAN %q", other) - } - switch { - case strings.EqualFold(splitType[0], "utf8"): - case strings.EqualFold(splitType[0], "utf-8"): - default: - return nil, fmt.Errorf("only utf8 other SANs are supported; found non-supported type in other SAN %q", other) - } - result[splitOther[0]] = append(result[splitOther[0]], splitType[1]) - } - - return result, nil + return issuing.ParseOtherSANs(others) } // Returns bool stating whether the given UserId is Valid func validateUserId(data *inputBundle, userId string) bool { - allowedList := data.role.AllowedUserIDs - - if len(allowedList) == 0 { - // Nothing is allowed. - return false - } - - if strutil.StrListContainsCaseInsensitive(allowedList, userId) { - return true - } - - for _, rolePattern := range allowedList { - if rolePattern == "" { - continue - } - - if strings.Contains(rolePattern, "*") && glob.Glob(rolePattern, userId) { - return true - } - } - - // No matches. 
- return false + return issuing.ValidateUserId(data.role, userId) } func validateSerialNumber(data *inputBundle, serialNumber string) string { - valid := false - if len(data.role.AllowedSerialNumbers) > 0 { - for _, currSerialNumber := range data.role.AllowedSerialNumbers { - if currSerialNumber == "" { - continue - } - - if (strings.Contains(currSerialNumber, "*") && - glob.Glob(currSerialNumber, serialNumber)) || - currSerialNumber == serialNumber { - valid = true - break - } - } - } - if !valid { - return serialNumber - } else { - return "" - } + return issuing.ValidateSerialNumber(data.role, serialNumber) } func generateCert(sc *storageContext, @@ -702,7 +328,6 @@ func generateCert(sc *storageContext, randomSource io.Reader) (*certutil.ParsedCertBundle, []string, error, ) { ctx := sc.Context - b := sc.Backend if input.role == nil { return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} @@ -712,7 +337,7 @@ func generateCert(sc *storageContext, return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} } - data, warnings, err := generateCreationBundle(b, input, caSign, nil) + data, warnings, err := generateCreationBundle(sc.System(), input, caSign, nil) if err != nil { return nil, nil, err } @@ -732,9 +357,28 @@ func generateCert(sc *storageContext, return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} } - uris, err := entries.toURLEntries(sc, issuerID("")) + uris, err := ToURLEntries(sc, issuing.IssuerID(""), entries) if err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse AIA URL information: %v\nUsing templated AIA URL's {{issuer_id}} field when generating root certificates is not supported.", err)} + // When generating root issuers, don't err on missing issuer + // ID; there is little value in including AIA info on a root, + // as this info would point back to itself; though RFC 5280 is + // a touch vague on this point, this seems to be consensus + // from public CAs such as DigiCert Global Root G3, ISRG Root + // X1, and others. + // + // This is a UX bug if we do err here, as it requires AIA + // templating to not include issuer id (a best practice for + // child certs issued from root and intermediate mounts + // however), and setting this before root generation (or, on + // root renewal) could cause problems. + if _, nonEmptyIssuerErr := ToURLEntries(sc, issuing.IssuerID("empty-issuer-id"), entries); nonEmptyIssuerErr != nil { + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse AIA URL information: %v\nUsing templated AIA URL's {{issuer_id}} field when generating root certificates is not supported.", err)} + } + + uris = &certutil.URLEntries{} + + msg := "When generating root CA, found global AIA configuration with issuer_id template unsuitable for root generation. This AIA configuration has been ignored. To include AIA on this root CA, set the global AIA configuration to not include issuer_id and instead to refer to a static issuer name." + warnings = append(warnings, msg) } data.Params.URLs = uris @@ -758,9 +402,7 @@ func generateCert(sc *storageContext, // N.B.: This is only meant to be used for generating intermediate CAs. // It skips some sanity checks. 
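+// Note: if the request does not explicitly set key_usage, the generated CSR carries no key usage bits (see the key_usage handling below).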
func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSource io.Reader) (*certutil.ParsedCSRBundle, []string, error) { - b := sc.Backend - - creation, warnings, err := generateCreationBundle(b, input, nil, nil) + creation, warnings, err := generateCreationBundle(sc.System(), input, nil, nil) if err != nil { return nil, nil, err } @@ -768,6 +410,10 @@ func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSourc return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} } + _, exists := input.apiData.GetOk("key_usage") + if !exists { + creation.Params.KeyUsage = 0 + } addBasicConstraints := input.apiData != nil && input.apiData.Get("add_basic_constraints").(bool) parsedBundle, err := generateCSRBundle(sc, input, creation, addBasicConstraints, randomSource) if err != nil { @@ -777,741 +423,155 @@ func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSourc return parsedBundle, warnings, nil } -func signCert(b *backend, - data *inputBundle, - caSign *certutil.CAInfoBundle, - isCA bool, - useCSRValues bool) (*certutil.ParsedCertBundle, []string, error, -) { - if data.role == nil { - return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} +func NewSignCertInputFromDataFields(data *framework.FieldData, isCA bool, useCSRValues bool) SignCertInputFromDataFields { + certBundle := NewCreationBundleInputFromFieldData(data) + return SignCertInputFromDataFields{ + CreationBundleInputFromFieldData: certBundle, + data: data, + isCA: isCA, + useCSRValues: useCSRValues, } +} + +type SignCertInputFromDataFields struct { + CreationBundleInputFromFieldData + data *framework.FieldData + isCA bool + useCSRValues bool +} - csrString := data.apiData.Get("csr").(string) +var _ issuing.SignCertInput = SignCertInputFromDataFields{} + +func (i SignCertInputFromDataFields) GetCSR() (*x509.CertificateRequest, error) { + csrString := i.data.Get("csr").(string) if csrString == "" { - return nil, nil, errutil.UserError{Err: "\"csr\" is empty"} + return nil, errutil.UserError{Err: "\"csr\" is empty"} } pemBlock, _ := pem.Decode([]byte(csrString)) if pemBlock == nil { - return nil, nil, errutil.UserError{Err: "csr contains no data"} + return nil, errutil.UserError{Err: "csr contains no data"} } csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} - } - - if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { - return nil, nil, errutil.UserError{Err: "Refusing to sign CSR with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."} + return nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} } - // This switch validates that the CSR key type matches the role and sets - // the value in the actualKeyType/actualKeyBits values. 
- actualKeyType := "" - actualKeyBits := 0 - - switch data.role.KeyType { - case "rsa": - // Verify that the key matches the role type - if csr.PublicKeyAlgorithm != x509.RSA { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires keys of type %s", - data.role.KeyType)} - } - - pubKey, ok := csr.PublicKey.(*rsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - - actualKeyType = "rsa" - actualKeyBits = pubKey.N.BitLen() - case "ec": - // Verify that the key matches the role type - if csr.PublicKeyAlgorithm != x509.ECDSA { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires keys of type %s", - data.role.KeyType)} - } - pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - - actualKeyType = "ec" - actualKeyBits = pubKey.Params().BitSize - case "ed25519": - // Verify that the key matches the role type - if csr.PublicKeyAlgorithm != x509.Ed25519 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires keys of type %s", - data.role.KeyType)} - } - - _, ok := csr.PublicKey.(ed25519.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - - actualKeyType = "ed25519" - actualKeyBits = 0 - case "any": - // We need to compute the actual key type and key bits, to correctly - // validate minimums and SignatureBits below. - switch csr.PublicKeyAlgorithm { - case x509.RSA: - pubKey, ok := csr.PublicKey.(*rsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - if pubKey.N.BitLen() < 2048 { - return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} - } + return csr, nil +} - actualKeyType = "rsa" - actualKeyBits = pubKey.N.BitLen() - case x509.ECDSA: - pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } +func (i SignCertInputFromDataFields) IsCA() bool { + return i.isCA +} - actualKeyType = "ec" - actualKeyBits = pubKey.Params().BitSize - case x509.Ed25519: - _, ok := csr.PublicKey.(ed25519.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } +func (i SignCertInputFromDataFields) UseCSRValues() bool { + return i.useCSRValues +} - actualKeyType = "ed25519" - actualKeyBits = 0 - default: - return nil, nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()} - } - default: - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type value: %s", data.role.KeyType)} - } +func (i SignCertInputFromDataFields) GetPermittedDomains() []string { + return i.data.Get("permitted_dns_domains").([]string) +} - // Before validating key lengths, update our KeyBits/SignatureBits based - // on the actual CSR key type. - if data.role.KeyType == "any" { - // We update the value of KeyBits and SignatureBits here (from the - // role), using the specified key type. This allows us to convert - // the default value (0) for SignatureBits and KeyBits to a - // meaningful value. - // - // We ignore the role's original KeyBits value if the KeyType is any - // as legacy (pre-1.10) roles had default values that made sense only - // for RSA keys (key_bits=2048) and the older code paths ignored the role value - // set for KeyBits when KeyType was set to any. 
This also enforces the - // docs saying when key_type=any, we only enforce our specified minimums - // for signing operations - if data.role.KeyBits, data.role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength( - actualKeyType, 0, data.role.SignatureBits); err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)} - } +func (i SignCertInputFromDataFields) IgnoreCSRSignature() bool { + return false +} - // We're using the KeyBits field as a minimum value below, and P-224 is safe - // and a previously allowed value. However, the above call defaults - // to P-256 as that's a saner default than P-224 (w.r.t. generation), so - // override it here to allow 224 as the smallest size we permit. - if actualKeyType == "ec" { - data.role.KeyBits = 224 - } +func signCert(sysView logical.SystemView, data *inputBundle, caSign *certutil.CAInfoBundle, isCA bool, useCSRValues bool) (*certutil.ParsedCertBundle, []string, error) { + if data.role == nil { + return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} } - // At this point, data.role.KeyBits and data.role.SignatureBits should both - // be non-zero, for RSA and ECDSA keys. Validate the actualKeyBits based on - // the role's values. If the KeyType was any, and KeyBits was set to 0, - // KeyBits should be updated to 2048 unless some other value was chosen - // explicitly. - // - // This validation needs to occur regardless of the role's key type, so - // that we always validate both RSA and ECDSA key sizes. - if actualKeyType == "rsa" { - if actualKeyBits < data.role.KeyBits { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires a minimum of a %d-bit key, but CSR's key is %d bits", - data.role.KeyBits, actualKeyBits)} - } + entityInfo := issuing.NewEntityInfoFromReq(data.req) + signCertInput := NewSignCertInputFromDataFields(data.apiData, isCA, useCSRValues) - if actualKeyBits < 2048 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "Vault requires a minimum of a 2048-bit key, but CSR's key is %d bits", - actualKeyBits)} - } - } else if actualKeyType == "ec" { - if actualKeyBits < data.role.KeyBits { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires a minimum of a %d-bit key, but CSR's key is %d bits", - data.role.KeyBits, - actualKeyBits)} - } - } + return issuing.SignCert(sysView, data.role, entityInfo, caSign, signCertInput) +} - creation, warnings, err := generateCreationBundle(b, data, caSign, csr) - if err != nil { - return nil, nil, err - } - if creation.Params == nil { - return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} - } +func getOtherSANsFromX509Extensions(exts []pkix.Extension) ([]certutil.OtherNameUtf8, error) { + return certutil.GetOtherSANsFromX509Extensions(exts) +} - creation.Params.IsCA = isCA - creation.Params.UseCSRValues = useCSRValues +var _ issuing.CreationBundleInput = CreationBundleInputFromFieldData{} - if isCA { - creation.Params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) +func NewCreationBundleInputFromFieldData(data *framework.FieldData) CreationBundleInputFromFieldData { + certNotAfter := NewCertNotAfterInputFromFieldData(data) + return CreationBundleInputFromFieldData{ + CertNotAfterInputFromFieldData: certNotAfter, + data: data, } - - parsedBundle, err := certutil.SignCertificate(creation) - if err != nil { - return nil, nil, err - } - - return parsedBundle, warnings, nil 
} -// otherNameRaw describes a name related to a certificate which is not in one -// of the standard name formats. RFC 5280, 4.2.1.6: -// -// OtherName ::= SEQUENCE { -// type-id OBJECT IDENTIFIER, -// value [0] EXPLICIT ANY DEFINED BY type-id } -type otherNameRaw struct { - TypeID asn1.ObjectIdentifier - Value asn1.RawValue +type CreationBundleInputFromFieldData struct { + CertNotAfterInputFromFieldData + data *framework.FieldData } -type otherNameUtf8 struct { - oid string - value string +func (cb CreationBundleInputFromFieldData) IgnoreCSRSignature() bool { + return false } -// ExtractUTF8String returns the UTF8 string contained in the Value, or an error -// if none is present. -func (oraw *otherNameRaw) extractUTF8String() (*otherNameUtf8, error) { - svalue := cryptobyte.String(oraw.Value.Bytes) - var outTag cbbasn1.Tag - var val cryptobyte.String - read := svalue.ReadAnyASN1(&val, &outTag) - - if read && outTag == asn1.TagUTF8String { - return &otherNameUtf8{oid: oraw.TypeID.String(), value: string(val)}, nil - } - return nil, fmt.Errorf("no UTF-8 string found in OtherName") +func (cb CreationBundleInputFromFieldData) GetCommonName() string { + return cb.data.Get("common_name").(string) } -func (o otherNameUtf8) String() string { - return fmt.Sprintf("%s;%s:%s", o.oid, "UTF-8", o.value) +func (cb CreationBundleInputFromFieldData) GetSerialNumber() string { + return cb.data.Get("serial_number").(string) } -func getOtherSANsFromX509Extensions(exts []pkix.Extension) ([]otherNameUtf8, error) { - var ret []otherNameUtf8 - for _, ext := range exts { - if !ext.Id.Equal(oidExtensionSubjectAltName) { - continue - } - err := forEachSAN(ext.Value, func(tag int, data []byte) error { - if tag != 0 { - return nil - } +func (cb CreationBundleInputFromFieldData) GetExcludeCnFromSans() bool { + return cb.data.Get("exclude_cn_from_sans").(bool) +} - var other otherNameRaw - _, err := asn1.UnmarshalWithParams(data, &other, "tag:0") - if err != nil { - return fmt.Errorf("could not parse requested other SAN: %w", err) - } - val, err := other.extractUTF8String() - if err != nil { - return err - } - ret = append(ret, *val) - return nil - }) - if err != nil { - return nil, err - } - } +func (cb CreationBundleInputFromFieldData) GetOptionalAltNames() (interface{}, bool) { + return cb.data.GetOk("alt_names") +} - return ret, nil +func (cb CreationBundleInputFromFieldData) GetOtherSans() []string { + return cb.data.Get("other_sans").([]string) } -func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { - // RFC 5280, 4.2.1.6 +func (cb CreationBundleInputFromFieldData) GetIpSans() []string { + return cb.data.Get("ip_sans").([]string) +} - // SubjectAltName ::= GeneralNames - // - // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName - // - // GeneralName ::= CHOICE { - // otherName [0] OtherName, - // rfc822Name [1] IA5String, - // dNSName [2] IA5String, - // x400Address [3] ORAddress, - // directoryName [4] Name, - // ediPartyName [5] EDIPartyName, - // uniformResourceIdentifier [6] IA5String, - // iPAddress [7] OCTET STRING, - // registeredID [8] OBJECT IDENTIFIER } - var seq asn1.RawValue - rest, err := asn1.Unmarshal(extension, &seq) - if err != nil { - return err - } else if len(rest) != 0 { - return fmt.Errorf("x509: trailing data after X.509 extension") - } - if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { - return asn1.StructuralError{Msg: "bad SAN sequence"} - } +func (cb CreationBundleInputFromFieldData) GetURISans() []string { + return 
cb.data.Get("uri_sans").([]string) +} - rest = seq.Bytes - for len(rest) > 0 { - var v asn1.RawValue - rest, err = asn1.Unmarshal(rest, &v) - if err != nil { - return err - } +func (cb CreationBundleInputFromFieldData) GetOptionalSkid() (interface{}, bool) { + return cb.data.GetOk("skid") +} - if err := callback(v.Tag, v.FullBytes); err != nil { - return err - } - } +func (cb CreationBundleInputFromFieldData) IsUserIdInSchema() (interface{}, bool) { + val, present := cb.data.Schema["user_ids"] + return val, present +} - return nil +func (cb CreationBundleInputFromFieldData) GetUserIds() []string { + return cb.data.Get("user_ids").([]string) } // generateCreationBundle is a shared function that reads parameters supplied // from the various endpoints and generates a CreationParameters with the // parameters that can be used to issue or sign -func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { - // Read in names -- CN, DNS and email addresses - var cn string - var ridSerialNumber string - var warnings []string - dnsNames := []string{} - emailAddresses := []string{} - { - if csr != nil && data.role.UseCSRCommonName { - cn = csr.Subject.CommonName - } - if cn == "" { - cn = data.apiData.Get("common_name").(string) - if cn == "" && data.role.RequireCN { - return nil, nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} - } - } +func generateCreationBundle(sysView logical.SystemView, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { + entityInfo := issuing.NewEntityInfoFromReq(data.req) + creationBundleInput := NewCreationBundleInputFromFieldData(data.apiData) - ridSerialNumber = data.apiData.Get("serial_number").(string) - - // only take serial number from CSR if one was not supplied via API - if ridSerialNumber == "" && csr != nil { - ridSerialNumber = csr.Subject.SerialNumber - } - - if csr != nil && data.role.UseCSRSANs { - dnsNames = csr.DNSNames - emailAddresses = csr.EmailAddresses - } - - if cn != "" && !data.apiData.Get("exclude_cn_from_sans").(bool) { - if strings.Contains(cn, "@") { - // Note: emails are not disallowed if the role's email protection - // flag is false, because they may well be included for - // informational purposes; it is up to the verifying party to - // ensure that email addresses in a subject alternate name can be - // used for the purpose for which they are presented - emailAddresses = append(emailAddresses, cn) - } else { - // Only add to dnsNames if it's actually a DNS name but convert - // idn first - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := p.ToASCII(cn) - if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} - } - if hostnameRegex.MatchString(converted) { - dnsNames = append(dnsNames, converted) - } - } - } - - if csr == nil || !data.role.UseCSRSANs { - cnAltRaw, ok := data.apiData.GetOk("alt_names") - if ok { - cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",") - for _, v := range cnAlt { - if strings.Contains(v, "@") { - emailAddresses = append(emailAddresses, v) - } else { - // Only add to dnsNames if it's actually a DNS name but - // convert idn first - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := 
p.ToASCII(v) - if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} - } - if hostnameRegex.MatchString(converted) { - dnsNames = append(dnsNames, converted) - } - } - } - } - } - - // Check the CN. This ensures that the CN is checked even if it's - // excluded from SANs. - if cn != "" { - badName := validateCommonName(b, data, cn) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "common name %s not allowed by this role", badName)} - } - } - - if ridSerialNumber != "" { - badName := validateSerialNumber(data, ridSerialNumber) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "serial_number %s not allowed by this role", badName)} - } - } - - // Check for bad email and/or DNS names - badName := validateNames(b, data, dnsNames) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "subject alternate name %s not allowed by this role", badName)} - } - - badName = validateNames(b, data, emailAddresses) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "email address %s not allowed by this role", badName)} - } - } - - // otherSANsInput has the same format as the other_sans HTTP param in the - // Vault PKI API: it is a list of strings of the form ;: - // where must be UTF8/UTF-8. - var otherSANsInput []string - // otherSANs is the output of parseOtherSANs(otherSANsInput): its keys are - // the value, its values are of the form [, ] - var otherSANs map[string][]string - if sans := data.apiData.Get("other_sans").([]string); len(sans) > 0 { - otherSANsInput = sans - } - if data.role.UseCSRSANs && csr != nil && len(csr.Extensions) > 0 { - others, err := getOtherSANsFromX509Extensions(csr.Extensions) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} - } - for _, other := range others { - otherSANsInput = append(otherSANsInput, other.String()) - } - } - if len(otherSANsInput) > 0 { - requested, err := parseOtherSANs(otherSANsInput) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} - } - badOID, badName, err := validateOtherSANs(data, requested) - switch { - case err != nil: - return nil, nil, errutil.UserError{Err: err.Error()} - case len(badName) > 0: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "other SAN %s not allowed for OID %s by this role", badName, badOID)} - case len(badOID) > 0: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "other SAN OID %s not allowed by this role", badOID)} - default: - otherSANs = requested - } - } - - // Get and verify any IP SANs - ipAddresses := []net.IP{} - { - if csr != nil && data.role.UseCSRSANs { - if len(csr.IPAddresses) > 0 { - if !data.role.AllowIPSANs { - return nil, nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR"} - } - ipAddresses = csr.IPAddresses - } - } else { - ipAlt := data.apiData.Get("ip_sans").([]string) - if len(ipAlt) > 0 { - if !data.role.AllowIPSANs { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)} - } - for _, v := range ipAlt { - parsedIP := net.ParseIP(v) - if parsedIP == nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "the value %q is not a valid IP address", v)} - } - ipAddresses = append(ipAddresses, parsedIP) - } - } - } - } - - URIs := 
[]*url.URL{} - { - if csr != nil && data.role.UseCSRSANs { - if len(csr.URIs) > 0 { - if len(data.role.AllowedURISANs) == 0 { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names are not allowed in this role, but were provided via CSR", - } - } - - // validate uri sans - for _, uri := range csr.URIs { - valid := validateURISAN(b, data, uri.String()) - if !valid { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names were provided via CSR which are not valid for this role", - } - } - - URIs = append(URIs, uri) - } - } - } else { - uriAlt := data.apiData.Get("uri_sans").([]string) - if len(uriAlt) > 0 { - if len(data.role.AllowedURISANs) == 0 { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names are not allowed in this role, but were provided via the API", - } - } - - for _, uri := range uriAlt { - valid := validateURISAN(b, data, uri) - if !valid { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names were provided via the API which are not valid for this role", - } - } - - parsedURI, err := url.Parse(uri) - if parsedURI == nil || err != nil { - return nil, nil, errutil.UserError{ - Err: fmt.Sprintf( - "the provided URI Subject Alternative Name %q is not a valid URI", uri), - } - } - - URIs = append(URIs, parsedURI) - } - } - } - } - - // Most of these could also be RemoveDuplicateStable, or even - // leave duplicates in, but OU is the one most likely to be duplicated. - subject := pkix.Name{ - CommonName: cn, - SerialNumber: ridSerialNumber, - Country: strutil.RemoveDuplicatesStable(data.role.Country, false), - Organization: strutil.RemoveDuplicatesStable(data.role.Organization, false), - OrganizationalUnit: strutil.RemoveDuplicatesStable(data.role.OU, false), - Locality: strutil.RemoveDuplicatesStable(data.role.Locality, false), - Province: strutil.RemoveDuplicatesStable(data.role.Province, false), - StreetAddress: strutil.RemoveDuplicatesStable(data.role.StreetAddress, false), - PostalCode: strutil.RemoveDuplicatesStable(data.role.PostalCode, false), - } - - // Get the TTL and verify it against the max allowed - var ttl time.Duration - var maxTTL time.Duration - var notAfter time.Time - var err error - { - ttl = time.Duration(data.apiData.Get("ttl").(int)) * time.Second - notAfterAlt := data.role.NotAfter - if notAfterAlt == "" { - notAfterAltRaw, ok := data.apiData.GetOk("not_after") - if ok { - notAfterAlt = notAfterAltRaw.(string) - } - - } - if ttl > 0 && notAfterAlt != "" { - return nil, nil, errutil.UserError{ - Err: "Either ttl or not_after should be provided. Both should not be provided in the same request.", - } - } - - if ttl == 0 && data.role.TTL > 0 { - ttl = data.role.TTL - } - - if data.role.MaxTTL > 0 { - maxTTL = data.role.MaxTTL - } - - if ttl == 0 { - ttl = b.System().DefaultLeaseTTL() - } - if maxTTL == 0 { - maxTTL = b.System().MaxLeaseTTL() - } - if ttl > maxTTL { - warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL)) - ttl = maxTTL - } - - if notAfterAlt != "" { - notAfter, err = time.Parse(time.RFC3339, notAfterAlt) - if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} - } - } else { - notAfter = time.Now().Add(ttl) - } - if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) { - // If it's not self-signed, verify that the issued certificate - // won't be valid past the lifetime of the CA certificate, and - // act accordingly. 
This is dependent based on the issuer's - // LeafNotAfterBehavior argument. - switch caSign.LeafNotAfterBehavior { - case certutil.PermitNotAfterBehavior: - // Explicitly do nothing. - case certutil.TruncateNotAfterBehavior: - notAfter = caSign.Certificate.NotAfter - case certutil.ErrNotAfterBehavior: - fallthrough - default: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "cannot satisfy request, as TTL would result in notAfter of %s that is beyond the expiration of the CA certificate at %s", notAfter.UTC().Format(time.RFC3339Nano), caSign.Certificate.NotAfter.UTC().Format(time.RFC3339Nano))} - } - } - } - - // Parse SKID from the request for cross-signing. - var skid []byte - { - if rawSKIDValue, ok := data.apiData.GetOk("skid"); ok { - // Handle removing common separators to make copy/paste from tool - // output easier. Chromium uses space, OpenSSL uses colons, and at - // one point, Vault had preferred dash as a separator for hex - // strings. - var err error - skidValue := rawSKIDValue.(string) - for _, separator := range []string{":", "-", " "} { - skidValue = strings.ReplaceAll(skidValue, separator, "") - } - - skid, err = hex.DecodeString(skidValue) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("cannot parse requested SKID value as hex: %v", err)} - } - } - } - - // Add UserIDs into the Subject, if the request type supports it. - if _, present := data.apiData.Schema["user_ids"]; present { - rawUserIDs := data.apiData.Get("user_ids").([]string) - - // Only take UserIDs from CSR if one was not supplied via API. - if len(rawUserIDs) == 0 && csr != nil { - for _, attr := range csr.Subject.Names { - if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { - switch aValue := attr.Value.(type) { - case string: - rawUserIDs = append(rawUserIDs, aValue) - case []byte: - rawUserIDs = append(rawUserIDs, string(aValue)) - default: - return nil, nil, errutil.UserError{Err: "unknown type for user_id attribute in CSR's Subject"} - } - } - } - } - - // Check for bad userIDs and add to the subject. 
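For context on the `user_ids` handling being moved out here: the pilot userId attribute lives under OID 0.9.2342.19200300.100.1.1, which is what `certutil.SubjectPilotUserIDAttributeOID` refers to. A minimal sketch of pulling those values back out of a parsed Subject, mirroring the removed extraction loop above (the helper name is illustrative):

```go
package main

import (
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

// pilotUserIDOID is 0.9.2342.19200300.100.1.1, the X.520 pilot "userId" (UID)
// attribute used for user_ids.
var pilotUserIDOID = asn1.ObjectIdentifier{0, 9, 2342, 19200300, 100, 1, 1}

// userIDsFromSubject collects every UID attribute from a parsed Subject,
// accepting string or []byte values, as the removed loop above does.
func userIDsFromSubject(subject pkix.Name) []string {
	var ids []string
	for _, attr := range subject.Names {
		if !attr.Type.Equal(pilotUserIDOID) {
			continue
		}
		switch v := attr.Value.(type) {
		case string:
			ids = append(ids, v)
		case []byte:
			ids = append(ids, string(v))
		}
	}
	return ids
}

func main() {
	// Names is normally populated by the x509 parser; we fill it in by hand
	// here purely for illustration.
	subject := pkix.Name{Names: []pkix.AttributeTypeAndValue{
		{Type: pilotUserIDOID, Value: "humanoid"},
		{Type: pilotUserIDOID, Value: "robot"},
	}}
	fmt.Println(userIDsFromSubject(subject)) // [humanoid robot]
}
```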
- if len(rawUserIDs) > 0 {
- for _, value := range rawUserIDs {
- if !validateUserId(data, value) {
- return nil, nil, errutil.UserError{Err: fmt.Sprintf("user_id %v is not allowed by this role", value)}
- }
-
- subject.ExtraNames = append(subject.ExtraNames, pkix.AttributeTypeAndValue{
- Type: certutil.SubjectPilotUserIDAttributeOID,
- Value: value,
- })
- }
- }
- }
-
- creation := &certutil.CreationBundle{
- Params: &certutil.CreationParameters{
- Subject: subject,
- DNSNames: strutil.RemoveDuplicates(dnsNames, false),
- EmailAddresses: strutil.RemoveDuplicates(emailAddresses, false),
- IPAddresses: ipAddresses,
- URIs: URIs,
- OtherSANs: otherSANs,
- KeyType: data.role.KeyType,
- KeyBits: data.role.KeyBits,
- SignatureBits: data.role.SignatureBits,
- UsePSS: data.role.UsePSS,
- NotAfter: notAfter,
- KeyUsage: x509.KeyUsage(parseKeyUsages(data.role.KeyUsage)),
- ExtKeyUsage: parseExtKeyUsages(data.role),
- ExtKeyUsageOIDs: data.role.ExtKeyUsageOIDs,
- PolicyIdentifiers: data.role.PolicyIdentifiers,
- BasicConstraintsValidForNonCA: data.role.BasicConstraintsValidForNonCA,
- NotBeforeDuration: data.role.NotBeforeDuration,
- ForceAppendCaChain: caSign != nil,
- SKID: skid,
- },
- SigningBundle: caSign,
- CSR: csr,
- }
-
- // Don't deal with URLs or max path length if it's self-signed, as these
- // normally come from the signing bundle
- if caSign == nil {
- return creation, warnings, nil
- }
-
- // This will have been read in from the getGlobalAIAURLs function
- creation.Params.URLs = caSign.URLs
+ return issuing.GenerateCreationBundle(sysView, data.role, entityInfo, creationBundleInput, caSign, csr)
+}

- // If the max path length in the role is not nil, it was specified at
- // generation time with the max_path_length parameter; otherwise derive it
- // from the signing certificate
- if data.role.MaxPathLength != nil {
- creation.Params.MaxPathLength = *data.role.MaxPathLength
- } else {
- switch {
- case caSign.Certificate.MaxPathLen < 0:
- creation.Params.MaxPathLength = -1
- case caSign.Certificate.MaxPathLen == 0 &&
- caSign.Certificate.MaxPathLenZero:
- // The signing function will ensure that we do not issue a CA cert
- creation.Params.MaxPathLength = 0
- default:
- // If this takes it to zero, we handle this case later if
- // necessary
- creation.Params.MaxPathLength = caSign.Certificate.MaxPathLen - 1
- }
- }
+// getCertificateNotAfter computes a certificate's NotAfter date based on the mount TTL, role, signing bundle and input
+// API data being sent. Returns a NotAfter time, a set of warnings, or an error.
+func getCertificateNotAfter(sysView logical.SystemView, data *inputBundle, caSign *certutil.CAInfoBundle) (time.Time, []string, error) { + input := NewCertNotAfterInputFromFieldData(data.apiData) + return issuing.GetCertificateNotAfter(sysView, data.role, input, caSign) +} - return creation, warnings, nil +// applyIssuerLeafNotAfterBehavior resets a certificate's notAfter time or errors out based on the +// issuer's notAfter date along with the LeafNotAfterBehavior configuration +func applyIssuerLeafNotAfterBehavior(caSign *certutil.CAInfoBundle, notAfter time.Time) (time.Time, error) { + return issuing.ApplyIssuerLeafNotAfterBehavior(caSign, notAfter) } func convertRespToPKCS8(resp *logical.Response) error { @@ -1648,7 +708,7 @@ func handleOtherSANs(in *x509.Certificate, sans map[string][]string) error { // Marshal and add to ExtraExtensions ext := pkix.Extension{ // This is the defined OID for subjectAltName - Id: asn1.ObjectIdentifier(oidExtensionSubjectAltName), + Id: certutil.OidExtensionSubjectAltName, } var err error ext.Value, err = asn1.Marshal(rawValues) @@ -1709,13 +769,23 @@ func stringToOid(in string) (asn1.ObjectIdentifier, error) { } func parseCertificateFromBytes(certBytes []byte) (*x509.Certificate, error) { - block, extra := pem.Decode(certBytes) - if block == nil { - return nil, errors.New("unable to parse certificate: invalid PEM") - } - if len(strings.TrimSpace(string(extra))) > 0 { - return nil, errors.New("unable to parse certificate: trailing PEM data") - } + return parsing.ParseCertificateFromBytes(certBytes) +} + +func NewCertNotAfterInputFromFieldData(data *framework.FieldData) CertNotAfterInputFromFieldData { + return CertNotAfterInputFromFieldData{data: data} +} + +var _ issuing.CertNotAfterInput = CertNotAfterInputFromFieldData{} + +type CertNotAfterInputFromFieldData struct { + data *framework.FieldData +} + +func (i CertNotAfterInputFromFieldData) GetTTL() int { + return i.data.Get("ttl").(int) +} - return x509.ParseCertificate(block.Bytes) +func (i CertNotAfterInputFromFieldData) GetOptionalNotAfter() (interface{}, bool) { + return i.data.GetOk("not_after") } diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index 7fb811cb8fcf..4f162aeb0946 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -1,17 +1,28 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "context" + "crypto/x509" + "crypto/x509/pkix" "fmt" + "net" + "net/url" "reflect" "strings" "testing" + "time" + "github.com/go-test/deep" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" ) func TestPki_FetchCertBySerial(t *testing.T) { @@ -28,7 +39,7 @@ func TestPki_FetchCertBySerial(t *testing.T) { &logical.Request{ Storage: storage, }, - "certs/", + issuing.PathCerts, "00:00:00:00:00:00:00:00", }, "revoked cert": { @@ -97,7 +108,7 @@ func TestPki_FetchCertBySerial(t *testing.T) { // order-preserving way. 
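Before the test changes that follow: the new `getCertificateNotAfter`/`applyIssuerLeafNotAfterBehavior` wrappers delegate the leaf-expiration clamping seen in the removed code above to the `issuing` package. A self-contained sketch of the three `LeafNotAfterBehavior` outcomes (err, truncate, permit), using stand-in constants rather than `certutil`'s actual values:

```go
package main

import (
	"fmt"
	"time"
)

// leafNotAfterBehavior mirrors the three policies referenced above; the enum
// values here are illustrative stand-ins for certutil's constants.
type leafNotAfterBehavior int

const (
	errBehavior leafNotAfterBehavior = iota
	truncateBehavior
	permitBehavior
)

// applyLeafNotAfter is a minimal sketch of clamping a requested leaf
// expiration against the issuing CA's own NotAfter.
func applyLeafNotAfter(behavior leafNotAfterBehavior, caNotAfter, leafNotAfter time.Time) (time.Time, error) {
	if !leafNotAfter.After(caNotAfter) {
		return leafNotAfter, nil // already within the CA's validity window
	}
	switch behavior {
	case permitBehavior:
		return leafNotAfter, nil // explicitly allow outliving the CA
	case truncateBehavior:
		return caNotAfter, nil // clamp to the CA's expiration
	default: // errBehavior and anything unrecognized
		return time.Time{}, fmt.Errorf("notAfter %s is beyond the CA expiration %s",
			leafNotAfter.UTC().Format(time.RFC3339), caNotAfter.UTC().Format(time.RFC3339))
	}
}

func main() {
	ca := time.Now().Add(24 * time.Hour)
	leaf := time.Now().Add(48 * time.Hour)
	got, _ := applyLeafNotAfter(truncateBehavior, ca, leaf)
	fmt.Println(got.Equal(ca)) // true: the leaf is truncated to the CA's NotAfter
}
```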
func TestPki_MultipleOUs(t *testing.T) { t.Parallel() - var b backend + b, _ := CreateBackendWithStorage(t) fields := addCACommonFields(map[string]*framework.FieldSchema{}) apiData := &framework.FieldData{ @@ -109,12 +120,12 @@ func TestPki_MultipleOUs(t *testing.T) { } input := &inputBundle{ apiData: apiData, - role: &roleEntry{ + role: &issuing.RoleEntry{ MaxTTL: 3600, OU: []string{"Z", "E", "V"}, }, } - cb, _, err := generateCreationBundle(&b, input, nil, nil) + cb, _, err := generateCreationBundle(b.System(), input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } @@ -129,7 +140,7 @@ func TestPki_MultipleOUs(t *testing.T) { func TestPki_PermitFQDNs(t *testing.T) { t.Parallel() - var b backend + b, _ := CreateBackendWithStorage(t) fields := addCACommonFields(map[string]*framework.FieldSchema{}) cases := map[string]struct { @@ -146,7 +157,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowAnyName: true, MaxTTL: 3600, EnforceHostnames: true, @@ -165,7 +176,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowedDomains: []string{"example.net", "EXAMPLE.COM"}, AllowBareDomains: true, MaxTTL: 3600, @@ -174,6 +185,24 @@ func TestPki_PermitFQDNs(t *testing.T) { expectedDnsNames: []string{"Example.Net", "eXaMPLe.COM"}, expectedEmails: []string{}, }, + "case insensitivity subdomain validation": { + input: &inputBundle{ + apiData: &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "common_name": "SUB.EXAMPLE.COM", + "ttl": 3600, + }, + }, + role: &issuing.RoleEntry{ + AllowedDomains: []string{"example.com", "*.Example.com"}, + AllowGlobDomains: true, + MaxTTL: 3600, + }, + }, + expectedDnsNames: []string{"SUB.EXAMPLE.COM"}, + expectedEmails: []string{}, + }, "case email as AllowedDomain with bare domains": { input: &inputBundle{ apiData: &framework.FieldData{ @@ -183,7 +212,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowedDomains: []string{"test@testemail.com"}, AllowBareDomains: true, MaxTTL: 3600, @@ -201,7 +230,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowedDomains: []string{"testemail.com"}, AllowBareDomains: true, MaxTTL: 3600, @@ -216,7 +245,7 @@ func TestPki_PermitFQDNs(t *testing.T) { name := name testCase := testCase t.Run(name, func(t *testing.T) { - cb, _, err := generateCreationBundle(&b, testCase.input, nil, nil) + cb, _, err := generateCreationBundle(b.System(), testCase.input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } @@ -235,3 +264,688 @@ func TestPki_PermitFQDNs(t *testing.T) { }) } } + +type parseCertificateTestCase struct { + name string + data map[string]interface{} + roleData map[string]interface{} // if a role is to be created + ttl time.Duration + wantParams certutil.CreationParameters + wantFields map[string]interface{} + wantErr bool +} + +func TestParseCertificate(t *testing.T) { + t.Parallel() + + parseURL := func(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + t.Fatal(err) + } + return u + } + + tests := []*parseCertificateTestCase{ + { + name: "simple CA", + data: map[string]interface{}{ + "common_name": "the common name", + "key_type": "ec", + "key_bits": 384, + "ttl": "1h", + "not_before_duration": "30s", + "street_address": "", + }, + ttl: 1 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the 
common name", + }, + DNSNames: nil, + EmailAddresses: nil, + IPAddresses: nil, + URIs: nil, + OtherSANs: make(map[string][]string), + IsCA: true, + KeyType: "ec", + KeyBits: 384, + NotAfter: time.Time{}, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: -1, + NotBeforeDuration: 30, + SKID: []byte("We'll assert that it is not nil as an special case"), + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "alt_names": "", + "ip_sans": "", + "uri_sans": "", + "other_sans": "", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "serial_number": "", + "ttl": "1h0m30s", + "max_path_length": -1, + "permitted_dns_domains": "", + "use_pss": false, + "key_type": "ec", + "key_bits": 384, + "skid": "We'll assert that it is not nil as an special case", + }, + wantErr: false, + }, + { + // Note that this test's data is used to create the internal CA used by test "full non CA cert" + name: "full CA", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#sign-certificate + "common_name": "the common name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "ttl": "2h", + "max_path_length": 2, + "permitted_dns_domains": ".example.com,.www.example.com", + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "US, CA", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, street_address2", + "postal_code": "postal_code1, postal_code2", + "not_before_duration": "45s", + "key_type": "rsa", + "use_pss": true, + "key_bits": 2048, + "signature_bits": 384, + // TODO(kitography): Specify key usage + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + OrganizationalUnit: []string{"unit1", "unit2"}, + Organization: []string{"org1", "org2"}, + Country: []string{"CA", "US"}, + Locality: []string{"locality1", "locality2"}, + Province: []string{"province1", "province2"}, + StreetAddress: []string{"street_address1", "street_address2"}, + PostalCode: []string{"postal_code1", "postal_code2"}, + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: true, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: true, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: []string{".example.com", ".www.example.com"}, + URLs: nil, + MaxPathLength: 2, + NotBeforeDuration: 45 
* time.Second, + SKID: []byte("We'll assert that it is not nil as an special case"), + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "serial_number": "", + "ttl": "2h0m45s", + "max_path_length": 2, + "permitted_dns_domains": ".example.com,.www.example.com", + "use_pss": true, + "key_type": "rsa", + "key_bits": 2048, + "skid": "We'll assert that it is not nil as an special case", + }, + wantErr: false, + }, + { + // Note that we use the data of test "full CA" to create the internal CA needed for this test + name: "full non CA cert", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-certificate-and-key + "common_name": "the common name non ca", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "ttl": "2h", + // format + // private_key_format + "exclude_cn_from_sans": true, + // not_after + // remove_roots_from_chain + "user_ids": "humanoid,robot", + }, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + "allow_ip_sans": true, + "allowed_other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:*@example.com", + "allowed_uri_sans": "https://example.com,https://www.example.com", + "allowed_user_ids": "*", + "not_before_duration": "45s", + "signature_bits": 384, + "key_usage": "KeyAgreement", + "ext_key_usage": "ServerAuth", + "ext_key_usage_oids": "1.3.6.1.5.5.7.3.67,1.3.6.1.5.5.7.3.68", + "client_flag": false, + "server_flag": false, + "policy_identifiers": "1.2.3.4.5.6.7.8.9.0", + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name non ca", + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: false, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: x509.KeyUsageKeyAgreement, + ExtKeyUsage: 0, // Please Ignore + ExtKeyUsageOIDs: []string{"1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.67", "1.3.6.1.5.5.7.3.68"}, + PolicyIdentifiers: []string{"1.2.3.4.5.6.7.8.9.0"}, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: 0, + NotBeforeDuration: 45, + SKID: []byte("We'll assert that it is not nil as an special case"), + }, + wantFields: map[string]interface{}{ + "common_name": "the common name non ca", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": 
"1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "serial_number": "", + "ttl": "2h0m45s", + "max_path_length": 0, + "permitted_dns_domains": "", + "use_pss": false, + "key_type": "rsa", + "key_bits": 2048, + "skid": "We'll assert that it is not nil as an special case", + }, + wantErr: false, + }, + } + for _, tt := range tests { + + b, s := CreateBackendWithStorage(t) + + var cert *x509.Certificate + issueTime := time.Now() + if tt.wantParams.IsCA { + resp, err := CBWrite(b, s, "root/generate/internal", tt.data) + require.NoError(t, err) + require.NotNil(t, resp) + + certData := resp.Data["certificate"].(string) + cert, err = parsing.ParseCertificateFromString(certData) + require.NoError(t, err) + require.NotNil(t, cert) + } else { + // use the "simple CA" data to create the internal CA + caData := tests[1].data + caData["ttl"] = "3h" + resp, err := CBWrite(b, s, "root/generate/internal", caData) + require.NoError(t, err) + require.NotNil(t, resp) + + // create a role + resp, err = CBWrite(b, s, "roles/test", tt.roleData) + require.NoError(t, err) + require.NotNil(t, resp) + + // create the cert + resp, err = CBWrite(b, s, "issue/test", tt.data) + require.NoError(t, err) + require.NotNil(t, resp) + + certData := resp.Data["certificate"].(string) + cert, err = parsing.ParseCertificateFromString(certData) + require.NoError(t, err) + require.NotNil(t, cert) + } + + t.Run(tt.name+" parameters", func(t *testing.T) { + testParseCertificateToCreationParameters(t, issueTime, tt, cert) + }) + t.Run(tt.name+" fields", func(t *testing.T) { + testParseCertificateToFields(t, issueTime, tt, cert) + }) + } +} + +func testParseCertificateToCreationParameters(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, cert *x509.Certificate) { + params, err := certutil.ParseCertificateToCreationParameters(*cert) + + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + + ignoreBasicConstraintsValidForNonCA := tt.wantParams.IsCA + + var diff []string + for _, d := range deep.Equal(tt.wantParams, params) { + switch { + case strings.HasPrefix(d, "SKID"): + continue + case strings.HasPrefix(d, "BasicConstraintsValidForNonCA") && ignoreBasicConstraintsValidForNonCA: + continue + case strings.HasPrefix(d, "NotBeforeDuration"): + continue + case strings.HasPrefix(d, "NotAfter"): + continue + } + diff = append(diff, d) + } + if diff != nil { + t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.Join(diff, "\n")) + } + + require.NotNil(t, params.SKID) + require.GreaterOrEqual(t, params.NotBeforeDuration, tt.wantParams.NotBeforeDuration, + "NotBeforeDuration want: %s got: %s", tt.wantParams.NotBeforeDuration, params.NotBeforeDuration) + + require.GreaterOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(-1*time.Minute), + "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) + require.LessOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(1*time.Minute), + "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) + } +} + +func testParseCertificateToFields(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, cert *x509.Certificate) { + fields, err := certutil.ParseCertificateToFields(*cert) + if tt.wantErr { + require.Error(t, err) + } 
else { + require.NoError(t, err) + + require.NotNil(t, fields["skid"]) + delete(fields, "skid") + delete(tt.wantFields, "skid") + + { + // Sometimes TTL comes back as 1s off, so we'll allow that + expectedTTL, err := parseutil.ParseDurationSecond(tt.wantFields["ttl"].(string)) + require.NoError(t, err) + actualTTL, err := parseutil.ParseDurationSecond(fields["ttl"].(string)) + require.NoError(t, err) + + diff := expectedTTL - actualTTL + require.LessOrEqual(t, actualTTL, expectedTTL, // NotAfter is generated before NotBefore so the time.Now of notBefore may be later, shrinking our calculated TTL during very slow tests + "ttl should be, if off, smaller than expected want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) + require.LessOrEqual(t, diff, 30*time.Second, // Test can be slow, allow more off in the other direction + "ttl must be at most 30s off, want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) + delete(fields, "ttl") + delete(tt.wantFields, "ttl") + } + + if diff := deep.Equal(tt.wantFields, fields); diff != nil { + t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) + } + } +} + +func TestParseCsr(t *testing.T) { + t.Parallel() + + parseURL := func(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + t.Fatal(err) + } + return u + } + + tests := []*parseCertificateTestCase{ + { + name: "simple CSR", + data: map[string]interface{}{ + "common_name": "the common name", + "key_type": "ec", + "key_bits": 384, + "ttl": "1h", + "not_before_duration": "30s", + "street_address": "", + }, + ttl: 1 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + }, + DNSNames: nil, + EmailAddresses: nil, + IPAddresses: nil, + URIs: nil, + OtherSANs: make(map[string][]string), + IsCA: false, + KeyType: "ec", + KeyBits: 384, + NotAfter: time.Time{}, + KeyUsage: 0, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: 0, + NotBeforeDuration: 0, + SKID: nil, + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "alt_names": "", + "ip_sans": "", + "uri_sans": "", + "other_sans": "", + "exclude_cn_from_sans": true, + "key_type": "ec", + "key_bits": 384, + "signature_bits": 384, + "use_pss": false, + "serial_number": "", + "add_basic_constraints": false, + }, + wantErr: false, + }, + { + name: "full CSR with basic constraints", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-intermediate-csr + "common_name": "the common name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + // format + // private_key_format + "key_type": "rsa", + "key_bits": 2048, + "key_name": "the-key-name", + // key_ref + "signature_bits": 384, + // exclude_cn_from_sans + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "US, CA", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, 
street_address2", + "postal_code": "postal_code1, postal_code2", + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": true, + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + OrganizationalUnit: []string{"unit1", "unit2"}, + Organization: []string{"org1", "org2"}, + Country: []string{"CA", "US"}, + Locality: []string{"locality1", "locality2"}, + Province: []string{"province1", "province2"}, + StreetAddress: []string{"street_address1", "street_address2"}, + PostalCode: []string{"postal_code1", "postal_code2"}, + SerialNumber: "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: true, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: 0, // TODO(kitography): Verify with Kit + ExtKeyUsage: 0, // TODO(kitography): Verify with Kit + ExtKeyUsageOIDs: nil, // TODO(kitography): Verify with Kit + PolicyIdentifiers: nil, // TODO(kitography): Verify with Kit + BasicConstraintsValidForNonCA: true, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: -1, + NotBeforeDuration: 0, + SKID: nil, + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "exclude_cn_from_sans": true, + "key_type": "rsa", + "key_bits": 2048, + "signature_bits": 384, + "use_pss": false, + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": true, + }, + wantErr: false, + }, + { + name: "full CSR without basic constraints", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-intermediate-csr + "common_name": "the common name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + // format + // private_key_format + "key_type": "rsa", + "key_bits": 2048, + "key_name": "the-key-name", + // key_ref + "signature_bits": 384, + // exclude_cn_from_sans + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "CA,US", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, street_address2", + "postal_code": "postal_code1, postal_code2", + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": false, + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + 
Subject: pkix.Name{ + CommonName: "the common name", + OrganizationalUnit: []string{"unit1", "unit2"}, + Organization: []string{"org1", "org2"}, + Country: []string{"CA", "US"}, + Locality: []string{"locality1", "locality2"}, + Province: []string{"province1", "province2"}, + StreetAddress: []string{"street_address1", "street_address2"}, + PostalCode: []string{"postal_code1", "postal_code2"}, + SerialNumber: "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: false, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: 0, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: 0, + NotBeforeDuration: 0, + SKID: nil, + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "exclude_cn_from_sans": true, + "key_type": "rsa", + "key_bits": 2048, + "signature_bits": 384, + "use_pss": false, + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": false, + }, + wantErr: false, + }, + } + for _, tt := range tests { + + b, s := CreateBackendWithStorage(t) + + issueTime := time.Now() + resp, err := CBWrite(b, s, "intermediate/generate/internal", tt.data) + require.NoError(t, err) + require.NotNil(t, resp) + + csrData := resp.Data["csr"].(string) + csr, err := parsing.ParseCertificateRequestFromString(csrData) + require.NoError(t, err) + require.NotNil(t, csr) + + t.Run(tt.name+" parameters", func(t *testing.T) { + testParseCsrToCreationParameters(t, issueTime, tt, csr) + }) + t.Run(tt.name+" fields", func(t *testing.T) { + testParseCsrToFields(t, issueTime, tt, csr) + }) + } +} + +func testParseCsrToCreationParameters(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, csr *x509.CertificateRequest) { + params, err := certutil.ParseCsrToCreationParameters(*csr) + + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + + if diff := deep.Equal(tt.wantParams, params); diff != nil { + t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) + } + } +} + +func testParseCsrToFields(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, csr *x509.CertificateRequest) { + fields, err := certutil.ParseCsrToFields(*csr) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + + if diff := deep.Equal(tt.wantFields, fields); diff != nil { + t.Errorf("testParseCertificateToFields() diff: %s", 
strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) + } + } +} diff --git a/builtin/logical/pki/chain_test.go b/builtin/logical/pki/chain_test.go index e76df359e9ed..0dba2cd282ee 100644 --- a/builtin/logical/pki/chain_test.go +++ b/builtin/logical/pki/chain_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/logical" ) @@ -575,7 +576,7 @@ func (c CBIssueLeaf) RevokeLeaf(t testing.TB, b *backend, s logical.Storage, kno if resp == nil { t.Fatalf("failed to read default issuer config: nil response") } - defaultID := resp.Data["default"].(issuerID).String() + defaultID := resp.Data["default"].(issuing.IssuerID).String() c.Issuer = defaultID issuer = nil } @@ -637,7 +638,7 @@ func (c CBIssueLeaf) Run(t testing.TB, b *backend, s logical.Storage, knownKeys if resp == nil { t.Fatalf("failed to read default issuer config: nil response") } - defaultID := resp.Data["default"].(issuerID).String() + defaultID := resp.Data["default"].(issuing.IssuerID).String() resp, err = CBRead(b, s, "issuer/"+c.Issuer) if err != nil { @@ -646,7 +647,7 @@ func (c CBIssueLeaf) Run(t testing.TB, b *backend, s logical.Storage, knownKeys if resp == nil { t.Fatalf("failed to read issuer %v: nil response", c.Issuer) } - ourID := resp.Data["issuer_id"].(issuerID).String() + ourID := resp.Data["issuer_id"].(issuing.IssuerID).String() areDefault := ourID == defaultID for _, usage := range []string{"read-only", "crl-signing", "issuing-certificates", "issuing-certificates,crl-signing"} { diff --git a/builtin/logical/pki/chain_util.go b/builtin/logical/pki/chain_util.go index e884f075588e..594319f7adae 100644 --- a/builtin/logical/pki/chain_util.go +++ b/builtin/logical/pki/chain_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -9,10 +9,11 @@ import ( "fmt" "sort" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/errutil" ) -func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) string { +func prettyIssuer(issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, issuer issuing.IssuerID) string { if entry, ok := issuerIdEntryMap[issuer]; ok && len(entry.Name) > 0 { return "[id:" + string(issuer) + "/name:" + entry.Name + "]" } @@ -20,7 +21,7 @@ func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) s return "[" + string(issuer) + "]" } -func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* optional */) error { +func (sc *storageContext) rebuildIssuersChains(referenceCert *issuing.IssuerEntry /* optional */) error { // This function rebuilds the CAChain field of all known issuers. This // function should usually be invoked when a new issuer is added to the // pool of issuers. @@ -116,22 +117,22 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // fourth maps that certificate back to the other issuers with that // subject (note the keyword _other_: we'll exclude self-loops here) -- // either via a parent or child relationship. 
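To make the subject-based lookup concrete before the map declarations just below: a minimal sketch (illustrative names, not the `storageContext` code) of the subject-to-issuer-IDs index and how it yields parent candidates for a certificate by matching its issuer field against known subjects, rather than comparing every issuer pair:

```go
package main

import (
	"crypto/x509"
	"fmt"
)

type issuerID string

// buildSubjectIndex sketches the subject -> issuer-IDs map described above:
// given a certificate's RawIssuer, we can look up every issuer whose
// RawSubject matches it.
func buildSubjectIndex(certs map[issuerID]*x509.Certificate) map[string][]issuerID {
	idx := make(map[string][]issuerID, len(certs))
	for id, cert := range certs {
		subject := string(cert.RawSubject)
		idx[subject] = append(idx[subject], id)
	}
	return idx
}

// parentCandidates excludes the child itself, matching the "no self-loops"
// note in the comment above.
func parentCandidates(idx map[string][]issuerID, child issuerID, cert *x509.Certificate) []issuerID {
	var parents []issuerID
	for _, id := range idx[string(cert.RawIssuer)] {
		if id != child {
			parents = append(parents, id)
		}
	}
	return parents
}

func main() {
	certs := map[issuerID]*x509.Certificate{
		"root": {RawSubject: []byte("CN=Root"), RawIssuer: []byte("CN=Root")},
		"int":  {RawSubject: []byte("CN=Int"), RawIssuer: []byte("CN=Root")},
	}
	idx := buildSubjectIndex(certs)
	fmt.Println(parentCandidates(idx, "int", certs["int"])) // [root]
}
```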
- issuerIdEntryMap := make(map[issuerID]*issuerEntry, len(issuers)) - issuerIdCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) - issuerIdParentsMap := make(map[issuerID][]issuerID, len(issuers)) - issuerIdChildrenMap := make(map[issuerID][]issuerID, len(issuers)) + issuerIdEntryMap := make(map[issuing.IssuerID]*issuing.IssuerEntry, len(issuers)) + issuerIdCertMap := make(map[issuing.IssuerID]*x509.Certificate, len(issuers)) + issuerIdParentsMap := make(map[issuing.IssuerID][]issuing.IssuerID, len(issuers)) + issuerIdChildrenMap := make(map[issuing.IssuerID][]issuing.IssuerID, len(issuers)) // For every known issuer, we map that subject back to the id of issuers - // containing that subject. This lets us build our issuerID -> parents + // containing that subject. This lets us build our IssuerID -> parents // mapping efficiently. Worst case we'll have a single linear chain where // every entry has a distinct subject. - subjectIssuerIdsMap := make(map[string][]issuerID, len(issuers)) + subjectIssuerIdsMap := make(map[string][]issuing.IssuerID, len(issuers)) // First, read every issuer entry from storage. We'll propagate entries // to three of the maps here: all but issuerIdParentsMap and // issuerIdChildrenMap, which we'll do in a second pass. for _, identifier := range issuers { - var stored *issuerEntry + var stored *issuing.IssuerEntry // When the reference issuer is provided and matches this identifier, // prefer the updated reference copy instead. @@ -261,8 +262,8 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // manually building their chain prior to starting the topographical sort. // // This thus runs in O(|V| + |E|) -> O(n^2) in the number of issuers. - processedIssuers := make(map[issuerID]bool, len(issuers)) - toVisit := make([]issuerID, 0, len(issuers)) + processedIssuers := make(map[issuing.IssuerID]bool, len(issuers)) + toVisit := make([]issuing.IssuerID, 0, len(issuers)) // Handle any explicitly constructed certificate chains. Here, we don't // validate much what the user provides; if they provide since-deleted @@ -323,7 +324,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // ensure we don't accidentally infinite-loop (if we introduce a bug). maxVisitCount := len(issuers)*len(issuers)*len(issuers) + 100 for len(toVisit) > 0 && maxVisitCount >= 0 { - var issuer issuerID + var issuer issuing.IssuerID issuer, toVisit = toVisit[0], toVisit[1:] // If (and only if) we're presently starved for next nodes to visit, @@ -387,8 +388,8 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // However, if you directly step onto the cross-signed, now you're // taken in an alternative direction (via its chain), and must // revisit any roots later. 
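The roots/intermediates split in the hunk just below keys off self-signedness: a parent whose subject equals its issuer is treated as a root so it can be appended to the chain ahead of cross-signed intermediates. A compact sketch of that classification:

```go
package main

import (
	"bytes"
	"crypto/x509"
	"fmt"
)

type issuerID string

// splitParents classifies parent issuers as in the hunk below: a parent is a
// root when it is self-signed (RawSubject == RawIssuer), otherwise it is an
// intermediate (e.g., a cross-signed copy under another root).
func splitParents(parents []issuerID, certs map[issuerID]*x509.Certificate) (roots, intermediates []issuerID) {
	for _, id := range parents {
		c := certs[id]
		if bytes.Equal(c.RawSubject, c.RawIssuer) {
			roots = append(roots, id)
		} else {
			intermediates = append(intermediates, id)
		}
	}
	return roots, intermediates
}

func main() {
	certs := map[issuerID]*x509.Certificate{
		"rootA": {RawSubject: []byte("CN=A"), RawIssuer: []byte("CN=A")},
		"xsign": {RawSubject: []byte("CN=A"), RawIssuer: []byte("CN=B")},
	}
	r, i := splitParents([]issuerID{"rootA", "xsign"}, certs)
	fmt.Println(r, i) // [rootA] [xsign]
}
```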
- var roots []issuerID - var intermediates []issuerID + var roots []issuing.IssuerID + var intermediates []issuing.IssuerID for _, parentCertId := range parentCerts { if bytes.Equal(issuerIdCertMap[parentCertId].RawSubject, issuerIdCertMap[parentCertId].RawIssuer) { roots = append(roots, parentCertId) @@ -470,7 +471,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt return nil } -func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuerEntry, certToAdd string) { +func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuing.IssuerEntry, certToAdd string) { included, ok := includedParentCerts[certToAdd] if ok && included { return @@ -481,15 +482,15 @@ func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuerE } func processAnyCliqueOrCycle( - issuers []issuerID, - processedIssuers map[issuerID]bool, - toVisit []issuerID, - issuerIdEntryMap map[issuerID]*issuerEntry, - issuerIdCertMap map[issuerID]*x509.Certificate, - issuerIdParentsMap map[issuerID][]issuerID, - issuerIdChildrenMap map[issuerID][]issuerID, - subjectIssuerIdsMap map[string][]issuerID, -) ([]issuerID /* toVisit */, error) { + issuers []issuing.IssuerID, + processedIssuers map[issuing.IssuerID]bool, + toVisit []issuing.IssuerID, + issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + issuerIdParentsMap map[issuing.IssuerID][]issuing.IssuerID, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, + subjectIssuerIdsMap map[string][]issuing.IssuerID, +) ([]issuing.IssuerID /* toVisit */, error) { // Topological sort really only works on directed acyclic graphs (DAGs). // But a pool of arbitrary (issuer) certificates are actually neither! // This pool could contain both cliques and cycles. Because this could @@ -550,15 +551,15 @@ func processAnyCliqueOrCycle( // Finally -- it isn't enough to consider this chain in isolation // either. We need to consider _all_ parents and ensure they've been // processed before processing this closure. 
- var cliques [][]issuerID - var cycles [][]issuerID - closure := make(map[issuerID]bool) + var cliques [][]issuing.IssuerID + var cycles [][]issuing.IssuerID + closure := make(map[issuing.IssuerID]bool) - var cliquesToProcess []issuerID + var cliquesToProcess []issuing.IssuerID cliquesToProcess = append(cliquesToProcess, issuer) for len(cliquesToProcess) > 0 { - var node issuerID + var node issuing.IssuerID node, cliquesToProcess = cliquesToProcess[0], cliquesToProcess[1:] // Skip potential clique nodes which have already been processed @@ -753,7 +754,7 @@ func processAnyCliqueOrCycle( return nil, err } - closure := make(map[issuerID]bool) + closure := make(map[issuing.IssuerID]bool) for _, cycle := range cycles { for _, node := range cycle { closure[node] = true @@ -811,14 +812,14 @@ func processAnyCliqueOrCycle( } func findAllCliques( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - subjectIssuerIdsMap map[string][]issuerID, - issuers []issuerID, -) ([][]issuerID, map[issuerID]int, []issuerID, error) { - var allCliques [][]issuerID - issuerIdCliqueMap := make(map[issuerID]int) - var allCliqueNodes []issuerID + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + subjectIssuerIdsMap map[string][]issuing.IssuerID, + issuers []issuing.IssuerID, +) ([][]issuing.IssuerID, map[issuing.IssuerID]int, []issuing.IssuerID, error) { + var allCliques [][]issuing.IssuerID + issuerIdCliqueMap := make(map[issuing.IssuerID]int) + var allCliqueNodes []issuing.IssuerID for _, node := range issuers { // Check if the node has already been visited... @@ -859,11 +860,11 @@ func findAllCliques( } func isOnReissuedClique( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - subjectIssuerIdsMap map[string][]issuerID, - node issuerID, -) ([]issuerID, error) { + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + subjectIssuerIdsMap map[string][]issuing.IssuerID, + node issuing.IssuerID, +) ([]issuing.IssuerID, error) { // Finding max cliques in arbitrary graphs is a nearly pathological // problem, usually left to the realm of SAT solvers and NP-Complete // theoretical. @@ -891,7 +892,7 @@ func isOnReissuedClique( // under this reissued clique detection code). // // What does this mean for our algorithm? A simple greedy search is - // sufficient. If we index our certificates by subject -> issuerID + // sufficient. If we index our certificates by subject -> IssuerID // (and cache its value across calls, which we've already done for // building the parent/child relationship), we can find all other issuers // with the same public key and subject as the existing node fairly @@ -925,7 +926,7 @@ func isOnReissuedClique( // condition (the subject half), so validate they match the other half // (the issuer half) and the second condition. For node (which is // included in candidates), the condition should vacuously hold. - var clique []issuerID + var clique []issuing.IssuerID for _, candidate := range candidates { // Skip already processed nodes, even if they could be clique // candidates. 
We'll treat them as any other (already processed) @@ -957,7 +958,7 @@ func isOnReissuedClique( return clique, nil } -func containsIssuer(collection []issuerID, target issuerID) bool { +func containsIssuer(collection []issuing.IssuerID, target issuing.IssuerID) bool { if len(collection) == 0 { return false } @@ -971,7 +972,7 @@ func containsIssuer(collection []issuerID, target issuerID) bool { return false } -func appendCycleIfNotExisting(knownCycles [][]issuerID, candidate []issuerID) [][]issuerID { +func appendCycleIfNotExisting(knownCycles [][]issuing.IssuerID, candidate []issuing.IssuerID) [][]issuing.IssuerID { // There are two ways to do cycle detection: canonicalize the cycles, // rewriting them to have the least (or max) element first or just // brute force the detection. @@ -1007,7 +1008,7 @@ func appendCycleIfNotExisting(knownCycles [][]issuerID, candidate []issuerID) [] return knownCycles } -func canonicalizeCycle(cycle []issuerID) []issuerID { +func canonicalizeCycle(cycle []issuing.IssuerID) []issuing.IssuerID { // Find the minimum value and put it at the head, keeping the relative // ordering the same. minIndex := 0 @@ -1026,11 +1027,11 @@ func canonicalizeCycle(cycle []issuerID) []issuerID { } func findCyclesNearClique( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - issuerIdChildrenMap map[issuerID][]issuerID, - cliqueNodes []issuerID, -) ([][]issuerID, error) { + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, + cliqueNodes []issuing.IssuerID, +) ([][]issuing.IssuerID, error) { // When we have a reissued clique, we need to find all cycles next to it. // Presumably, because they all have non-empty parents, they should not // have been visited yet. We further know that (because we're exploring @@ -1046,7 +1047,7 @@ func findCyclesNearClique( // Copy the clique nodes as excluded nodes; we'll avoid exploring cycles // which have parents that have been already explored. excludeNodes := cliqueNodes[:] - var knownCycles [][]issuerID + var knownCycles [][]issuing.IssuerID // We know the node has at least one child, since the clique is non-empty. for _, child := range issuerIdChildrenMap[cliqueNode] { @@ -1081,12 +1082,12 @@ func findCyclesNearClique( } func findAllCyclesWithNode( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - issuerIdChildrenMap map[issuerID][]issuerID, - source issuerID, - exclude []issuerID, -) ([][]issuerID, error) { + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, + source issuing.IssuerID, + exclude []issuing.IssuerID, +) ([][]issuing.IssuerID, error) { // We wish to find all cycles involving this particular node and report // the corresponding paths. This is a full-graph traversal (excluding // certain paths) as we're not just checking if a cycle occurred, but @@ -1096,28 +1097,28 @@ func findAllCyclesWithNode( maxCycleSize := 8 // Whether we've visited any given node. - cycleVisited := make(map[issuerID]bool) - visitCounts := make(map[issuerID]int) - parentCounts := make(map[issuerID]map[issuerID]bool) + cycleVisited := make(map[issuing.IssuerID]bool) + visitCounts := make(map[issuing.IssuerID]int) + parentCounts := make(map[issuing.IssuerID]map[issuing.IssuerID]bool) // Paths to the specified node. Some of these might be cycles.
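// canonicalizeCycle above is what makes cycle deduplication in
// appendCycleIfNotExisting order-insensitive: every rotation of the same
// cycle canonicalizes to the same slice, so a simple element-wise
// comparison suffices. A small demonstration of that property under an
// assumed string stand-in for the ID type:
package main

import "fmt"

func canonicalize(cycle []string) []string {
	minIndex := 0
	for i, id := range cycle {
		if id < cycle[minIndex] {
			minIndex = i
		}
	}
	// Rotate so the minimum element leads, preserving relative order.
	result := append([]string{}, cycle[minIndex:]...)
	return append(result, cycle[:minIndex]...)
}

func main() {
	a := canonicalize([]string{"b", "c", "a"}) // cycle b->c->a->b
	b := canonicalize([]string{"a", "b", "c"}) // same cycle, rotated
	fmt.Println(a, b)                          // [a b c] [a b c]
}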
- pathsTo := make(map[issuerID][][]issuerID) + pathsTo := make(map[issuing.IssuerID][][]issuing.IssuerID) // Nodes to visit. - var visitQueue []issuerID + var visitQueue []issuing.IssuerID // Add the source node to start. In order to set up the paths to a // given node, we seed pathsTo with the single path involving just // this node visitQueue = append(visitQueue, source) - pathsTo[source] = [][]issuerID{{source}} + pathsTo[source] = [][]issuing.IssuerID{{source}} // Begin building paths. // // Loop invariant: // pathTo[x] contains valid paths to reach this node, from source. for len(visitQueue) > 0 { - var current issuerID + var current issuing.IssuerID current, visitQueue = visitQueue[0], visitQueue[1:] // If we've already processed this node, we have a cycle. Skip this @@ -1162,7 +1163,7 @@ func findAllCyclesWithNode( // Track this parent->child relationship to know when to exit. setOfParents, ok := parentCounts[child] if !ok { - setOfParents = make(map[issuerID]bool) + setOfParents = make(map[issuing.IssuerID]bool) parentCounts[child] = setOfParents } _, existingParent := setOfParents[current] @@ -1179,7 +1180,7 @@ func findAllCyclesWithNode( // externally with an existing path). addedPath := false if _, ok := pathsTo[child]; !ok { - pathsTo[child] = make([][]issuerID, 0) + pathsTo[child] = make([][]issuing.IssuerID, 0) } for _, path := range pathsTo[current] { @@ -1204,7 +1205,7 @@ func findAllCyclesWithNode( return nil, errutil.InternalError{Err: fmt.Sprintf("Error updating certificate path: path of length %d is too long", len(path))} } // Make sure to deep copy the path. - newPath := make([]issuerID, 0, len(path)+1) + newPath := make([]issuing.IssuerID, 0, len(path)+1) newPath = append(newPath, path...) newPath = append(newPath, child) @@ -1249,7 +1250,7 @@ func findAllCyclesWithNode( // Ok, we've now exited from our loop. Any cycles would've been detected // and their paths recorded in pathsTo. Now we can iterate over these // (starting a source), clean them up and validate them. - var cycles [][]issuerID + var cycles [][]issuing.IssuerID for _, cycle := range pathsTo[source] { // Skip the trivial cycle. 
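// The "deep copy the path" step above matters because Go slices share
// backing arrays: extending two sibling paths from the same prefix with a
// plain append could silently overwrite one another. A sketch of the safe
// extension used here, with a string stand-in for the element type:
package sketch

func extendPath(path []string, child string) []string {
	// Allocate a fresh backing array sized for the extended path, so the
	// returned slice never aliases the input.
	newPath := make([]string, 0, len(path)+1)
	newPath = append(newPath, path...)
	newPath = append(newPath, child)
	return newPath
}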
if len(cycle) == 1 && cycle[0] == source { @@ -1287,8 +1288,8 @@ func findAllCyclesWithNode( return cycles, nil } -func reversedCycle(cycle []issuerID) []issuerID { - var result []issuerID +func reversedCycle(cycle []issuing.IssuerID) []issuing.IssuerID { + var result []issuing.IssuerID for index := len(cycle) - 1; index >= 0; index-- { result = append(result, cycle[index]) } @@ -1297,11 +1298,11 @@ } func computeParentsFromClosure( - processedIssuers map[issuerID]bool, - issuerIdParentsMap map[issuerID][]issuerID, - closure map[issuerID]bool, -) (map[issuerID]bool, bool) { - parents := make(map[issuerID]bool) + processedIssuers map[issuing.IssuerID]bool, + issuerIdParentsMap map[issuing.IssuerID][]issuing.IssuerID, + closure map[issuing.IssuerID]bool, +) (map[issuing.IssuerID]bool, bool) { + parents := make(map[issuing.IssuerID]bool) for node := range closure { nodeParents, ok := issuerIdParentsMap[node] if !ok { @@ -1326,11 +1327,11 @@ } func addNodeCertsToEntry( - issuerIdEntryMap map[issuerID]*issuerEntry, - issuerIdChildrenMap map[issuerID][]issuerID, + issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, includedParentCerts map[string]bool, - entry *issuerEntry, - issuersCollection ...[]issuerID, + entry *issuing.IssuerEntry, + issuersCollection ...[]issuing.IssuerID, ) { for _, collection := range issuersCollection { // Find a starting point into this collection such that it verifies @@ -1369,10 +1370,10 @@ } func addParentChainsToEntry( - issuerIdEntryMap map[issuerID]*issuerEntry, + issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, includedParentCerts map[string]bool, - entry *issuerEntry, - parents map[issuerID]bool, + entry *issuing.IssuerEntry, + parents map[issuing.IssuerID]bool, ) { for parent := range parents { nodeEntry := issuerIdEntryMap[parent] diff --git a/builtin/logical/pki/cieps_util_oss.go b/builtin/logical/pki/cieps_util_oss.go new file mode 100644 index 000000000000..7efb1151eeea --- /dev/null +++ b/builtin/logical/pki/cieps_util_oss.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +import ( + "crypto/x509" + "fmt" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// issueAcmeCertUsingCieps performs a CIEPS request/response based on the passed-in ACME information +func issueAcmeCertUsingCieps(_ *backend, _ *acmeContext, _ *logical.Request, _ *framework.FieldData, _ *jwsCtx, _ *acmeAccount, _ *acmeOrder, _ *x509.CertificateRequest) (*certutil.ParsedCertBundle, issuing.IssuerID, error) { + return nil, "", fmt.Errorf("cieps is an enterprise only feature") +} + +func getCiepsAcmeSettings(b *backend, sc *storageContext, opts acmeWrapperOpts, config *acmeConfigEntry, data *framework.FieldData) (bool, string, error) { + return false, "", nil +} diff --git a/builtin/logical/pki/cmd/pki/main.go b/builtin/logical/pki/cmd/pki/main.go index 7c804be23713..49bbe146e750 100644 --- a/builtin/logical/pki/cmd/pki/main.go +++ b/builtin/logical/pki/cmd/pki/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
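// The new cieps_util_oss.go above follows the build-tag stub pattern used
// for enterprise-only features: a file constrained to `!enterprise`
// compiles into community builds and returns a descriptive error, while
// an enterprise build supplies the real implementation in a counterpart
// file tagged for `enterprise`. A generic sketch of the OSS half, with a
// hypothetical feature name:

//go:build !enterprise

package sketch

import "fmt"

// doEnterpriseOnlyThing is the stub compiled into community builds; the
// matching enterprise-tagged file defines the same signature with real logic.
func doEnterpriseOnlyThing() error {
	return fmt.Errorf("this capability is an enterprise only feature")
}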
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/pki/cmpv2_util_oss.go b/builtin/logical/pki/cmpv2_util_oss.go new file mode 100644 index 000000000000..9e7574795d34 --- /dev/null +++ b/builtin/logical/pki/cmpv2_util_oss.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) doTidyCMPV2NonceStore(_ context.Context, _ logical.Storage) error { + return nil +} diff --git a/builtin/logical/pki/config_util.go b/builtin/logical/pki/config_util.go index 80814550c753..87feab78cadd 100644 --- a/builtin/logical/pki/config_util.go +++ b/builtin/logical/pki/config_util.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( - "fmt" "strings" - "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) func (sc *storageContext) isDefaultKeySet() (bool, error) { @@ -27,14 +27,14 @@ func (sc *storageContext) isDefaultIssuerSet() (bool, error) { return strings.TrimSpace(config.DefaultIssuerId.String()) != "", nil } -func (sc *storageContext) updateDefaultKeyId(id keyID) error { +func (sc *storageContext) updateDefaultKeyId(id issuing.KeyID) error { config, err := sc.getKeysConfig() if err != nil { return err } if config.DefaultKeyId != id { - return sc.setKeysConfig(&keyConfigEntry{ + return sc.setKeysConfig(&issuing.KeyConfigEntry{ DefaultKeyId: id, }) } @@ -42,7 +42,7 @@ func (sc *storageContext) updateDefaultKeyId(id keyID) error { return nil } -func (sc *storageContext) updateDefaultIssuerId(id issuerID) error { +func (sc *storageContext) updateDefaultIssuerId(id issuing.IssuerID) error { config, err := sc.getIssuersConfig() if err != nil { return err @@ -55,67 +55,3 @@ func (sc *storageContext) updateDefaultIssuerId(id issuerID) error { return nil } - -func (sc *storageContext) changeDefaultIssuerTimestamps(oldDefault issuerID, newDefault issuerID) error { - if newDefault == oldDefault { - return nil - } - - now := time.Now().UTC() - - // When the default issuer changes, we need to modify four - // pieces of information: - // - // 1. The old default issuer's modification time, as it no - // longer works for the /cert/ca path. - // 2. The new default issuer's modification time, as it now - // works for the /cert/ca path. - // 3. & 4. Both issuer's CRLs, as they behave the same, under - // the /cert/crl path! - for _, thisId := range []issuerID{oldDefault, newDefault} { - if len(thisId) == 0 { - continue - } - - // 1 & 2 above. - issuer, err := sc.fetchIssuerById(thisId) - if err != nil { - // Due to the lack of transactions, if we deleted the default - // issuer (successfully), but the subsequent issuer config write - // (to clear the default issuer's old id) failed, we might have - // an inconsistent config. If we later hit this loop (and flush - // these timestamps again -- perhaps because the operator - // selected a new default), we'd have erred out here, because - // the since-deleted default issuer doesn't exist. In this case, - // skip the issuer instead of bailing. 
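// updateDefaultKeyId and updateDefaultIssuerId above persist a new config
// entry only when the default actually changes, avoiding a redundant
// storage write (and the replication traffic it implies) on no-op
// updates. A condensed sketch of that write-if-changed pattern, with
// hypothetical types standing in for the PKI config entries:
package sketch

type config struct{ DefaultID string }

type store interface {
	load() (*config, error)
	save(*config) error
}

func updateDefault(s store, id string) error {
	cfg, err := s.load()
	if err != nil {
		return err
	}
	if cfg.DefaultID == id {
		return nil // unchanged; skip the write entirely
	}
	return s.save(&config{DefaultID: id})
}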
- err := fmt.Errorf("unable to update issuer (%v)'s modification time: error fetching issuer: %w", thisId, err) - if strings.Contains(err.Error(), "does not exist") { - sc.Backend.Logger().Warn(err.Error()) - continue - } - - return err - } - - issuer.LastModified = now - err = sc.writeIssuer(issuer) - if err != nil { - return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %w", thisId, err) - } - } - - // Fetch and update the internalCRLConfigEntry (3&4). - cfg, err := sc.getLocalCRLConfig() - if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %w", err) - } - - cfg.LastModified = now - cfg.DeltaLastModified = now - err = sc.setLocalCRLConfig(cfg) - if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %w", err) - } - - return nil -} diff --git a/builtin/logical/pki/crl_test.go b/builtin/logical/pki/crl_test.go index a494a7b2c255..3c7848db78de 100644 --- a/builtin/logical/pki/crl_test.go +++ b/builtin/logical/pki/crl_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -12,13 +12,16 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" + "github.com/hashicorp/vault/helper/constants" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/stretchr/testify/require" ) @@ -60,10 +63,10 @@ func TestBackend_CRLConfigUpdate(t *testing.T) { require.Equal(t, "24h", resp.Data["expiry"]) require.Equal(t, false, resp.Data["disable"]) - require.Equal(t, defaultCrlConfig.OcspDisable, resp.Data["ocsp_disable"]) - require.Equal(t, defaultCrlConfig.OcspExpiry, resp.Data["ocsp_expiry"]) - require.Equal(t, defaultCrlConfig.AutoRebuild, resp.Data["auto_rebuild"]) - require.Equal(t, defaultCrlConfig.AutoRebuildGracePeriod, resp.Data["auto_rebuild_grace_period"]) + require.Equal(t, pki_backend.DefaultCrlConfig.OcspDisable, resp.Data["ocsp_disable"]) + require.Equal(t, pki_backend.DefaultCrlConfig.OcspExpiry, resp.Data["ocsp_expiry"]) + require.Equal(t, pki_backend.DefaultCrlConfig.AutoRebuild, resp.Data["auto_rebuild"]) + require.Equal(t, pki_backend.DefaultCrlConfig.AutoRebuildGracePeriod, resp.Data["auto_rebuild_grace_period"]) } func TestBackend_CRLConfig(t *testing.T) { @@ -415,15 +418,18 @@ func TestCrlRebuilder(t *testing.T) { cb := newCRLBuilder(true /* can rebuild and write CRLs */) // Force an initial build - err = cb.rebuild(sc, true) + warnings, err := cb.Rebuild(sc, true) require.NoError(t, err, "Failed to rebuild CRL") + require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") resp := requestCrlFromBackend(t, s, b) crl1 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) // We shouldn't rebuild within this call. 
- err = cb.rebuildIfForced(sc) + warnings, err = cb.RebuildIfForced(sc) require.NoError(t, err, "Failed to rebuild if forced CRL") + require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") + resp = requestCrlFromBackend(t, s, b) crl2 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) require.Equal(t, crl1.ThisUpdate, crl2.ThisUpdate, "According to the update field, we rebuilt the CRL") @@ -439,8 +445,9 @@ func TestCrlRebuilder(t *testing.T) { // This should rebuild the CRL cb.requestRebuildIfActiveNode(b) - err = cb.rebuildIfForced(sc) + warnings, err = cb.RebuildIfForced(sc) require.NoError(t, err, "Failed to rebuild if forced CRL") + require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") resp = requestCrlFromBackend(t, s, b) schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/pem"), logical.ReadOperation), resp, true) @@ -969,13 +976,13 @@ func TestAutoRebuild(t *testing.T) { require.NoError(t, err) require.NotNil(t, resp) require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry) - require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable) - require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable) - require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild) - require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod) - require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta) - require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) + require.Equal(t, resp.Data["expiry"], pki_backend.DefaultCrlConfig.Expiry) + require.Equal(t, resp.Data["disable"], pki_backend.DefaultCrlConfig.Disable) + require.Equal(t, resp.Data["ocsp_disable"], pki_backend.DefaultCrlConfig.OcspDisable) + require.Equal(t, resp.Data["auto_rebuild"], pki_backend.DefaultCrlConfig.AutoRebuild) + require.Equal(t, resp.Data["auto_rebuild_grace_period"], pki_backend.DefaultCrlConfig.AutoRebuildGracePeriod) + require.Equal(t, resp.Data["enable_delta"], pki_backend.DefaultCrlConfig.EnableDelta) + require.Equal(t, resp.Data["delta_rebuild_interval"], pki_backend.DefaultCrlConfig.DeltaRebuildInterval) // Safety guard: we play with rebuild timing below. _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ @@ -1040,13 +1047,7 @@ func TestAutoRebuild(t *testing.T) { // each revocation. Pull the storage from the cluster (via the sys/raw // endpoint which requires the mount UUID) and verify the revInfo contains // a matching issuer. - resp, err = client.Logical().Read("sys/mounts/pki") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["uuid"]) - pkiMount := resp.Data["uuid"].(string) - require.NotEmpty(t, pkiMount) + pkiMount := findStorageMountUuid(t, client, "pki") revEntryPath := "logical/" + pkiMount + "/" + revokedPath + normalizeSerial(newLeafSerial) // storage from cluster.Core[0] is a physical storage copy, not a logical @@ -1060,17 +1061,17 @@ func TestAutoRebuild(t *testing.T) { require.NotNil(t, resp.Data) require.NotEmpty(t, resp.Data["value"]) revEntryValue := resp.Data["value"].(string) - var revInfo revocationInfo + var revInfo revocation.RevocationInfo err = json.Unmarshal([]byte(revEntryValue), &revInfo) require.NoError(t, err) - require.Equal(t, revInfo.CertificateIssuer, issuerID(rootIssuer)) + require.Equal(t, revInfo.CertificateIssuer, issuing.IssuerID(rootIssuer)) // New serial should not appear on CRL. 
crl = getCrlCertificateList(t, client, "pki") thisCRLNumber := getCRLNumber(t, crl) requireSerialNumberInCRL(t, crl, leafSerial) // But the old one should. now := time.Now() - graceInterval, _ := time.ParseDuration(gracePeriod) + graceInterval, _ := parseutil.ParseDurationSecond(gracePeriod) expectedUpdate := lastCRLExpiry.Add(-1 * graceInterval) if requireSerialNumberInCRL(nil, crl, newLeafSerial) { // If we somehow lagged and we ended up needing to rebuild @@ -1174,6 +1175,17 @@ func TestAutoRebuild(t *testing.T) { requireSerialNumberInCRL(t, crl, newLeafSerial) } +func findStorageMountUuid(t *testing.T, client *api.Client, mount string) string { + resp, err := client.Logical().Read("sys/mounts/" + mount) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["uuid"]) + pkiMount := resp.Data["uuid"].(string) + require.NotEmpty(t, pkiMount) + return pkiMount +} + func TestTidyIssuerAssociation(t *testing.T) { t.Parallel() @@ -1190,7 +1202,7 @@ func TestTidyIssuerAssociation(t *testing.T) { require.NotEmpty(t, resp.Data["certificate"]) require.NotEmpty(t, resp.Data["issuer_id"]) rootCert := resp.Data["certificate"].(string) - rootID := resp.Data["issuer_id"].(issuerID) + rootID := resp.Data["issuer_id"].(issuing.IssuerID) // Create a role for issuance. _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ @@ -1222,7 +1234,7 @@ func TestTidyIssuerAssociation(t *testing.T) { require.NotNil(t, entry) require.NotNil(t, entry.Value) - var leafInfo revocationInfo + var leafInfo revocation.RevocationInfo err = entry.DecodeJSON(&leafInfo) require.NoError(t, err) require.Equal(t, rootID, leafInfo.CertificateIssuer) @@ -1325,3 +1337,193 @@ func requestCrlFromBackend(t *testing.T, s logical.Storage, b *backend) *logical require.False(t, resp.IsError(), "crl error response: %v", resp) return resp } + +func TestCRLWarningsEmptyKeyUsage(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Generated using OpenSSL with a configuration lacking KeyUsage on + // the CA certificate. 
+ cert := `-----BEGIN CERTIFICATE----- +MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 +LW9sZDAeFw0yMDAxMDEwMTAxMDFaFw0yMTAxMDEwMTAxMDFaMBMxETAPBgNVBAMM +CHJvb3Qtb2xkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzqhSZxAL +PwFhCIPL1jFPq6jxp1wFgo6YNSfVI13gfaGIjfErxsQUbosmlEuTeOc50zXXN3kb +SDufy5Yi1OeSkFZRdJ78zdKzsEDIVR1ukUngVsSrt05gdNMJlh8XOPbcrJo78jYG +lRgtkkFSc/wCu+ue6JqkfKrbUY/G9WK0UM8ppHm1Ux67ZGoypyEgaqqxKHBRC4Yl +D+lAs1vP4C6cavqdUMKgAPTKmMBzlbpCuYPLHSzWh9Com3WQSqCbrlo3uH5RT3V9 +5Gjuk3mMUhY1l6fRL7wG3f+4x+DS+ICQNT0o4lnMxpIsiTh0cEHUFgY7G0iHWYPj +CIN8UDhpZIpoCQIDAQABo2UwYzAdBgNVHQ4EFgQUJlHk3PN7pfC22FGxAb0rWgQt +L4cwHwYDVR0jBBgwFoAUJlHk3PN7pfC22FGxAb0rWgQtL4cwDAYDVR0TBAUwAwEB +/zATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAcaU0FbXb +FfXluBrjKfOzVKz+kvQ1CVv3xe3MBkS6wvqybBjJCFChnqCPxEe57BdSbBXNU5LZ +zCR/OqYas4Csv9+msSn9BI2FSMAmfMDTsp5/6iIQJqlJx9L8a7bjzVMGX6QJm/3x +S/EgGsMETAgewQXeu4jhI6StgJ2V/4Ofe498hYw4LAiBapJmkU/nHezWodNBZJ7h +LcLOzVj0Hu5MZplGBgJFgRqBCVVkqXA0q7tORuhNzYtNdJFpv3pZIhvVFFu3HUPf +wYQPhLye5WNtosz5xKe8X0Q9qp8g6azMTk+5Qe7u1d8MYAA2AIlGuKUvPHRruOmN +NC+gQnS7AK1lCw== +-----END CERTIFICATE-----` + privKey := `-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOqFJnEAs/AWEI +g8vWMU+rqPGnXAWCjpg1J9UjXeB9oYiN8SvGxBRuiyaUS5N45znTNdc3eRtIO5/L +liLU55KQVlF0nvzN0rOwQMhVHW6RSeBWxKu3TmB00wmWHxc49tysmjvyNgaVGC2S +QVJz/AK7657omqR8qttRj8b1YrRQzymkebVTHrtkajKnISBqqrEocFELhiUP6UCz +W8/gLpxq+p1QwqAA9MqYwHOVukK5g8sdLNaH0KibdZBKoJuuWje4flFPdX3kaO6T +eYxSFjWXp9EvvAbd/7jH4NL4gJA1PSjiWczGkiyJOHRwQdQWBjsbSIdZg+MIg3xQ +OGlkimgJAgMBAAECggEABKmCdmXDwy+eR0ll41aoc/hzPzHRxADAiU51Pf+DrYHj +6UPcF3db+KR2Adl0ocEhqlSoHs3CIk6KC9c+wOvagBwaaVWe4WvT9vF3M4he8rMm +dv6n2xJPFcOfDz5zUSssjk5KdOvoGRv7BzYnDIvOafvmUVwPwuo92Wizddy8saf4 +Xuea0Cupz1PELPKkbXcAqb+TzbAZrwdPj1Y7vTe/KGE4+aoDqCW/sFB1E0UsMGlt +/yfGwFP48b7kdkqSpcEQW5H8+WL3TfqRcolCD9To4vo2J+1Po0S/8qPNRvkNQDDX +AypHtrXFBOWHpJgXT4rKyH+ZGJchrCRDblt9s/sNQwKBgQD7NytvYET3pWemYiX+ +MB9uc6cPuMFONvlzjA9T6dbOSi/HLaeDoW027aMUZqb7QeaQCoWcUwh13dI2SZq0 +5+l9hei4JkWjoDhbWmPe7zDuQr3UMl0CSk3egz3BSHkjAhRAuUxK0QLKGB23zWxz +k8mUWYZaZRA39C6aqMt/jbJjDwKBgQDSl+eO+DjpwPzrjPSphpF4xYo4XDje9ovK +9q4KTHye7Flc3cMCX3WZBmzdt0zbqu6gWZjJH0XbWX/+SkJBGh77XWD0FeQnU7Vk +ipoeb8zTsCVxD9EytQuXti3cqBgClcCMvLKgLOJIcNYTnygojwg3t+jboQqbtV7p +VpQfAC6jZwKBgQCxJ46x1CnOmg4l/0DbqAQCV/yP0bI//fSbz0Ff459fimF3DHL9 +GHF0MtC2Kk3HEgoNud3PB58Hv43mSrGWsZSuuCgM9LBXWz1i7rNPG05eNyK26W09 +mDihmduK2hjS3zx5CDMM76gP7EHIxEyelLGqtBdS18JAMypKVo5rPPl3cQKBgQCG +ueXLImQOr4dfDntLpSqV0BLAQcekZKhEPZJURmCHr37wGXNzpixurJyjL2w9MFqf +PRKwwJAJZ3Wp8kn2qkZd23x2Szb+LeBjJQS6Kh4o44zgixTz0r1K3qLygptxs+pO +Xz4LmQte+skKHo0rfW3tb3vKXnmR6fOBZgE23//2SwKBgHck44hoE1Ex2gDEfIq1 +04OBoS1cpuc9ge4uHEmv+8uANjzwlsYf8hY1qae513MGixRBOkxcI5xX/fYPQV9F +t3Jfh8QX85JjnGntuXuraYZJMUjpwXr3QHPx0jpvAM3Au5j6qD3biC9Vrwq9Chkg +hbiiPARizZA/Tsna/9ox1qDT +-----END PRIVATE KEY-----` + resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": cert + "\n" + privKey, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Warnings) + originalWarnings := resp.Warnings + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Warnings) + + // All CRL-specific warnings should've already occurred earlier on the + // import's CRL rebuild. + for _, warning := range resp.Warnings { + require.Contains(t, originalWarnings, warning) + } + + // Deleting the issuer and key should remove the warning. 
+ _, err = CBDelete(b, s, "root") + require.NoError(t, err) + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.Empty(t, resp.Warnings) + + // Adding back just the cert shouldn't cause CRL rebuild warnings. + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": cert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["mapping"]) + require.NotEmpty(t, resp.Data["mapping"]) + require.Equal(t, len(resp.Data["mapping"].(map[string]string)), 1) + for key, value := range resp.Data["mapping"].(map[string]string) { + require.NotEmpty(t, key) + require.Empty(t, value) + } + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.Empty(t, resp.Warnings) +} + +func TestCRLIssuerRemoval(t *testing.T) { + t.Parallel() + + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + + if constants.IsEnterprise { + // We don't really care about the whole cross cluster replication + // stuff, but we do want to enable unified CRLs if we can, so that + // unified CRLs get built. + _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "cross_cluster_revocation": true, + "auto_rebuild": true, + }) + require.NoError(t, err, "failed enabling unified CRLs on enterprise") + } + + // Create a single root, configure delta CRLs, and rotate CRLs to prep a + // starting state. + _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + _, err = CBWrite(b, s, "config/crl", map[string]interface{}{ + "enable_delta": true, + "auto_rebuild": true, + }) + require.NoError(t, err) + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + + // List items in storage under both CRL paths so we know what is there in + // the "good" state. + crlList, err := s.List(ctx, issuing.PathCrls) + require.NoError(t, err) + require.Contains(t, crlList, "config") + require.Greater(t, len(crlList), 1) + + unifiedCRLList, err := s.List(ctx, "unified-crls/") + require.NoError(t, err) + require.Contains(t, unifiedCRLList, "config") + require.Greater(t, len(unifiedCRLList), 1) + + // Now, create a bunch of issuers, generate CRLs, and remove them. + var keyIDs []string + var issuerIDs []string + for i := 1; i <= 25; i++ { + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": fmt.Sprintf("Root X%v", i), + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + key := string(resp.Data["key_id"].(issuing.KeyID)) + keyIDs = append(keyIDs, key) + issuer := string(resp.Data["issuer_id"].(issuing.IssuerID)) + issuerIDs = append(issuerIDs, issuer) + } + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + for _, issuer := range issuerIDs { + _, err := CBDelete(b, s, "issuer/"+issuer) + require.NoError(t, err) + } + for _, key := range keyIDs { + _, err := CBDelete(b, s, "key/"+key) + require.NoError(t, err) + } + + // Finally list storage entries again to ensure they are cleaned up. 
+ afterCRLList, err := s.List(ctx, issuing.PathCrls) + require.NoError(t, err) + for _, entry := range crlList { + require.Contains(t, afterCRLList, entry) + } + require.Equal(t, len(afterCRLList), len(crlList)) + + afterUnifiedCRLList, err := s.List(ctx, "unified-crls/") + require.NoError(t, err) + for _, entry := range unifiedCRLList { + require.Contains(t, afterUnifiedCRLList, entry) + } + require.Equal(t, len(afterUnifiedCRLList), len(unifiedCRLList)) +} diff --git a/builtin/logical/pki/crl_util.go b/builtin/logical/pki/crl_util.go index 4454dce3e45a..def00a5f11c6 100644 --- a/builtin/logical/pki/crl_util.go +++ b/builtin/logical/pki/crl_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -12,10 +12,14 @@ import ( "math/big" "strings" "sync" + "sync/atomic" "time" - atomic2 "go.uber.org/atomic" - + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -23,7 +27,7 @@ import ( ) const ( - revokedPath = "revoked/" + revokedPath = revocation.RevokedPath crossRevocationPrefix = "cross-revocation-queue/" crossRevocationPath = crossRevocationPrefix + "{{clusterId}}/" deltaWALLastBuildSerialName = "last-build-serial" @@ -37,13 +41,6 @@ const ( unifiedDeltaWALLastRevokedSerial = unifiedDeltaWALPath + deltaWALLastRevokedSerialName ) -type revocationInfo struct { - CertificateBytes []byte `json:"certificate_bytes"` - RevocationTime int64 `json:"revocation_time"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - CertificateIssuer issuerID `json:"issuer_id"` -} - type revocationRequest struct { RequestedAt time.Time `json:"requested_at"` } @@ -81,64 +78,76 @@ type ( } ) -// crlBuilder is gatekeeper for controlling various read/write operations to the storage of the CRL. +// CrlBuilder is the gatekeeper for controlling various read/write operations to the storage of the CRL. // The extra complexity arises from secondary performance clusters seeing various writes to their storage // without the actual API calls. During the storage invalidation process, we do not have the required state // to actually rebuild the CRLs, so we need to schedule it in a deferred fashion. This allows either // read or write calls to perform the operation if required, or have the flag reset upon a write operation. // // The CRL builder also tracks the revocation configuration. -type crlBuilder struct { +type CrlBuilder struct { _builder sync.Mutex - forceRebuild *atomic2.Bool + forceRebuild *atomic.Bool canRebuild bool lastDeltaRebuildCheck time.Time _config sync.RWMutex - dirty *atomic2.Bool - config crlConfig + dirty *atomic.Bool + config pki_backend.CrlConfig haveInitializedConfig bool // Whether to invalidate our LastModifiedTime due to write on the // global issuance config. - invalidate *atomic2.Bool + invalidate *atomic.Bool // Global revocation queue entries get accepted by the invalidate func - // and passed to the crlBuilder for processing. - haveInitializedQueue *atomic2.Bool + // and passed to the CrlBuilder for processing.
+ haveInitializedQueue *atomic.Bool revQueue *revocationQueue removalQueue *revocationQueue crossQueue *revocationQueue } +var _ pki_backend.CrlBuilderType = (*CrlBuilder)(nil) + const ( _ignoreForceFlag = true _enforceForceFlag = false ) -func newCRLBuilder(canRebuild bool) *crlBuilder { - return &crlBuilder{ - forceRebuild: atomic2.NewBool(false), +func newCRLBuilder(canRebuild bool) *CrlBuilder { + builder := &CrlBuilder{ + forceRebuild: &atomic.Bool{}, canRebuild: canRebuild, // Set the last delta rebuild window to now, delaying the first delta // rebuild by the first rebuild period to give us some time on startup // to stabilize. lastDeltaRebuildCheck: time.Now(), - dirty: atomic2.NewBool(true), - config: defaultCrlConfig, - invalidate: atomic2.NewBool(false), - haveInitializedQueue: atomic2.NewBool(false), + dirty: &atomic.Bool{}, + config: pki_backend.DefaultCrlConfig, + invalidate: &atomic.Bool{}, + haveInitializedQueue: &atomic.Bool{}, revQueue: newRevocationQueue(), removalQueue: newRevocationQueue(), crossQueue: newRevocationQueue(), } + builder.dirty.Store(true) + return builder +} + +func (cb *CrlBuilder) SetLastDeltaRebuildCheckTime(t time.Time) { + cb.lastDeltaRebuildCheck = t } -func (cb *crlBuilder) markConfigDirty() { +func (cb *CrlBuilder) ShouldInvalidate() bool { + return cb.invalidate.Load() +} + +func (cb *CrlBuilder) markConfigDirty() { cb.dirty.Store(true) } -func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { +func (cb *CrlBuilder) reloadConfigIfRequired(sc *storageContext) error { if cb.dirty.Load() { // Acquire a write lock. cb._config.Lock() @@ -160,7 +169,7 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { if config != nil { cb.config = *config } else { - cb.config = defaultCrlConfig + cb.config = pki_backend.DefaultCrlConfig } // Updated the config; unset dirty. @@ -171,7 +180,7 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { triggerChangeNotification = false // do not trigger on the initial loading of configuration. } - // Certain things need to be triggered on all server types when crlConfig is loaded. + // Certain things need to be triggered on all server types when CrlConfig is loaded. if triggerChangeNotification { cb.notifyOnConfigChange(sc, previousConfig, cb.config) } @@ -180,12 +189,12 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { return nil } -func (cb *crlBuilder) notifyOnConfigChange(sc *storageContext, priorConfig crlConfig, newConfig crlConfig) { +func (cb *CrlBuilder) notifyOnConfigChange(sc *storageContext, priorConfig pki_backend.CrlConfig, newConfig pki_backend.CrlConfig) { // If you need to hook into a CRL configuration change across different server types // such as primary clusters as well as performance replicas, it is easier to do here than // in two places (API layer and in invalidateFunc) if priorConfig.UnifiedCRL != newConfig.UnifiedCRL && newConfig.UnifiedCRL { - sc.Backend.unifiedTransferStatus.forceRun() + sc.GetUnifiedTransferStatus().forceRun() } if priorConfig.UseGlobalQueue != newConfig.UseGlobalQueue && newConfig.UseGlobalQueue { @@ -193,10 +202,10 @@ func (cb *crlBuilder) notifyOnConfigChange(sc *storageContext, priorConfig crlCo } } -func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error) { +func (cb *CrlBuilder) GetConfigWithUpdate(sc pki_backend.StorageContext) (*pki_backend.CrlConfig, error) { // Config may mutate immediately after accessing, but will be freshly // fetched if necessary. 
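// The constructor above completes a migration from go.uber.org/atomic to
// the standard library's sync/atomic, whose atomic.Bool (Go 1.19+) has no
// NewBool constructor and zero-values to false. That is why newCRLBuilder
// now allocates bare &atomic.Bool{} fields and then calls
// builder.dirty.Store(true) where the old code used atomic2.NewBool(true).
// A minimal sketch of the same construction:
package sketch

import "sync/atomic"

type builder struct {
	dirty *atomic.Bool
}

func newBuilder() *builder {
	b := &builder{dirty: &atomic.Bool{}} // zero value reads as false
	b.dirty.Store(true)                  // replaces atomic2.NewBool(true)
	return b
}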
- if err := cb.reloadConfigIfRequired(sc); err != nil { + if err := cb.reloadConfigIfRequired(sc.(*storageContext)); err != nil { return nil, err } @@ -207,8 +216,43 @@ func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error return &configCopy, nil } -func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { - cfg, err := cb.getConfigWithUpdate(sc) +func (cb *CrlBuilder) getConfigWithForcedUpdate(sc *storageContext) (*pki_backend.CrlConfig, error) { + cb.markConfigDirty() + return cb.GetConfigWithUpdate(sc) +} + +func (cb *CrlBuilder) writeConfig(sc *storageContext, config *pki_backend.CrlConfig) (*pki_backend.CrlConfig, error) { + cb._config.Lock() + defer cb._config.Unlock() + + if err := sc.setRevocationConfig(config); err != nil { + cb.markConfigDirty() + return nil, fmt.Errorf("failed writing CRL config: %w", err) + } + + previousConfig := cb.config + if config != nil { + cb.config = *config + } else { + cb.config = pki_backend.DefaultCrlConfig + } + + triggerChangeNotification := true + if !cb.haveInitializedConfig { + cb.haveInitializedConfig = true + triggerChangeNotification = false // do not trigger on the initial loading of configuration. + } + + // Certain things need to be triggered on all server types when CrlConfig is loaded. + if triggerChangeNotification { + cb.notifyOnConfigChange(sc, previousConfig, cb.config) + } + + return config, nil +} + +func (cb *CrlBuilder) checkForAutoRebuild(sc *storageContext) error { + cfg, err := cb.GetConfigWithUpdate(sc) if err != nil { return err } @@ -248,14 +292,14 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { // the grace period and act accordingly. now := time.Now() - period, err := time.ParseDuration(cfg.AutoRebuildGracePeriod) + period, err := parseutil.ParseDurationSecond(cfg.AutoRebuildGracePeriod) if err != nil { // This may occur if the duration is empty; in that case // assume the default. The default should be valid and shouldn't // error. - defaultPeriod, defaultErr := time.ParseDuration(defaultCrlConfig.AutoRebuildGracePeriod) + defaultPeriod, defaultErr := parseutil.ParseDurationSecond(pki_backend.DefaultCrlConfig.AutoRebuildGracePeriod) if defaultErr != nil { - return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %w\n", cfg.AutoRebuildGracePeriod, defaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr) + return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %w\n", cfg.AutoRebuildGracePeriod, pki_backend.DefaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr) } period = defaultPeriod @@ -272,14 +316,14 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { } // Mark the internal LastModifiedTime tracker invalid. -func (cb *crlBuilder) invalidateCRLBuildTime() { +func (cb *CrlBuilder) invalidateCRLBuildTime() { cb.invalidate.Store(true) } // Update the config to mark the modified CRL. See note in // updateDefaultIssuerId about why this is necessary. -func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { - if cb.invalidate.CAS(true, false) { +func (cb *CrlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { + if cb.invalidate.CompareAndSwap(true, false) { // Flush out our invalidation. 
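// checkForAutoRebuild above swaps time.ParseDuration for
// parseutil.ParseDurationSecond from go-secure-stdlib, which additionally
// accepts bare integers and integer-valued strings as a count of seconds,
// the form in which Vault API parameters often arrive. A small sketch of
// the difference (output noted under that assumption):
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// The standard library rejects a unitless value outright.
	if _, err := time.ParseDuration("120"); err != nil {
		fmt.Println("time.ParseDuration:", err)
	}

	// parseutil treats a bare number as seconds and still accepts "2m".
	d, err := parseutil.ParseDurationSecond("120")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 2m0s
}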
cfg, err := sc.getLocalCRLConfig() if err != nil { @@ -299,23 +343,23 @@ func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { return nil } -// rebuildIfForced is to be called by readers or periodic functions that might need to trigger +// RebuildIfForced is to be called by readers or periodic functions that might need to trigger // a refresh of the CRL before the read occurs. -func (cb *crlBuilder) rebuildIfForced(sc *storageContext) error { +func (cb *CrlBuilder) RebuildIfForced(sc pki_backend.StorageContext) ([]string, error) { if cb.forceRebuild.Load() { - return cb._doRebuild(sc, true, _enforceForceFlag) + return cb._doRebuild(sc.(*storageContext), true, _enforceForceFlag) } - return nil + return nil, nil } // rebuild is to be called by various write apis that know the CRL is to be updated and can be now. -func (cb *crlBuilder) rebuild(sc *storageContext, forceNew bool) error { - return cb._doRebuild(sc, forceNew, _ignoreForceFlag) +func (cb *CrlBuilder) Rebuild(sc pki_backend.StorageContext, forceNew bool) ([]string, error) { + return cb._doRebuild(sc.(*storageContext), forceNew, _ignoreForceFlag) } // requestRebuildIfActiveNode will schedule a rebuild of the CRL from the next read or write api call assuming we are the active node of a cluster -func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) { +func (cb *CrlBuilder) requestRebuildIfActiveNode(b *backend) { // Only schedule us on active nodes, as the active node is the only node that can rebuild/write the CRL. // Note 1: The CRL is cluster specific, so this does need to run on the active node of a performance secondary cluster. // Note 2: This is called by the storage invalidation function, so it should not block. @@ -329,7 +373,7 @@ func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) { cb.forceRebuild.Store(true) } -func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceFlag bool) error { +func (cb *CrlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceFlag bool) ([]string, error) { cb._builder.Lock() defer cb._builder.Unlock() // Re-read the lock in case someone beat us to the punch between the previous load op. @@ -346,12 +390,12 @@ func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceF return buildCRLs(sc, myForceNew) } - return nil + return nil, nil } -func (cb *crlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path string) ([]string, error) { +func (cb *CrlBuilder) _getPresentDeltaWALForClearing(sc pki_backend.StorageContext, path string) ([]string, error) { // Clearing of the delta WAL occurs after a new complete CRL has been built. 
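// flushCRLBuildTimeInvalidation above consumes the invalidate flag with
// CompareAndSwap (the stdlib spelling of uber-atomic's CAS): exactly one
// caller observes the true-to-false transition and performs the flush, no
// matter how many goroutines race on it. A minimal demonstration:
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var invalidate atomic.Bool
	invalidate.Store(true)

	if invalidate.CompareAndSwap(true, false) {
		fmt.Println("first caller wins and flushes")
	}
	if !invalidate.CompareAndSwap(true, false) {
		fmt.Println("later callers see false and skip the work")
	}
}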
- walSerials, err := sc.Storage.List(sc.Context, path) + walSerials, err := sc.GetStorage().List(sc.GetContext(), path) if err != nil { return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %w", err) } @@ -362,15 +406,36 @@ func (cb *crlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path st return walSerials, nil } -func (cb *crlBuilder) getPresentLocalDeltaWALForClearing(sc *storageContext) ([]string, error) { +func (cb *CrlBuilder) GetPresentLocalDeltaWALForClearing(sc pki_backend.StorageContext) ([]string, error) { return cb._getPresentDeltaWALForClearing(sc, localDeltaWALPath) } -func (cb *crlBuilder) getPresentUnifiedDeltaWALForClearing(sc *storageContext) ([]string, error) { - return cb._getPresentDeltaWALForClearing(sc, unifiedDeltaWALPath) +func (cb *CrlBuilder) GetPresentUnifiedDeltaWALForClearing(sc pki_backend.StorageContext) ([]string, error) { + walClusters, err := sc.GetStorage().List(sc.GetContext(), unifiedDeltaWALPrefix) + if err != nil { + return nil, fmt.Errorf("error fetching list of clusters with delta WAL entries: %w", err) + } + + var allPaths []string + for index, cluster := range walClusters { + prefix := unifiedDeltaWALPrefix + cluster + clusterPaths, err := cb._getPresentDeltaWALForClearing(sc, prefix) + if err != nil { + return nil, fmt.Errorf("error fetching delta WAL entries for cluster (%v / %v): %w", index, cluster, err) + } + + // Here, we don't want to include the unifiedDeltaWALPrefix because + // ClearUnifiedDeltaWAL handles that for us. Instead, just include + // the cluster identifier. + for _, clusterPath := range clusterPaths { + allPaths = append(allPaths, cluster+clusterPath) + } + } + + return allPaths, nil } -func (cb *crlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, path string) error { +func (cb *CrlBuilder) _clearDeltaWAL(sc pki_backend.StorageContext, walSerials []string, path string) error { // Clearing of the delta WAL occurs after a new complete CRL has been built. for _, serial := range walSerials { // Don't remove our special entries! @@ -378,7 +443,7 @@ func (cb *crlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, pa continue } - if err := sc.Storage.Delete(sc.Context, path+serial); err != nil { + if err := sc.GetStorage().Delete(sc.GetContext(), path+serial); err != nil { return fmt.Errorf("error clearing delta WAL certificate: %w", err) } } @@ -386,15 +451,15 @@ func (cb *crlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, pa return nil } -func (cb *crlBuilder) clearLocalDeltaWAL(sc *storageContext, walSerials []string) error { +func (cb *CrlBuilder) ClearLocalDeltaWAL(sc pki_backend.StorageContext, walSerials []string) error { return cb._clearDeltaWAL(sc, walSerials, localDeltaWALPath) } -func (cb *crlBuilder) clearUnifiedDeltaWAL(sc *storageContext, walSerials []string) error { +func (cb *CrlBuilder) ClearUnifiedDeltaWAL(sc pki_backend.StorageContext, walSerials []string) error { return cb._clearDeltaWAL(sc, walSerials, unifiedDeltaWALPrefix) } -func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) error { +func (cb *CrlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) ([]string, error) { // Delta CRLs use the same expiry duration as the complete CRL. 
Because // we always rebuild the complete CRL and then the delta CRL, we can // be assured that the delta CRL always expires after a complete CRL, @@ -404,20 +469,20 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // This guarantee means we can avoid checking delta CRL expiry. Thus, // we only need to rebuild the delta CRL when we have new revocations, // within our time window for updating it. - cfg, err := cb.getConfigWithUpdate(sc) + cfg, err := cb.GetConfigWithUpdate(sc) if err != nil { - return err + return nil, err } if !cfg.EnableDelta { // We explicitly do not update the last check time here, as we // want to persist the last rebuild window if it hasn't been set. - return nil + return nil, nil } - deltaRebuildDuration, err := time.ParseDuration(cfg.DeltaRebuildInterval) + deltaRebuildDuration, err := parseutil.ParseDurationSecond(cfg.DeltaRebuildInterval) if err != nil { - return err + return nil, err } // Acquire CRL building locks before we get too much further. @@ -433,7 +498,7 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // If we're still before the time of our next rebuild check, we can // safely return here even if we have certs. We'll wait for a bit, // retrigger this check, and then do the rebuild. - return nil + return nil, nil } // Update our check time. If we bail out below (due to storage errors @@ -444,23 +509,23 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool rebuildLocal, err := cb._shouldRebuildLocalCRLs(sc, override) if err != nil { - return err + return nil, fmt.Errorf("error determining if local CRLs should be rebuilt: %w", err) } rebuildUnified, err := cb._shouldRebuildUnifiedCRLs(sc, override) if err != nil { - return err + return nil, fmt.Errorf("error determining if unified CRLs should be rebuilt: %w", err) } if !rebuildLocal && !rebuildUnified { - return nil + return nil, nil } // Finally, we must've needed to do the rebuild. Execute! - return cb.rebuildDeltaCRLsHoldingLock(sc, false) + return cb.RebuildDeltaCRLsHoldingLock(sc, false) } -func (cb *crlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) (bool, error) { +func (cb *CrlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) (bool, error) { // Fetch two storage entries to see if we actually need to do this // rebuild, given we're within the window. lastWALEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastRevokedSerial) @@ -506,71 +571,92 @@ func (cb *crlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) return true, nil } -func (cb *crlBuilder) _shouldRebuildUnifiedCRLs(sc *storageContext, override bool) (bool, error) { +func (cb *CrlBuilder) _shouldRebuildUnifiedCRLs(sc *storageContext, override bool) (bool, error) { // Unified CRL can only be built by the main cluster. - b := sc.Backend - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + sysView := sc.System() + if sysView.ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!sysView.LocalMount() && sysView.ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { return false, nil } - // Fetch two storage entries to see if we actually need to do this - // rebuild, given we're within the window. 
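// The early return above ("still before the time of our next rebuild
// check") is a simple rate-limit window: delta CRL rebuild checks run at
// most once per DeltaRebuildInterval, however many revocations arrive in
// between. A sketch of that check in isolation, with stand-in names for
// the CrlBuilder fields:
package sketch

import "time"

type deltaWindow struct {
	lastCheck time.Time
	interval  time.Duration
}

// shouldCheck reports whether the window has elapsed and, if so, advances
// it so the next burst of callers waits a full interval again.
func (w *deltaWindow) shouldCheck(now time.Time) bool {
	if now.Before(w.lastCheck.Add(w.interval)) {
		return false // inside the window; a later trigger will retry
	}
	w.lastCheck = now
	return true
}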
- lastWALEntry, err := sc.Storage.Get(sc.Context, unifiedDeltaWALLastRevokedSerial) - if err != nil || !override && (lastWALEntry == nil || lastWALEntry.Value == nil) { - // If this entry does not exist, we don't need to rebuild the - // delta WAL due to the expiration assumption above. There must - // not have been any new revocations. Since err should be nil - // in this case, we can safely return it. - return false, err + // If we're overriding whether we should build Delta CRLs, always return + // true, even if storage errors might've happened. + if override { + return true, nil } - lastBuildEntry, err := sc.Storage.Get(sc.Context, unifiedDeltaWALLastBuildSerial) + // Fetch two storage entries to see if we actually need to do this + // rebuild, given we're within the window. We need to fetch these + // two entries per cluster. + clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) if err != nil { - return false, err + return false, fmt.Errorf("failed to get the list of clusters having written Delta WALs: %w", err) } - if !override && lastBuildEntry != nil && lastBuildEntry.Value != nil { - // If the last build entry doesn't exist, we still want to build a - // new delta WAL, since this could be our very first time doing so. - // + // If any cluster tells us to rebuild, we should rebuild. + shouldRebuild := false + for index, cluster := range clusters { + prefix := unifiedDeltaWALPrefix + cluster + clusterUnifiedLastRevokedWALEntry := prefix + deltaWALLastRevokedSerialName + clusterUnifiedLastBuiltWALEntry := prefix + deltaWALLastBuildSerialName + + lastWALEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastRevokedWALEntry) + if err != nil { + return false, fmt.Errorf("failed fetching last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if lastWALEntry == nil || lastWALEntry.Value == nil { + continue + } + + lastBuildEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastBuiltWALEntry) + if err != nil { + return false, fmt.Errorf("failed fetching last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if lastBuildEntry == nil || lastBuildEntry.Value == nil { + // If the last build entry doesn't exist, we still want to build a + // new delta WAL, since this could be our very first time doing so. + shouldRebuild = true + break + } + // Otherwise, here, now that we know it exists, we want to check this // value against the other value. Since we previously guarded the WAL // entry being non-empty, we're good to decode everything within this // guard. var walInfo lastWALInfo if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return false, err + return false, fmt.Errorf("failed decoding last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) } var deltaInfo lastDeltaInfo if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { - return false, err + return false, fmt.Errorf("failed decoding last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) } - // Here, everything decoded properly and we know that no new certs - // have been revoked since we built this last delta CRL. We can exit - // without rebuilding then. - if walInfo.Serial == deltaInfo.Serial { - return false, nil + if walInfo.Serial != deltaInfo.Serial { + shouldRebuild = true + break } } - return true, nil + // No errors occurred, so return the result.
+ return shouldRebuild, nil } -func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) error { +func (cb *CrlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) ([]string, error) { cb._builder.Lock() defer cb._builder.Unlock() - return cb.rebuildDeltaCRLsHoldingLock(sc, forceNew) + return cb.RebuildDeltaCRLsHoldingLock(sc, forceNew) } -func (cb *crlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) error { - return buildAnyCRLs(sc, forceNew, true /* building delta */) +func (cb *CrlBuilder) RebuildDeltaCRLsHoldingLock(sc pki_backend.StorageContext, forceNew bool) ([]string, error) { + return buildAnyCRLs(sc.(*storageContext), forceNew, true /* building delta */) } -func (cb *crlBuilder) addCertForRevocationCheck(cluster, serial string) { +func (cb *CrlBuilder) addCertForRevocationCheck(cluster, serial string) { entry := &revocationQueueEntry{ Cluster: cluster, Serial: serial, @@ -578,7 +664,7 @@ func (cb *crlBuilder) addCertForRevocationCheck(cluster, serial string) { cb.revQueue.Add(entry) } -func (cb *crlBuilder) addCertForRevocationRemoval(cluster, serial string) { +func (cb *CrlBuilder) addCertForRevocationRemoval(cluster, serial string) { entry := &revocationQueueEntry{ Cluster: cluster, Serial: serial, @@ -586,7 +672,7 @@ func (cb *crlBuilder) addCertForRevocationRemoval(cluster, serial string) { cb.removalQueue.Add(entry) } -func (cb *crlBuilder) addCertFromCrossRevocation(cluster, serial string) { +func (cb *CrlBuilder) addCertFromCrossRevocation(cluster, serial string) { entry := &revocationQueueEntry{ Cluster: cluster, Serial: serial, @@ -594,20 +680,20 @@ func (cb *crlBuilder) addCertFromCrossRevocation(cluster, serial string) { cb.crossQueue.Add(entry) } -func (cb *crlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotPerfPrimary bool) error { +func (cb *CrlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotPerfPrimary bool) error { // Assume holding lock. 
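// The rewritten _shouldRebuildUnifiedCRLs above reduces to a per-cluster
// comparison: rebuild if any cluster has recorded a revocation whose
// serial differs from the serial captured at its last delta build, or has
// revocations but no build marker at all. A storage-free sketch of that
// decision, with simplified stand-ins for lastWALInfo/lastDeltaInfo:
package sketch

type clusterWAL struct {
	lastRevokedSerial string // empty means no revocations recorded
	lastBuiltSerial   string // empty means no delta CRL built yet
}

func shouldRebuildUnified(clusters []clusterWAL) bool {
	for _, c := range clusters {
		if c.lastRevokedSerial == "" {
			continue // nothing revoked here; this cluster can't force a rebuild
		}
		if c.lastBuiltSerial == "" || c.lastBuiltSerial != c.lastRevokedSerial {
			return true // new revocations since the last build
		}
	}
	return false
}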
if cb.haveInitializedQueue.Load() { return nil } - sc.Backend.Logger().Debug(fmt.Sprintf("gathering first time existing revocations")) + sc.Logger().Debug(fmt.Sprintf("gathering first time existing revocations")) clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) if err != nil { return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) } - sc.Backend.Logger().Debug(fmt.Sprintf("found %v clusters: %v", len(clusters), clusters)) + sc.Logger().Debug(fmt.Sprintf("found %v clusters: %v", len(clusters), clusters)) for cIndex, cluster := range clusters { cluster = cluster[0 : len(cluster)-1] @@ -617,7 +703,7 @@ func (cb *crlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotP return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) } - sc.Backend.Logger().Debug(fmt.Sprintf("found %v serials for cluster %v: %v", len(serials), cluster, serials)) + sc.Logger().Debug(fmt.Sprintf("found %v serials for cluster %v: %v", len(serials), cluster, serials)) for _, serial := range serials { if serial[len(serial)-1] == '/' { @@ -650,11 +736,11 @@ func (cb *crlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotP return nil } -func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error { - sc.Backend.Logger().Debug(fmt.Sprintf("starting to process revocation requests")) +func (cb *CrlBuilder) processRevocationQueue(sc *storageContext) error { + sc.Logger().Debug(fmt.Sprintf("starting to process revocation requests")) - isNotPerfPrimary := sc.Backend.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!sc.Backend.System().LocalMount() && sc.Backend.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) + isNotPerfPrimary := sc.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!sc.System().LocalMount() && sc.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) if err := cb.maybeGatherQueueForFirstProcess(sc, isNotPerfPrimary); err != nil { return fmt.Errorf("failed to gather first queue: %w", err) @@ -663,14 +749,14 @@ func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error { revQueue := cb.revQueue.Iterate() removalQueue := cb.removalQueue.Iterate() - sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v revocations and %v confirmation entries", len(revQueue), len(removalQueue))) + sc.Logger().Debug(fmt.Sprintf("gathered %v revocations and %v confirmation entries", len(revQueue), len(removalQueue))) - crlConfig, err := cb.getConfigWithUpdate(sc) + crlConfig, err := cb.GetConfigWithUpdate(sc) if err != nil { return err } - ourClusterId, err := sc.Backend.System().ClusterID(sc.Context) + ourClusterId, err := sc.System().ClusterID(sc.Context) if err != nil { return fmt.Errorf("unable to fetch clusterID to ignore local revocation entries: %w", err) } @@ -712,7 +798,7 @@ func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error { } if err := sc.Storage.Put(sc.Context, confirmedEntry); err != nil { - return fmt.Errorf("error persisting cross-cluster revocation confirmation: %w\nThis may occur when the active node of the primary performance replication cluster is unavailable.", err) + return fmt.Errorf("error persisting cross-cluster revocation confirmation: %w", err) } } else { // Since we're the active node of the primary cluster, go ahead @@ -732,7 +818,7 @@ func (cb 
*crlBuilder) processRevocationQueue(sc *storageContext) error { } if isNotPerfPrimary { - sc.Backend.Logger().Debug(fmt.Sprintf("not on perf primary so ignoring any revocation confirmations")) + sc.Logger().Debug(fmt.Sprintf("not on perf primary so ignoring any revocation confirmations")) // See note in pki/backend.go; this should be empty. cb.removalQueue.RemoveAll() @@ -767,10 +853,10 @@ func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error { return nil } -func (cb *crlBuilder) processCrossClusterRevocations(sc *storageContext) error { - sc.Backend.Logger().Debug(fmt.Sprintf("starting to process unified revocations")) +func (cb *CrlBuilder) processCrossClusterRevocations(sc *storageContext) error { + sc.Logger().Debug(fmt.Sprintf("starting to process unified revocations")) - crlConfig, err := cb.getConfigWithUpdate(sc) + crlConfig, err := cb.GetConfigWithUpdate(sc) if err != nil { return err } @@ -781,9 +867,9 @@ func (cb *crlBuilder) processCrossClusterRevocations(sc *storageContext) error { } crossQueue := cb.crossQueue.Iterate() - sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v unified revocations entries", len(crossQueue))) + sc.Logger().Debug(fmt.Sprintf("gathered %v unified revocations entries", len(crossQueue))) - ourClusterId, err := sc.Backend.System().ClusterID(sc.Context) + ourClusterId, err := sc.System().ClusterID(sc.Context) if err != nil { return fmt.Errorf("unable to fetch clusterID to ignore local unified revocation entries: %w", err) } @@ -829,58 +915,14 @@ func (cb *crlBuilder) processCrossClusterRevocations(sc *storageContext) error { return nil } -// Helper function to fetch a map of issuerID->parsed cert for revocation -// usage. Unlike other paths, this needs to handle the legacy bundle -// more gracefully than rejecting it outright. -func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509.Certificate, error) { - var err error - var issuers []issuerID - - if !sc.Backend.useLegacyBundleCaStorage() { - issuers, err = sc.listIssuers() - if err != nil { - return nil, fmt.Errorf("could not fetch issuers list: %w", err) - } - } else { - // Hack: this isn't a real issuerID, but it works for fetchCAInfo - // since it resolves the reference. - issuers = []issuerID{legacyBundleShimID} - } - - issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) - for _, issuer := range issuers { - _, bundle, caErr := sc.fetchCertBundleByIssuerId(issuer, false) - if caErr != nil { - return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %w", issuer, caErr) - } - - if bundle == nil { - return nil, fmt.Errorf("faulty reference: %v - CA info not found", issuer) - } - - parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) - if err != nil { - return nil, errutil.InternalError{Err: err.Error()} - } - - if parsedBundle.Certificate == nil { - return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} - } - - issuerIDCertMap[issuer] = parsedBundle.Certificate - } - - return issuerIDCertMap, nil -} - // Revoke a certificate from a given serial number if it is present in local // storage. -func tryRevokeCertBySerial(sc *storageContext, config *crlConfig, serial string) (*logical.Response, error) { +func tryRevokeCertBySerial(sc *storageContext, config *pki_backend.CrlConfig, serial string) (*logical.Response, error) { // revokeCert requires us to hold these locks before calling it. 
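// The isNotPerfPrimary expression above recurs throughout this file; it
// asks "is this node anything other than the active node of the
// performance primary (or a local mount)?". A sketch of the same
// predicate extracted into a helper -- the helper itself is illustrative
// and not part of this change:
package sketch

import (
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/logical"
)

func isNotPerfPrimary(sysView logical.SystemView) bool {
	state := sysView.ReplicationState()
	return state.HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
		(!sysView.LocalMount() && state.HasState(consts.ReplicationPerformanceSecondary))
}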
- sc.Backend.revokeStorageLock.Lock() - defer sc.Backend.revokeStorageLock.Unlock() + sc.GetRevokeStorageLock().Lock() + defer sc.GetRevokeStorageLock().Unlock() - certEntry, err := fetchCertBySerial(sc, "certs/", serial) + certEntry, err := fetchCertBySerial(sc, issuing.PathCerts, serial) if err != nil { switch err.(type) { case errutil.UserError: @@ -903,24 +945,24 @@ func tryRevokeCertBySerial(sc *storageContext, config *crlConfig, serial string) } // Revokes a cert, and tries to be smart about error recovery -func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) (*logical.Response, error) { +func revokeCert(sc *storageContext, config *pki_backend.CrlConfig, cert *x509.Certificate) (*logical.Response, error) { // As this backend is self-contained and this function does not hook into // third parties to manage users or resources, if the mount is tainted, // revocation doesn't matter anyways -- the CRL that would be written will // be immediately blown away by the view being cleared. So we can simply // fast path a successful exit. - if sc.Backend.System().Tainted() { + if sc.System().Tainted() { return nil, nil } - colonSerial := serialFromCert(cert) - hyphenSerial := normalizeSerial(colonSerial) + colonSerial := parsing.SerialFromCert(cert) + hyphenSerial := parsing.NormalizeSerialForStorage(colonSerial) // Validate that no issuers match the serial number to be revoked. We need // to gracefully degrade to the legacy cert bundle when it is required, as // secondary PR clusters might not have been upgraded, but still need to // handle revoking certs. - issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) + issuerIDCertMap, err := revocation.FetchIssuerMapForRevocationChecking(sc) if err != nil { return nil, err } @@ -933,7 +975,7 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( } } - curRevInfo, err := sc.fetchRevocationInfo(colonSerial) + curRevInfo, err := fetchRevocationInfo(sc, colonSerial) if err != nil { return nil, err } @@ -960,7 +1002,7 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( } currTime := time.Now() - revInfo := revocationInfo{ + revInfo := revocation.RevocationInfo{ CertificateBytes: cert.Raw, RevocationTime: currTime.Unix(), RevocationTimeUTC: currTime.UTC(), @@ -968,37 +1010,53 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( // We may not find an issuer with this certificate; that's fine so // ignore the return value. - associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) + revInfo.AssociateRevokedCertWithIsssuer(cert, issuerIDCertMap) revEntry, err := logical.StorageEntryJSON(revokedPath+hyphenSerial, revInfo) if err != nil { return nil, fmt.Errorf("error creating revocation entry: %w", err) } - certsCounted := sc.Backend.certsCounted.Load() + certCounter := sc.GetCertificateCounter() + certsCounted := certCounter.IsInitialized() err = sc.Storage.Put(sc.Context, revEntry) if err != nil { return nil, fmt.Errorf("error saving revoked certificate to new location: %w", err) } - sc.Backend.ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) + certCounter.IncrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) + + // From here on out, the certificate has been revoked locally. Any other + // persistence issues might still err, but any other failure messages + // should be added as warnings to the revocation. 
+ resp := &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": revInfo.RevocationTime, + "revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano), + "state": "revoked", + }, + } // If this flag is enabled after the fact, existing local entries will be published to // the unified storage space through a periodic function. + failedWritingUnifiedCRL := false if config.UnifiedCRL { - entry := &unifiedRevocationEntry{ + entry := &revocation.UnifiedRevocationEntry{ SerialNumber: colonSerial, CertExpiration: cert.NotAfter, RevocationTimeUTC: revInfo.RevocationTimeUTC, CertificateIssuer: revInfo.CertificateIssuer, } - ignoreErr := writeUnifiedRevocationEntry(sc, entry) + ignoreErr := revocation.WriteUnifiedRevocationEntry(sc.GetContext(), sc.GetStorage(), entry) if ignoreErr != nil { // Just log the error if we fail to write across clusters, a separate background // thread will reattempt it later on as we have the local write done. - sc.Backend.Logger().Debug("Failed to write unified revocation entry, will re-attempt later", + sc.Logger().Error("Failed to write unified revocation entry, will re-attempt later", "serial_number", colonSerial, "error", ignoreErr) - sc.Backend.unifiedTransferStatus.forceRun() + sc.GetUnifiedTransferStatus().forceRun() + + resp.AddWarning(fmt.Sprintf("Failed to write unified revocation entry, will re-attempt later: %v", ignoreErr)) + failedWritingUnifiedCRL = true } } @@ -1007,7 +1065,7 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( // already rebuilt the full CRL so the Delta WAL will be cleared // afterwards. Writing an entry only to immediately remove it // isn't necessary. - crlErr := sc.Backend.crlBuilder.rebuild(sc, false) + warnings, crlErr := sc.CrlBuilder().Rebuild(sc, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -1016,27 +1074,24 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } } else if config.EnableDelta { - if err := writeRevocationDeltaWALs(sc, config, hyphenSerial, colonSerial); err != nil { + if err := writeRevocationDeltaWALs(sc, config, resp, failedWritingUnifiedCRL, hyphenSerial, colonSerial); err != nil { return nil, fmt.Errorf("failed to write WAL entries for Delta CRLs: %w", err) } } - return &logical.Response{ - Data: map[string]interface{}{ - "revocation_time": revInfo.RevocationTime, - "revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano), - "state": "revoked", - }, - }, nil + return resp, nil } -func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, hyphenSerial string, colonSerial string) error { +func writeRevocationDeltaWALs(sc *storageContext, config *pki_backend.CrlConfig, resp *logical.Response, failedWritingUnifiedCRL bool, hyphenSerial string, colonSerial string) error { if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, localDeltaWALPath); err != nil { return fmt.Errorf("failed to write local delta WAL entry: %w", err) } - if config.UnifiedCRL { + if config.UnifiedCRL && !failedWritingUnifiedCRL { // We only need to write cross-cluster unified Delta WAL entries when // it is enabled; in particular, because we rebuild CRLs when enabling // this flag, any revocations that happened prior to enabling unified @@ -1046,9 +1101,21 @@ func
writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, hyphenSeria // listing for the unified CRL rebuild, this revocation will not // appear on either the main or the next delta CRL, but will need to // wait for a subsequent complete CRL rebuild). - if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); err != nil { - return fmt.Errorf("failed to write cross-cluster delta WAL entry: %w", err) + // + // Lastly, we don't attempt this if the unified CRL entry failed to + // write, as we need that entry before the delta WAL entry will make + // sense. + if ignoredErr := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); ignoredErr != nil { + // Just log the error if we fail to write across clusters, a separate background + // thread will reattempt it later on as we have the local write done. + sc.Logger().Error("Failed to write cross-cluster delta WAL entry, will re-attempt later", + "serial_number", colonSerial, "error", ignoredErr) + sc.GetUnifiedTransferStatus().forceRun() + + resp.AddWarning(fmt.Sprintf("Failed to write cross-cluster delta WAL entry, will re-attempt later: %v", ignoredErr)) } + } else if failedWritingUnifiedCRL { + resp.AddWarning("Skipping cross-cluster delta WAL entry as cross-cluster revocation failed to write; will re-attempt later.") } return nil @@ -1102,11 +1169,11 @@ func writeSpecificRevocationDeltaWALs(sc *storageContext, hyphenSerial string, c return nil } -func buildCRLs(sc *storageContext, forceNew bool) error { +func buildCRLs(sc *storageContext, forceNew bool) ([]string, error) { return buildAnyCRLs(sc, forceNew, false) } -func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { +func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, error) { // In order to build all CRLs, we need knowledge of all issuers. Any two // issuers with the same keys _and_ subject should have the same CRL since // they're functionally equivalent. @@ -1134,14 +1201,13 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // See the message in revokedCert about rebuilding CRLs: we need to // gracefully handle revoking entries with the legacy cert bundle. var err error - var issuers []issuerID + var issuers []issuing.IssuerID var wasLegacy bool - // First, fetch an updated copy of the CRL config. We'll pass this into - // buildCRL. - globalCRLConfig, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + // First, fetch an updated copy of the CRL config. We'll pass this into buildCRL. + globalCRLConfig, err := sc.CrlBuilder().GetConfigWithUpdate(sc) if err != nil { - return fmt.Errorf("error building CRL: while updating config: %w", err) + return nil, fmt.Errorf("error building CRL: while updating config: %w", err) } if globalCRLConfig.Disable && !forceNew { @@ -1153,19 +1219,19 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // So, since tidy can now associate issuers on revocation entries, we // can skip the rest of this function and exit early without updating // anything. - return nil + return nil, nil } - if !sc.Backend.useLegacyBundleCaStorage() { + if !sc.UseLegacyBundleCaStorage() { issuers, err = sc.listIssuers() if err != nil { - return fmt.Errorf("error building CRL: while listing issuers: %w", err) + return nil, fmt.Errorf("error building CRL: while listing issuers: %w", err) } } else { // Here, we hard-code the legacy issuer entry instead of using the // default ref. 
This is because we need to hack some of the logic // below for revocation to handle the legacy bundle. - issuers = []issuerID{legacyBundleShimID} + issuers = []issuing.IssuerID{legacyBundleShimID} wasLegacy = true // Here, we avoid building a delta CRL with the legacy CRL bundle. @@ -1173,30 +1239,30 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er // Users should upgrade symmetrically, rather than attempting // backward compatibility for new features across disparate versions. if isDelta { - return nil + return []string{"refusing to rebuild delta CRL with legacy bundle; finish migrating to newer issuer storage layout"}, nil } } issuersConfig, err := sc.getIssuersConfig() if err != nil { - return fmt.Errorf("error building CRLs: while getting the default config: %w", err) + return nil, fmt.Errorf("error building CRLs: while getting the default config: %w", err) } - // We map issuerID->entry for fast lookup and also issuerID->Cert for + // We map IssuerID->entry for fast lookup and also IssuerID->Cert for // signature verification and correlation of revoked certs. - issuerIDEntryMap := make(map[issuerID]*issuerEntry, len(issuers)) - issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) + issuerIDEntryMap := make(map[issuing.IssuerID]*issuing.IssuerEntry, len(issuers)) + issuerIDCertMap := make(map[issuing.IssuerID]*x509.Certificate, len(issuers)) - // We use a double map (keyID->subject->issuerID) to store whether or not this + // We use a double map (KeyID->subject->IssuerID) to store whether or not this // key+subject pairing has been seen before. We can then iterate over each // key/subject and choose any representative issuer for that combination. - keySubjectIssuersMap := make(map[keyID]map[string][]issuerID) + keySubjectIssuersMap := make(map[issuing.KeyID]map[string][]issuing.IssuerID) for _, issuer := range issuers { // We don't strictly need this call, but by requesting the bundle, the // legacy path is automatically ignored. thisEntry, _, err := sc.fetchCertBundleByIssuerId(issuer, false) if err != nil { - return fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %w", issuer, err) + return nil, fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %w", issuer, err) } if len(thisEntry.KeyID) == 0 { @@ -1221,13 +1287,13 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er thisCert, err := thisEntry.GetCertificate() if err != nil { - return fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %w", issuer, err) + return nil, fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %w", issuer, err) } issuerIDCertMap[issuer] = thisCert subject := string(thisCert.RawSubject) if _, ok := keySubjectIssuersMap[thisEntry.KeyID]; !ok { - keySubjectIssuersMap[thisEntry.KeyID] = make(map[string][]issuerID) + keySubjectIssuersMap[thisEntry.KeyID] = make(map[string][]issuing.IssuerID) } keySubjectIssuersMap[thisEntry.KeyID][subject] = append(keySubjectIssuersMap[thisEntry.KeyID][subject], issuer) @@ -1236,19 +1302,27 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, er // Now we do two calls: building the cluster-local CRL, and potentially // building the global CRL if we're on the active node of the performance // primary.
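// The warnings collected from these two calls are merged below and
// ultimately returned from buildCRLs. As a rough sketch of how a caller
// consumes them (resp here is a hypothetical *logical.Response; the real
// plumbing lives in revokeCert above):
//
//	warnings, err := buildCRLs(sc, forceNew)
//	if err != nil {
//	    return nil, err
//	}
//	for i, w := range warnings {
//	    resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", i+1, w))
//	}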
- currLocalDeltaSerials, err := buildAnyLocalCRLs(sc, issuersConfig, globalCRLConfig, + currLocalDeltaSerials, localWarnings, err := buildAnyLocalCRLs(sc, issuersConfig, globalCRLConfig, issuers, issuerIDEntryMap, issuerIDCertMap, keySubjectIssuersMap, wasLegacy, forceNew, isDelta) if err != nil { - return err + return nil, err } - currUnifiedDeltaSerials, err := buildAnyUnifiedCRLs(sc, issuersConfig, globalCRLConfig, + currUnifiedDeltaSerials, unifiedWarnings, err := buildAnyUnifiedCRLs(sc, issuersConfig, globalCRLConfig, issuers, issuerIDEntryMap, issuerIDCertMap, keySubjectIssuersMap, wasLegacy, forceNew, isDelta) if err != nil { - return err + return nil, err + } + + var warnings []string + for _, warning := range localWarnings { + warnings = append(warnings, fmt.Sprintf("warning from local CRL rebuild: %v", warning)) + } + for _, warning := range unifiedWarnings { + warnings = append(warnings, fmt.Sprintf("warning from unified CRL rebuild: %v", warning)) } // Finally, we decide if we need to rebuild the Delta CRLs again, for both @@ -1256,22 +1330,26 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { if !isDelta { // After we've confirmed the primary CRLs have built OK, go ahead and // clear the delta CRL WAL and rebuild it. - if err := sc.Backend.crlBuilder.clearLocalDeltaWAL(sc, currLocalDeltaSerials); err != nil { - return fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + if err := sc.CrlBuilder().ClearLocalDeltaWAL(sc, currLocalDeltaSerials); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + } + if err := sc.CrlBuilder().ClearUnifiedDeltaWAL(sc, currUnifiedDeltaSerials); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) } - if err := sc.Backend.crlBuilder.clearUnifiedDeltaWAL(sc, currUnifiedDeltaSerials); err != nil { - return fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + deltaWarnings, err := sc.CrlBuilder().RebuildDeltaCRLsHoldingLock(sc, forceNew) + if err != nil { + return nil, fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %w", err) } - if err := sc.Backend.crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew); err != nil { - return fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %w", err) + for _, warning := range deltaWarnings { + warnings = append(warnings, fmt.Sprintf("warning from delta CRL rebuild: %v", warning)) } } - return nil + return warnings, nil } func getLastWALSerial(sc *storageContext, path string) (string, error) { - lastWALEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastRevokedSerial) + lastWALEntry, err := sc.Storage.Get(sc.Context, path) if err != nil { return "", err } @@ -1291,17 +1369,18 @@ func getLastWALSerial(sc *storageContext, path string) (string, error) { func buildAnyLocalCRLs( sc *storageContext, - issuersConfig *issuerConfigEntry, - globalCRLConfig *crlConfig, - issuers []issuerID, - issuerIDEntryMap map[issuerID]*issuerEntry, - issuerIDCertMap map[issuerID]*x509.Certificate, - keySubjectIssuersMap map[keyID]map[string][]issuerID, + issuersConfig *issuing.IssuerConfigEntry, + globalCRLConfig *pki_backend.CrlConfig, + issuers []issuing.IssuerID, + issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, + keySubjectIssuersMap map[issuing.KeyID]map[string][]issuing.IssuerID, wasLegacy bool, forceNew bool, isDelta bool, -) ([]string, error) { +) ([]string, []string, 
error) { var err error + var warnings []string // Before we load cert entries, we want to store the last seen delta WAL // serial number. The subsequent List will have at LEAST that certificate @@ -1312,7 +1391,7 @@ func buildAnyLocalCRLs( if isDelta { lastDeltaSerial, err = getLastWALSerial(sc, localDeltaWALLastRevokedSerial) if err != nil { - return nil, err + return nil, nil, err } } @@ -1321,14 +1400,14 @@ func buildAnyLocalCRLs( // visible now, should also be visible on the complete CRL we're writing. var currDeltaCerts []string if !isDelta { - currDeltaCerts, err = sc.Backend.crlBuilder.getPresentLocalDeltaWALForClearing(sc) + currDeltaCerts, err = sc.CrlBuilder().GetPresentLocalDeltaWALForClearing(sc) if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) } } var unassignedCerts []pkix.RevokedCertificate - var revokedCertsMap map[issuerID][]pkix.RevokedCertificate + var revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate // If the CRL is disabled do not bother reading in all the revoked certificates. if !globalCRLConfig.Disable { @@ -1338,7 +1417,7 @@ func buildAnyLocalCRLs( // a separate pool for those. unassignedCerts, revokedCertsMap, err = getLocalRevokedCertEntries(sc, issuerIDCertMap, isDelta) if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) } if !isDelta { @@ -1351,7 +1430,7 @@ func buildAnyLocalCRLs( // duplicate this serial number on the delta, hence the above // guard for isDelta. if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) } } } @@ -1360,28 +1439,32 @@ func buildAnyLocalCRLs( // CRLs. internalCRLConfig, err := sc.getLocalCRLConfig() if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) } - if err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, + rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, issuers, issuerIDEntryMap, keySubjectIssuersMap, unassignedCerts, revokedCertsMap, - forceNew, false /* isUnified */, isDelta); err != nil { - return nil, fmt.Errorf("error building CRLs: %w", err) + forceNew, false /* isUnified */, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: %w", err) + } + if len(rebuildWarnings) > 0 { + warnings = append(warnings, rebuildWarnings...) } // Finally, persist our potentially updated local CRL config. Only do this // if we didn't have a legacy CRL bundle. if !wasLegacy { if err := sc.setLocalCRLConfig(internalCRLConfig); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) } } if isDelta { // Update our last build time here so we avoid checking for new certs // for a while. 
- sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() + sc.CrlBuilder().SetLastDeltaRebuildCheckTime(time.Now()) if len(lastDeltaSerial) > 0 { // When we have a last delta serial, write out the relevant info @@ -1390,43 +1473,44 @@ func buildAnyLocalCRLs( lastDeltaBuildEntry, err := logical.StorageEntryJSON(localDeltaWALLastBuildSerial, deltaInfo) if err != nil { - return nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) + return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) } err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) if err != nil { - return nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) } } } - return currDeltaCerts, nil + return currDeltaCerts, warnings, nil } func buildAnyUnifiedCRLs( sc *storageContext, - issuersConfig *issuerConfigEntry, - globalCRLConfig *crlConfig, - issuers []issuerID, - issuerIDEntryMap map[issuerID]*issuerEntry, - issuerIDCertMap map[issuerID]*x509.Certificate, - keySubjectIssuersMap map[keyID]map[string][]issuerID, + issuersConfig *issuing.IssuerConfigEntry, + globalCRLConfig *pki_backend.CrlConfig, + issuers []issuing.IssuerID, + issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, + keySubjectIssuersMap map[issuing.KeyID]map[string][]issuing.IssuerID, wasLegacy bool, forceNew bool, isDelta bool, -) ([]string, error) { +) ([]string, []string, error) { var err error + var warnings []string // Unified CRL can only be built by the main cluster. - b := sc.Backend - if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || - (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - return nil, nil + sysView := sc.System() + if sysView.ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!sysView.LocalMount() && sysView.ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + return nil, nil, nil } // Unified CRL should only be built if enabled. if !globalCRLConfig.UnifiedCRL && !forceNew { - return nil, nil + return nil, nil, nil } // Before we load cert entries, we want to store the last seen delta WAL @@ -1434,11 +1518,23 @@ func buildAnyUnifiedCRLs( // (and potentially more) in it; when we're done writing the delta CRL, // we'll write this serial as a sentinel to see if we need to rebuild it // in the future. - var lastDeltaSerial string + // + // We need to do this per-cluster. + lastDeltaSerial := map[string]string{} if isDelta { - lastDeltaSerial, err = getLastWALSerial(sc, unifiedDeltaWALLastRevokedSerial) + clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("error listing clusters for unified delta WAL building: %w", err) + } + + for index, cluster := range clusters { + path := unifiedDeltaWALPrefix + cluster + deltaWALLastRevokedSerialName + serial, err := getLastWALSerial(sc, path) + if err != nil { + return nil, nil, fmt.Errorf("error getting last written Delta WAL serial for cluster (%v / %v): %w", index, cluster, err) + } + + lastDeltaSerial[cluster] = serial } } @@ -1447,14 +1543,14 @@ func buildAnyUnifiedCRLs( // visible now, should also be visible on the complete CRL we're writing. 
var currDeltaCerts []string if !isDelta { - currDeltaCerts, err = sc.Backend.crlBuilder.getPresentUnifiedDeltaWALForClearing(sc) + currDeltaCerts, err = sc.CrlBuilder().GetPresentUnifiedDeltaWALForClearing(sc) if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) } } var unassignedCerts []pkix.RevokedCertificate - var revokedCertsMap map[issuerID][]pkix.RevokedCertificate + var revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate // If the CRL is disabled do not bother reading in all the revoked certificates. if !globalCRLConfig.Disable { @@ -1464,7 +1560,7 @@ func buildAnyUnifiedCRLs( // a separate pool for those. unassignedCerts, revokedCertsMap, err = getUnifiedRevokedCertEntries(sc, issuerIDCertMap, isDelta) if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) } if !isDelta { @@ -1477,7 +1573,7 @@ func buildAnyUnifiedCRLs( // duplicate this serial number on the delta, hence the above // guard for isDelta. if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) } } } @@ -1486,65 +1582,78 @@ func buildAnyUnifiedCRLs( // CRLs. internalCRLConfig, err := sc.getUnifiedCRLConfig() if err != nil { - return nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) } - if err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, + rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, issuers, issuerIDEntryMap, keySubjectIssuersMap, unassignedCerts, revokedCertsMap, - forceNew, true /* isUnified */, isDelta); err != nil { - return nil, fmt.Errorf("error building CRLs: %w", err) + forceNew, true /* isUnified */, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: %w", err) + } + if len(rebuildWarnings) > 0 { + warnings = append(warnings, rebuildWarnings...) } // Finally, persist our potentially updated local CRL config. Only do this // if we didn't have a legacy CRL bundle. if !wasLegacy { if err := sc.setUnifiedCRLConfig(internalCRLConfig); err != nil { - return nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) } } if isDelta { // Update our last build time here so we avoid checking for new certs // for a while. - sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() + sc.CrlBuilder().SetLastDeltaRebuildCheckTime(time.Now()) - if len(lastDeltaSerial) > 0 { - // When we have a last delta serial, write out the relevant info - // so we can skip extra CRL rebuilds. - deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} + // Persist all of our known last revoked serial numbers here, as the + // last seen serial during build. 
This will allow us to detect if any + // new revocations have occurred, forcing us to rebuild the delta CRL. + for cluster, serial := range lastDeltaSerial { + if len(serial) == 0 { + continue + } - lastDeltaBuildEntry, err := logical.StorageEntryJSON(unifiedDeltaWALLastBuildSerial, deltaInfo) + // Make sure to use the cluster-specific path. Since we're on the + // active node of the primary cluster, we own this entry and can + // safely write it. + path := unifiedDeltaWALPrefix + cluster + deltaWALLastBuildSerialName + deltaInfo := lastDeltaInfo{Serial: serial} + lastDeltaBuildEntry, err := logical.StorageEntryJSON(path, deltaInfo) if err != nil { - return nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) + return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) } err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) if err != nil { - return nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) } } } - return currDeltaCerts, nil + return currDeltaCerts, warnings, nil } func buildAnyCRLsWithCerts( sc *storageContext, - issuersConfig *issuerConfigEntry, - globalCRLConfig *crlConfig, - internalCRLConfig *internalCRLConfigEntry, - issuers []issuerID, - issuerIDEntryMap map[issuerID]*issuerEntry, - keySubjectIssuersMap map[keyID]map[string][]issuerID, + issuersConfig *issuing.IssuerConfigEntry, + globalCRLConfig *pki_backend.CrlConfig, + internalCRLConfig *issuing.InternalCRLConfigEntry, + issuers []issuing.IssuerID, + issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + keySubjectIssuersMap map[issuing.KeyID]map[string][]issuing.IssuerID, unassignedCerts []pkix.RevokedCertificate, - revokedCertsMap map[issuerID][]pkix.RevokedCertificate, + revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate, forceNew bool, isUnified bool, isDelta bool, -) error { +) ([]string, error) { // Now we can call buildCRL once, on an arbitrary/representative issuer - // from each of these (keyID, subject) sets. + // from each of these (KeyID, subject) sets. + var warnings []string for _, subjectIssuersMap := range keySubjectIssuersMap { for _, issuersSet := range subjectIssuersMap { if len(issuersSet) == 0 { @@ -1552,15 +1661,15 @@ func buildAnyCRLsWithCerts( } var revokedCerts []pkix.RevokedCertificate - representative := issuerID("") - var crlIdentifier crlID - var crlIdIssuer issuerID + representative := issuing.IssuerID("") + var crlIdentifier issuing.CrlID + var crlIdIssuer issuing.IssuerID for _, issuerId := range issuersSet { // Skip entries which aren't enabled for CRL signing. We don't // particularly care which issuer is ultimately chosen as the // set representative for signing at this point, other than // that it has crl-signing usage. - if err := issuerIDEntryMap[issuerId].EnsureUsage(CRLSigningUsage); err != nil { + if err := issuerIDEntryMap[issuerId].EnsureUsage(issuing.CRLSigningUsage); err != nil { continue } @@ -1580,7 +1689,7 @@ func buildAnyCRLsWithCerts( // Otherwise, use any other random issuer if we've not yet // chosen one. - if representative == issuerID("") { + if representative == issuing.IssuerID("") { representative = issuerId } @@ -1592,7 +1701,7 @@ func buildAnyCRLsWithCerts( // Finally, check our crlIdentifier. 
if thisCRLId, ok := internalCRLConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { if len(crlIdentifier) > 0 && crlIdentifier != thisCRLId { - return fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) + return nil, fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) } crlIdentifier = thisCRLId @@ -1604,6 +1713,24 @@ func buildAnyCRLsWithCerts( // Skip this set for the time being; while we have valid // issuers and associated keys, this occurred because we lack // crl-signing usage on all issuers in this set. + // + // But, tell the user about this, so they can either correct + // this by reissuing the CA certificate or adding an equivalent + // version with KU bits if the CA cert lacks KU altogether. + // + // See also: https://github.com/hashicorp/vault/issues/20137 + warning := "Issuer equivalency set with associated keys lacked an issuer with CRL Signing KeyUsage; refusing to rebuild CRL for this group of issuers: " + var issuers []string + for _, issuerId := range issuersSet { + issuers = append(issuers, issuerId.String()) + } + warning += strings.Join(issuers, ",") + + // We only need this warning once. :-) + if !isUnified && !isDelta { + warnings = append(warnings, warning) + } + continue } @@ -1644,7 +1771,7 @@ func buildAnyCRLsWithCerts( // Lastly, build the CRL. nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isUnified, isDelta, lastCompleteNumber) if err != nil { - return fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %w", representative, err) + return nil, fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %w", representative, err) } internalCRLConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate @@ -1692,17 +1819,17 @@ func buildAnyCRLsWithCerts( } if !stillHaveIssuerForID { - if err := sc.Storage.Delete(sc.Context, "crls/"+crlId.String()); err != nil { - return fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %w", err) + if err := sc.Storage.Delete(sc.Context, issuing.PathCrls+crlId.String()); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %w", err) } } } // All good :-) - return nil + return warnings, nil } -func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID]*x509.Certificate) bool { +func isRevInfoIssuerValid(revInfo *revocation.RevocationInfo, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate) bool { if len(revInfo.CertificateIssuer) > 0 { issuerId := revInfo.CertificateIssuer if _, issuerExists := issuerIDCertMap[issuerId]; issuerExists { @@ -1713,23 +1840,9 @@ func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID] return false } -func associateRevokedCertWithIsssuer(revInfo *revocationInfo, revokedCert *x509.Certificate, issuerIDCertMap map[issuerID]*x509.Certificate) bool { - for issuerId, issuerCert := range issuerIDCertMap { - if bytes.Equal(revokedCert.RawIssuer, issuerCert.RawSubject) { - if err := revokedCert.CheckSignatureFrom(issuerCert); err == nil { - // Valid mapping. Add it to the specified entry. 
- revInfo.CertificateIssuer = issuerId - return true - } - } - } - - return false -} - -func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { +func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuing.IssuerID][]pkix.RevokedCertificate, error) { var unassignedCerts []pkix.RevokedCertificate - revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate) + revokedCertsMap := make(map[issuing.IssuerID][]pkix.RevokedCertificate) listingPath := revokedPath if isDelta { @@ -1754,7 +1867,7 @@ func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID continue } - var revInfo revocationInfo + var revInfo revocation.RevocationInfo revokedEntry, err := sc.Storage.Get(sc.Context, revokedPath+serial) if err != nil { return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch revoked cert with serial %s: %s", serial, err)} @@ -1831,7 +1944,7 @@ func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID } // Now we need to assign the revoked certificate to an issuer. - foundParent := associateRevokedCertWithIsssuer(&revInfo, revokedCert, issuerIDCertMap) + foundParent := revInfo.AssociateRevokedCertWithIsssuer(revokedCert, issuerIDCertMap) if !foundParent { // If the parent isn't found, add it to the unassigned bucket. unassignedCerts = append(unassignedCerts, newRevCert) @@ -1856,13 +1969,13 @@ func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID return unassignedCerts, revokedCertsMap, nil } -func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { +func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuing.IssuerID][]pkix.RevokedCertificate, error) { // Getting unified revocation entries is a bit different than getting // the local ones. In particular, the full copy of the certificate is // unavailable, so we'll be able to avoid parsing the stored certificate, // at the expense of potentially having incorrect issuer mappings. 
var unassignedCerts []pkix.RevokedCertificate - revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate) + revokedCertsMap := make(map[issuing.IssuerID][]pkix.RevokedCertificate) listingPath := unifiedRevocationReadPathPrefix if isDelta { @@ -1918,7 +2031,7 @@ func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuer continue } - var xRevEntry unifiedRevocationEntry + var xRevEntry revocation.UnifiedRevocationEntry if err := entryRaw.DecodeJSON(&xRevEntry); err != nil { return nil, nil, fmt.Errorf("failed json decoding of unified revocation entry at path %v: %w", serialPath, err) } @@ -1952,7 +2065,7 @@ func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuer return unassignedCerts, revokedCertsMap, nil } -func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issuerIDCertMap map[issuerID]*x509.Certificate, revokedCertsMap map[issuerID][]pkix.RevokedCertificate) error { +func augmentWithRevokedIssuers(issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate) error { // When we set up our maps with the legacy CA bundle, we only have a // single entry here. This entry is never revoked, so the outer loop // will exit quickly. @@ -1988,10 +2101,10 @@ func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issue // Builds a CRL by going through the list of revoked certificates and building // a new CRL with the stored revocation times and serial numbers. -func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64, isUnified bool, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { +func buildCRL(sc *storageContext, crlInfo *pki_backend.CrlConfig, forceNew bool, thisIssuerId issuing.IssuerID, revoked []pkix.RevokedCertificate, identifier issuing.CrlID, crlNumber int64, isUnified bool, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { var revokedCerts []pkix.RevokedCertificate - crlLifetime, err := time.ParseDuration(crlInfo.Expiry) + crlLifetime, err := parseutil.ParseDurationSecond(crlInfo.Expiry) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error parsing CRL duration of %s", crlInfo.Expiry)} } @@ -2015,7 +2128,7 @@ func buildCRL(sc *storageContext, crlInfo *pki_backend.CrlConfig, forceNew bool, revokedCerts = revoked WRITE: - signingBundle, caErr := sc.fetchCAInfoByIssuerId(thisIssuerId, CRLSigningUsage) + signingBundle, caErr := sc.fetchCAInfoByIssuerId(thisIssuerId, issuing.CRLSigningUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -2051,7 +2164,7 @@ WRITE: return nil, errutil.InternalError{Err: fmt.Sprintf("error creating new CRL: %s", err)} } - writePath := "crls/" + identifier.String() + writePath := issuing.PathCrls + identifier.String() if thisIssuerId == legacyBundleShimID { // Ignore the CRL ID as it won't be persisted anyways; hard-code the // old legacy path and allow it to be updated.
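// A note on the parseutil swap in buildCRL above: unlike time.ParseDuration,
// parseutil.ParseDurationSecond also accepts bare integers as seconds, so
// both forms of expiry now parse (a minimal sketch):
//
//	d1, _ := parseutil.ParseDurationSecond("72h")    // 72 * time.Hour
//	d2, _ := parseutil.ParseDurationSecond("259200") // 259200 * time.Second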
@@ -2080,6 +2193,6 @@ WRITE: // shouldLocalPathsUseUnified assuming a legacy path for a CRL/OCSP request, does our // configuration say we should be returning the unified response or not -func shouldLocalPathsUseUnified(cfg *crlConfig) bool { +func shouldLocalPathsUseUnified(cfg *pki_backend.CrlConfig) bool { return cfg.UnifiedCRL && cfg.UnifiedCRLOnExistingPaths } diff --git a/builtin/logical/pki/defaultdirectorypolicytype_enumer.go b/builtin/logical/pki/defaultdirectorypolicytype_enumer.go new file mode 100644 index 000000000000..917225ff834a --- /dev/null +++ b/builtin/logical/pki/defaultdirectorypolicytype_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=DefaultDirectoryPolicyType"; DO NOT EDIT. + +package pki + +import ( + "fmt" +) + +const _DefaultDirectoryPolicyTypeName = "ForbidSignVerbatimRoleExternalPolicy" + +var _DefaultDirectoryPolicyTypeIndex = [...]uint8{0, 6, 18, 22, 36} + +func (i DefaultDirectoryPolicyType) String() string { + if i < 0 || i >= DefaultDirectoryPolicyType(len(_DefaultDirectoryPolicyTypeIndex)-1) { + return fmt.Sprintf("DefaultDirectoryPolicyType(%d)", i) + } + return _DefaultDirectoryPolicyTypeName[_DefaultDirectoryPolicyTypeIndex[i]:_DefaultDirectoryPolicyTypeIndex[i+1]] +} + +var _DefaultDirectoryPolicyTypeValues = []DefaultDirectoryPolicyType{0, 1, 2, 3} + +var _DefaultDirectoryPolicyTypeNameToValueMap = map[string]DefaultDirectoryPolicyType{ + _DefaultDirectoryPolicyTypeName[0:6]: 0, + _DefaultDirectoryPolicyTypeName[6:18]: 1, + _DefaultDirectoryPolicyTypeName[18:22]: 2, + _DefaultDirectoryPolicyTypeName[22:36]: 3, +} + +// DefaultDirectoryPolicyTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func DefaultDirectoryPolicyTypeString(s string) (DefaultDirectoryPolicyType, error) { + if val, ok := _DefaultDirectoryPolicyTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to DefaultDirectoryPolicyType values", s) +} + +// DefaultDirectoryPolicyTypeValues returns all values of the enum +func DefaultDirectoryPolicyTypeValues() []DefaultDirectoryPolicyType { + return _DefaultDirectoryPolicyTypeValues +} + +// IsADefaultDirectoryPolicyType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i DefaultDirectoryPolicyType) IsADefaultDirectoryPolicyType() bool { + for _, v := range _DefaultDirectoryPolicyTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/builtin/logical/pki/dnstest/server.go b/builtin/logical/pki/dnstest/server.go new file mode 100644 index 000000000000..751c0ae873fd --- /dev/null +++ b/builtin/logical/pki/dnstest/server.go @@ -0,0 +1,428 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package dnstest + +import ( + "context" + "fmt" + "net" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" +) + +type TestServer struct { + t *testing.T + ctx context.Context + log hclog.Logger + + runner *docker.Runner + network string + startup *docker.Service + + lock sync.Mutex + serial int + forwarders []string + domains []string + records map[string]map[string][]string // domain -> record -> value(s). 
+ + cleanup func() +} + +func SetupResolver(t *testing.T, domain string) *TestServer { + return SetupResolverOnNetwork(t, domain, "") +} + +func SetupResolverOnNetwork(t *testing.T, domain string, network string) *TestServer { + var ts TestServer + ts.t = t + ts.ctx = context.Background() + ts.domains = []string{domain} + ts.records = map[string]map[string][]string{} + ts.network = network + ts.log = hclog.L() + + ts.setupRunner(domain, network) + ts.startContainer(network) + ts.PushConfig() + + return &ts +} + +func (ts *TestServer) setupRunner(domain string, network string) { + var err error + ts.runner, err = docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "ubuntu/bind9", + ImageTag: "latest", + ContainerName: "bind9-dns-" + strings.ReplaceAll(domain, ".", "-"), + NetworkName: network, + Ports: []string{"53/udp"}, + // DNS container logging was disabled to reduce content within CI logs. + //LogConsumer: func(s string) { + // ts.log.Info(s) + //}, + }) + require.NoError(ts.t, err) +} + +func (ts *TestServer) startContainer(network string) { + connUpFunc := func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + // Perform a simple connection to this resolver, even though the + // default configuration doesn't do anything useful. + peer, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return nil, fmt.Errorf("failed to resolve peer: %v / %v: %w", host, port, err) + } + + conn, err := net.DialUDP("udp", nil, peer) + if err != nil { + return nil, fmt.Errorf("failed to dial peer: %v / %v / %v: %w", host, port, peer, err) + } + defer conn.Close() + + _, err = conn.Write([]byte("garbage-in")) + if err != nil { + return nil, fmt.Errorf("failed to write to peer: %v / %v / %v: %w", host, port, peer, err) + } + + // Connection worked. + return docker.NewServiceHostPort(host, port), nil + } + + result, _, err := ts.runner.StartNewService(ts.ctx, true, true, connUpFunc) + require.NoError(ts.t, err, "failed to start dns resolver for "+ts.domains[0]) + ts.startup = result + + if ts.startup.StartResult.RealIP == "" { + mapping, err := ts.runner.GetNetworkAndAddresses(ts.startup.Container.ID) + require.NoError(ts.t, err, "failed to fetch network addresses to correct missing real IP address") + if len(network) == 0 { + require.Equal(ts.t, 1, len(mapping), "expected exactly one network address") + for network = range mapping { + // Because mapping is a map of network name->ip, we need + // to use the above range's assignment to get the name, + // as there is no other way of getting the keys of a map. + } + } + require.Contains(ts.t, mapping, network, "expected network to be part of the mapping") + ts.startup.StartResult.RealIP = mapping[network] + } + + ts.log.Info(fmt.Sprintf("[dnsserv] Addresses of DNS resolver: local=%v / container=%v", ts.GetLocalAddr(), ts.GetRemoteAddr())) +} + +func (ts *TestServer) buildNamedConf() string { + forwarders := "\n" + if len(ts.forwarders) > 0 { + forwarders = "\tforwarders {\n" + for _, forwarder := range ts.forwarders { + forwarders += "\t\t" + forwarder + ";\n" + } + forwarders += "\t};\n" + } + + zones := "\n" + for _, domain := range ts.domains { + zones += fmt.Sprintf("zone \"%s\" {\n", domain) + zones += "\ttype primary;\n" + zones += fmt.Sprintf("\tfile \"%s.zone\";\n", domain) + zones += "\tallow-update {\n\t\tnone;\n\t};\n" + zones += "\tnotify no;\n" + zones += "};\n\n" + } + + // Reverse lookups are not handled as they're not presently necessary.
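// For a single zone with one forwarder configured, the config assembled
// below comes out roughly as follows (a sketch, assuming the hypothetical
// domain example.com and forwarder 8.8.8.8; real output uses tab indents):
//
//	options {
//	    directory "/var/cache/bind";
//	    dnssec-validation no;
//	    forwarders {
//	        8.8.8.8;
//	    };
//	};
//
//	zone "example.com" {
//	    type primary;
//	    file "example.com.zone";
//	    allow-update { none; };
//	    notify no;
//	};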
+ + cfg := `options { + directory "/var/cache/bind"; + + dnssec-validation no; + + ` + forwarders + ` +}; + +` + zones + + return cfg +} + +func (ts *TestServer) buildZoneFile(target string) string { + // One second TTL by default to allow quick refreshes. + zone := "$TTL 1;\n" + + ts.serial += 1 + zone += fmt.Sprintf("@\tIN\tSOA\tns.%v.\troot.%v.\t(\n", target, target) + zone += fmt.Sprintf("\t\t\t%d;\n\t\t\t1;\n\t\t\t1;\n\t\t\t2;\n\t\t\t1;\n\t\t\t)\n\n", ts.serial) + zone += fmt.Sprintf("@\tIN\tNS\tns%d.%v.\n", ts.serial, target) + zone += fmt.Sprintf("ns%d.%v.\tIN\tA\t%v\n", ts.serial, target, "127.0.0.1") + + for domain, records := range ts.records { + if !strings.HasSuffix(domain, target) { + continue + } + + for recordType, values := range records { + for _, value := range values { + zone += fmt.Sprintf("%s.\tIN\t%s\t%s\n", domain, recordType, value) + } + } + } + + return zone +} + +func (ts *TestServer) pushNamedConf() { + contents := docker.NewBuildContext() + cfgPath := "/etc/bind/named.conf.options" + namedCfg := ts.buildNamedConf() + contents[cfgPath] = docker.PathContentsFromString(namedCfg) + contents[cfgPath].SetOwners(0, 142) // root, bind + + ts.log.Info(fmt.Sprintf("Generated bind9 config (%s):\n%v\n", cfgPath, namedCfg)) + + err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents) + require.NoError(ts.t, err, "failed pushing updated named.conf.options to container") +} + +func (ts *TestServer) pushZoneFiles() { + contents := docker.NewBuildContext() + + for _, domain := range ts.domains { + path := "/var/cache/bind/" + domain + ".zone" + zoneFile := ts.buildZoneFile(domain) + contents[path] = docker.PathContentsFromString(zoneFile) + contents[path].SetOwners(0, 142) // root, bind + + ts.log.Info(fmt.Sprintf("Generated bind9 zone file for %v (%s):\n%v\n", domain, path, zoneFile)) + } + + err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents) + require.NoError(ts.t, err, "failed pushing updated zone files to container") +} + +func (ts *TestServer) PushConfig() { + ts.lock.Lock() + defer ts.lock.Unlock() + + _, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, []string{"rndc", "freeze"}) + require.NoError(ts.t, err, "failed to freeze DNS config") + + // There are two cases here: + // + // 1. We've added a new top-level domain name. Here, we want to make + // sure the new zone file is pushed before we push the reference + // to it. + // 2. We've just added a new record. Here, the order doesn't matter, but + // most likely the second push will be a no-op. + ts.pushZoneFiles() + ts.pushNamedConf() + + _, _, _, err = ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, []string{"rndc", "thaw"}) + require.NoError(ts.t, err, "failed to thaw DNS config") + + // Wait until our config has taken. + corehelpers.RetryUntil(ts.t, 15*time.Second, func() error { + // bind reloads based on file mtime, so touch the files before starting + // to make sure they have been updated more recently than when the + // last update was written. Then issue a new SIGHUP. + for _, domain := range ts.domains { + path := "/var/cache/bind/" + domain + ".zone" + touchCmd := []string{"touch", path} + + _, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, touchCmd) + if err != nil { + return fmt.Errorf("failed to update zone mtime: %w", err) + } + } + ts.runner.DockerAPI.ContainerKill(ts.ctx, ts.startup.Container.ID, "SIGHUP") + + // Connect to our bind resolver.
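// For context on the serial check below: buildZoneFile embeds ts.serial in
// the NS record name, so a reload is confirmed once the resolver returns
// ns<serial>.<domain>. A freshly built zone looks roughly like this sketch
// (assuming serial 2 and the hypothetical domain example.com):
//
//	$TTL 1;
//	@	IN	SOA	ns.example.com.	root.example.com.	( 2; 1; 1; 2; 1; )
//	@	IN	NS	ns2.example.com.
//	ns2.example.com.	IN	A	127.0.0.1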
+ resolver := &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, ts.GetLocalAddr()) + }, + } + + // last domain has the given serial number, which also appears in the + // NS record so we can fetch it via Go. + lastDomain := ts.domains[len(ts.domains)-1] + records, err := resolver.LookupNS(ts.ctx, lastDomain) + if err != nil { + return fmt.Errorf("failed to lookup NS record for %v: %w", lastDomain, err) + } + + if len(records) != 1 { + return fmt.Errorf("expected only 1 NS record for %v, got %v/%v", lastDomain, len(records), records) + } + + expectedNS := fmt.Sprintf("ns%d.%v.", ts.serial, lastDomain) + if records[0].Host != expectedNS { + return fmt.Errorf("expected to find NS %v, got %v indicating reload hadn't completed", expectedNS, records[0]) + } + + return nil + }) +} + +func (ts *TestServer) GetLocalAddr() string { + return ts.startup.Config.Address() +} + +func (ts *TestServer) GetRemoteAddr() string { + return fmt.Sprintf("%s:%d", ts.startup.StartResult.RealIP, 53) +} + +func (ts *TestServer) AddDomain(domain string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + for _, existing := range ts.domains { + if existing == domain { + return + } + } + + ts.domains = append(ts.domains, domain) +} + +func (ts *TestServer) AddRecord(domain string, record string, value string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + ts.t.Fatalf("cannot add record %v/%v :: [%v] -- no domain zone matching (%v)", record, domain, value, ts.domains) + } + + value = strings.TrimSpace(value) + if _, present := ts.records[domain]; !present { + ts.records[domain] = map[string][]string{} + } + + if values, present := ts.records[domain][record]; present { + for _, candidate := range values { + if candidate == value { + // Already present; skip adding. + return + } + } + } + + ts.records[domain][record] = append(ts.records[domain][record], value) +} + +func (ts *TestServer) RemoveRecord(domain string, record string, value string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + value = strings.TrimSpace(value) + if _, present := ts.records[domain]; !present { + // Not found. + return + } + + var remaining []string + if values, present := ts.records[domain][record]; present { + for _, candidate := range values { + if candidate != value { + remaining = append(remaining, candidate) + } + } + } + + ts.records[domain][record] = remaining +} + +func (ts *TestServer) RemoveRecordsOfTypeForDomain(domain string, record string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + if _, present := ts.records[domain]; !present { + // Not found. 
+ return + } + + delete(ts.records[domain], record) +} + +func (ts *TestServer) RemoveRecordsForDomain(domain string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + if _, present := ts.records[domain]; !present { + // Not found. + return + } + + ts.records[domain] = map[string][]string{} +} + +func (ts *TestServer) RemoveAllRecords() { + ts.lock.Lock() + defer ts.lock.Unlock() + + ts.records = map[string]map[string][]string{} +} + +func (ts *TestServer) Cleanup() { + if ts.cleanup != nil { + ts.cleanup() + } + if ts.startup != nil && ts.startup.Cleanup != nil { + ts.startup.Cleanup() + } +} diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index 9e6201d2c8fe..dfeec9df4bb3 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -1,11 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" ) @@ -16,6 +17,7 @@ const ( keyIdParam = "key_id" keyTypeParam = "key_type" keyBitsParam = "key_bits" + skidParam = "subject_key_id" ) // addIssueAndSignCommonFields adds fields common to both CA and non-CA issuing @@ -164,6 +166,13 @@ Any values are added with OID 0.9.2342.19200300.100.1.1.`, Name: "User ID(s)", }, } + fields["cert_metadata"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `User supplied metadata to store associated with this certificate's serial number, base64 encoded`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Certificate Metadata", + }, + } fields = addIssuerRefField(fields) @@ -323,9 +332,8 @@ is required. Ignored for other types.`, Type: framework.TypeInt, Default: 0, Description: `The number of bits to use. Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, +0 (universal default); with rsa key_type: 2048 (default), 3072, 4096 or 8192; +with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.`, DisplayAttrs: &framework.DisplayAttributes{ Value: 0, }, @@ -491,6 +499,23 @@ this removes ALL issuers within the mount (and is thus not desirable in most operational scenarios).`, } + fields["tidy_acme"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to enable tidying ACME accounts, +orders and authorizations. ACME orders are tidied (deleted) +safety_buffer after the certificate associated with them expires, +or after the order and relevant authorizations have expired if no +certificate was produced. Authorizations are tidied with the +corresponding order. + +When a valid ACME Account is at least acme_account_safety_buffer +old, and has no remaining orders associated with it, the account is +marked as revoked. 
After another acme_account_safety_buffer has
+passed from the revocation or deactivation date, a revoked or
+deactivated ACME account is deleted.`,
+		Default:     false,
+	}
+
 	fields["safety_buffer"] = &framework.FieldSchema{
 		Type: framework.TypeDurationSecond,
 		Description: `The amount of extra time that must have passed
@@ -509,6 +534,14 @@ Defaults to 8760 hours (1 year).`,
 		Default: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int
 	}
 
+	fields["acme_account_safety_buffer"] = &framework.FieldSchema{
+		Type: framework.TypeDurationSecond,
+		Description: `The amount of time that must pass after creation
+before an account with no orders is marked revoked, and the amount of
+time that must pass after revocation or deactivation before the account
+is deleted.`,
+		Default: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int
+	}
+
 	fields["pause_duration"] = &framework.FieldSchema{
 		Type: framework.TypeString,
 		Description: `The amount of time to wait between processing
@@ -521,23 +554,6 @@ greater period of time. By default this is zero seconds.`,
 		Default: "0s",
 	}
 
-	fields["maintain_stored_certificate_counts"] = &framework.FieldSchema{
-		Type: framework.TypeBool,
-		Description: `This configures whether stored certificates
-are counted upon initialization of the backend, and whether during
-normal operation, a running count of certificates stored is maintained.`,
-		Default: false,
-	}
-
-	fields["publish_stored_certificate_count_metrics"] = &framework.FieldSchema{
-		Type: framework.TypeBool,
-		Description: `This configures whether the stored certificate
-count is published to the metrics consumer. It does not affect if the
-stored certificate count is maintained, and if maintained, it will be
-available on the tidy-status endpoint.`,
-		Default: false,
-	}
-
 	fields["tidy_revocation_queue"] = &framework.FieldSchema{
 		Type: framework.TypeBool,
 		Description: `Set to true to remove stale revocation queue entries
@@ -563,5 +579,115 @@ the cross-cluster revoked certificate store. Only runs on the active
 primary node.`,
 	}
 
+	fields["tidy_cert_metadata"] = &framework.FieldSchema{
+		Type:        framework.TypeBool,
+		Description: `Set to true to enable tidying up certificate metadata`,
+	}
+
+	fields["tidy_cmpv2_nonce_store"] = &framework.FieldSchema{
+		Type:        framework.TypeBool,
+		Description: `Set to true to enable tidying up the CMPv2 nonce store`,
+	}
+
+	return fields
+}
+
+// getCsrSignVerbatimSchemaFields generates the entire list of schema fields we
+// need for CSR sign verbatim. This is also leveraged by ACME internally.
+func getCsrSignVerbatimSchemaFields() map[string]*framework.FieldSchema {
+	fields := map[string]*framework.FieldSchema{}
+	fields = addNonCACommonFields(fields)
+	fields = addSignVerbatimRoleFields(fields)
+
+	fields["csr"] = &framework.FieldSchema{
+		Type:    framework.TypeString,
+		Default: "",
+		Description: `PEM-format CSR to be signed. Values will be
+taken verbatim from the CSR, except for
+basic constraints.`,
+	}
+
+	return fields
+}
+
+// addSignVerbatimRoleFields provides the fields and their default values to be
+// used by anything that builds up a sign-verbatim type role, such as buildSignVerbatimRole.
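To see how defaults declared in these schema maps surface at request time, here is a minimal, self-contained sketch; the `key_usage` default shown is illustrative rather than a claim about the exact production default, and it assumes the same `sdk/framework` import path used elsewhere in this patch:

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/framework"
)

func main() {
	// A pared-down schema in the same shape the helpers above build up.
	fields := map[string]*framework.FieldSchema{
		"key_usage": {
			Type:    framework.TypeCommaStringSlice,
			Default: []string{"DigitalSignature", "KeyEncipherment"},
		},
	}

	// FieldData falls back to the schema default when the request omits
	// the parameter, which is how sign-verbatim defaults take effect.
	d := &framework.FieldData{Raw: map[string]interface{}{}, Schema: fields}
	fmt.Println(d.Get("key_usage")) // [DigitalSignature KeyEncipherment]
}
```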
+func addSignVerbatimRoleFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["key_usage"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: issuing.DefaultRoleKeyUsages, + Description: `A comma-separated string or list of key usages (not extended +key usages). Valid values can be found at +https://golang.org/pkg/crypto/x509/#KeyUsage +-- simply drop the "KeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list.`, + } + + fields["ext_key_usage"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: issuing.DefaultRoleEstKeyUsages, + Description: `A comma-separated string or list of extended key usages. Valid values can be found at +https://golang.org/pkg/crypto/x509/#ExtKeyUsage +-- simply drop the "ExtKeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list.`, + } + + fields["ext_key_usage_oids"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: issuing.DefaultRoleEstKeyUsageOids, + Description: `A comma-separated string or list of extended key usage oids.`, + } + + fields["signature_bits"] = &framework.FieldSchema{ + Type: framework.TypeInt, + Default: issuing.DefaultRoleSignatureBits, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: issuing.DefaultRoleSignatureBits, + }, + } + + fields["use_pss"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: issuing.DefaultRoleUsePss, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. Defaults to false.`, + } + + return fields +} + +func addCACertKeyUsage(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["key_usage"] = &framework.FieldSchema{ // Same Name as Leaf-Cert Field, and CA CSR Field, but Description and Default Differ + Type: framework.TypeCommaStringSlice, + Default: []string{"CertSign", "CRLSign"}, + Description: `This list of key usages (not extended key usages) will be +added to the existing set of key usages, CRL,CertSign, on +the generated certificate. Valid values can be found at +https://golang.org/pkg/crypto/x509/#KeyUsage -- simply drop +the "KeyUsage" part of the name. To use the issuer for +CMPv2, DigitalSignature must be set.`, + } + + return fields +} + +func addCaCsrKeyUsage(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["key_usage"] = &framework.FieldSchema{ // Same Name as Leaf-Cert, CA-Cert Field, but Description and Default Differ + Type: framework.TypeCommaStringSlice, + Default: []string{}, + Description: `Specifies key_usage to encode in the certificate signing +request. This is a comma-separated string or list of key +usages (not extended key usages). Valid values can be found +at https://golang.org/pkg/crypto/x509/#KeyUsage -- simply +drop the "KeyUsage" part of the name. 
If not set, key +usage will not appear on the CSR.`, + } + return fields } diff --git a/builtin/logical/pki/ifmodifiedreqtype_enumer.go b/builtin/logical/pki/ifmodifiedreqtype_enumer.go new file mode 100644 index 000000000000..b366fd825fc1 --- /dev/null +++ b/builtin/logical/pki/ifmodifiedreqtype_enumer.go @@ -0,0 +1,53 @@ +// Code generated by "enumer -type=ifModifiedReqType -trimprefix=ifModified"; DO NOT EDIT. + +package pki + +import ( + "fmt" +) + +const _ifModifiedReqTypeName = "UnknownCACRLDeltaCRLUnifiedCRLUnifiedDeltaCRL" + +var _ifModifiedReqTypeIndex = [...]uint8{0, 7, 9, 12, 20, 30, 45} + +func (i ifModifiedReqType) String() string { + if i < 0 || i >= ifModifiedReqType(len(_ifModifiedReqTypeIndex)-1) { + return fmt.Sprintf("ifModifiedReqType(%d)", i) + } + return _ifModifiedReqTypeName[_ifModifiedReqTypeIndex[i]:_ifModifiedReqTypeIndex[i+1]] +} + +var _ifModifiedReqTypeValues = []ifModifiedReqType{0, 1, 2, 3, 4, 5} + +var _ifModifiedReqTypeNameToValueMap = map[string]ifModifiedReqType{ + _ifModifiedReqTypeName[0:7]: 0, + _ifModifiedReqTypeName[7:9]: 1, + _ifModifiedReqTypeName[9:12]: 2, + _ifModifiedReqTypeName[12:20]: 3, + _ifModifiedReqTypeName[20:30]: 4, + _ifModifiedReqTypeName[30:45]: 5, +} + +// ifModifiedReqTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ifModifiedReqTypeString(s string) (ifModifiedReqType, error) { + if val, ok := _ifModifiedReqTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ifModifiedReqType values", s) +} + +// ifModifiedReqTypeValues returns all values of the enum +func ifModifiedReqTypeValues() []ifModifiedReqType { + return _ifModifiedReqTypeValues +} + +// IsAifModifiedReqType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ifModifiedReqType) IsAifModifiedReqType() bool { + for _, v := range _ifModifiedReqTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/builtin/logical/pki/integration_test.go b/builtin/logical/pki/integration_test.go index 0df673e2dcde..1c3da7fa3a1b 100644 --- a/builtin/logical/pki/integration_test.go +++ b/builtin/logical/pki/integration_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -14,9 +14,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + vaulthttp "github.com/hashicorp/vault/http" + vaultocsp "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" "github.com/stretchr/testify/require" ) @@ -36,7 +41,7 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { require.NotNil(t, resp, "got nil response from rotate root") require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - issuerId1 := resp.Data["issuer_id"].(issuerID) + issuerId1 := resp.Data["issuer_id"].(issuing.IssuerID) issuerName1 := resp.Data["issuer_name"] require.NotEmpty(t, issuerId1, "issuer id was empty on initial rotate root command") @@ -56,7 +61,7 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { require.NotNil(t, resp, "got nil response from rotate root") require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - issuerId2 := resp.Data["issuer_id"].(issuerID) + issuerId2 := resp.Data["issuer_id"].(issuing.IssuerID) issuerName2 := resp.Data["issuer_name"] require.NotEmpty(t, issuerId2, "issuer id was empty on second rotate root command") @@ -78,7 +83,7 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { require.NotNil(t, resp, "got nil response from rotate root") require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - issuerId3 := resp.Data["issuer_id"].(issuerID) + issuerId3 := resp.Data["issuer_id"].(issuing.IssuerID) issuerName3 := resp.Data["issuer_name"] require.NotEmpty(t, issuerId3, "issuer id was empty on third rotate root command") @@ -232,6 +237,8 @@ func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) rootCert := resp.Data["certificate"].(string) + schema.ValidateResponse(t, schema.GetResponseSchema(t, rootBackend.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) + // generate intermediate resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -293,6 +300,8 @@ func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { require.NoError(t, err, "failed setting up role example") require.NotNil(t, resp, "got nil response from setting up role example: %#v", resp) + schema.ValidateResponse(t, schema.GetResponseSchema(t, intBackend.Route("roles/example"), logical.UpdateOperation), resp, true) + // Issue cert resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -307,6 +316,8 @@ func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { require.NoError(t, err, "failed issuing a leaf cert from int ca") require.NotNil(t, resp, "got nil response issuing a leaf cert from int ca") require.False(t, resp.IsError(), "got an error issuing a leaf cert from int ca: %#v", resp) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, intBackend.Route("issue/example"), logical.UpdateOperation), resp, true) } func TestIntegration_CSRGeneration(t *testing.T) { @@ -425,7 +436,7 @@ func TestIntegration_AutoIssuer(t *testing.T) { "pem_bundle": certOne, }) requireSuccessNonNilResponse(t, resp, err) - issuerIdOneReimported := 
issuerID(resp.Data["imported_issuers"].([]string)[0]) + issuerIdOneReimported := issuing.IssuerID(resp.Data["imported_issuers"].([]string)[0]) resp, err = CBRead(b, s, "config/issuers") requireSuccessNonNilResponse(t, resp, err) @@ -469,11 +480,253 @@ func TestIntegration_AutoIssuer(t *testing.T) { require.Equal(t, issuerIdOneReimported, resp.Data["default"]) } -func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuerID, keyID) { +// TestLDAPAiaCrlUrls validates we can properly handle CRL urls that are ldap based. +func TestLDAPAiaCrlUrls(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + singleCore := cluster.Cores[0] + vault.TestWaitActive(t, singleCore.Core) + client := singleCore.Client + + mountPKIEndpoint(t, client, "pki") + + // Attempt multiple urls + crls := []string{ + "ldap://ldap.example.com/cn=example%20CA,dc=example,dc=com?certificateRevocationList;binary", + "ldap://ldap.example.com/cn=CA,dc=example,dc=com?authorityRevocationList;binary", + } + + _, err := client.Logical().Write("pki/config/urls", map[string]interface{}{ + "crl_distribution_points": crls, + }) + require.NoError(t, err) + + resp, err := client.Logical().Read("pki/config/urls") + require.NoError(t, err, "failed reading config/urls") + require.NotNil(t, resp, "resp was nil") + require.NotNil(t, resp.Data, "data within response was nil") + require.NotEmpty(t, resp.Data["crl_distribution_points"], "crl_distribution_points was nil within data") + require.Len(t, resp.Data["crl_distribution_points"], len(crls)) + + for _, crlVal := range crls { + require.Contains(t, resp.Data["crl_distribution_points"], crlVal) + } + + resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuerId := resp.Data["issuer_id"].(string) + + _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + "issuer_ref": rootIssuerId, + }) + require.NoError(t, err) + + resp, err = client.Logical().Write("pki/issue/example-root", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + + certPEM := resp.Data["certificate"].(string) + certBlock, _ := pem.Decode([]byte(certPEM)) + require.NotNil(t, certBlock) + cert, err := x509.ParseCertificate(certBlock.Bytes) + require.NoError(t, err) + + require.EqualValues(t, crls, cert.CRLDistributionPoints) +} + +func TestIntegrationOCSPClientWithPKI(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: 
api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err) + + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuerId := resp.Data["issuer_id"].(string) + + // Set URLs pointing to the issuer. + _, err = client.Logical().Write("pki/config/cluster", map[string]interface{}{ + "path": client.Address() + "/v1/pki", + "aia_path": client.Address() + "/v1/pki", + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/config/urls", map[string]interface{}{ + "enable_templating": true, + "crl_distribution_points": "{{cluster_aia_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_aia_path}}/ocsp", + }) + require.NoError(t, err) + + // Build an intermediate CA + resp, err = client.Logical().Write("pki/intermediate/generate/internal", map[string]interface{}{ + "common_name": "Int X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["csr"]) + intermediateCSR := resp.Data["csr"].(string) + + resp, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "20h", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + intermediateCert := resp.Data["certificate"] + + resp, err = client.Logical().Write("pki/intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["imported_issuers"]) + rawImportedIssuers := resp.Data["imported_issuers"].([]interface{}) + require.Equal(t, len(rawImportedIssuers), 1) + importedIssuer := rawImportedIssuers[0].(string) + require.NotEmpty(t, importedIssuer) + + // Set intermediate as default. + _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ + "default": importedIssuer, + }) + require.NoError(t, err) + + // Setup roles for root, intermediate. + _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + "issuer_ref": rootIssuerId, + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/roles/example-int", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue certs and validate them against OCSP. 
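+	// (The loop below issues a leaf certificate from each role, revokes it,
+	// and then expects OCSP verification of that leaf to fail with the
+	// revoked serial number in the error.)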
+ for _, path := range []string{"pki/issue/example-int", "pki/issue/example-root"} { + t.Logf("Validating against path: %v", path) + resp, err = client.Logical().Write(path, map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["issuing_ca"]) + require.NotEmpty(t, resp.Data["serial_number"]) + + certPEM := resp.Data["certificate"].(string) + certBlock, _ := pem.Decode([]byte(certPEM)) + require.NotNil(t, certBlock) + cert, err := x509.ParseCertificate(certBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, cert) + + issuerPEM := resp.Data["issuing_ca"].(string) + issuerBlock, _ := pem.Decode([]byte(issuerPEM)) + require.NotNil(t, issuerBlock) + issuer, err := x509.ParseCertificate(issuerBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, issuer) + + serialNumber := resp.Data["serial_number"].(string) + + testLogger := hclog.New(hclog.DefaultOptions) + + conf := &vaultocsp.VerifyConfig{ + OcspFailureMode: vaultocsp.FailOpenFalse, + ExtraCas: []*x509.Certificate{cluster.CACert}, + } + ocspClient := vaultocsp.New(func() hclog.Logger { + return testLogger + }, 10) + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialNumber, + }) + require.NoError(t, err) + + err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) + require.Error(t, err) + require.Contains(t, err.Error(), serialNumber, "Expected revoked serial number to appear in err") + } +} + +func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuing.IssuerID, issuing.KeyID) { return genTestRootCaWithIssuerName(t, b, s, "") } -func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuerID, keyID) { +func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuing.IssuerID, issuing.KeyID) { data := map[string]interface{}{ "common_name": "test.com", } @@ -491,8 +744,8 @@ func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, is require.NotNil(t, resp, "got nil response from generating root ca") require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) + issuerId := resp.Data["issuer_id"].(issuing.IssuerID) + keyId := resp.Data["key_id"].(issuing.KeyID) require.NotEmpty(t, issuerId, "returned issuer id was empty") require.NotEmpty(t, keyId, "returned key id was empty") diff --git a/builtin/logical/pki/issuing/aia.go b/builtin/logical/pki/issuing/aia.go new file mode 100644 index 000000000000..0f2e76b99f4a --- /dev/null +++ b/builtin/logical/pki/issuing/aia.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "fmt" + "net/url" + "strings" + "unicode/utf8" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ClusterConfigPath = "config/cluster" + +type AiaConfigEntry struct { + IssuingCertificates []string `json:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers"` + EnableTemplating bool `json:"enable_templating"` +} + +type ClusterConfigEntry struct { + Path string `json:"path"` + AIAPath string `json:"aia_path"` +} + +func GetAIAURLs(ctx context.Context, s logical.Storage, i *IssuerEntry) (*certutil.URLEntries, error) { + // Default to the per-issuer AIA URLs. + entries := i.AIAURIs + + // If none are set (either due to a nil entry or because no URLs have + // been provided), fall back to the global AIA URL config. + if entries == nil || (len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0) { + var err error + + entries, err = GetGlobalAIAURLs(ctx, s) + if err != nil { + return nil, err + } + } + + if entries == nil { + return &certutil.URLEntries{}, nil + } + + return ToURLEntries(ctx, s, i.ID, entries) +} + +func GetGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*AiaConfigEntry, error) { + entry, err := storage.Get(ctx, "urls") + if err != nil { + return nil, err + } + + entries := &AiaConfigEntry{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + EnableTemplating: false, + } + + if entry == nil { + return entries, nil + } + + if err := entry.DecodeJSON(entries); err != nil { + return nil, err + } + + return entries, nil +} + +func ToURLEntries(ctx context.Context, s logical.Storage, issuer IssuerID, c *AiaConfigEntry) (*certutil.URLEntries, error) { + if len(c.IssuingCertificates) == 0 && len(c.CRLDistributionPoints) == 0 && len(c.OCSPServers) == 0 { + return &certutil.URLEntries{}, nil + } + + result := certutil.URLEntries{ + IssuingCertificates: c.IssuingCertificates[:], + CRLDistributionPoints: c.CRLDistributionPoints[:], + OCSPServers: c.OCSPServers[:], + } + + if c.EnableTemplating { + cfg, err := GetClusterConfig(ctx, s) + if err != nil { + return nil, fmt.Errorf("error fetching cluster-local address config: %w", err) + } + + for name, source := range map[string]*[]string{ + "issuing_certificates": &result.IssuingCertificates, + "crl_distribution_points": &result.CRLDistributionPoints, + "ocsp_servers": &result.OCSPServers, + } { + templated := make([]string, len(*source)) + for index, uri := range *source { + if strings.Contains(uri, "{{cluster_path}}") && len(cfg.Path) == 0 { + return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (path)") + } + if strings.Contains(uri, "{{cluster_aia_path}}") && len(cfg.AIAPath) == 0 { + return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (aia_path)") + } + if strings.Contains(uri, "{{issuer_id}}") && len(issuer) == 0 { + // Elide issuer AIA info as we lack an issuer_id. 
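+				// (Without an issuer ID there is nothing to substitute for
+				// the {{issuer_id}} template below, so we fail rather than
+				// emit a literal placeholder.)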
+ return nil, fmt.Errorf("unable to template AIA URLs as we lack an issuer_id for this operation") + } + + uri = strings.ReplaceAll(uri, "{{cluster_path}}", cfg.Path) + uri = strings.ReplaceAll(uri, "{{cluster_aia_path}}", cfg.AIAPath) + uri = strings.ReplaceAll(uri, "{{issuer_id}}", issuer.String()) + templated[index] = uri + } + + if uri := ValidateURLs(templated); uri != "" { + return nil, fmt.Errorf("error validating templated %v; invalid URI: %v", name, uri) + } + + *source = templated + } + } + + return &result, nil +} + +func GetClusterConfig(ctx context.Context, s logical.Storage) (*ClusterConfigEntry, error) { + entry, err := s.Get(ctx, ClusterConfigPath) + if err != nil { + return nil, err + } + + var result ClusterConfigEntry + if entry == nil { + return &result, nil + } + + if err = entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func ValidateURLs(urls []string) string { + for _, curr := range urls { + if !isURL(curr) || strings.Contains(curr, "{{issuer_id}}") || strings.Contains(curr, "{{cluster_path}}") || strings.Contains(curr, "{{cluster_aia_path}}") { + return curr + } + } + + return "" +} + +const ( + maxURLRuneCount = 2083 + minURLRuneCount = 3 +) + +// IsURL checks if the string is an URL. +func isURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.ParseRequestURI(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return true +} diff --git a/builtin/logical/pki/issuing/config_issuer.go b/builtin/logical/pki/issuing/config_issuer.go new file mode 100644 index 000000000000..aa9e10ec739b --- /dev/null +++ b/builtin/logical/pki/issuing/config_issuer.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const StorageIssuerConfig = "config/issuers" + +type IssuerConfigEntry struct { + // This new fetchedDefault field allows us to detect if the default + // issuer was modified, in turn dispatching the timestamp updater + // if necessary. 
+ fetchedDefault IssuerID `json:"-"` + DefaultIssuerId IssuerID `json:"default"` + DefaultFollowsLatestIssuer bool `json:"default_follows_latest_issuer"` +} + +func GetIssuersConfig(ctx context.Context, s logical.Storage) (*IssuerConfigEntry, error) { + entry, err := s.Get(ctx, StorageIssuerConfig) + if err != nil { + return nil, err + } + + issuerConfig := &IssuerConfigEntry{} + if entry != nil { + if err := entry.DecodeJSON(issuerConfig); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode issuer configuration: %v", err)} + } + } + issuerConfig.fetchedDefault = issuerConfig.DefaultIssuerId + + return issuerConfig, nil +} + +func SetIssuersConfig(ctx context.Context, s logical.Storage, config *IssuerConfigEntry) error { + json, err := logical.StorageEntryJSON(StorageIssuerConfig, config) + if err != nil { + return err + } + + if err := s.Put(ctx, json); err != nil { + return err + } + + if err := changeDefaultIssuerTimestamps(ctx, s, config.fetchedDefault, config.DefaultIssuerId); err != nil { + return err + } + + return nil +} + +func changeDefaultIssuerTimestamps(ctx context.Context, s logical.Storage, oldDefault IssuerID, newDefault IssuerID) error { + if newDefault == oldDefault { + return nil + } + + now := time.Now().UTC() + + // When the default issuer changes, we need to modify four + // pieces of information: + // + // 1. The old default issuer's modification time, as it no + // longer works for the /cert/ca path. + // 2. The new default issuer's modification time, as it now + // works for the /cert/ca path. + // 3. & 4. Both issuer's CRLs, as they behave the same, under + // the /cert/crl path! + for _, thisId := range []IssuerID{oldDefault, newDefault} { + if len(thisId) == 0 { + continue + } + + // 1 & 2 above. + issuer, err := FetchIssuerById(ctx, s, thisId) + if err != nil { + // Due to the lack of transactions, if we deleted the default + // issuer (successfully), but the subsequent issuer config write + // (to clear the default issuer's old id) failed, we might have + // an inconsistent config. If we later hit this loop (and flush + // these timestamps again -- perhaps because the operator + // selected a new default), we'd have erred out here, because + // the since-deleted default issuer doesn't exist. In this case, + // skip the issuer instead of bailing. + err := fmt.Errorf("unable to update issuer (%v)'s modification time: error fetching issuer: %w", thisId, err) + if strings.Contains(err.Error(), "does not exist") { + hclog.L().Warn(err.Error()) + continue + } + + return err + } + + issuer.LastModified = now + err = WriteIssuer(ctx, s, issuer) + if err != nil { + return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %w", thisId, err) + } + } + + // Fetch and update the internalCRLConfigEntry (3&4). + cfg, err := GetLocalCRLConfig(ctx, s) + if err != nil { + return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %w", err) + } + + cfg.LastModified = now + cfg.DeltaLastModified = now + err = SetLocalCRLConfig(ctx, s, cfg) + if err != nil { + return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %w", err) + } + + return nil +} diff --git a/builtin/logical/pki/issuing/config_key.go b/builtin/logical/pki/issuing/config_key.go new file mode 100644 index 000000000000..b0526a25c6fc --- /dev/null +++ b/builtin/logical/pki/issuing/config_key.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	StorageKeyConfig = "config/keys"
+)
+
+type KeyConfigEntry struct {
+	DefaultKeyId KeyID `json:"default"`
+}
+
+func SetKeysConfig(ctx context.Context, s logical.Storage, config *KeyConfigEntry) error {
+	json, err := logical.StorageEntryJSON(StorageKeyConfig, config)
+	if err != nil {
+		return err
+	}
+
+	return s.Put(ctx, json)
+}
+
+func GetKeysConfig(ctx context.Context, s logical.Storage) (*KeyConfigEntry, error) {
+	entry, err := s.Get(ctx, StorageKeyConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	keyConfig := &KeyConfigEntry{}
+	if entry != nil {
+		if err := entry.DecodeJSON(keyConfig); err != nil {
+			return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode key configuration: %v", err)}
+		}
+	}
+
+	return keyConfig, nil
+}
diff --git a/builtin/logical/pki/issuing/config_revocation.go b/builtin/logical/pki/issuing/config_revocation.go
new file mode 100644
index 000000000000..b38be4b89282
--- /dev/null
+++ b/builtin/logical/pki/issuing/config_revocation.go
@@ -0,0 +1,190 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	StorageLocalCRLConfig   = "crls/config"
+	StorageUnifiedCRLConfig = "unified-crls/config"
+)
+
+type InternalCRLConfigEntry struct {
+	IssuerIDCRLMap        map[IssuerID]CrlID  `json:"issuer_id_crl_map"`
+	CRLNumberMap          map[CrlID]int64     `json:"crl_number_map"`
+	LastCompleteNumberMap map[CrlID]int64     `json:"last_complete_number_map"`
+	CRLExpirationMap      map[CrlID]time.Time `json:"crl_expiration_map"`
+	LastModified          time.Time           `json:"last_modified"`
+	DeltaLastModified     time.Time           `json:"delta_last_modified"`
+	UseGlobalQueue        bool                `json:"cross_cluster_revocation"`
+}
+
+type CrlID string
+
+func (p CrlID) String() string {
+	return string(p)
+}
+
+func GetLocalCRLConfig(ctx context.Context, s logical.Storage) (*InternalCRLConfigEntry, error) {
+	return _getInternalCRLConfig(ctx, s, StorageLocalCRLConfig)
+}
+
+func GetUnifiedCRLConfig(ctx context.Context, s logical.Storage) (*InternalCRLConfigEntry, error) {
+	return _getInternalCRLConfig(ctx, s, StorageUnifiedCRLConfig)
+}
+
+func _getInternalCRLConfig(ctx context.Context, s logical.Storage, path string) (*InternalCRLConfigEntry, error) {
+	entry, err := s.Get(ctx, path)
+	if err != nil {
+		return nil, err
+	}
+
+	mapping := &InternalCRLConfigEntry{}
+	if entry != nil {
+		if err := entry.DecodeJSON(mapping); err != nil {
+			return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode cluster-local CRL configuration: %v", err)}
+		}
+	}
+
+	if len(mapping.IssuerIDCRLMap) == 0 {
+		mapping.IssuerIDCRLMap = make(map[IssuerID]CrlID)
+	}
+
+	if len(mapping.CRLNumberMap) == 0 {
+		mapping.CRLNumberMap = make(map[CrlID]int64)
+	}
+
+	if len(mapping.LastCompleteNumberMap) == 0 {
+		mapping.LastCompleteNumberMap = make(map[CrlID]int64)
+
+		// Since this might not exist on migration, we want to guess at what
+		// the last full CRL number was. This was likely the last value from
+		// CRLNumberMap if it existed, since we're just adding the mapping
+		// here in this block.
+		//
+		// After the next full CRL build, we will have set this value
+		// correctly, so it doesn't really matter in the long term if
+		// we're off here.
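+		// (For example, if CRLNumberMap says the next CRL to build is
+		// number 5, the last complete CRL was most likely number 4, hence
+		// the decrement below.)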
+ for id, number := range mapping.CRLNumberMap { + // Decrement by one, since CRLNumberMap is the future number, + // not the last built number. + mapping.LastCompleteNumberMap[id] = number - 1 + } + } + + if len(mapping.CRLExpirationMap) == 0 { + mapping.CRLExpirationMap = make(map[CrlID]time.Time) + } + + return mapping, nil +} + +func SetLocalCRLConfig(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry) error { + return _setInternalCRLConfig(ctx, s, mapping, StorageLocalCRLConfig) +} + +func SetUnifiedCRLConfig(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry) error { + return _setInternalCRLConfig(ctx, s, mapping, StorageUnifiedCRLConfig) +} + +func _setInternalCRLConfig(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry, path string) error { + if err := _cleanupInternalCRLMapping(ctx, s, mapping, path); err != nil { + return fmt.Errorf("failed to clean up internal CRL mapping: %w", err) + } + + json, err := logical.StorageEntryJSON(path, mapping) + if err != nil { + return err + } + + return s.Put(ctx, json) +} + +func _cleanupInternalCRLMapping(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry, path string) error { + // Track which CRL IDs are presently referred to by issuers; any other CRL + // IDs are subject to cleanup. + // + // Unused IDs both need to be removed from this map (cleaning up the size + // of this storage entry) but also the full CRLs removed from disk. + presentMap := make(map[CrlID]bool) + for _, id := range mapping.IssuerIDCRLMap { + presentMap[id] = true + } + + // Identify which CRL IDs exist and are candidates for removal; + // theoretically these three maps should be in sync, but were added + // at different times. + toRemove := make(map[CrlID]bool) + for id := range mapping.CRLNumberMap { + if !presentMap[id] { + toRemove[id] = true + } + } + for id := range mapping.LastCompleteNumberMap { + if !presentMap[id] { + toRemove[id] = true + } + } + for id := range mapping.CRLExpirationMap { + if !presentMap[id] { + toRemove[id] = true + } + } + + // Depending on which path we're writing this config to, we need to + // remove CRLs from the relevant folder too. + isLocal := path == StorageLocalCRLConfig + baseCRLPath := PathCrls + if !isLocal { + baseCRLPath = "unified-crls/" + } + + for id := range toRemove { + // Clean up space in this mapping... + delete(mapping.CRLNumberMap, id) + delete(mapping.LastCompleteNumberMap, id) + delete(mapping.CRLExpirationMap, id) + + // And clean up space on disk from the fat CRL mapping. + crlPath := baseCRLPath + string(id) + deltaCRLPath := crlPath + "-delta" + if err := s.Delete(ctx, crlPath); err != nil { + return fmt.Errorf("failed to delete unreferenced CRL %v: %w", id, err) + } + if err := s.Delete(ctx, deltaCRLPath); err != nil { + return fmt.Errorf("failed to delete unreferenced delta CRL %v: %w", id, err) + } + } + + // Lastly, some CRLs could've been partially removed from the map but + // not from disk. Check to see if we have any dangling CRLs and remove + // them too. 
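+	// (A dangling CRL is one present on disk under baseCRLPath but absent
+	// from every issuer mapping; it is safe to delete because no issuer
+	// can reference it any longer.)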
+	list, err := s.List(ctx, baseCRLPath)
+	if err != nil {
+		return fmt.Errorf("failed listing all CRLs: %w", err)
+	}
+	for _, crl := range list {
+		if crl == "config" || strings.HasSuffix(crl, "/") {
+			continue
+		}
+
+		if presentMap[CrlID(crl)] {
+			continue
+		}
+
+		if err := s.Delete(ctx, baseCRLPath+"/"+crl); err != nil {
+			return fmt.Errorf("failed cleaning up orphaned CRL %v: %w", crl, err)
+		}
+	}
+
+	return nil
+}
diff --git a/builtin/logical/pki/issuing/context.go b/builtin/logical/pki/issuing/context.go
new file mode 100644
index 000000000000..daa08a9ee70f
--- /dev/null
+++ b/builtin/logical/pki/issuing/context.go
@@ -0,0 +1,22 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import "context"
+
+// IssuerRoleContext combines in a single struct an issuer and a role that we
+// should leverage to issue a certificate, along with the request context.
+type IssuerRoleContext struct {
+	context.Context
+	Role   *RoleEntry
+	Issuer *IssuerEntry
+}
+
+func NewIssuerRoleContext(ctx context.Context, issuer *IssuerEntry, role *RoleEntry) IssuerRoleContext {
+	return IssuerRoleContext{
+		Context: ctx,
+		Role:    role,
+		Issuer:  issuer,
+	}
+}
diff --git a/builtin/logical/pki/issuing/issue_common.go b/builtin/logical/pki/issuing/issue_common.go
new file mode 100644
index 000000000000..b1a67e2feb33
--- /dev/null
+++ b/builtin/logical/pki/issuing/issue_common.go
@@ -0,0 +1,1035 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/hex"
+	"fmt"
+	"net"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/builtin/logical/pki/parsing"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/ryanuber/go-glob"
+	"golang.org/x/net/idna"
+)
+
+const (
+	PathCerts        = "certs/"
+	PathCertMetadata = "cert-metadata/"
+	PathCrls         = "crls/"
+)
+
+var (
+	// labelRegex is a single label from a valid domain name and was extracted
+	// from hostnameRegex below for use in leftWildLabelRegex, without any
+	// label separators (`.`).
+	labelRegex = `([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])`
+
+	// A note on hostnameRegex: although we set the StrictDomainName option
+	// when doing the idna conversion, this appears to only affect output, not
+	// input, so it will allow e.g. host^123.example.com straight through. So
+	// we still need to use this to check the output.
+	hostnameRegex = regexp.MustCompile(`^(\*\.)?(` + labelRegex + `\.)*` + labelRegex + `\.?$`)
+
+	// Left Wildcard Label Regex is equivalent to a single domain label
+	// component from hostnameRegex above, but with additional wildcard
+	// characters added. There are four possibilities here:
+	//
+	//  1. Entire label is a wildcard,
+	//  2. Wildcard exists at the start,
+	//  3. Wildcard exists at the end,
+	//  4. Wildcard exists in the middle.
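+	//
+	// For example, against a single left-most label, "*", "*foo", "foo*",
+	// and "f*o" are each accepted by leftWildLabelRegex below, while
+	// "f*o*" is not, since only one wildcard character is permitted.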
+ allWildRegex = `\*` + startWildRegex = `\*` + labelRegex + endWildRegex = labelRegex + `\*` + middleWildRegex = labelRegex + `\*` + labelRegex + leftWildLabelRegex = regexp.MustCompile(`^(` + allWildRegex + `|` + startWildRegex + `|` + endWildRegex + `|` + middleWildRegex + `)$`) +) + +type EntityInfo struct { + DisplayName string + EntityID string +} + +type CertificateCounter interface { + IsInitialized() bool + IncrementTotalCertificatesCount(certsCounted bool, newSerial string) + IncrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) +} + +func NewEntityInfoFromReq(req *logical.Request) EntityInfo { + if req == nil { + return EntityInfo{} + } + return EntityInfo{ + DisplayName: req.DisplayName, + EntityID: req.EntityID, + } +} + +type CreationBundleInput interface { + CertNotAfterInput + GetCommonName() string + GetSerialNumber() string + GetExcludeCnFromSans() bool + GetOptionalAltNames() (interface{}, bool) + GetOtherSans() []string + GetIpSans() []string + GetURISans() []string + GetOptionalSkid() (interface{}, bool) + IsUserIdInSchema() (interface{}, bool) + GetUserIds() []string + IgnoreCSRSignature() bool +} + +// GenerateCreationBundle is a shared function that reads parameters supplied +// from the various endpoints and generates a CreationParameters with the +// parameters that can be used to issue or sign +func GenerateCreationBundle(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, cb CreationBundleInput, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { + // Read in names -- CN, DNS and email addresses + var cn string + var ridSerialNumber string + var warnings []string + dnsNames := []string{} + emailAddresses := []string{} + { + if csr != nil && role.UseCSRCommonName { + cn = csr.Subject.CommonName + } + if cn == "" { + cn = cb.GetCommonName() + if cn == "" && role.RequireCN { + return nil, nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} + } + } + + ridSerialNumber = cb.GetSerialNumber() + + // only take serial number from CSR if one was not supplied via API + if ridSerialNumber == "" && csr != nil { + ridSerialNumber = csr.Subject.SerialNumber + } + + if csr != nil && role.UseCSRSANs { + dnsNames = csr.DNSNames + emailAddresses = csr.EmailAddresses + } + + if cn != "" && !cb.GetExcludeCnFromSans() { + if strings.Contains(cn, "@") { + // Note: emails are not disallowed if the role's email protection + // flag is false, because they may well be included for + // informational purposes; it is up to the verifying party to + // ensure that email addresses in a subject alternate name can be + // used for the purpose for which they are presented + emailAddresses = append(emailAddresses, cn) + } else { + // Only add to dnsNames if it's actually a DNS name but convert + // idn first + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToASCII(cn) + if err != nil { + return nil, nil, errutil.UserError{Err: err.Error()} + } + if hostnameRegex.MatchString(converted) { + dnsNames = append(dnsNames, converted) + } + } + } + + if csr == nil || !role.UseCSRSANs { + cnAltRaw, ok := cb.GetOptionalAltNames() + if ok { + cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",") + for _, v := range cnAlt { + if strings.Contains(v, "@") { + emailAddresses = append(emailAddresses, v) + } else { + // Only add to dnsNames if 
it's actually a DNS name, but
+					// convert idn first
+					p := idna.New(
+						idna.StrictDomainName(true),
+						idna.VerifyDNSLength(true),
+					)
+					converted, err := p.ToASCII(v)
+					if err != nil {
+						return nil, nil, errutil.UserError{Err: err.Error()}
+					}
+					if hostnameRegex.MatchString(converted) {
+						dnsNames = append(dnsNames, converted)
+					}
+				}
+			}
+		}
+	}
+
+	// Check the CN. This ensures that the CN is checked even if it's
+	// excluded from SANs.
+	if cn != "" {
+		badName := ValidateCommonName(b, role, entityInfo, cn)
+		if len(badName) != 0 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"common name %s not allowed by this role", badName)}
+		}
+	}
+
+	if ridSerialNumber != "" {
+		badName := ValidateSerialNumber(role, ridSerialNumber)
+		if len(badName) != 0 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"serial_number %s not allowed by this role", badName)}
+		}
+	}
+
+	// Check for bad email and/or DNS names
+	badName := ValidateNames(b, role, entityInfo, dnsNames)
+	if len(badName) != 0 {
+		return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+			"subject alternate name %s not allowed by this role", badName)}
+	}
+
+	badName = ValidateNames(b, role, entityInfo, emailAddresses)
+	if len(badName) != 0 {
+		return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+			"email address %s not allowed by this role", badName)}
+	}
+	}
+
+	// otherSANsInput has the same format as the other_sans HTTP param in the
+	// Vault PKI API: it is a list of strings of the form <oid>;<type>:<value>,
+	// where <type> must be UTF8/UTF-8.
+	var otherSANsInput []string
+	// otherSANs is the output of parseOtherSANs(otherSANsInput): its keys are
+	// the <oid> value, its values are of the form [<type>, <value>]
+	var otherSANs map[string][]string
+	if sans := cb.GetOtherSans(); len(sans) > 0 {
+		otherSANsInput = sans
+	}
+	if role.UseCSRSANs && csr != nil && len(csr.Extensions) > 0 {
+		others, err := certutil.GetOtherSANsFromX509Extensions(csr.Extensions)
+		if err != nil {
+			return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()}
+		}
+		for _, other := range others {
+			otherSANsInput = append(otherSANsInput, other.String())
+		}
+	}
+	if len(otherSANsInput) > 0 {
+		requested, err := ParseOtherSANs(otherSANsInput)
+		if err != nil {
+			return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()}
+		}
+		badOID, badName, err := ValidateOtherSANs(role, requested)
+		switch {
+		case err != nil:
+			return nil, nil, errutil.UserError{Err: err.Error()}
+		case len(badName) > 0:
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"other SAN %s not allowed for OID %s by this role", badName, badOID)}
+		case len(badOID) > 0:
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"other SAN OID %s not allowed by this role", badOID)}
+		default:
+			otherSANs = requested
+		}
+	}
+
+	// Get and verify any IP SANs
+	ipAddresses := []net.IP{}
+	{
+		if csr != nil && role.UseCSRSANs {
+			if len(csr.IPAddresses) > 0 {
+				if !role.AllowIPSANs {
+					return nil, nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but some were provided via CSR"}
+				}
+				ipAddresses = csr.IPAddresses
+			}
+		} else {
+			ipAlt := cb.GetIpSans()
+			if len(ipAlt) > 0 {
+				if !role.AllowIPSANs {
+					return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+						"IP Subject Alternative Names are not allowed in this role, but %s was provided", ipAlt)}
+				}
+				for _, v := range ipAlt {
+					parsedIP := net.ParseIP(v)
+					if parsedIP == nil {
+						return nil, nil, errutil.UserError{Err: fmt.Sprintf(
"the value %q is not a valid IP address", v)} + } + ipAddresses = append(ipAddresses, parsedIP) + } + } + } + } + + URIs := []*url.URL{} + { + if csr != nil && role.UseCSRSANs { + if len(csr.URIs) > 0 { + if len(role.AllowedURISANs) == 0 { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names are not allowed in this role, but were provided via CSR", + } + } + + // validate uri sans + for _, uri := range csr.URIs { + valid := ValidateURISAN(b, role, entityInfo, uri.String()) + if !valid { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names were provided via CSR which are not valid for this role", + } + } + + URIs = append(URIs, uri) + } + } + } else { + uriAlt := cb.GetURISans() + if len(uriAlt) > 0 { + if len(role.AllowedURISANs) == 0 { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names are not allowed in this role, but were provided via the API", + } + } + + for _, uri := range uriAlt { + valid := ValidateURISAN(b, role, entityInfo, uri) + if !valid { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names were provided via the API which are not valid for this role", + } + } + + parsedURI, err := url.Parse(uri) + if parsedURI == nil || err != nil { + return nil, nil, errutil.UserError{ + Err: fmt.Sprintf( + "the provided URI Subject Alternative Name %q is not a valid URI", uri), + } + } + + URIs = append(URIs, parsedURI) + } + } + } + } + + // Most of these could also be RemoveDuplicateStable, or even + // leave duplicates in, but OU is the one most likely to be duplicated. + subject := pkix.Name{ + CommonName: cn, + SerialNumber: ridSerialNumber, + Country: strutil.RemoveDuplicatesStable(role.Country, false), + Organization: strutil.RemoveDuplicatesStable(role.Organization, false), + OrganizationalUnit: strutil.RemoveDuplicatesStable(role.OU, false), + Locality: strutil.RemoveDuplicatesStable(role.Locality, false), + Province: strutil.RemoveDuplicatesStable(role.Province, false), + StreetAddress: strutil.RemoveDuplicatesStable(role.StreetAddress, false), + PostalCode: strutil.RemoveDuplicatesStable(role.PostalCode, false), + } + + // Get the TTL and verify it against the max allowed + notAfter, ttlWarnings, err := GetCertificateNotAfter(b, role, cb, caSign) + if err != nil { + return nil, warnings, err + } + warnings = append(warnings, ttlWarnings...) + + // Parse SKID from the request for cross-signing. + var skid []byte + { + if rawSKIDValue, ok := cb.GetOptionalSkid(); ok { + // Handle removing common separators to make copy/paste from tool + // output easier. Chromium uses space, OpenSSL uses colons, and at + // one point, Vault had preferred dash as a separator for hex + // strings. + var err error + skidValue := rawSKIDValue.(string) + for _, separator := range []string{":", "-", " "} { + skidValue = strings.ReplaceAll(skidValue, separator, "") + } + + skid, err = hex.DecodeString(skidValue) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("cannot parse requested SKID value as hex: %v", err)} + } + } + } + + // Add UserIDs into the Subject, if the request type supports it. + if _, present := cb.IsUserIdInSchema(); present { + rawUserIDs := cb.GetUserIds() + + // Only take UserIDs from CSR if one was not supplied via API. 
+ if len(rawUserIDs) == 0 && csr != nil { + for _, attr := range csr.Subject.Names { + if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { + switch aValue := attr.Value.(type) { + case string: + rawUserIDs = append(rawUserIDs, aValue) + case []byte: + rawUserIDs = append(rawUserIDs, string(aValue)) + default: + return nil, nil, errutil.UserError{Err: "unknown type for user_id attribute in CSR's Subject"} + } + } + } + } + + // Check for bad userIDs and add to the subject. + if len(rawUserIDs) > 0 { + for _, value := range rawUserIDs { + if !ValidateUserId(role, value) { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("user_id %v is not allowed by this role", value)} + } + + subject.ExtraNames = append(subject.ExtraNames, pkix.AttributeTypeAndValue{ + Type: certutil.SubjectPilotUserIDAttributeOID, + Value: value, + }) + } + } + } + + creation := &certutil.CreationBundle{ + Params: &certutil.CreationParameters{ + Subject: subject, + DNSNames: strutil.RemoveDuplicates(dnsNames, false), + EmailAddresses: strutil.RemoveDuplicates(emailAddresses, false), + IPAddresses: ipAddresses, + URIs: URIs, + OtherSANs: otherSANs, + KeyType: role.KeyType, + KeyBits: role.KeyBits, + SignatureBits: role.SignatureBits, + UsePSS: role.UsePSS, + NotAfter: notAfter, + KeyUsage: x509.KeyUsage(parsing.ParseKeyUsages(role.KeyUsage)), + ExtKeyUsage: ParseExtKeyUsagesFromRole(role), + ExtKeyUsageOIDs: role.ExtKeyUsageOIDs, + PolicyIdentifiers: role.PolicyIdentifiers, + BasicConstraintsValidForNonCA: role.BasicConstraintsValidForNonCA, + NotBeforeDuration: role.NotBeforeDuration, + ForceAppendCaChain: caSign != nil, + SKID: skid, + IgnoreCSRSignature: cb.IgnoreCSRSignature(), + }, + SigningBundle: caSign, + CSR: csr, + } + + // Don't deal with URLs or max path length if it's self-signed, as these + // normally come from the signing bundle + if caSign == nil { + return creation, warnings, nil + } + + // This will have been read in from the getGlobalAIAURLs function + creation.Params.URLs = caSign.URLs + + // If the max path length in the role is not nil, it was specified at + // generation time with the max_path_length parameter; otherwise derive it + // from the signing certificate + if role.MaxPathLength != nil { + creation.Params.MaxPathLength = *role.MaxPathLength + } else { + switch { + case caSign.Certificate.MaxPathLen < 0: + creation.Params.MaxPathLength = -1 + case caSign.Certificate.MaxPathLen == 0 && + caSign.Certificate.MaxPathLenZero: + // The signing function will ensure that we do not issue a CA cert + creation.Params.MaxPathLength = 0 + default: + // If this takes it to zero, we handle this case later if + // necessary + creation.Params.MaxPathLength = caSign.Certificate.MaxPathLen - 1 + } + } + + return creation, warnings, nil +} + +// Given a set of requested names for a certificate, verifies that all of them +// match the various toggles set in the role for controlling issuance. +// If one does not pass, it is returned in the string argument. +func ValidateNames(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, names []string) string { + for _, name := range names { + // Previously, reducedName was called sanitizedName but this made + // little sense under the previous interpretation of wildcards, + // leading to two bugs in this implementation. 
We presently call it
+		// "reduced" to indicate that it is still untrusted input (potentially
+		// different from the bare Common Name entry we're validating); it
+		// might have been modified, such as by the removal of wildcard labels
+		// or the email prefix.
+		reducedName := name
+		emailDomain := reducedName
+		wildcardLabel := ""
+		isEmail := false
+		isWildcard := false
+
+		// If it has an @, assume it is an email address and separate out the
+		// user from the hostname portion so that we can act on the hostname.
+		// Note that this matches behavior from the alt_names parameter. If it
+		// ends up being problematic for users, I guess that could be separated
+		// into dns_names and email_names in the future to be explicit, but I
+		// don't think this is likely.
+		if strings.Contains(reducedName, "@") {
+			splitEmail := strings.Split(reducedName, "@")
+			if len(splitEmail) != 2 {
+				return name
+			}
+			reducedName = splitEmail[1]
+			emailDomain = splitEmail[1]
+			isEmail = true
+		}
+
+		if IsWildcardDomain(reducedName) {
+			// Regardless of later rejections below, this common name contains
+			// a wildcard character and is thus technically a wildcard name.
+			isWildcard = true
+
+			// Additionally, if AllowWildcardCertificates is explicitly
+			// forbidden, it takes precedence over AllowAnyName, thus we should
+			// reject the name now.
+			//
+			// We expect the role to have been correctly migrated but guard for
+			// safety.
+			if role.AllowWildcardCertificates != nil && !*role.AllowWildcardCertificates {
+				return name
+			}
+
+			// Check that this domain is well-formatted per RFC 6125.
+			var err error
+			wildcardLabel, reducedName, err = ValidateWildcardDomain(reducedName)
+			if err != nil {
+				return name
+			}
+		}
+
+		// Email addresses using wildcard domain names do not make sense
+		// in a Common Name field.
+		if isEmail && isWildcard {
+			return name
+		}
+
+		// AllowAnyName is checked after this because EnforceHostnames still
+		// applies when allowing any name. Also, we check the reduced name to
+		// ensure that we are not either checking a full email address or a
+		// wildcard prefix.
+		if role.EnforceHostnames {
+			if reducedName != "" {
+				// See the note in ValidateWildcardDomain about splitLabels
+				// having only one segment and setting reducedName to the
+				// empty string.
+				p := idna.New(
+					idna.StrictDomainName(true),
+					idna.VerifyDNSLength(true),
+				)
+				converted, err := p.ToASCII(reducedName)
+				if err != nil {
+					return name
+				}
+				if !hostnameRegex.MatchString(converted) {
+					return name
+				}
+			}
+
+			// When a wildcard is specified, we additionally need to validate
+			// the label with the wildcard is correctly formed.
+			if isWildcard && !leftWildLabelRegex.MatchString(wildcardLabel) {
+				return name
+			}
+		}
+
+		// Self-explanatory, but validations from EnforceHostnames and
+		// AllowWildcardCertificates take precedence.
+		if role.AllowAnyName {
+			continue
+		}
+
+		// The following blocks all work the same basic way:
+		// 1) If a role allows a certain class of base (localhost, token
+		//    display name, role-configured domains), perform further tests
+		//
+		// 2) If there is a perfect match on either the sanitized name or it's an
+		//    email address with a perfect match on the hostname portion, allow it
+		//
+		// 3) If subdomains are allowed, we check based on the sanitized name;
+		//    note that if not a wildcard, it will be equivalent to the email domain
+		//    for email checks, and we already checked above for both a wildcard
+		//    and email address being present in the same name
+		//    3a) First we check for a non-wildcard subdomain, as in
+		//        <subdomain>.<allowed-domain>
+ // 3b) Then we check if it's a wildcard and the base domain is a match + // + // Variances are noted in-line + + if role.AllowLocalhost { + if reducedName == "localhost" || + reducedName == "localdomain" || + (isEmail && emailDomain == "localhost") || + (isEmail && emailDomain == "localdomain") { + continue + } + + if role.AllowSubdomains { + // It is possible, if unlikely, to have a subdomain of "localhost" + if strings.HasSuffix(reducedName, ".localhost") || + (isWildcard && reducedName == "localhost") { + continue + } + + // A subdomain of "localdomain" is also not entirely uncommon + if strings.HasSuffix(reducedName, ".localdomain") || + (isWildcard && reducedName == "localdomain") { + continue + } + } + } + + if role.AllowTokenDisplayName { + if name == entityInfo.DisplayName { + continue + } + + if role.AllowSubdomains { + if isEmail { + // If it's an email address, we need to parse the token + // display name in order to do a proper comparison of the + // subdomain + if strings.Contains(entityInfo.DisplayName, "@") { + splitDisplay := strings.Split(entityInfo.DisplayName, "@") + if len(splitDisplay) == 2 { + // Compare the sanitized name against the hostname + // portion of the email address in the broken + // display name + if strings.HasSuffix(reducedName, "."+splitDisplay[1]) { + continue + } + } + } + } + + if strings.HasSuffix(reducedName, "."+entityInfo.DisplayName) || + (isWildcard && reducedName == entityInfo.DisplayName) { + continue + } + } + } + + if len(role.AllowedDomains) > 0 { + valid := false + for _, currDomain := range role.AllowedDomains { + // If there is, say, a trailing comma, ignore it + if currDomain == "" { + continue + } + + if role.AllowedDomainsTemplate { + isTemplate, _ := framework.ValidateIdentityTemplate(currDomain) + if isTemplate && entityInfo.EntityID != "" { + tmpCurrDomain, err := framework.PopulateIdentityTemplate(currDomain, entityInfo.EntityID, b) + if err != nil { + continue + } + + currDomain = tmpCurrDomain + } + } + + // First, allow an exact match of the base domain if that role flag + // is enabled + if role.AllowBareDomains && + (strings.EqualFold(name, currDomain) || + (isEmail && strings.EqualFold(emailDomain, currDomain))) { + valid = true + break + } + + if role.AllowSubdomains { + if strings.HasSuffix(reducedName, "."+currDomain) || + (isWildcard && strings.EqualFold(reducedName, currDomain)) { + valid = true + break + } + } + + if role.AllowGlobDomains && + strings.Contains(currDomain, "*") && + glob.Glob(strings.ToLower(currDomain), strings.ToLower(name)) { + valid = true + break + } + } + + if valid { + continue + } + } + + return name + } + + return "" +} + +func IsWildcardDomain(name string) bool { + // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier + // RFC 2818 which no modern client will validate against, there are two + // main types of wildcards, each with a single wildcard specifier (`*`, + // functionally different from the `*` used as a glob from the + // AllowGlobDomains parsing path) in the left-most label: + // + // 1. Entire label is a single wildcard character (most common and + // well-supported), + // 2. Part of the label contains a single wildcard character (e.g. per + // RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). + // + // We permit issuance of both but not the older RFC 2818 style under + // the new AllowWildcardCertificates option. However, anything with a + // glob character is technically a wildcard, though not a valid one. 
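+
+	// Illustrative examples (editorial note, not part of the original
+	// change) of how names are classified here and by
+	// ValidateWildcardDomain below:
+	//
+	//	IsWildcardDomain("*.example.com")     // true; full-label wildcard
+	//	IsWildcardDomain("b*z.example.com")   // true; partial-label wildcard
+	//	IsWildcardDomain("foo.*.example.com") // true, but rejected later as
+	//	                                      // the wildcard is not left-most
+	//	IsWildcardDomain("foo.example.com")   // false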
+
+	return strings.Contains(name, "*")
+}
+
+func ValidateWildcardDomain(name string) (string, string, error) {
+	// See note in IsWildcardDomain(...) about the definition of a wildcard
+	// domain.
+	var wildcardLabel string
+	var reducedName string
+
+	if strings.Count(name, "*") > 1 {
+		// As mentioned above, only one wildcard character is permitted
+		// under RFC 6125 semantics.
+		return wildcardLabel, reducedName, fmt.Errorf("expected only one wildcard identifier in the given domain name")
+	}
+
+	// Split the Common Name into two parts: a left-most label and the
+	// remaining segments (if present).
+	splitLabels := strings.SplitN(name, ".", 2)
+	if len(splitLabels) != 2 {
+		// We've been given a single-part domain name that consists
+		// entirely of a wildcard. This is a little tricky to handle,
+		// but EnforceHostnames validates both the wildcard-containing
+		// label and the reduced name, but _only_ the latter if it is
+		// non-empty. This still allows us to validate that the only
+		// label component matches hostname expectations.
+		wildcardLabel = splitLabels[0]
+		reducedName = ""
+	} else {
+		// We have an (at least) two-label domain name. But before we can
+		// update our names, we need to validate that the wildcard ended up
+		// in the segment we expected it to. While this is (kinda)
+		// validated under EnforceHostnames's leftWildLabelRegex, we
+		// still need to validate it in the non-enforced mode.
+		//
+		// By the validated assumption above, we know there's strictly one
+		// wildcard in this domain, so we only need to check the wildcard
+		// label or the reduced name (as one is equivalent to the other).
+		// Because we later assume reducedName _lacks_ wildcard segments,
+		// we validate that.
+		wildcardLabel = splitLabels[0]
+		reducedName = splitLabels[1]
+		if strings.Contains(reducedName, "*") {
+			return wildcardLabel, reducedName, fmt.Errorf("expected wildcard to only be present in left-most domain label")
+		}
+	}
+
+	return wildcardLabel, reducedName, nil
+}
+
+// ValidateCommonName validates a given common name against the role
+// parameters, ensuring it is either an email address or a hostname as
+// permitted by the role's CN validations, unless validation is disabled
+// altogether.
+func ValidateCommonName(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, name string) string {
+	isDisabled := len(role.CNValidations) == 1 && role.CNValidations[0] == "disabled"
+	if isDisabled {
+		return ""
+	}
+
+	if ValidateNames(b, role, entityInfo, []string{name}) != "" {
+		return name
+	}
+
+	// Validations weren't disabled, but the role lacked CN Validations, so
+	// don't restrict types. This case is hit in certain existing tests.
+	if len(role.CNValidations) == 0 {
+		return ""
+	}
+
+	// If there's an @ in the name, ensure email type validation is allowed.
+	// Otherwise, ensure hostname is allowed.
+	if strings.Contains(name, "@") {
+		var allowsEmails bool
+		for _, validation := range role.CNValidations {
+			if validation == "email" {
+				allowsEmails = true
+				break
+			}
+		}
+		if !allowsEmails {
+			return name
+		}
+	} else {
+		var allowsHostnames bool
+		for _, validation := range role.CNValidations {
+			if validation == "hostname" {
+				allowsHostnames = true
+				break
+			}
+		}
+		if !allowsHostnames {
+			return name
+		}
+	}
+
+	return ""
+}
+
+// ValidateOtherSANs checks if the values requested are allowed. If an OID
+// isn't allowed, it will be returned as the first string. If a value isn't
+// allowed, it will be returned as the second string. Empty strings and a
+// nil error mean everything is okay.
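+//
+// An illustrative call (editorial sketch; the OID below is merely an
+// example, the function itself does not special-case any OID):
+//
+//	oid, value, err := ValidateOtherSANs(role, map[string][]string{
+//		"1.3.6.1.4.1.311.20.2.3": {"user@example.com"},
+//	})
+//	// oid == "" && value == "" && err == nil means every requested
+//	// other SAN is permitted by the role.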
+func ValidateOtherSANs(role *RoleEntry, requested map[string][]string) (string, string, error) {
+	if len(role.AllowedOtherSANs) == 1 && role.AllowedOtherSANs[0] == "*" {
+		// Anything is allowed
+		return "", "", nil
+	}
+
+	allowed, err := ParseOtherSANs(role.AllowedOtherSANs)
+	if err != nil {
+		return "", "", fmt.Errorf("error parsing role's allowed SANs: %w", err)
+	}
+	for oid, names := range requested {
+		for _, name := range names {
+			allowedNames, ok := allowed[oid]
+			if !ok {
+				return oid, "", nil
+			}
+
+			valid := false
+			for _, allowedName := range allowedNames {
+				if glob.Glob(allowedName, name) {
+					valid = true
+					break
+				}
+			}
+
+			if !valid {
+				return oid, name, nil
+			}
+		}
+	}
+
+	return "", "", nil
+}
+
+func ParseOtherSANs(others []string) (map[string][]string, error) {
+	result := map[string][]string{}
+	for _, other := range others {
+		splitOther := strings.SplitN(other, ";", 2)
+		if len(splitOther) != 2 {
+			return nil, fmt.Errorf("expected a semicolon in other SAN %q", other)
+		}
+		splitType := strings.SplitN(splitOther[1], ":", 2)
+		if len(splitType) != 2 {
+			return nil, fmt.Errorf("expected a colon in other SAN %q", other)
+		}
+		switch {
+		case strings.EqualFold(splitType[0], "utf8"):
+		case strings.EqualFold(splitType[0], "utf-8"):
+		default:
+			return nil, fmt.Errorf("only utf8 other SANs are supported; found non-supported type in other SAN %q", other)
+		}
+		result[splitOther[0]] = append(result[splitOther[0]], splitType[1])
+	}
+
+	return result, nil
+}
+
+// Given a URI SAN, verify that it is allowed.
+func ValidateURISAN(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, uri string) bool {
+	valid := false
+	for _, allowed := range role.AllowedURISANs {
+		if role.AllowedURISANsTemplate {
+			isTemplate, _ := framework.ValidateIdentityTemplate(allowed)
+			if isTemplate && entityInfo.EntityID != "" {
+				tmpAllowed, err := framework.PopulateIdentityTemplate(allowed, entityInfo.EntityID, b)
+				if err != nil {
+					continue
+				}
+				allowed = tmpAllowed
+			}
+		}
+		validURI := glob.Glob(allowed, uri)
+		if validURI {
+			valid = true
+			break
+		}
+	}
+	return valid
+}
+
+// ValidateUserId returns a bool indicating whether the given userId is
+// permitted by the role.
+func ValidateUserId(role *RoleEntry, userId string) bool {
+	allowedList := role.AllowedUserIDs
+
+	if len(allowedList) == 0 {
+		// Nothing is allowed.
+		return false
+	}
+
+	if strutil.StrListContainsCaseInsensitive(allowedList, userId) {
+		return true
+	}
+
+	for _, rolePattern := range allowedList {
+		if rolePattern == "" {
+			continue
+		}
+
+		if strings.Contains(rolePattern, "*") && glob.Glob(rolePattern, userId) {
+			return true
+		}
+	}
+
+	// No matches.
+	return false
+}
+
+func ValidateSerialNumber(role *RoleEntry, serialNumber string) string {
+	valid := false
+	if len(role.AllowedSerialNumbers) > 0 {
+		for _, currSerialNumber := range role.AllowedSerialNumbers {
+			if currSerialNumber == "" {
+				continue
+			}
+
+			if (strings.Contains(currSerialNumber, "*") &&
+				glob.Glob(currSerialNumber, serialNumber)) ||
+				currSerialNumber == serialNumber {
+				valid = true
+				break
+			}
+		}
+	}
+	if !valid {
+		return serialNumber
+	}
+	return ""
+}
+
+type CertNotAfterInput interface {
+	GetTTL() int
+	GetOptionalNotAfter() (interface{}, bool)
+}
+
+// GetCertificateNotAfter computes a certificate's NotAfter date based on the
+// mount TTL, role, signing bundle and input API data. It returns a NotAfter
+// time, a set of warnings, or an error.
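+//
+// Editorial summary of the precedence implemented below: a nonzero ttl and
+// an explicit not_after are mutually exclusive; otherwise the request TTL,
+// then role.TTL, then the mount's default lease TTL apply, capped at
+// role.MaxTTL (or the mount maximum). For example, with a request ttl of 0,
+// role.TTL of 0, a default lease TTL of 768h and role.MaxTTL of 24h, the
+// effective TTL is capped to 24h (with a warning) and notAfter is roughly
+// now+24h, subject to the issuer's LeafNotAfterBehavior.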
+func GetCertificateNotAfter(b logical.SystemView, role *RoleEntry, input CertNotAfterInput, caSign *certutil.CAInfoBundle) (time.Time, []string, error) {
+	var warnings []string
+	var maxTTL time.Duration
+	var notAfter time.Time
+	var err error
+
+	ttl := time.Duration(input.GetTTL()) * time.Second
+	notAfterAlt := role.NotAfter
+	if notAfterAlt == "" {
+		notAfterAltRaw, ok := input.GetOptionalNotAfter()
+		if ok {
+			notAfterAlt = notAfterAltRaw.(string)
+		}
+	}
+	if ttl > 0 && notAfterAlt != "" {
+		return time.Time{}, warnings, errutil.UserError{Err: "Either ttl or not_after should be provided. Both should not be provided in the same request."}
+	}
+
+	if ttl == 0 && role.TTL > 0 {
+		ttl = role.TTL
+	}
+
+	if role.MaxTTL > 0 {
+		maxTTL = role.MaxTTL
+	}
+
+	if ttl == 0 {
+		ttl = b.DefaultLeaseTTL()
+	}
+	if maxTTL == 0 {
+		maxTTL = b.MaxLeaseTTL()
+	}
+	if ttl > maxTTL {
+		warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL))
+		ttl = maxTTL
+	}
+
+	if notAfterAlt != "" {
+		notAfter, err = time.Parse(time.RFC3339, notAfterAlt)
+		if err != nil {
+			return notAfter, warnings, errutil.UserError{Err: err.Error()}
+		}
+	} else {
+		notAfter = time.Now().Add(ttl)
+	}
+	notAfter, err = ApplyIssuerLeafNotAfterBehavior(caSign, notAfter)
+	if err != nil {
+		return time.Time{}, warnings, err
+	}
+	return notAfter, warnings, nil
+}
+
+// ApplyIssuerLeafNotAfterBehavior resets a certificate's notAfter time, or
+// errors out, based on the issuer's notAfter date along with the
+// LeafNotAfterBehavior configuration.
+func ApplyIssuerLeafNotAfterBehavior(caSign *certutil.CAInfoBundle, notAfter time.Time) (time.Time, error) {
+	if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) {
+		// If it's not self-signed, verify that the issued certificate
+		// won't be valid past the lifetime of the CA certificate, and
+		// act accordingly. The behavior depends on the issuer's
+		// LeafNotAfterBehavior setting.
+		switch caSign.LeafNotAfterBehavior {
+		case certutil.PermitNotAfterBehavior:
+			// Explicitly do nothing.
+		case certutil.TruncateNotAfterBehavior:
+			notAfter = caSign.Certificate.NotAfter
+		case certutil.ErrNotAfterBehavior:
+			fallthrough
+		default:
+			return time.Time{}, errutil.UserError{Err: fmt.Sprintf(
+				"cannot satisfy request, as TTL would result in notAfter of %s that is beyond the expiration of the CA certificate at %s", notAfter.UTC().Format(time.RFC3339Nano), caSign.Certificate.NotAfter.UTC().Format(time.RFC3339Nano))}
+		}
+	}
+	return notAfter, nil
+}
+
+// StoreCertificate persists the certificate from a signed certificate bundle
+// to storage.
+func StoreCertificate(ctx context.Context, s logical.Storage, certCounter CertificateCounter, certBundle *certutil.ParsedCertBundle) error {
+	hyphenSerialNumber := parsing.NormalizeSerialForStorageFromBigInt(certBundle.Certificate.SerialNumber)
+	key := PathCerts + hyphenSerialNumber
+	certsCounted := certCounter.IsInitialized()
+	err := s.Put(ctx, &logical.StorageEntry{
+		Key:   key,
+		Value: certBundle.CertificateBytes,
+	})
+	if err != nil {
+		return fmt.Errorf("unable to store certificate locally: %w", err)
+	}
+	certCounter.IncrementTotalCertificatesCount(certsCounted, key)
+	return nil
+}
diff --git a/builtin/logical/pki/issuing/issuers.go b/builtin/logical/pki/issuing/issuers.go
new file mode 100644
index 000000000000..9ceff5b13cf2
--- /dev/null
+++ b/builtin/logical/pki/issuing/issuers.go
@@ -0,0 +1,588 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + ReadOnlyUsage IssuerUsage = iota + IssuanceUsage IssuerUsage = 1 << iota + CRLSigningUsage IssuerUsage = 1 << iota + OCSPSigningUsage IssuerUsage = 1 << iota +) + +const ( + // When adding a new usage in the future, we'll need to create a usage + // mask field on the IssuerEntry and handle migrations to a newer mask, + // inferring a value for the new bits. + AllIssuerUsages = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage | OCSPSigningUsage + + DefaultRef = "default" + IssuerPrefix = "config/issuer/" + + // Used as a quick sanity check for a reference id lookups... + uuidLength = 36 + + IssuerRefNotFound = IssuerID("not-found") + LatestIssuerVersion = 1 + + LegacyCertBundlePath = "config/ca_bundle" + LegacyBundleShimID = IssuerID("legacy-entry-shim-id") + LegacyBundleShimKeyID = KeyID("legacy-entry-shim-key-id") + + LegacyCRLPath = "crl" + DeltaCRLPath = "delta-crl" + DeltaCRLPathSuffix = "-delta" + UnifiedCRLPath = "unified-crl" + UnifiedDeltaCRLPath = "unified-delta-crl" + UnifiedCRLPathPrefix = "unified-" +) + +type IssuerID string + +func (p IssuerID) String() string { + return string(p) +} + +type IssuerUsage uint + +var namedIssuerUsages = map[string]IssuerUsage{ + "read-only": ReadOnlyUsage, + "issuing-certificates": IssuanceUsage, + "crl-signing": CRLSigningUsage, + "ocsp-signing": OCSPSigningUsage, +} + +func (i *IssuerUsage) ToggleUsage(usages ...IssuerUsage) { + for _, usage := range usages { + *i ^= usage + } +} + +func (i IssuerUsage) HasUsage(usage IssuerUsage) bool { + return (i & usage) == usage +} + +func (i IssuerUsage) Names() string { + var names []string + var builtUsage IssuerUsage + + // Return the known set of usages in a sorted order to not have Terraform state files flipping + // saying values are different when it's the same list in a different order. + keys := make([]string, 0, len(namedIssuerUsages)) + for k := range namedIssuerUsages { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, name := range keys { + usage := namedIssuerUsages[name] + if i.HasUsage(usage) { + names = append(names, name) + builtUsage.ToggleUsage(usage) + } + } + + if i != builtUsage { + // Found some unknown usage, we should indicate this in the names. 
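+		// (Editorial example: if i == 22 (0b10110), the known usages 2 and
+		// 4 match, builtUsage == 6, and i^builtUsage == 16, which is
+		// reported below as "unknown:16".)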
+		names = append(names, fmt.Sprintf("unknown:%v", i^builtUsage))
+	}
+
+	return strings.Join(names, ",")
+}
+
+func NewIssuerUsageFromNames(names []string) (IssuerUsage, error) {
+	var result IssuerUsage
+	for index, name := range names {
+		usage, ok := namedIssuerUsages[name]
+		if !ok {
+			return ReadOnlyUsage, fmt.Errorf("unknown name for usage at index %v: %v", index, name)
+		}
+
+		result.ToggleUsage(usage)
+	}
+
+	return result, nil
+}
+
+type IssuerEntry struct {
+	ID                   IssuerID                  `json:"id"`
+	Name                 string                    `json:"name"`
+	KeyID                KeyID                     `json:"key_id"`
+	Certificate          string                    `json:"certificate"`
+	CAChain              []string                  `json:"ca_chain"`
+	ManualChain          []IssuerID                `json:"manual_chain"`
+	SerialNumber         string                    `json:"serial_number"`
+	LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior"`
+	Usage                IssuerUsage               `json:"usage"`
+	RevocationSigAlg     x509.SignatureAlgorithm   `json:"revocation_signature_algorithm"`
+	Revoked              bool                      `json:"revoked"`
+	RevocationTime       int64                     `json:"revocation_time"`
+	RevocationTimeUTC    time.Time                 `json:"revocation_time_utc"`
+	AIAURIs              *AiaConfigEntry           `json:"aia_uris,omitempty"`
+	LastModified         time.Time                 `json:"last_modified"`
+	Version              uint                      `json:"version"`
+}
+
+// GetCertificate returns a x509.Certificate of the CA certificate
+// represented by this issuer.
+func (i IssuerEntry) GetCertificate() (*x509.Certificate, error) {
+	cert, err := parsing.ParseCertificateFromString(i.Certificate)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse certificate from issuer: %s: %v", err.Error(), i.ID)}
+	}
+
+	return cert, nil
+}
+
+// GetFullCaChain returns a slice of x509.Certificate values of this issuer's
+// full CA chain, which starts with the CA certificate represented by this
+// issuer, followed by the entire CA chain.
+func (i IssuerEntry) GetFullCaChain() ([]*x509.Certificate, error) {
+	var chains []*x509.Certificate
+	issuerCert, err := i.GetCertificate()
+	if err != nil {
+		return nil, err
+	}
+
+	chains = append(chains, issuerCert)
+
+	for rangeI, chainVal := range i.CAChain {
+		parsedChainVal, err := parsing.ParseCertificateFromString(chainVal)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing issuer %s ca chain index value [%d]: %w", i.ID, rangeI, err)
+		}
+
+		if bytes.Equal(parsedChainVal.Raw, issuerCert.Raw) {
+			continue
+		}
+		chains = append(chains, parsedChainVal)
+	}
+
+	return chains, nil
+}
+
+func (i IssuerEntry) EnsureUsage(usage IssuerUsage) error {
+	// We want to spit out a nice error message about missing usages.
+	if i.Usage.HasUsage(usage) {
+		return nil
+	}
+
+	issuerRef := fmt.Sprintf("id:%v", i.ID)
+	if len(i.Name) > 0 {
+		issuerRef = fmt.Sprintf("%v / name:%v", issuerRef, i.Name)
+	}
+
+	// These usages differ at some point in time. We need to find the first
+	// usage that differs and return a logical-sounding error message around
+	// that difference.
+	for name, candidate := range namedIssuerUsages {
+		if usage.HasUsage(candidate) && !i.Usage.HasUsage(candidate) {
+			return fmt.Errorf("requested usage %v for issuer [%v] but only had usage %v", name, issuerRef, i.Usage.Names())
+		}
+	}
+
+	// Maybe we have an unnamed usage that's requested.
+	return fmt.Errorf("unknown delta between usages: %v -> %v / for issuer [%v]", usage.Names(), i.Usage.Names(), issuerRef)
+}
+
+func (i IssuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error {
+	// Hack: Go isn't kind enough to expose its lovely
+	// signatureAlgorithmDetails. However, we don't want to actually
+	// fetch the private key and attempt a signature with this algo (as we'll
+	// mint new, previously unsigned material in the process that could
+	// potentially be abused if it leaks).
+	//
+	// So...
+	//
+	// ...we maintain our own mapping of cert.PKI<->sigAlgos. Notably, we
+	// exclude DSA support, as the PKI engine has never supported DSA keys.
+	if algo == x509.UnknownSignatureAlgorithm {
+		// Special-cased to indicate an upgrade, letting Go automatically
+		// choose the correct value.
+		return nil
+	}
+
+	cert, err := i.GetCertificate()
+	if err != nil {
+		return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %w", err)
+	}
+
+	switch cert.PublicKeyAlgorithm {
+	case x509.RSA:
+		switch algo {
+		case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA,
+			x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS,
+			x509.SHA512WithRSAPSS:
+			return nil
+		}
+	case x509.ECDSA:
+		switch algo {
+		case x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
+			return nil
+		}
+	case x509.Ed25519:
+		switch algo {
+		case x509.PureEd25519:
+			return nil
+		}
+	}
+
+	return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String())
+}
+
+// ResolveAndFetchIssuerForIssuance takes a name or UUID referencing an
+// issuer, loads the issuer, and validates that it has an associated private
+// key and is allowed to perform issuance operations.
+func ResolveAndFetchIssuerForIssuance(ctx context.Context, s logical.Storage, issuerName string) (*IssuerEntry, error) {
+	if len(issuerName) == 0 {
+		return nil, fmt.Errorf("unable to fetch pki issuer: empty issuer name")
+	}
+	issuerId, err := ResolveIssuerReference(ctx, s, issuerName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to resolve issuer %s: %w", issuerName, err)
+	}
+
+	issuer, err := FetchIssuerById(ctx, s, issuerId)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load issuer %s: %w", issuerName, err)
+	}
+
+	if issuer.Usage.HasUsage(IssuanceUsage) && len(issuer.KeyID) > 0 {
+		return issuer, nil
+	}
+
+	return nil, fmt.Errorf("issuer %s is missing issuance usage or has no associated key", issuerName)
+}
+
+func ResolveIssuerReference(ctx context.Context, s logical.Storage, reference string) (IssuerID, error) {
+	if reference == DefaultRef {
+		// Handle fetching the default issuer.
+		config, err := GetIssuersConfig(ctx, s)
+		if err != nil {
+			return IssuerID("config-error"), err
+		}
+		if len(config.DefaultIssuerId) == 0 {
+			return IssuerRefNotFound, fmt.Errorf("no default issuer currently configured")
+		}
+
+		return config.DefaultIssuerId, nil
+	}
+
+	// Look up by a direct get first to see if our reference is an ID; this
+	// is quick and cached.
+	if len(reference) == uuidLength {
+		entry, err := s.Get(ctx, IssuerPrefix+reference)
+		if err != nil {
+			return IssuerID("issuer-read"), err
+		}
+		if entry != nil {
+			return IssuerID(reference), nil
+		}
+	}
+
+	// ... then fall back to pulling all issuers from storage.
+	issuers, err := ListIssuers(ctx, s)
+	if err != nil {
+		return IssuerID("list-error"), err
+	}
+
+	for _, issuerId := range issuers {
+		issuer, err := FetchIssuerById(ctx, s, issuerId)
+		if err != nil {
+			return IssuerID("issuer-read"), err
+		}
+
+		if issuer.Name == reference {
+			return issuer.ID, nil
+		}
+	}
+
+	// Otherwise, we must not have found the issuer.
+	return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)}
+}
+
+func ListIssuers(ctx context.Context, s logical.Storage) ([]IssuerID, error) {
+	strList, err := s.List(ctx, IssuerPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	issuerIds := make([]IssuerID, 0, len(strList))
+	for _, entry := range strList {
+		issuerIds = append(issuerIds, IssuerID(entry))
+	}
+
+	return issuerIds, nil
+}
+
+// FetchIssuerById returns an IssuerEntry based on issuerId; if none is found,
+// an error is returned.
+func FetchIssuerById(ctx context.Context, s logical.Storage, issuerId IssuerID) (*IssuerEntry, error) {
+	if len(issuerId) == 0 {
+		return nil, errutil.InternalError{Err: "unable to fetch pki issuer: empty issuer identifier"}
+	}
+
+	entry, err := s.Get(ctx, IssuerPrefix+issuerId.String())
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)}
+	}
+	if entry == nil {
+		return nil, errutil.UserError{Err: fmt.Sprintf("pki issuer id %s does not exist", issuerId.String())}
+	}
+
+	var issuer IssuerEntry
+	if err := entry.DecodeJSON(&issuer); err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki issuer with id %s: %v", issuerId.String(), err)}
+	}
+
+	return upgradeIssuerIfRequired(&issuer), nil
+}
+
+func WriteIssuer(ctx context.Context, s logical.Storage, issuer *IssuerEntry) error {
+	issuerId := issuer.ID
+	if issuer.LastModified.IsZero() {
+		issuer.LastModified = time.Now().UTC()
+	}
+
+	json, err := logical.StorageEntryJSON(IssuerPrefix+issuerId.String(), issuer)
+	if err != nil {
+		return err
+	}
+
+	return s.Put(ctx, json)
+}
+
+func DeleteIssuer(ctx context.Context, s logical.Storage, id IssuerID) (bool, error) {
+	config, err := GetIssuersConfig(ctx, s)
+	if err != nil {
+		return false, err
+	}
+
+	wasDefault := false
+	if config.DefaultIssuerId == id {
+		wasDefault = true
+		// Overwrite the fetched default issuer as we're going to remove this
+		// entry.
+		config.fetchedDefault = IssuerID("")
+		config.DefaultIssuerId = IssuerID("")
+		if err := SetIssuersConfig(ctx, s, config); err != nil {
+			return wasDefault, err
+		}
+	}
+
+	return wasDefault, s.Delete(ctx, IssuerPrefix+id.String())
+}
+
+func upgradeIssuerIfRequired(issuer *IssuerEntry) *IssuerEntry {
+	// *NOTE*: Don't attempt to write out the issuer here, as it may cause an
+	// ErrReadOnly that will direct the request all the way up to the primary
+	// cluster, which would be horrible for local cluster operations such as
+	// generating a leaf cert or a revoke.
+	// Also, even though we could tell if we are the primary cluster's active
+	// node, we can't tell if we have a full RW issuer lock, so it might not
+	// be safe to write.
+	if issuer.Version == LatestIssuerVersion {
+		return issuer
+	}
+
+	if issuer.Version == 0 {
+		// Upgrade at this step requires interrogating the certificate itself;
+		// if this decode fails, it indicates internal problems and the
+		// request will subsequently fail elsewhere. However, decoding this
+		// certificate is mildly expensive, so we only do it in the event of
+		// a Version 0 certificate.
+		cert, err := issuer.GetCertificate()
+		if err != nil {
+			return issuer
+		}
+
+		hadCRL := issuer.Usage.HasUsage(CRLSigningUsage)
+		// Remove CRL signing usage if it exists on the issuer but doesn't
+		// exist in the KU of the x509 certificate.
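+		// (Editorial example: a version-0 issuer whose certificate lacks
+		// the x509.KeyUsageCRLSign bit loses crl-signing here; because it
+		// had it, the issuer still gains ocsp-signing in the next block.)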
+		if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 {
+			issuer.Usage.ToggleUsage(CRLSigningUsage)
+		}
+
+		// Handle the newer OCSPSigning usage flag for issuers written by
+		// earlier versions: if the issuer had CRL signing usage (captured
+		// above, prior to any removal), grant the OCSP flag as well, since
+		// EKUs don't matter here.
+		if hadCRL && !issuer.Usage.HasUsage(OCSPSigningUsage) {
+			issuer.Usage.ToggleUsage(OCSPSigningUsage)
+		}
+	}
+
+	issuer.Version = LatestIssuerVersion
+	return issuer
+}
+
+// FetchCAInfoByIssuerId fetches the CA info, returning an error if no CA info
+// exists for the given issuerId.
+// This does support loading via the LegacyBundleShimID.
+func FetchCAInfoByIssuerId(ctx context.Context, s logical.Storage, mkv managed_key.PkiManagedKeyView, issuerId IssuerID, usage IssuerUsage) (*certutil.CAInfoBundle, error) {
+	entry, bundle, err := FetchCertBundleByIssuerId(ctx, s, issuerId, true)
+	if err != nil {
+		switch err.(type) {
+		case errutil.UserError:
+			return nil, err
+		case errutil.InternalError:
+			return nil, err
+		default:
+			return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA info: %v", err)}
+		}
+	}
+
+	if err = entry.EnsureUsage(usage); err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("error while attempting to use issuer %v: %v", issuerId, err)}
+	}
+
+	parsedBundle, err := ParseCABundle(ctx, mkv, bundle)
+	if err != nil {
+		return nil, errutil.InternalError{Err: err.Error()}
+	}
+
+	if parsedBundle.Certificate == nil {
+		return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"}
+	}
+	if parsedBundle.PrivateKey == nil {
+		return nil, errutil.UserError{Err: fmt.Sprintf("unable to fetch corresponding key for issuer %v; unable to use this issuer for signing", issuerId)}
+	}
+
+	caInfo := &certutil.CAInfoBundle{
+		ParsedCertBundle:     *parsedBundle,
+		URLs:                 nil,
+		LeafNotAfterBehavior: entry.LeafNotAfterBehavior,
+		RevocationSigAlg:     entry.RevocationSigAlg,
+	}
+
+	entries, err := GetAIAURLs(ctx, s, entry)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)}
+	}
+	caInfo.URLs = entries
+
+	return caInfo, nil
+}
+
+func ParseCABundle(ctx context.Context, mkv managed_key.PkiManagedKeyView, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) {
+	if bundle.PrivateKeyType == certutil.ManagedPrivateKey {
+		return managed_key.ParseManagedKeyCABundle(ctx, mkv, bundle)
+	}
+	return bundle.ToParsedCertBundle()
+}
+
+// FetchCertBundleByIssuerId builds a certutil.CertBundle from the specified
+// issuer identifier, optionally loading its key. This method supports loading
+// legacy bundles using the LegacyBundleShimID issuer ID, and returns an error
+// if no entry is found.
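+//
+// For example (editorial sketch):
+//
+//	issuer, bundle, err := FetchCertBundleByIssuerId(ctx, s, issuerId, false)
+//	// bundle carries the certificate, CA chain and serial number, but no
+//	// private key; pass loadKey=true when the key is needed for signing.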
+func FetchCertBundleByIssuerId(ctx context.Context, s logical.Storage, id IssuerID, loadKey bool) (*IssuerEntry, *certutil.CertBundle, error) { + if id == LegacyBundleShimID { + // We have not completed the migration, or started a request in legacy mode, so + // attempt to load the bundle from the legacy location + issuer, bundle, err := GetLegacyCertBundle(ctx, s) + if err != nil { + return nil, nil, err + } + if issuer == nil || bundle == nil { + return nil, nil, errutil.UserError{Err: "no legacy cert bundle exists"} + } + + return issuer, bundle, err + } + + issuer, err := FetchIssuerById(ctx, s, id) + if err != nil { + return nil, nil, err + } + + var bundle certutil.CertBundle + bundle.Certificate = issuer.Certificate + bundle.CAChain = issuer.CAChain + bundle.SerialNumber = issuer.SerialNumber + + // Fetch the key if it exists. Sometimes we don't need the key immediately. + if loadKey && issuer.KeyID != KeyID("") { + key, err := FetchKeyById(ctx, s, issuer.KeyID) + if err != nil { + return nil, nil, err + } + + bundle.PrivateKeyType = key.PrivateKeyType + bundle.PrivateKey = key.PrivateKey + } + + return issuer, &bundle, nil +} + +func GetLegacyCertBundle(ctx context.Context, s logical.Storage) (*IssuerEntry, *certutil.CertBundle, error) { + entry, err := s.Get(ctx, LegacyCertBundlePath) + if err != nil { + return nil, nil, err + } + + if entry == nil { + return nil, nil, nil + } + + cb := &certutil.CertBundle{} + err = entry.DecodeJSON(cb) + if err != nil { + return nil, nil, err + } + + // Fake a storage entry with backwards compatibility in mind. + issuer := &IssuerEntry{ + ID: LegacyBundleShimID, + KeyID: LegacyBundleShimKeyID, + Name: "legacy-entry-shim", + Certificate: cb.Certificate, + CAChain: cb.CAChain, + SerialNumber: cb.SerialNumber, + LeafNotAfterBehavior: certutil.ErrNotAfterBehavior, + } + issuer.Usage.ToggleUsage(AllIssuerUsages) + + return issuer, cb, nil +} + +func ResolveIssuerCRLPath(ctx context.Context, storage logical.Storage, useLegacyBundleCaStorage bool, reference string, unified bool) (string, error) { + if useLegacyBundleCaStorage { + return "crl", nil + } + + issuer, err := ResolveIssuerReference(ctx, storage, reference) + if err != nil { + return "crl", err + } + + var crlConfig *InternalCRLConfigEntry + if unified { + crlConfig, err = GetUnifiedCRLConfig(ctx, storage) + if err != nil { + return "crl", err + } + } else { + crlConfig, err = GetLocalCRLConfig(ctx, storage) + if err != nil { + return "crl", err + } + } + + if crlId, ok := crlConfig.IssuerIDCRLMap[issuer]; ok && len(crlId) > 0 { + path := fmt.Sprintf("crls/%v", crlId) + if unified { + path = ("unified-") + path + } + + return path, nil + } + + return "crl", fmt.Errorf("unable to find CRL for issuer: id:%v/ref:%v", issuer, reference) +} diff --git a/builtin/logical/pki/issuing/keys.go b/builtin/logical/pki/issuing/keys.go new file mode 100644 index 000000000000..c0088117435d --- /dev/null +++ b/builtin/logical/pki/issuing/keys.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "crypto" + "encoding/pem" + "fmt" + + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + KeyPrefix = "config/key/" + KeyRefNotFound = KeyID("not-found") +) + +type KeyID string + +func (p KeyID) String() string { + return string(p) +} + +type KeyEntry struct { + ID KeyID `json:"id"` + Name string `json:"name"` + PrivateKeyType certutil.PrivateKeyType `json:"private_key_type"` + PrivateKey string `json:"private_key"` +} + +func (e KeyEntry) IsManagedPrivateKey() bool { + return e.PrivateKeyType == certutil.ManagedPrivateKey +} + +func ListKeys(ctx context.Context, s logical.Storage) ([]KeyID, error) { + strList, err := s.List(ctx, KeyPrefix) + if err != nil { + return nil, err + } + + keyIds := make([]KeyID, 0, len(strList)) + for _, entry := range strList { + keyIds = append(keyIds, KeyID(entry)) + } + + return keyIds, nil +} + +func FetchKeyById(ctx context.Context, s logical.Storage, keyId KeyID) (*KeyEntry, error) { + if len(keyId) == 0 { + return nil, errutil.InternalError{Err: "unable to fetch pki key: empty key identifier"} + } + + entry, err := s.Get(ctx, KeyPrefix+keyId.String()) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: %v", err)} + } + if entry == nil { + return nil, errutil.UserError{Err: fmt.Sprintf("pki key id %s does not exist", keyId.String())} + } + + var key KeyEntry + if err := entry.DecodeJSON(&key); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki key with id %s: %v", keyId.String(), err)} + } + + return &key, nil +} + +func WriteKey(ctx context.Context, s logical.Storage, key KeyEntry) error { + keyId := key.ID + + json, err := logical.StorageEntryJSON(KeyPrefix+keyId.String(), key) + if err != nil { + return err + } + + return s.Put(ctx, json) +} + +func DeleteKey(ctx context.Context, s logical.Storage, id KeyID) (bool, error) { + config, err := GetKeysConfig(ctx, s) + if err != nil { + return false, err + } + + wasDefault := false + if config.DefaultKeyId == id { + wasDefault = true + config.DefaultKeyId = KeyID("") + if err := SetKeysConfig(ctx, s, config); err != nil { + return wasDefault, err + } + } + + return wasDefault, s.Delete(ctx, KeyPrefix+id.String()) +} + +func ResolveKeyReference(ctx context.Context, s logical.Storage, reference string) (KeyID, error) { + if reference == DefaultRef { + // Handle fetching the default key. + config, err := GetKeysConfig(ctx, s) + if err != nil { + return KeyID("config-error"), err + } + if len(config.DefaultKeyId) == 0 { + return KeyRefNotFound, fmt.Errorf("no default key currently configured") + } + + return config.DefaultKeyId, nil + } + + // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. + if len(reference) == uuidLength { + entry, err := s.Get(ctx, KeyPrefix+reference) + if err != nil { + return KeyID("key-read"), err + } + if entry != nil { + return KeyID(reference), nil + } + } + + // ... than to pull all keys from storage. 
+	keys, err := ListKeys(ctx, s)
+	if err != nil {
+		return KeyID("list-error"), err
+	}
+	for _, keyId := range keys {
+		key, err := FetchKeyById(ctx, s, keyId)
+		if err != nil {
+			return KeyID("key-read"), err
+		}
+
+		if key.Name == reference {
+			return key.ID, nil
+		}
+	}
+
+	// Otherwise, we must not have found the key.
+	return KeyRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI key for reference: %v", reference)}
+}
+
+func GetManagedKeyUUID(key *KeyEntry) (managed_key.UUIDKey, error) {
+	if !key.IsManagedPrivateKey() {
+		return "", errutil.InternalError{Err: fmt.Sprintf("GetManagedKeyUUID called on key id %s (%s) that is not a managed key", key.ID, key.Name)}
+	}
+	return managed_key.ExtractManagedKeyId([]byte(key.PrivateKey))
+}
+
+func GetSignerFromKeyEntry(ctx context.Context, mkv managed_key.PkiManagedKeyView, keyEntry *KeyEntry) (crypto.Signer, certutil.PrivateKeyType, error) {
+	if keyEntry.PrivateKeyType == certutil.UnknownPrivateKey {
+		return nil, certutil.UnknownPrivateKey, fmt.Errorf("unsupported unknown private key type for key: %s (%s)", keyEntry.ID, keyEntry.Name)
+	}
+
+	if keyEntry.IsManagedPrivateKey() {
+		managedKeyId, err := GetManagedKeyUUID(keyEntry)
+		if err != nil {
+			return nil, certutil.UnknownPrivateKey, fmt.Errorf("unable to get managed key uuid: %w", err)
+		}
+		bundle, actualKeyType, err := managed_key.CreateKmsKeyBundle(ctx, mkv, managedKeyId)
+		if err != nil {
+			return nil, certutil.UnknownPrivateKey, fmt.Errorf("failed to create kms key bundle from managed key uuid %s: %w", managedKeyId, err)
+		}
+
+		// The bundle's PrivateKeyType value is set to a ManagedKeyType, so use the actual key type value.
+		return bundle.PrivateKey, actualKeyType, nil
+	}
+
+	pemBlock, _ := pem.Decode([]byte(keyEntry.PrivateKey))
+	if pemBlock == nil {
+		return nil, certutil.UnknownPrivateKey, fmt.Errorf("no data found in PEM block")
+	}
+
+	signer, _, err := certutil.ParseDERKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, certutil.UnknownPrivateKey, fmt.Errorf("failed to parse PEM block: %w", err)
+	}
+	return signer, keyEntry.PrivateKeyType, nil
+}
diff --git a/builtin/logical/pki/issuing/roles.go b/builtin/logical/pki/issuing/roles.go
new file mode 100644
index 000000000000..acf2f259de83
--- /dev/null
+++ b/builtin/logical/pki/issuing/roles.go
@@ -0,0 +1,454 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + DefaultRoleKeyUsages = []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"} + DefaultRoleEstKeyUsages = []string{} + DefaultRoleEstKeyUsageOids = []string{} +) + +const ( + DefaultRoleSignatureBits = 0 + DefaultRoleUsePss = false +) + +type RoleEntry struct { + LeaseMax string `json:"lease_max"` + Lease string `json:"lease"` + DeprecatedMaxTTL string `json:"max_ttl"` + DeprecatedTTL string `json:"ttl"` + TTL time.Duration `json:"ttl_duration"` + MaxTTL time.Duration `json:"max_ttl_duration"` + AllowLocalhost bool `json:"allow_localhost"` + AllowedBaseDomain string `json:"allowed_base_domain"` + AllowedDomainsOld string `json:"allowed_domains,omitempty"` + AllowedDomains []string `json:"allowed_domains_list"` + AllowedDomainsTemplate bool `json:"allowed_domains_template"` + AllowBaseDomain bool `json:"allow_base_domain"` + AllowBareDomains bool `json:"allow_bare_domains"` + AllowTokenDisplayName bool `json:"allow_token_displayname"` + AllowSubdomains bool `json:"allow_subdomains"` + AllowGlobDomains bool `json:"allow_glob_domains"` + AllowWildcardCertificates *bool `json:"allow_wildcard_certificates,omitempty"` + AllowAnyName bool `json:"allow_any_name"` + EnforceHostnames bool `json:"enforce_hostnames"` + AllowIPSANs bool `json:"allow_ip_sans"` + ServerFlag bool `json:"server_flag"` + ClientFlag bool `json:"client_flag"` + CodeSigningFlag bool `json:"code_signing_flag"` + EmailProtectionFlag bool `json:"email_protection_flag"` + UseCSRCommonName bool `json:"use_csr_common_name"` + UseCSRSANs bool `json:"use_csr_sans"` + KeyType string `json:"key_type"` + KeyBits int `json:"key_bits"` + UsePSS bool `json:"use_pss"` + SignatureBits int `json:"signature_bits"` + MaxPathLength *int `json:",omitempty"` + KeyUsageOld string `json:"key_usage,omitempty"` + KeyUsage []string `json:"key_usage_list"` + ExtKeyUsage []string `json:"extended_key_usage_list"` + OUOld string `json:"ou,omitempty"` + OU []string `json:"ou_list"` + OrganizationOld string `json:"organization,omitempty"` + Organization []string `json:"organization_list"` + Country []string `json:"country"` + Locality []string `json:"locality"` + Province []string `json:"province"` + StreetAddress []string `json:"street_address"` + PostalCode []string `json:"postal_code"` + GenerateLease *bool `json:"generate_lease,omitempty"` + NoStore bool `json:"no_store"` + NoStoreMetadata bool `json:"no_store_metadata"` + RequireCN bool `json:"require_cn"` + CNValidations []string `json:"cn_validations"` + AllowedOtherSANs []string `json:"allowed_other_sans"` + AllowedSerialNumbers []string `json:"allowed_serial_numbers"` + AllowedUserIDs []string `json:"allowed_user_ids"` + AllowedURISANs []string `json:"allowed_uri_sans"` + AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` + PolicyIdentifiers []string `json:"policy_identifiers"` + ExtKeyUsageOIDs []string `json:"ext_key_usage_oids"` + BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca"` + NotBeforeDuration time.Duration `json:"not_before_duration"` + NotAfter string `json:"not_after"` + Issuer string `json:"issuer"` + // Name is only set when the role has been stored, on the fly roles have a blank name + Name string `json:"-"` + // WasModified indicates to callers if the 
+	// returned entry is different from the persisted version
+	WasModified bool `json:"-"`
+}
+
+func (r *RoleEntry) ToResponseData() map[string]interface{} {
+	responseData := map[string]interface{}{
+		"ttl":                                int64(r.TTL.Seconds()),
+		"max_ttl":                            int64(r.MaxTTL.Seconds()),
+		"allow_localhost":                    r.AllowLocalhost,
+		"allowed_domains":                    r.AllowedDomains,
+		"allowed_domains_template":           r.AllowedDomainsTemplate,
+		"allow_bare_domains":                 r.AllowBareDomains,
+		"allow_token_displayname":            r.AllowTokenDisplayName,
+		"allow_subdomains":                   r.AllowSubdomains,
+		"allow_glob_domains":                 r.AllowGlobDomains,
+		"allow_wildcard_certificates":        r.AllowWildcardCertificates,
+		"allow_any_name":                     r.AllowAnyName,
+		"allowed_uri_sans_template":          r.AllowedURISANsTemplate,
+		"enforce_hostnames":                  r.EnforceHostnames,
+		"allow_ip_sans":                      r.AllowIPSANs,
+		"server_flag":                        r.ServerFlag,
+		"client_flag":                        r.ClientFlag,
+		"code_signing_flag":                  r.CodeSigningFlag,
+		"email_protection_flag":              r.EmailProtectionFlag,
+		"use_csr_common_name":                r.UseCSRCommonName,
+		"use_csr_sans":                       r.UseCSRSANs,
+		"key_type":                           r.KeyType,
+		"key_bits":                           r.KeyBits,
+		"signature_bits":                     r.SignatureBits,
+		"use_pss":                            r.UsePSS,
+		"key_usage":                          r.KeyUsage,
+		"ext_key_usage":                      r.ExtKeyUsage,
+		"ext_key_usage_oids":                 r.ExtKeyUsageOIDs,
+		"ou":                                 r.OU,
+		"organization":                       r.Organization,
+		"country":                            r.Country,
+		"locality":                           r.Locality,
+		"province":                           r.Province,
+		"street_address":                     r.StreetAddress,
+		"postal_code":                        r.PostalCode,
+		"no_store":                           r.NoStore,
+		"allowed_other_sans":                 r.AllowedOtherSANs,
+		"allowed_serial_numbers":             r.AllowedSerialNumbers,
+		"allowed_user_ids":                   r.AllowedUserIDs,
+		"allowed_uri_sans":                   r.AllowedURISANs,
+		"require_cn":                         r.RequireCN,
+		"cn_validations":                     r.CNValidations,
+		"policy_identifiers":                 r.PolicyIdentifiers,
+		"basic_constraints_valid_for_non_ca": r.BasicConstraintsValidForNonCA,
+		"not_before_duration":                int64(r.NotBeforeDuration.Seconds()),
+		"not_after":                          r.NotAfter,
+		"issuer_ref":                         r.Issuer,
+	}
+	if r.MaxPathLength != nil {
+		responseData["max_path_length"] = r.MaxPathLength
+	}
+	if r.GenerateLease != nil {
+		responseData["generate_lease"] = r.GenerateLease
+	}
+	AddNoStoreMetadata(responseData, r)
+	return responseData
+}
+
+var ErrRoleNotFound = errors.New("role not found")
+
+// GetRole will load a role from storage based on the provided name and
+// update its contents to the latest version if out of date. The WasModified
+// field will be set to true if modifications were made, indicating that the
+// caller should, if possible, write the updated entry back to storage. If
+// the role is not found, an ErrRoleNotFound error is returned.
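+//
+// A typical caller persists migrations when possible (editorial sketch):
+//
+//	role, err := GetRole(ctx, s, "example-role")
+//	if err != nil {
+//		return err
+//	}
+//	if role.WasModified {
+//		// best-effort write-back of the upgraded entry
+//	}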
+func GetRole(ctx context.Context, s logical.Storage, n string) (*RoleEntry, error) {
+	entry, err := s.Get(ctx, "role/"+n)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load role %s: %w", n, err)
+	}
+	if entry == nil {
+		return nil, fmt.Errorf("%w: with name %s", ErrRoleNotFound, n)
+	}
+
+	var result RoleEntry
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, fmt.Errorf("failed decoding role %s: %w", n, err)
+	}
+
+	// Migrate existing saved entries and save back if changed
+	modified := false
+	if len(result.DeprecatedTTL) == 0 && len(result.Lease) != 0 {
+		result.DeprecatedTTL = result.Lease
+		result.Lease = ""
+		modified = true
+	}
+	if result.TTL == 0 && len(result.DeprecatedTTL) != 0 {
+		parsed, err := parseutil.ParseDurationSecond(result.DeprecatedTTL)
+		if err != nil {
+			return nil, err
+		}
+		result.TTL = parsed
+		result.DeprecatedTTL = ""
+		modified = true
+	}
+	if len(result.DeprecatedMaxTTL) == 0 && len(result.LeaseMax) != 0 {
+		result.DeprecatedMaxTTL = result.LeaseMax
+		result.LeaseMax = ""
+		modified = true
+	}
+	if result.MaxTTL == 0 && len(result.DeprecatedMaxTTL) != 0 {
+		parsed, err := parseutil.ParseDurationSecond(result.DeprecatedMaxTTL)
+		if err != nil {
+			return nil, fmt.Errorf("failed parsing max_ttl field in %s: %w", n, err)
+		}
+		result.MaxTTL = parsed
+		result.DeprecatedMaxTTL = ""
+		modified = true
+	}
+	if result.AllowBaseDomain {
+		result.AllowBaseDomain = false
+		result.AllowBareDomains = true
+		modified = true
+	}
+	if result.AllowedDomainsOld != "" {
+		result.AllowedDomains = strings.Split(result.AllowedDomainsOld, ",")
+		result.AllowedDomainsOld = ""
+		modified = true
+	}
+	if result.AllowedBaseDomain != "" {
+		found := false
+		for _, v := range result.AllowedDomains {
+			if v == result.AllowedBaseDomain {
+				found = true
+				break
+			}
+		}
+		if !found {
+			result.AllowedDomains = append(result.AllowedDomains, result.AllowedBaseDomain)
+		}
+		result.AllowedBaseDomain = ""
+		modified = true
+	}
+	if result.AllowWildcardCertificates == nil {
+		// While not the most secure default, when AllowWildcardCertificates isn't
+		// explicitly specified in the stored Role, we automatically upgrade it to
+		// true to preserve compatibility with previous versions of Vault. Once this
+		// field is set, this logic will not be triggered anymore.
+		result.AllowWildcardCertificates = new(bool)
+		*result.AllowWildcardCertificates = true
+		modified = true
+	}
+
+	// Upgrade generate_lease in role
+	if result.GenerateLease == nil {
+		// All new roles will have GenerateLease always set to a value. A
+		// nil value indicates that this role needs an upgrade. Set it to
+		// `true` so as not to alter its current behavior.
+		result.GenerateLease = new(bool)
+		*result.GenerateLease = true
+		modified = true
+	}
+
+	// Upgrade key usages
+	if result.KeyUsageOld != "" {
+		result.KeyUsage = strings.Split(result.KeyUsageOld, ",")
+		result.KeyUsageOld = ""
+		modified = true
+	}
+
+	// Upgrade OU
+	if result.OUOld != "" {
+		result.OU = strings.Split(result.OUOld, ",")
+		result.OUOld = ""
+		modified = true
+	}
+
+	// Upgrade Organization
+	if result.OrganizationOld != "" {
+		result.Organization = strings.Split(result.OrganizationOld, ",")
+		result.OrganizationOld = ""
+		modified = true
+	}
+
+	// Set the issuer field to default if not set. We want to do this
+	// unconditionally, as we should probably never have an empty issuer
+	// on a stored role.
+	if len(result.Issuer) == 0 {
+		result.Issuer = DefaultRef
+		modified = true
+	}
+
+	// Update CN Validations to be the present default, "email,hostname"
+	if len(result.CNValidations) == 0 {
+		result.CNValidations = []string{"email", "hostname"}
+		modified = true
+	}
+
+	result.Name = n
+	result.WasModified = modified
+
+	return &result, nil
+}
+
+type RoleModifier func(r *RoleEntry)
+
+func WithKeyUsage(keyUsages []string) RoleModifier {
+	return func(r *RoleEntry) {
+		r.KeyUsage = keyUsages
+	}
+}
+
+func WithExtKeyUsage(extKeyUsages []string) RoleModifier {
+	return func(r *RoleEntry) {
+		r.ExtKeyUsage = extKeyUsages
+	}
+}
+
+func WithExtKeyUsageOIDs(extKeyUsageOids []string) RoleModifier {
+	return func(r *RoleEntry) {
+		r.ExtKeyUsageOIDs = extKeyUsageOids
+	}
+}
+
+func WithSignatureBits(signatureBits int) RoleModifier {
+	return func(r *RoleEntry) {
+		r.SignatureBits = signatureBits
+	}
+}
+
+func WithUsePSS(usePss bool) RoleModifier {
+	return func(r *RoleEntry) {
+		r.UsePSS = usePss
+	}
+}
+
+func WithTTL(ttl time.Duration) RoleModifier {
+	return func(r *RoleEntry) {
+		r.TTL = ttl
+	}
+}
+
+func WithMaxTTL(ttl time.Duration) RoleModifier {
+	return func(r *RoleEntry) {
+		r.MaxTTL = ttl
+	}
+}
+
+func WithGenerateLease(genLease bool) RoleModifier {
+	return func(r *RoleEntry) {
+		// Assign a fresh pointer rather than dereferencing, so this remains
+		// safe even if the role's GenerateLease pointer is nil.
+		r.GenerateLease = &genLease
+	}
+}
+
+func WithNotBeforeDuration(ttl time.Duration) RoleModifier {
+	return func(r *RoleEntry) {
+		r.NotBeforeDuration = ttl
+	}
+}
+
+func WithNoStore(noStore bool) RoleModifier {
+	return func(r *RoleEntry) {
+		r.NoStore = noStore
+	}
+}
+
+func WithIssuer(issuer string) RoleModifier {
+	return func(r *RoleEntry) {
+		if issuer == "" {
+			issuer = DefaultRef
+		}
+		r.Issuer = issuer
+	}
+}
+
+// SignVerbatimRole creates a sign-verbatim role with no overrides. This will
+// store the signed certificate, allowing any key type and any value a role
+// would otherwise restrict.
+func SignVerbatimRole() *RoleEntry {
+	return SignVerbatimRoleWithOpts()
+}
+
+// SignVerbatimRoleWithOpts creates a sign-verbatim role with the normal
+// defaults, while allowing any field to be tweaked to the consumer's needs.
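+//
+// For instance (editorial sketch), a caller wanting short-lived,
+// non-stored verbatim issuance might use:
+//
+//	role := SignVerbatimRoleWithOpts(
+//		WithTTL(time.Hour),
+//		WithNoStore(true),
+//	)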
+func SignVerbatimRoleWithOpts(opts ...RoleModifier) *RoleEntry { + entry := &RoleEntry{ + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + AllowWildcardCertificates: new(bool), + EnforceHostnames: false, + KeyType: "any", + UseCSRCommonName: true, + UseCSRSANs: true, + AllowedOtherSANs: []string{"*"}, + AllowedSerialNumbers: []string{"*"}, + AllowedURISANs: []string{"*"}, + AllowedUserIDs: []string{"*"}, + CNValidations: []string{"disabled"}, + GenerateLease: new(bool), + KeyUsage: DefaultRoleKeyUsages, + ExtKeyUsage: DefaultRoleEstKeyUsages, + ExtKeyUsageOIDs: DefaultRoleEstKeyUsageOids, + SignatureBits: DefaultRoleSignatureBits, + UsePSS: DefaultRoleUsePss, + } + *entry.AllowWildcardCertificates = true + *entry.GenerateLease = false + + if opts != nil { + for _, opt := range opts { + if opt != nil { + opt(entry) + } + } + } + + return entry +} + +func ParseExtKeyUsagesFromRole(role *RoleEntry) certutil.CertExtKeyUsage { + var parsedKeyUsages certutil.CertExtKeyUsage + + if role.ServerFlag { + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage + } + + if role.ClientFlag { + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage + } + + if role.CodeSigningFlag { + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage + } + + if role.EmailProtectionFlag { + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage + } + + for _, k := range role.ExtKeyUsage { + switch strings.ToLower(strings.TrimSpace(k)) { + case "any": + parsedKeyUsages |= certutil.AnyExtKeyUsage + case "serverauth": + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage + case "clientauth": + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage + case "codesigning": + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage + case "emailprotection": + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage + case "ipsecendsystem": + parsedKeyUsages |= certutil.IpsecEndSystemExtKeyUsage + case "ipsectunnel": + parsedKeyUsages |= certutil.IpsecTunnelExtKeyUsage + case "ipsecuser": + parsedKeyUsages |= certutil.IpsecUserExtKeyUsage + case "timestamping": + parsedKeyUsages |= certutil.TimeStampingExtKeyUsage + case "ocspsigning": + parsedKeyUsages |= certutil.OcspSigningExtKeyUsage + case "microsoftservergatedcrypto": + parsedKeyUsages |= certutil.MicrosoftServerGatedCryptoExtKeyUsage + case "netscapeservergatedcrypto": + parsedKeyUsages |= certutil.NetscapeServerGatedCryptoExtKeyUsage + } + } + + return parsedKeyUsages +} diff --git a/builtin/logical/pki/issuing/roles_metadata_stubs_oss.go b/builtin/logical/pki/issuing/roles_metadata_stubs_oss.go new file mode 100644 index 000000000000..455cc0cfbe55 --- /dev/null +++ b/builtin/logical/pki/issuing/roles_metadata_stubs_oss.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package issuing + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +import "github.com/hashicorp/vault/sdk/framework" + +func AddNoStoreMetadata(roleData map[string]interface{}, r *RoleEntry) { + return +} + +func WithNoStoreMetadata(noStoreMetadata bool) RoleModifier { + return func(r *RoleEntry) { + r.NoStoreMetadata = true + } +} + +const MetadataPermitted = false + +func AddNoStoreMetadataRoleField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + return fields +} + +func GetNoStoreMetadata(data *framework.FieldData) bool { + return true +} + +func NoStoreMetadataValue(value bool) bool { + return true +} diff --git a/builtin/logical/pki/issuing/sign_cert.go b/builtin/logical/pki/issuing/sign_cert.go new file mode 100644 index 000000000000..c6548f69521e --- /dev/null +++ b/builtin/logical/pki/issuing/sign_cert.go @@ -0,0 +1,301 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type SignCertInput interface { + CreationBundleInput + GetCSR() (*x509.CertificateRequest, error) + IsCA() bool + UseCSRValues() bool + GetPermittedDomains() []string +} + +func NewBasicSignCertInput(csr *x509.CertificateRequest, isCA, useCSRValues bool) BasicSignCertInput { + return NewBasicSignCertInputWithIgnore(csr, isCA, useCSRValues, false) +} + +func NewBasicSignCertInputWithIgnore(csr *x509.CertificateRequest, isCA, useCSRValues, ignoreCsrSignature bool) BasicSignCertInput { + return BasicSignCertInput{ + isCA: isCA, + useCSRValues: useCSRValues, + csr: csr, + ignoreCsrSignature: ignoreCsrSignature, + } +} + +var _ SignCertInput = BasicSignCertInput{} + +type BasicSignCertInput struct { + isCA bool + useCSRValues bool + csr *x509.CertificateRequest + ignoreCsrSignature bool +} + +func (b BasicSignCertInput) GetTTL() int { + return 0 +} + +func (b BasicSignCertInput) GetOptionalNotAfter() (interface{}, bool) { + return "", false +} + +func (b BasicSignCertInput) GetCommonName() string { + return "" +} + +func (b BasicSignCertInput) GetSerialNumber() string { + return "" +} + +func (b BasicSignCertInput) GetExcludeCnFromSans() bool { + return false +} + +func (b BasicSignCertInput) GetOptionalAltNames() (interface{}, bool) { + return []string{}, false +} + +func (b BasicSignCertInput) GetOtherSans() []string { + return []string{} +} + +func (b BasicSignCertInput) GetIpSans() []string { + return []string{} +} + +func (b BasicSignCertInput) GetURISans() []string { + return []string{} +} + +func (b BasicSignCertInput) GetOptionalSkid() (interface{}, bool) { + return "", false +} + +func (b BasicSignCertInput) IsUserIdInSchema() (interface{}, bool) { + return []string{}, false +} + +func (b BasicSignCertInput) GetUserIds() []string { + return []string{} +} + +func (b BasicSignCertInput) GetCSR() (*x509.CertificateRequest, error) { + return b.csr, nil +} + +func (b BasicSignCertInput) IgnoreCSRSignature() bool { + return b.ignoreCsrSignature +} + +func (b BasicSignCertInput) IsCA() bool { + return b.isCA +} + +func (b BasicSignCertInput) UseCSRValues() bool { + return b.useCSRValues +} + +func (b BasicSignCertInput) GetPermittedDomains() []string { + return []string{} +} + +func SignCert(b logical.SystemView, role 
*RoleEntry, entityInfo EntityInfo, caSign *certutil.CAInfoBundle, signInput SignCertInput) (*certutil.ParsedCertBundle, []string, error) {
+	if role == nil {
+		return nil, nil, errutil.InternalError{Err: "no role found in data bundle"}
+	}
+
+	csr, err := signInput.GetCSR()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil {
+		return nil, nil, errutil.UserError{Err: "Refusing to sign CSR with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."}
+	}
+
+	// This switch validates that the CSR key type matches the role and sets
+	// the actualKeyType/actualKeyBits values accordingly.
+	actualKeyType := ""
+	actualKeyBits := 0
+
+	switch role.KeyType {
+	case "rsa":
+		// Verify that the key matches the role type
+		if csr.PublicKeyAlgorithm != x509.RSA {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf("role requires keys of type %s", role.KeyType)}
+		}
+
+		pubKey, ok := csr.PublicKey.(*rsa.PublicKey)
+		if !ok {
+			return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"}
+		}
+
+		actualKeyType = "rsa"
+		actualKeyBits = pubKey.N.BitLen()
+	case "ec":
+		// Verify that the key matches the role type
+		if csr.PublicKeyAlgorithm != x509.ECDSA {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"role requires keys of type %s",
+				role.KeyType)}
+		}
+		pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey)
+		if !ok {
+			return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"}
+		}
+
+		actualKeyType = "ec"
+		actualKeyBits = pubKey.Params().BitSize
+	case "ed25519":
+		// Verify that the key matches the role type
+		if csr.PublicKeyAlgorithm != x509.Ed25519 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"role requires keys of type %s",
+				role.KeyType)}
+		}
+
+		_, ok := csr.PublicKey.(ed25519.PublicKey)
+		if !ok {
+			return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"}
+		}
+
+		actualKeyType = "ed25519"
+		actualKeyBits = 0
+	case "any":
+		// We need to compute the actual key type and key bits, to correctly
+		// validate minimums and SignatureBits below.
+		switch csr.PublicKeyAlgorithm {
+		case x509.RSA:
+			pubKey, ok := csr.PublicKey.(*rsa.PublicKey)
+			if !ok {
+				return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"}
+			}
+			if pubKey.N.BitLen() < 2048 {
+				return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
+			}
+
+			actualKeyType = "rsa"
+			actualKeyBits = pubKey.N.BitLen()
+		case x509.ECDSA:
+			pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey)
+			if !ok {
+				return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"}
+			}
+
+			actualKeyType = "ec"
+			actualKeyBits = pubKey.Params().BitSize
+		case x509.Ed25519:
+			_, ok := csr.PublicKey.(ed25519.PublicKey)
+			if !ok {
+				return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"}
+			}
+
+			actualKeyType = "ed25519"
+			actualKeyBits = 0
+		default:
+			return nil, nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()}
+		}
+	default:
+		return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type value: %s", role.KeyType)}
+	}
+
+	// Before validating key lengths, update our KeyBits/SignatureBits based
+	// on the actual CSR key type.
+	if role.KeyType == "any" {
+		// We update the value of KeyBits and SignatureBits here (from the
+		// role), using the specified key type. This allows us to convert
+		// the default value (0) for SignatureBits and KeyBits to a
+		// meaningful value.
+		//
+		// We ignore the role's original KeyBits value if the KeyType is any,
+		// as legacy (pre-1.10) roles had default values that made sense only
+		// for RSA keys (key_bits=2048) and the older code paths ignored the role value
+		// set for KeyBits when KeyType was set to any. This also enforces the
+		// docs saying when key_type=any, we only enforce our specified minimums
+		// for signing operations.
+		var err error
+		if role.KeyBits, role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength(
+			actualKeyType, 0, role.SignatureBits); err != nil {
+			return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)}
+		}
+
+		// We're using the KeyBits field as a minimum value below, and P-224 is safe
+		// and a previously allowed value. However, the above call defaults
+		// to P-256 as that's a saner default than P-224 (w.r.t. generation), so
+		// override it here to allow 224 as the smallest size we permit.
+		if actualKeyType == "ec" {
+			role.KeyBits = 224
+		}
+	}
+
+	// At this point, role.KeyBits and role.SignatureBits should both
+	// be non-zero for RSA and ECDSA keys. Validate the actualKeyBits based on
+	// the role's values. If the KeyType was any, and KeyBits was set to 0,
+	// KeyBits should be updated to 2048 unless some other value was chosen
+	// explicitly.
+	//
+	// This validation needs to occur regardless of the role's key type, so
+	// that we always validate both RSA and ECDSA key sizes.
+	if actualKeyType == "rsa" {
+		if actualKeyBits < role.KeyBits {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"role requires a minimum of a %d-bit key, but CSR's key is %d bits",
+				role.KeyBits, actualKeyBits)}
+		}
+
+		if actualKeyBits < 2048 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"Vault requires a minimum of a 2048-bit key, but CSR's key is %d bits",
+				actualKeyBits)}
+		}
+	} else if actualKeyType == "ec" {
+		if actualKeyBits < role.KeyBits {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"role requires a minimum of a %d-bit key, but CSR's key is %d bits",
+				role.KeyBits,
+				actualKeyBits)}
+		}
+	}
+
+	creation, warnings, err := GenerateCreationBundle(b, role, entityInfo, signInput, caSign, csr)
+	if err != nil {
+		return nil, nil, err
+	}
+	if creation.Params == nil {
+		return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"}
+	}
+
+	creation.Params.IsCA = signInput.IsCA()
+	creation.Params.UseCSRValues = signInput.UseCSRValues()
+
+	if signInput.IsCA() {
+		creation.Params.PermittedDNSDomains = signInput.GetPermittedDomains()
+	} else {
+		for _, ext := range csr.Extensions {
+			if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) {
+				warnings = append(warnings, "specified CSR contained a Basic Constraints extension that was ignored during issuance")
+			}
+		}
+	}
+
+	parsedBundle, err := certutil.SignCertificate(creation)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return parsedBundle, warnings, nil
+}
diff --git a/builtin/logical/pki/key_util.go b/builtin/logical/pki/key_util.go
index 5f2d19c65dad..6fe17d41e1cd 100644
--- a/builtin/logical/pki/key_util.go
+++ b/builtin/logical/pki/key_util.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -10,12 +10,14 @@ import ( "errors" "fmt" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" ) -func comparePublicKey(sc *storageContext, key *keyEntry, publicKey crypto.PublicKey) (bool, error) { - publicKeyForKeyEntry, err := getPublicKey(sc.Context, sc.Backend, key) +func comparePublicKey(sc *storageContext, key *issuing.KeyEntry, publicKey crypto.PublicKey) (bool, error) { + publicKeyForKeyEntry, err := getPublicKey(sc.Context, sc.GetPkiManagedView(), key) if err != nil { return false, err } @@ -23,13 +25,9 @@ func comparePublicKey(sc *storageContext, key *keyEntry, publicKey crypto.Public return certutil.ComparePublicKeysAndType(publicKeyForKeyEntry, publicKey) } -func getPublicKey(ctx context.Context, b *backend, key *keyEntry) (crypto.PublicKey, error) { +func getPublicKey(ctx context.Context, mkv managed_key.PkiManagedKeyView, key *issuing.KeyEntry) (crypto.PublicKey, error) { if key.PrivateKeyType == certutil.ManagedPrivateKey { - keyId, err := extractManagedKeyId([]byte(key.PrivateKey)) - if err != nil { - return nil, err - } - return getManagedKeyPublicKey(ctx, b, keyId) + return managed_key.GetPublicKeyFromKeyBytes(ctx, mkv, []byte(key.PrivateKey)) } signer, _, _, err := getSignerFromKeyEntryBytes(key) @@ -39,7 +37,7 @@ func getPublicKey(ctx context.Context, b *backend, key *keyEntry) (crypto.Public return signer.Public(), nil } -func getSignerFromKeyEntryBytes(key *keyEntry) (crypto.Signer, certutil.BlockType, *pem.Block, error) { +func getSignerFromKeyEntryBytes(key *issuing.KeyEntry) (crypto.Signer, certutil.BlockType, *pem.Block, error) { if key.PrivateKeyType == certutil.UnknownPrivateKey { return nil, certutil.UnknownBlock, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported unknown private key type for key: %s (%s)", key.ID, key.Name)} } @@ -78,7 +76,7 @@ func getPublicKeyFromBytes(keyBytes []byte) (crypto.PublicKey, error) { return signer.Public(), nil } -func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*keyEntry, bool, error) { +func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*issuing.KeyEntry, bool, error) { signer, _, _, err := getSignerFromBytes([]byte(keyValue)) if err != nil { return nil, false, err diff --git a/builtin/logical/pki/managed_key/common.go b/builtin/logical/pki/managed_key/common.go new file mode 100644 index 000000000000..4637dadb4602 --- /dev/null +++ b/builtin/logical/pki/managed_key/common.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package managed_key + +import ( + "crypto" + "io" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type ManagedKeyInfo struct { + publicKey crypto.PublicKey + KeyType certutil.PrivateKeyType + Name NameKey + Uuid UUIDKey +} + +type managedKeyId interface { + String() string +} + +type PkiManagedKeyView interface { + BackendUUID() string + IsSecondaryNode() bool + GetManagedKeyView() (logical.ManagedKeySystemView, error) + GetRandomReader() io.Reader +} + +type ( + UUIDKey string + NameKey string +) + +func (u UUIDKey) String() string { + return string(u) +} + +func (n NameKey) String() string { + return string(n) +} diff --git a/builtin/logical/pki/managed_key/managed_key_util_oss.go b/builtin/logical/pki/managed_key/managed_key_util_oss.go new file mode 100644 index 000000000000..ad92b39c6c19 --- /dev/null +++ b/builtin/logical/pki/managed_key/managed_key_util_oss.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package managed_key + +import ( + "context" + "crypto" + "errors" + "io" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +var errEntOnly = errors.New("managed keys are supported within enterprise edition only") + +func GetPublicKeyFromKeyBytes(ctx context.Context, mkv PkiManagedKeyView, keyBytes []byte) (crypto.PublicKey, error) { + return nil, errEntOnly +} + +func GenerateManagedKeyCABundle(ctx context.Context, b PkiManagedKeyView, keyId managedKeyId, data *certutil.CreationBundle, randomSource io.Reader) (bundle *certutil.ParsedCertBundle, err error) { + return nil, errEntOnly +} + +func GenerateManagedKeyCSRBundle(ctx context.Context, b PkiManagedKeyView, keyId managedKeyId, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (bundle *certutil.ParsedCSRBundle, err error) { + return nil, errEntOnly +} + +func GetManagedKeyPublicKey(ctx context.Context, b PkiManagedKeyView, keyId managedKeyId) (crypto.PublicKey, error) { + return nil, errEntOnly +} + +func ParseManagedKeyCABundle(ctx context.Context, mkv PkiManagedKeyView, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + return nil, errEntOnly +} + +func ExtractManagedKeyId(privateKeyBytes []byte) (UUIDKey, error) { + return "", errEntOnly +} + +func CreateKmsKeyBundle(ctx context.Context, mkv PkiManagedKeyView, keyId managedKeyId) (certutil.KeyBundle, certutil.PrivateKeyType, error) { + return certutil.KeyBundle{}, certutil.UnknownPrivateKey, errEntOnly +} + +func GetManagedKeyInfo(ctx context.Context, mkv PkiManagedKeyView, keyId managedKeyId) (*ManagedKeyInfo, error) { + return nil, errEntOnly +} diff --git a/builtin/logical/pki/managed_key_util.go b/builtin/logical/pki/managed_key_util.go deleted file mode 100644 index 42e031deceb3..000000000000 --- a/builtin/logical/pki/managed_key_util.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -//go:build !enterprise - -package pki - -import ( - "context" - "crypto" - "errors" - "io" - - "github.com/hashicorp/vault/sdk/helper/certutil" -) - -var errEntOnly = errors.New("managed keys are supported within enterprise edition only") - -func generateManagedKeyCABundle(ctx context.Context, b *backend, keyId managedKeyId, data *certutil.CreationBundle, randomSource io.Reader) (bundle *certutil.ParsedCertBundle, err error) { - return nil, errEntOnly -} - -func generateManagedKeyCSRBundle(ctx context.Context, b *backend, keyId managedKeyId, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (bundle *certutil.ParsedCSRBundle, err error) { - return nil, errEntOnly -} - -func getManagedKeyPublicKey(ctx context.Context, b *backend, keyId managedKeyId) (crypto.PublicKey, error) { - return nil, errEntOnly -} - -func parseManagedKeyCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { - return nil, errEntOnly -} - -func extractManagedKeyId(privateKeyBytes []byte) (UUIDKey, error) { - return "", errEntOnly -} - -func createKmsKeyBundle(ctx context.Context, b *backend, keyId managedKeyId) (certutil.KeyBundle, certutil.PrivateKeyType, error) { - return certutil.KeyBundle{}, certutil.UnknownPrivateKey, errEntOnly -} - -func getManagedKeyInfo(ctx context.Context, b *backend, keyId managedKeyId) (*managedKeyInfo, error) { - return nil, errEntOnly -} diff --git a/builtin/logical/pki/metadata.pb.go b/builtin/logical/pki/metadata.pb.go new file mode 100644 index 000000000000..209f5fcce0f7 --- /dev/null +++ b/builtin/logical/pki/metadata.pb.go @@ -0,0 +1,188 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: builtin/logical/pki/metadata.proto + +package pki + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CertificateMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IssuerId string `protobuf:"bytes,1,opt,name=issuer_id,json=issuerId,proto3" json:"issuer_id,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + ClientMetadata []byte `protobuf:"bytes,4,opt,name=client_metadata,json=clientMetadata,proto3,oneof" json:"client_metadata,omitempty"` +} + +func (x *CertificateMetadata) Reset() { + *x = CertificateMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_builtin_logical_pki_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateMetadata) ProtoMessage() {} + +func (x *CertificateMetadata) ProtoReflect() protoreflect.Message { + mi := &file_builtin_logical_pki_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateMetadata.ProtoReflect.Descriptor instead. +func (*CertificateMetadata) Descriptor() ([]byte, []int) { + return file_builtin_logical_pki_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *CertificateMetadata) GetIssuerId() string { + if x != nil { + return x.IssuerId + } + return "" +} + +func (x *CertificateMetadata) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *CertificateMetadata) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +func (x *CertificateMetadata) GetClientMetadata() []byte { + if x != nil { + return x.ClientMetadata + } + return nil +} + +var File_builtin_logical_pki_metadata_proto protoreflect.FileDescriptor + +var file_builtin_logical_pki_metadata_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x69, 0x6e, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, + 0x6c, 0x2f, 0x70, 0x6b, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x70, 0x6b, 0x69, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x01, 0x0a, 0x13, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2c, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x65, 0x74, 
0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, + 0x10, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, + 0x62, 0x75, 0x69, 0x6c, 0x74, 0x69, 0x6e, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, + 0x70, 0x6b, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_builtin_logical_pki_metadata_proto_rawDescOnce sync.Once + file_builtin_logical_pki_metadata_proto_rawDescData = file_builtin_logical_pki_metadata_proto_rawDesc +) + +func file_builtin_logical_pki_metadata_proto_rawDescGZIP() []byte { + file_builtin_logical_pki_metadata_proto_rawDescOnce.Do(func() { + file_builtin_logical_pki_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_builtin_logical_pki_metadata_proto_rawDescData) + }) + return file_builtin_logical_pki_metadata_proto_rawDescData +} + +var file_builtin_logical_pki_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_builtin_logical_pki_metadata_proto_goTypes = []any{ + (*CertificateMetadata)(nil), // 0: pki.CertificateMetadata + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp +} +var file_builtin_logical_pki_metadata_proto_depIdxs = []int32{ + 1, // 0: pki.CertificateMetadata.expiration:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_builtin_logical_pki_metadata_proto_init() } +func file_builtin_logical_pki_metadata_proto_init() { + if File_builtin_logical_pki_metadata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_builtin_logical_pki_metadata_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CertificateMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_builtin_logical_pki_metadata_proto_msgTypes[0].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_builtin_logical_pki_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_builtin_logical_pki_metadata_proto_goTypes, + DependencyIndexes: file_builtin_logical_pki_metadata_proto_depIdxs, + MessageInfos: file_builtin_logical_pki_metadata_proto_msgTypes, + }.Build() + File_builtin_logical_pki_metadata_proto = out.File + file_builtin_logical_pki_metadata_proto_rawDesc = nil + file_builtin_logical_pki_metadata_proto_goTypes = nil + file_builtin_logical_pki_metadata_proto_depIdxs = nil +} diff --git a/builtin/logical/pki/metadata.proto b/builtin/logical/pki/metadata.proto new file mode 100644 index 000000000000..0d0de5473d4a --- /dev/null +++ b/builtin/logical/pki/metadata.proto @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +syntax = "proto3"; + +package pki; + +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/hashicorp/vault/builtin/logical/pki"; + +message CertificateMetadata { + string issuer_id = 1; + string role = 2; + google.protobuf.Timestamp expiration = 3; + optional bytes client_metadata = 4; +} diff --git a/builtin/logical/pki/metadata_oss.go b/builtin/logical/pki/metadata_oss.go new file mode 100644 index 000000000000..fe64d57c303c --- /dev/null +++ b/builtin/logical/pki/metadata_oss.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +import ( + "context" + "crypto/x509" + "errors" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/logical" +) + +var ErrMetadataIsEntOnly = errors.New("certificate metadata is only supported on Vault Enterprise") + +func storeCertMetadata(ctx context.Context, storage logical.Storage, issuerId issuing.IssuerID, role string, certificate *x509.Certificate, certMetadata interface{}) error { + return ErrMetadataIsEntOnly +} + +func (b *backend) doTidyCertMetadata(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + return ErrMetadataIsEntOnly +} + +func validateCertMetadataConfiguration(role *issuing.RoleEntry) error { + return ErrMetadataIsEntOnly +} diff --git a/builtin/logical/pki/metrics.go b/builtin/logical/pki/metrics.go new file mode 100644 index 000000000000..21264e3de513 --- /dev/null +++ b/builtin/logical/pki/metrics.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "errors" + "sort" + "strings" + "sync/atomic" + + "github.com/armon/go-metrics" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" +) + +type CertificateCounter struct { + certCountEnabled *atomic.Bool + publishCertCountMetrics *atomic.Bool + certCount *atomic.Uint32 + revokedCertCount *atomic.Uint32 + certsCounted *atomic.Bool + certCountError error + possibleDoubleCountedSerials []string + possibleDoubleCountedRevokedSerials []string + backendUuid string +} + +func (c *CertificateCounter) IsInitialized() bool { + return c.certsCounted.Load() +} + +func (c *CertificateCounter) IsEnabled() bool { + return c.certCountEnabled.Load() +} + +func (c *CertificateCounter) Error() error { + return c.certCountError +} + +func (c *CertificateCounter) SetError(err error) { + c.certCountError = err +} + +func (c *CertificateCounter) ReconfigureWithTidyConfig(config *tidyConfig) bool { + if config.MaintainCount { + c.enableCertCounting(config.PublishMetrics) + } else { + c.disableCertCounting() + } + + return config.MaintainCount +} + +func (c *CertificateCounter) disableCertCounting() { + c.possibleDoubleCountedRevokedSerials = nil + c.possibleDoubleCountedSerials = nil + c.certsCounted.Store(false) + c.certCount.Store(0) + c.revokedCertCount.Store(0) + c.certCountError = errors.New("Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts") + c.certCountEnabled.Store(false) + c.publishCertCountMetrics.Store(false) +} + +func (c *CertificateCounter) enableCertCounting(publishMetrics bool) { + c.publishCertCountMetrics.Store(publishMetrics) + c.certCountEnabled.Store(true) + + if !c.certsCounted.Load() { + c.certCountError = errors.New("Certificate Counting Has Not Been Initialized, re-initialize this mount") + } +} + +func (c 
*CertificateCounter) InitializeCountsFromStorage(certs, revoked []string) { + c.certCount.Add(uint32(len(certs))) + c.revokedCertCount.Add(uint32(len(revoked))) + + c.pruneDuplicates(certs, revoked) + c.certCountError = nil + c.certsCounted.Store(true) + + c.emitTotalCertCountMetric() +} + +func (c *CertificateCounter) pruneDuplicates(entries, revokedEntries []string) { + // Now that the metrics are set, we can switch from appending newly-stored certificates to the possible double-count + // list, and instead have them update the counter directly. We need to do this so that we are looking at a static + // slice of possibly double counted serials. Note that certsCounted is computed before the storage operation, so + // there may be some delay here. + + // Sort the listed-entries first, to accommodate that delay. + sort.Slice(entries, func(i, j int) bool { + return entries[i] < entries[j] + }) + + sort.Slice(revokedEntries, func(i, j int) bool { + return revokedEntries[i] < revokedEntries[j] + }) + + // We assume here that these lists are now complete. + sort.Slice(c.possibleDoubleCountedSerials, func(i, j int) bool { + return c.possibleDoubleCountedSerials[i] < c.possibleDoubleCountedSerials[j] + }) + + listEntriesIndex := 0 + possibleDoubleCountIndex := 0 + for { + if listEntriesIndex >= len(entries) { + break + } + if possibleDoubleCountIndex >= len(c.possibleDoubleCountedSerials) { + break + } + if entries[listEntriesIndex] == c.possibleDoubleCountedSerials[possibleDoubleCountIndex] { + // This represents a double-counted entry + c.decrementTotalCertificatesCountNoReport() + listEntriesIndex = listEntriesIndex + 1 + possibleDoubleCountIndex = possibleDoubleCountIndex + 1 + continue + } + if entries[listEntriesIndex] < c.possibleDoubleCountedSerials[possibleDoubleCountIndex] { + listEntriesIndex = listEntriesIndex + 1 + continue + } + if entries[listEntriesIndex] > c.possibleDoubleCountedSerials[possibleDoubleCountIndex] { + possibleDoubleCountIndex = possibleDoubleCountIndex + 1 + continue + } + } + + sort.Slice(c.possibleDoubleCountedRevokedSerials, func(i, j int) bool { + return c.possibleDoubleCountedRevokedSerials[i] < c.possibleDoubleCountedRevokedSerials[j] + }) + + listRevokedEntriesIndex := 0 + possibleRevokedDoubleCountIndex := 0 + for { + if listRevokedEntriesIndex >= len(revokedEntries) { + break + } + if possibleRevokedDoubleCountIndex >= len(c.possibleDoubleCountedRevokedSerials) { + break + } + if revokedEntries[listRevokedEntriesIndex] == c.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { + // This represents a double-counted revoked entry + c.decrementTotalRevokedCertificatesCountNoReport() + listRevokedEntriesIndex = listRevokedEntriesIndex + 1 + possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 + continue + } + if revokedEntries[listRevokedEntriesIndex] < c.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { + listRevokedEntriesIndex = listRevokedEntriesIndex + 1 + continue + } + if revokedEntries[listRevokedEntriesIndex] > c.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { + possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 + continue + } + } + + c.possibleDoubleCountedRevokedSerials = nil + c.possibleDoubleCountedSerials = nil +} + +func (c *CertificateCounter) decrementTotalCertificatesCountNoReport() uint32 { + newCount := c.certCount.Add(^uint32(0)) + return newCount +} + +func (c *CertificateCounter) decrementTotalRevokedCertificatesCountNoReport() uint32 { + 
newRevokedCertCount := c.revokedCertCount.Add(^uint32(0)) + return newRevokedCertCount +} + +func (c *CertificateCounter) CertificateCount() uint32 { + return c.certCount.Load() +} + +func (c *CertificateCounter) RevokedCount() uint32 { + return c.revokedCertCount.Load() +} + +func (c *CertificateCounter) IncrementTotalCertificatesCount(certsCounted bool, newSerial string) { + if c.certCountEnabled.Load() { + c.certCount.Add(1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, issuing.PathCerts) { + newSerial = newSerial[6:] + } + c.possibleDoubleCountedSerials = append(c.possibleDoubleCountedSerials, newSerial) + default: + c.emitTotalCertCountMetric() + } + } +} + +// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: +// eg. certsCounted := certCounter.IsInitialized() +func (c *CertificateCounter) IncrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { + if c.certCountEnabled.Load() { + c.revokedCertCount.Add(1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial + newSerial = newSerial[8:] + } + c.possibleDoubleCountedRevokedSerials = append(c.possibleDoubleCountedRevokedSerials, newSerial) + default: + c.emitTotalRevokedCountMetric() + } + } +} + +func (c *CertificateCounter) DecrementTotalCertificatesCountReport() { + if c.certCountEnabled.Load() { + c.decrementTotalCertificatesCountNoReport() + c.emitTotalCertCountMetric() + } +} + +func (c *CertificateCounter) DecrementTotalRevokedCertificatesCountReport() { + if c.certCountEnabled.Load() { + c.decrementTotalRevokedCertificatesCountNoReport() + c.emitTotalRevokedCountMetric() + } +} + +func (c *CertificateCounter) EmitCertStoreMetrics() { + c.emitTotalCertCountMetric() + c.emitTotalRevokedCountMetric() +} + +func (c *CertificateCounter) emitTotalCertCountMetric() { + if c.publishCertCountMetrics.Load() { + certCount := float32(c.CertificateCount()) + metrics.SetGauge([]string{"secrets", "pki", c.backendUuid, "total_certificates_stored"}, certCount) + } +} + +func (c *CertificateCounter) emitTotalRevokedCountMetric() { + if c.publishCertCountMetrics.Load() { + revokedCount := float32(c.RevokedCount()) + metrics.SetGauge([]string{"secrets", "pki", c.backendUuid, "total_revoked_certificates_stored"}, revokedCount) + } +} + +func NewCertificateCounter(backendUuid string) *CertificateCounter { + counter := &CertificateCounter{ + backendUuid: backendUuid, + certCountEnabled: &atomic.Bool{}, + publishCertCountMetrics: &atomic.Bool{}, + certCount: &atomic.Uint32{}, + revokedCertCount: &atomic.Uint32{}, + certsCounted: &atomic.Bool{}, + certCountError: errors.New("Initialize Not Yet Run, Cert Counts Unavailable"), + possibleDoubleCountedSerials: make([]string, 0, 250), + possibleDoubleCountedRevokedSerials: make([]string, 0, 250), + } + + return counter +} diff --git a/builtin/logical/pki/parsing/certificate.go b/builtin/logical/pki/parsing/certificate.go new file mode 100644 index 000000000000..9cd52af46e52 --- /dev/null +++ b/builtin/logical/pki/parsing/certificate.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package parsing + +import ( + "crypto/x509" + "fmt" + "math/big" + "strings" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +func SerialFromCert(cert *x509.Certificate) string { + return SerialFromBigInt(cert.SerialNumber) +} + +func SerialFromBigInt(serial *big.Int) string { + return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) +} + +// NormalizeSerialForStorageFromBigInt given a serial number, format it as a string +// that is safe to store within a filesystem +func NormalizeSerialForStorageFromBigInt(serial *big.Int) string { + return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), "-")) +} + +// NormalizeSerialForStorage given a serial number with ':' characters, convert +// them to '-' which is safe to store within filesystems +func NormalizeSerialForStorage(serial string) string { + return strings.ReplaceAll(strings.ToLower(serial), ":", "-") +} + +func ParseCertificateFromString(pemCert string) (*x509.Certificate, error) { + return ParseCertificateFromBytes([]byte(pemCert)) +} + +func ParseCertificateFromBytes(certBytes []byte) (*x509.Certificate, error) { + block, err := DecodePem(certBytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + return cert, nil +} + +func ParseCertificatesFromString(pemCerts string) ([]*x509.Certificate, error) { + return ParseCertificatesFromBytes([]byte(pemCerts)) +} + +func ParseCertificatesFromBytes(certBytes []byte) ([]*x509.Certificate, error) { + block, err := DecodePem(certBytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + cert, err := x509.ParseCertificates(block.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + return cert, nil +} + +func ParseKeyUsages(input []string) int { + var parsedKeyUsages x509.KeyUsage + for _, k := range input { + switch strings.ToLower(strings.TrimSpace(k)) { + case "digitalsignature": + parsedKeyUsages |= x509.KeyUsageDigitalSignature + case "contentcommitment": + parsedKeyUsages |= x509.KeyUsageContentCommitment + case "keyencipherment": + parsedKeyUsages |= x509.KeyUsageKeyEncipherment + case "dataencipherment": + parsedKeyUsages |= x509.KeyUsageDataEncipherment + case "keyagreement": + parsedKeyUsages |= x509.KeyUsageKeyAgreement + case "certsign": + parsedKeyUsages |= x509.KeyUsageCertSign + case "crlsign": + parsedKeyUsages |= x509.KeyUsageCRLSign + case "encipheronly": + parsedKeyUsages |= x509.KeyUsageEncipherOnly + case "decipheronly": + parsedKeyUsages |= x509.KeyUsageDecipherOnly + } + } + + return int(parsedKeyUsages) +} diff --git a/builtin/logical/pki/parsing/csrs.go b/builtin/logical/pki/parsing/csrs.go new file mode 100644 index 000000000000..34d6c11be31a --- /dev/null +++ b/builtin/logical/pki/parsing/csrs.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package parsing + +import ( + "crypto/x509" + "fmt" +) + +func ParseCertificateRequestFromString(pemCert string) (*x509.CertificateRequest, error) { + return ParseCertificateRequestFromBytes([]byte(pemCert)) +} + +func ParseCertificateRequestFromBytes(certBytes []byte) (*x509.CertificateRequest, error) { + block, err := DecodePem(certBytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate request: %w", err) + } + + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate request: %w", err) + } + + return csr, nil +} diff --git a/builtin/logical/pki/parsing/pem.go b/builtin/logical/pki/parsing/pem.go new file mode 100644 index 000000000000..aa5513ab17c4 --- /dev/null +++ b/builtin/logical/pki/parsing/pem.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package parsing + +import ( + "encoding/pem" + "errors" + "strings" +) + +func DecodePem(certBytes []byte) (*pem.Block, error) { + block, extra := pem.Decode(certBytes) + if block == nil { + return nil, errors.New("invalid PEM") + } + if len(strings.TrimSpace(string(extra))) > 0 { + return nil, errors.New("trailing PEM data") + } + return block, nil +} diff --git a/builtin/logical/pki/path_acme_account.go b/builtin/logical/pki/path_acme_account.go new file mode 100644 index 000000000000..20804a71735a --- /dev/null +++ b/builtin/logical/pki/path_acme_account.go @@ -0,0 +1,481 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "net/http" + "path" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func uuidNameRegex(name string) string { + return fmt.Sprintf("(?P<%s>[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}?)", name) +} + +func pathAcmeNewAccount(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNewAccount(b, baseUrl+"/new-account", opts) +} + +func pathAcmeUpdateAccount(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNewAccount(b, baseUrl+"/account/"+uuidNameRegex("kid"), opts) +} + +func addFieldsForACMEPath(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema { + if strings.Contains(pattern, framework.GenericNameRegex("role")) { + fields["role"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The desired role for the acme request`, + Required: true, + } + } + if strings.Contains(pattern, framework.GenericNameRegex(issuerRefParam)) { + fields[issuerRefParam] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Reference to an existing issuer name or issuer id`, + Required: true, + } + } + if strings.Contains(pattern, framework.GenericNameRegex("policy")) { + fields["policy"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The policy name to pass through to the CIEPS service`, + Required: true, + } + } + + return fields +} + +func addFieldsForACMERequest(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["protected"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME request 'protected' value", + Required: false, + } + + fields["payload"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME 
request 'payload' value",
+		Required:    false,
+	}
+
+	fields["signature"] = &framework.FieldSchema{
+		Type:        framework.TypeString,
+		Description: "ACME request 'signature' value",
+		Required:    false,
+	}
+
+	return fields
+}
+
+func addFieldsForACMEKidRequest(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema {
+	if strings.Contains(pattern, uuidNameRegex("kid")) {
+		fields["kid"] = &framework.FieldSchema{
+			Type:        framework.TypeString,
+			Description: `The key identifier provided by the CA`,
+			Required:    true,
+		}
+	}
+
+	return fields
+}
+
+func patternAcmeNewAccount(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path {
+	fields := map[string]*framework.FieldSchema{}
+	addFieldsForACMEPath(fields, pattern)
+	addFieldsForACMERequest(fields)
+	addFieldsForACMEKidRequest(fields, pattern)
+
+	return &framework.Path{
+		Pattern: pattern,
+		Fields:  fields,
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback:                    b.acmeParsedWrapper(opts, b.acmeNewAccountHandler),
+				ForwardPerformanceSecondary: false,
+				ForwardPerformanceStandby:   true,
+			},
+		},
+
+		HelpSynopsis:    pathAcmeHelpSync,
+		HelpDescription: pathAcmeHelpDesc,
+	}
+}
+
+func (b *backend) acmeNewAccountHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) {
+	// Parameters
+	var ok bool
+	var onlyReturnExisting bool
+	var contacts []string
+	var termsOfServiceAgreed bool
+	var status string
+	var eabData map[string]interface{}
+
+	rawContact, present := data["contact"]
+	if present {
+		listContact, ok := rawContact.([]interface{})
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'contact': %w", rawContact, ErrMalformed)
+		}
+
+		for index, singleContact := range listContact {
+			contact, ok := singleContact.(string)
+			if !ok {
+				return nil, fmt.Errorf("invalid type (%T) for field 'contact' item %d: %w", singleContact, index, ErrMalformed)
+			}
+
+			contacts = append(contacts, contact)
+		}
+	}
+
+	rawTermsOfServiceAgreed, present := data["termsOfServiceAgreed"]
+	if present {
+		termsOfServiceAgreed, ok = rawTermsOfServiceAgreed.(bool)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'termsOfServiceAgreed': %w", rawTermsOfServiceAgreed, ErrMalformed)
+		}
+	}
+
+	rawOnlyReturnExisting, present := data["onlyReturnExisting"]
+	if present {
+		onlyReturnExisting, ok = rawOnlyReturnExisting.(bool)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'onlyReturnExisting': %w", rawOnlyReturnExisting, ErrMalformed)
+		}
+	}
+
+	// Per RFC 8555 7.3.6 Account deactivation, we will handle it within our update API.
+	rawStatus, present := data["status"]
+	if present {
+		status, ok = rawStatus.(string)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'status': %w", rawStatus, ErrMalformed)
+		}
+	}
+
+	if eabDataRaw, ok := data["externalAccountBinding"]; ok {
+		eabData, ok = eabDataRaw.(map[string]interface{})
+		if !ok {
+			return nil, fmt.Errorf("%w: externalAccountBinding field was unparseable", ErrMalformed)
+		}
+	}
+
+	// We have two paths here: search or create.
+	if onlyReturnExisting {
+		return b.acmeAccountSearchHandler(acmeCtx, userCtx)
+	}
+
+	// Pass through the /new-account API calls to this specific handler as its requirements are different
+	// from the account update handler.
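+	// (Editorial note: per RFC 8555 Section 6.2, newAccount requests are signed with an
+	// embedded "jwk" header, while updates to an existing account are signed against the
+	// account's "kid" URL, which is why the two flows diverge here.)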
+ if strings.HasSuffix(r.Path, "/new-account") { + return b.acmeNewAccountCreateHandler(acmeCtx, userCtx, contacts, termsOfServiceAgreed, r, eabData) + } + + return b.acmeNewAccountUpdateHandler(acmeCtx, userCtx, contacts, status, eabData) +} + +func formatNewAccountResponse(acmeCtx *acmeContext, acct *acmeAccount, eabData map[string]interface{}) *logical.Response { + resp := formatAccountResponse(acmeCtx, acct) + + // Per RFC 8555 Section 7.1.2. Account Objects + // Including this field in a newAccount request indicates approval by + // the holder of an existing non-ACME account to bind that account to + // this ACME account + if acct.Eab != nil && len(eabData) != 0 { + resp.Data["externalAccountBinding"] = eabData + } + + return resp +} + +func formatAccountResponse(acmeCtx *acmeContext, acct *acmeAccount) *logical.Response { + location := acmeCtx.baseUrl.String() + "account/" + acct.KeyId + + resp := &logical.Response{ + Data: map[string]interface{}{ + "status": acct.Status, + "orders": location + "/orders", + }, + Headers: map[string][]string{ + "Location": {location}, + }, + } + + if len(acct.Contact) > 0 { + resp.Data["contact"] = acct.Contact + } + + return resp +} + +func (b *backend) acmeAccountSearchHandler(acmeCtx *acmeContext, userCtx *jwsCtx) (*logical.Response, error) { + thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) + } + + account, err := b.GetAcmeState().LoadAccountByKey(acmeCtx, thumbprint) + if err != nil { + return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) + } + + if account != nil { + if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { + return nil, err + } + return formatAccountResponse(acmeCtx, account), nil + } + + // Per RFC 8555 Section 7.3.1. Finding an Account URL Given a Key: + // + // > If a client sends such a request and an account does not exist, + // > then the server MUST return an error response with status code + // > 400 (Bad Request) and type "urn:ietf:params:acme:error:accountDoesNotExist". + return nil, fmt.Errorf("An account with this key does not exist: %w", ErrAccountDoesNotExist) +} + +func (b *backend) acmeNewAccountCreateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, termsOfServiceAgreed bool, r *logical.Request, eabData map[string]interface{}) (*logical.Response, error) { + if userCtx.Existing { + return nil, fmt.Errorf("cannot submit to newAccount with 'kid': %w", ErrMalformed) + } + + // If the account already exists, return the existing one. 
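+	// (Editorial note: RFC 8555 Section 7.3 requires that, when the server already has
+	// an account registered with the provided key, it return a 200 (OK) response with
+	// the existing account's URL in the Location header rather than create a duplicate.)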
+	thumbprint, err := userCtx.GetKeyThumbprint()
+	if err != nil {
+		return nil, fmt.Errorf("failed generating thumbprint for key: %w", err)
+	}
+
+	accountByKey, err := b.GetAcmeState().LoadAccountByKey(acmeCtx, thumbprint)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load account by thumbprint: %w", err)
+	}
+
+	if accountByKey != nil {
+		if err = acmeCtx.eabPolicy.EnforceForExistingAccount(accountByKey); err != nil {
+			return nil, err
+		}
+		return formatAccountResponse(acmeCtx, accountByKey), nil
+	}
+
+	var eab *eabType
+	if len(eabData) != 0 {
+		eab, err = verifyEabPayload(b.GetAcmeState(), acmeCtx, userCtx, r.Path, eabData)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Verify against our EAB policy
+	if err = acmeCtx.eabPolicy.EnforceForNewAccount(eab); err != nil {
+		return nil, err
+	}
+
+	// TODO: Limit this only when ToS are required or set by the operator; since we don't have a
+	// ToS URL in the directory at the moment, we cannot enforce this.
+	//if !termsOfServiceAgreed {
+	//	return nil, fmt.Errorf("terms of service not agreed to: %w", ErrUserActionRequired)
+	//}
+
+	if eab != nil {
+		// We delete the EAB to prevent future re-use after associating it with an account; worst
+		// case, if we fail creating the account we have simply nuked the EAB, and the caller can
+		// create another and retry
+		wasDeleted, err := b.GetAcmeState().DeleteEab(acmeCtx.sc, eab.KeyID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to delete eab reference: %w", err)
+		}
+
+		if !wasDeleted {
+			// Something consumed our EAB before we did; bail...
+			return nil, fmt.Errorf("eab was already used: %w", ErrUnauthorized)
+		}
+	}
+
+	b.acmeAccountLock.RLock() // Prevents account creation and tidy from interfering
+	defer b.acmeAccountLock.RUnlock()
+
+	accountByKid, err := b.GetAcmeState().CreateAccount(acmeCtx, userCtx, contact, termsOfServiceAgreed, eab)
+	if err != nil {
+		if eab != nil {
+			return nil, fmt.Errorf("failed to create account: %w; the EAB key used for this request has been deleted as a result of this operation; fetch a new EAB key before retrying", err)
+		}
+		return nil, fmt.Errorf("failed to create account: %w", err)
+	}
+
+	resp := formatNewAccountResponse(acmeCtx, accountByKid, eabData)
+
+	// Per RFC 8555 Section 7.3. Account Management:
+	//
+	// > The server returns this account object in a 201 (Created) response,
+	// > with the account URL in a Location header field.
+	resp.Data[logical.HTTPStatusCode] = http.StatusCreated
+	return resp, nil
+}
+
+func (b *backend) acmeNewAccountUpdateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, status string, eabData map[string]interface{}) (*logical.Response, error) {
+	if !userCtx.Existing {
+		return nil, fmt.Errorf("cannot submit to account updates without a 'kid': %w", ErrMalformed)
+	}
+
+	if len(eabData) != 0 {
+		return nil, fmt.Errorf("%w: not allowed to update EAB data in accounts", ErrMalformed)
+	}
+
+	account, err := b.GetAcmeState().LoadAccount(acmeCtx, userCtx.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+
+	if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil {
+		return nil, err
+	}
+
+	// Per RFC 8555 7.3.6 Account deactivation, if we were previously deactivated, we should return
+	// unauthorized. There is no way to reactivate any accounts per ACME RFC.
+	if account.Status != AccountStatusValid {
+		// Treating "revoked" and "deactivated" as the same here.
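+		// (Editorial note: RFC 8555 Section 7.3.6 requires a 401 (Unauthorized) response
+		// with problem type "urn:ietf:params:acme:error:unauthorized" for requests against
+		// a deactivated account; returning ErrUnauthorized presumably maps to that type.)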
+		return nil, ErrUnauthorized
+	}
+
+	shouldUpdate := false
+	// Check to see if we should update; we don't really care about ordering
+	if !strutil.EquivalentSlices(account.Contact, contact) {
+		shouldUpdate = true
+		account.Contact = contact
+	}
+
+	// Check whether account deactivation was requested.
+	// 7.3.6. Account Deactivation
+	if string(AccountStatusDeactivated) == status {
+		shouldUpdate = true
+		// TODO: This should cancel any ongoing operations (do not revoke certs),
+		// perhaps we should delete this account here?
+		account.Status = AccountStatusDeactivated
+		account.AccountRevokedDate = time.Now()
+	}
+
+	if shouldUpdate {
+		err = b.GetAcmeState().UpdateAccount(acmeCtx.sc, account)
+		if err != nil {
+			return nil, fmt.Errorf("failed to update account: %w", err)
+		}
+	}
+
+	resp := formatAccountResponse(acmeCtx, account)
+	return resp, nil
+}
+
+func (b *backend) tidyAcmeAccountByThumbprint(as *acmeState, sc *storageContext, keyThumbprint string, certTidyBuffer, accountTidyBuffer time.Duration) error {
+	thumbprintEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint))
+	if err != nil {
+		return fmt.Errorf("error retrieving thumbprint entry %v, unable to find corresponding account entry: %w", keyThumbprint, err)
+	}
+	if thumbprintEntry == nil {
+		return fmt.Errorf("empty thumbprint entry %v, unable to find corresponding account entry", keyThumbprint)
+	}
+
+	var thumbprint acmeThumbprint
+	err = thumbprintEntry.DecodeJSON(&thumbprint)
+	if err != nil {
+		return fmt.Errorf("unable to decode thumbprint entry %v to find account entry: %w", keyThumbprint, err)
+	}
+
+	if len(thumbprint.Kid) == 0 {
+		return fmt.Errorf("unable to find account entry: empty kid within thumbprint entry: %s", keyThumbprint)
+	}
+
+	// Now get the account:
+	accountEntry, err := sc.Storage.Get(sc.Context, acmeAccountPrefix+thumbprint.Kid)
+	if err != nil {
+		return err
+	}
+	if accountEntry == nil {
+		// We delete the thumbprint associated with the account, and we are done
+		err = sc.Storage.Delete(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint))
+		if err != nil {
+			return err
+		}
+		b.tidyStatusIncDeletedAcmeAccountCount()
+		return nil
+	}
+
+	var account acmeAccount
+	err = accountEntry.DecodeJSON(&account)
+	if err != nil {
+		return err
+	}
+	account.KeyId = thumbprint.Kid
+
+	// Tidy orders on the account
+	orderIds, err := as.ListOrderIds(sc, thumbprint.Kid)
+	if err != nil {
+		return err
+	}
+	allOrdersTidied := true
+	maxCertExpiryUpdated := false
+	for _, orderId := range orderIds {
+		wasTidied, orderExpiry, err := b.acmeTidyOrder(sc, thumbprint.Kid, getOrderPath(thumbprint.Kid, orderId), certTidyBuffer)
+		if err != nil {
+			return err
+		}
+		if !wasTidied {
+			allOrdersTidied = false
+		}
+
+		if !orderExpiry.IsZero() && account.MaxCertExpiry.Before(orderExpiry) {
+			account.MaxCertExpiry = orderExpiry
+			maxCertExpiryUpdated = true
+		}
+	}
+
+	now := time.Now()
+	if allOrdersTidied &&
+		now.After(account.AccountCreatedDate.Add(accountTidyBuffer)) &&
+		now.After(account.MaxCertExpiry.Add(accountTidyBuffer)) {
+		// Tidy this account
+		// If it is revoked or deactivated:
+		if (account.Status == AccountStatusRevoked || account.Status == AccountStatusDeactivated) && now.After(account.AccountRevokedDate.Add(accountTidyBuffer)) {
+			// We delete the account associated with this thumbprint:
+			err = sc.Storage.Delete(sc.Context, path.Join(acmeAccountPrefix, thumbprint.Kid))
+			if err != nil {
+				return err
+			}
+
+			// Now we delete the Thumbprint Associated 
with the Account: + err = sc.Storage.Delete(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return err + } + b.tidyStatusIncDeletedAcmeAccountCount() + } else if account.Status == AccountStatusValid { + // Revoke This Account + account.AccountRevokedDate = now + account.Status = AccountStatusRevoked + err := as.UpdateAccount(sc, &account) + if err != nil { + return err + } + b.tidyStatusIncRevAcmeAccountCount() + } + } + + // Only update the account if we modified the max cert expiry values and the account is still valid, + // to prevent us from adding back a deleted account or not re-writing the revoked account that was + // already written above. + if maxCertExpiryUpdated && account.Status == AccountStatusValid { + // Update our expiry time we previously setup. + err := as.UpdateAccount(sc, &account) + if err != nil { + return err + } + } + + return nil +} diff --git a/builtin/logical/pki/path_acme_authorizations.go b/builtin/logical/pki/path_acme_authorizations.go new file mode 100644 index 000000000000..983c55fee4e5 --- /dev/null +++ b/builtin/logical/pki/path_acme_authorizations.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeAuthorization(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeAuthorization(b, baseUrl+"/authorization/"+framework.MatchAllRegex("auth_id"), opts) +} + +func addFieldsForACMEAuthorization(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["auth_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME authorization identifier value", + Required: true, + } + + return fields +} + +func patternAcmeAuthorization(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEAuthorization(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeAuthorizationHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeAuthorizationHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + authId := fields.Get("auth_id").(string) + authz, err := b.GetAcmeState().LoadAuthorization(acmeCtx, userCtx, authId) + if err != nil { + return nil, fmt.Errorf("failed to load authorization: %w", err) + } + + var status string + rawStatus, haveStatus := data["status"] + if haveStatus { + var ok bool + status, ok = rawStatus.(string) + if !ok { + return nil, fmt.Errorf("bad type (%T) for value 'status': %w", rawStatus, ErrMalformed) + } + } + + if len(data) == 0 { + return b.acmeAuthorizationFetchHandler(acmeCtx, r, fields, userCtx, data, authz) + } + + if haveStatus && status == "deactivated" { + return b.acmeAuthorizationDeactivateHandler(acmeCtx, r, fields, userCtx, data, authz) + } + + return nil, ErrMalformed +} + +func (b *backend) acmeAuthorizationFetchHandler(acmeCtx *acmeContext, r 
*logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { + return &logical.Response{ + Data: authz.NetworkMarshal(acmeCtx), + }, nil +} + +func (b *backend) acmeAuthorizationDeactivateHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { + if authz.Status != ACMEAuthorizationPending && authz.Status != ACMEAuthorizationValid { + return nil, fmt.Errorf("unable to deactivate authorization in '%v' status: %w", authz.Status, ErrMalformed) + } + + authz.Status = ACMEAuthorizationDeactivated + for _, challenge := range authz.Challenges { + challenge.Status = ACMEChallengeInvalid + } + + if err := b.GetAcmeState().SaveAuthorization(acmeCtx, authz); err != nil { + return nil, fmt.Errorf("error saving deactivated authorization: %w", err) + } + + return &logical.Response{ + Data: authz.NetworkMarshal(acmeCtx), + }, nil +} diff --git a/builtin/logical/pki/path_acme_challenges.go b/builtin/logical/pki/path_acme_challenges.go new file mode 100644 index 000000000000..eed8d1ea41f7 --- /dev/null +++ b/builtin/logical/pki/path_acme_challenges.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeChallenge(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeChallenge(b, baseUrl+ + "/challenge/"+framework.MatchAllRegex("auth_id")+"/"+framework.MatchAllRegex("challenge_type"), opts) +} + +func addFieldsForACMEChallenge(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["auth_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME authorization identifier value", + Required: true, + } + + fields["challenge_type"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME challenge type", + Required: true, + } + + return fields +} + +func patternAcmeChallenge(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEChallenge(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeChallengeHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeChallengeHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + authId := fields.Get("auth_id").(string) + challengeType := fields.Get("challenge_type").(string) + + authz, err := b.GetAcmeState().LoadAuthorization(acmeCtx, userCtx, authId) + if err != nil { + return nil, fmt.Errorf("failed to load authorization: %w", err) + } + + return b.acmeChallengeFetchHandler(acmeCtx, r, fields, userCtx, data, authz, challengeType) +} + +func (b *backend) acmeChallengeFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data 
map[string]interface{}, authz *ACMEAuthorization, challengeType string) (*logical.Response, error) { + var challenge *ACMEChallenge + for _, c := range authz.Challenges { + if string(c.Type) == challengeType { + challenge = c + break + } + } + + if challenge == nil { + return nil, fmt.Errorf("unknown challenge of type '%v' in authorization: %w", challengeType, ErrMalformed) + } + + // Per RFC 8555 Section 7.5.1. Responding to Challenges: + // + // > The client indicates to the server that it is ready for the challenge + // > validation by sending an empty JSON body ("{}") carried in a POST + // > request to the challenge URL (not the authorization URL). + if len(data) > 0 { + return nil, fmt.Errorf("unexpected request parameters: %w", ErrMalformed) + } + + // If data was nil, we got a POST-as-GET request, just return current challenge without an accept, + // otherwise we most likely got a "{}" payload which we should now accept the challenge. + if data != nil { + thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed to get thumbprint for key: %w", err) + } + + if err := b.GetAcmeState().validator.AcceptChallenge(acmeCtx.sc, userCtx.Kid, authz, challenge, thumbprint); err != nil { + return nil, fmt.Errorf("error submitting challenge for validation: %w", err) + } + } + + return &logical.Response{ + Data: challenge.NetworkMarshal(acmeCtx, authz.Id), + + // Per RFC 8555 Section 7.1. Resources: + // + // > The "up" link relation is used with challenge resources to indicate + // > the authorization resource to which a challenge belongs. + Headers: map[string][]string{ + "Link": {fmt.Sprintf("<%s>;rel=\"up\"", buildAuthorizationUrl(acmeCtx, authz.Id))}, + }, + }, nil +} diff --git a/builtin/logical/pki/path_acme_directory.go b/builtin/logical/pki/path_acme_directory.go new file mode 100644 index 000000000000..ea49a44ee649 --- /dev/null +++ b/builtin/logical/pki/path_acme_directory.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + pathAcmeHelpSync = `An endpoint implementing the standard ACME protocol` + pathAcmeHelpDesc = `This API endpoint implementing a subset of the ACME protocol + defined in RFC 8555, with its own authentication and argument syntax that + does not follow conventional Vault operations. 
An ACME client tool or library + should be used to interact with these endpoints.` +) + +func pathAcmeDirectory(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeDirectory(b, baseUrl+"/directory", opts) +} + +func patternAcmeDirectory(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(opts, b.acmeDirectoryHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeDirectoryHandler(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + rawBody, err := json.Marshal(map[string]interface{}{ + "newNonce": acmeCtx.baseUrl.JoinPath("new-nonce").String(), + "newAccount": acmeCtx.baseUrl.JoinPath("new-account").String(), + "newOrder": acmeCtx.baseUrl.JoinPath("new-order").String(), + "revokeCert": acmeCtx.baseUrl.JoinPath("revoke-cert").String(), + "keyChange": acmeCtx.baseUrl.JoinPath("key-change").String(), + // This is purposefully missing newAuthz as we don't support pre-authorization + "meta": map[string]interface{}{ + "externalAccountRequired": acmeCtx.eabPolicy.IsExternalAccountRequired(), + }, + }) + if err != nil { + return nil, fmt.Errorf("failed encoding response: %w", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: "application/json", + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: rawBody, + }, + }, nil +} diff --git a/builtin/logical/pki/path_acme_eab.go b/builtin/logical/pki/path_acme_eab.go new file mode 100644 index 000000000000..fa026a1c1892 --- /dev/null +++ b/builtin/logical/pki/path_acme_eab.go @@ -0,0 +1,295 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "net/http" + "path" + "strings" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +var decodedTokenPrefix = mustBase64Decode("vault-eab-0-") + +func mustBase64Decode(s string) []byte { + bytes, err := base64.RawURLEncoding.DecodeString(s) + if err != nil { + panic(fmt.Sprintf("Token prefix value: %s failed decoding: %v", s, err)) + } + + // Must be divisible by 3, otherwise our prefix will not be properly honored. + if len(bytes)%3 != 0 { + panic(fmt.Sprintf("Token prefix value: %s is not divisible by 3, will not prefix properly", s)) + } + return bytes +} + +/* + * Unlike the other path_acme_xxx.go files, this file provides Vault APIs to + * manage ACME External Account Bindings; it does not provide any APIs that an + * ACME client would use.
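 *
 * As a rough illustration only (assuming the secrets engine is mounted at
 * "pki/"; adjust the mount path as needed), an operator might drive these
 * endpoints with the Vault CLI like so:
 *
 *     vault write -f pki/acme/new-eab     # mint a single-use id/key pair
 *     vault list pki/eab                  # list bindings not yet consumed
 *     vault delete pki/eab/<key_id>       # remove an unused binding
 *
 * The returned key is the HMAC secret an ACME client presents in its
 * externalAccountBinding object when registering a new account.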
+ */ +func pathAcmeEabList(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "eab/?$", + Fields: map[string]*framework.FieldSchema{}, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathAcmeListEab, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "list-eab-keys", + Description: "List all eab key identifiers yet to be used.", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `A list of unused eab keys`, + Required: true, + }, + "key_info": { + Type: framework.TypeMap, + Description: `EAB details keyed by the eab key id`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: "list external account bindings to be used for ACME", + HelpDescription: `list identifiers that have been generated but yet to be used.`, + } +} + +func pathAcmeNewEab(b *backend, baseUrl string) *framework.Path { + return patternAcmeNewEab(b, baseUrl+"/new-eab") +} + +func patternAcmeNewEab(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + opSuffix := getAcmeOperationSuffix(pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathAcmeCreateEab, + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate-eab-key", + OperationSuffix: opSuffix, + Description: "Generate an ACME EAB token for a directory", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: `The EAB key identifier`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The EAB key type`, + Required: true, + }, + "key": { + Type: framework.TypeString, + Description: `The EAB hmac key`, + Required: true, + }, + "acme_directory": { + Type: framework.TypeString, + Description: `The ACME directory to which the key belongs`, + Required: true, + }, + "created_on": { + Type: framework.TypeTime, + Description: `An RFC3339 formatted date time when the EAB token was created`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: "Generate external account bindings to be used for ACME", + HelpDescription: `Generate single use id/key pairs to be used for ACME EAB.`, + } +} + +func pathAcmeEabDelete(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "eab/" + uuidNameRegex("key_id"), + + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: "EAB key identifier", + Required: true, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathAcmeDeleteEab, + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "delete-eab-key", + Description: "Delete an unused EAB token", + }, + }, + }, + + HelpSynopsis: "Delete an external account binding id prior to its use within 
an ACME account", + HelpDescription: `Allows an operator to delete an external account binding, +before its bound to a new ACME account. If the identifier provided does not exist or +was already consumed by an ACME account a successful response is returned along with +a warning that it did not exist.`, + } +} + +type eabType struct { + KeyID string `json:"-"` + KeyType string `json:"key-type"` + PrivateBytes []byte `json:"private-bytes"` + AcmeDirectory string `json:"acme-directory"` + CreatedOn time.Time `json:"created-on"` +} + +func (b *backend) pathAcmeListEab(ctx context.Context, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, r.Storage) + + acmeState := b.GetAcmeState() + eabIds, err := acmeState.ListEabIds(sc) + if err != nil { + return nil, err + } + + var warnings []string + var keyIds []string + keyInfos := map[string]interface{}{} + + for _, eabKey := range eabIds { + eab, err := acmeState.LoadEab(sc, eabKey) + if err != nil { + warnings = append(warnings, fmt.Sprintf("failed loading eab entry %s: %v", eabKey, err)) + continue + } + + keyIds = append(keyIds, eab.KeyID) + keyInfos[eab.KeyID] = map[string]interface{}{ + "key_type": eab.KeyType, + "acme_directory": path.Join(eab.AcmeDirectory, "directory"), + "created_on": eab.CreatedOn.Format(time.RFC3339), + } + } + + resp := logical.ListResponseWithInfo(keyIds, keyInfos) + for _, warning := range warnings { + resp.AddWarning(warning) + } + return resp, nil +} + +func (b *backend) pathAcmeCreateEab(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) { + kid := genUuid() + size := 32 + bytes, err := uuid.GenerateRandomBytesWithReader(size, rand.Reader) + if err != nil { + return nil, fmt.Errorf("failed generating eab key: %w", err) + } + + acmeDirectory, err := getAcmeDirectory(r) + if err != nil { + return nil, err + } + + eab := &eabType{ + KeyID: kid, + KeyType: "hs", + PrivateBytes: append(decodedTokenPrefix, bytes...), // we do this to avoid generating tokens that start with - + AcmeDirectory: acmeDirectory, + CreatedOn: time.Now(), + } + + sc := b.makeStorageContext(ctx, r.Storage) + err = b.GetAcmeState().SaveEab(sc, eab) + if err != nil { + return nil, fmt.Errorf("failed saving generated eab: %w", err) + } + + encodedKey := base64.RawURLEncoding.EncodeToString(eab.PrivateBytes) + + return &logical.Response{ + Data: map[string]interface{}{ + "id": eab.KeyID, + "key_type": eab.KeyType, + "key": encodedKey, + "acme_directory": path.Join(eab.AcmeDirectory, "directory"), + "created_on": eab.CreatedOn.Format(time.RFC3339), + }, + }, nil +} + +func (b *backend) pathAcmeDeleteEab(ctx context.Context, r *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, r.Storage) + keyId := d.Get("key_id").(string) + + _, err := uuid.ParseUUID(keyId) + if err != nil { + return nil, fmt.Errorf("badly formatted key_id field") + } + + deleted, err := b.GetAcmeState().DeleteEab(sc, keyId) + if err != nil { + return nil, fmt.Errorf("failed deleting key id: %w", err) + } + + resp := &logical.Response{} + if !deleted { + resp.AddWarning("No key id found with id: " + keyId) + } + return resp, nil +} + +// getAcmeOperationSuffix used mainly to compute the OpenAPI spec suffix value to distinguish +// different versions of ACME Vault APIs based on directory paths +func getAcmeOperationSuffix(pattern string) string { + hasRole := strings.Contains(pattern, framework.GenericNameRegex("role")) + hasIssuer := 
strings.Contains(pattern, framework.GenericNameRegex(issuerRefParam)) + + switch { + case hasRole && hasIssuer: + return "for-issuer-and-role" + case hasRole: + return "for-role" + case hasIssuer: + return "for-issuer" + default: + return "" + } +} diff --git a/builtin/logical/pki/path_acme_nonce.go b/builtin/logical/pki/path_acme_nonce.go new file mode 100644 index 000000000000..7c8d5d407bb9 --- /dev/null +++ b/builtin/logical/pki/path_acme_nonce.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeNonce(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNonce(b, baseUrl+"/new-nonce", opts) +} + +func patternAcmeNonce(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.HeaderOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(opts, b.acmeNonceHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(opts, b.acmeNonceHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeNonceHandler(ctx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + nonce, _, err := b.GetAcmeState().GetNonce() + if err != nil { + return nil, err + } + + // Header operations return 200, GET return 204. + httpStatus := http.StatusOK + if r.Operation == logical.ReadOperation { + httpStatus = http.StatusNoContent + } + + return &logical.Response{ + Headers: map[string][]string{ + "Cache-Control": {"no-store"}, + "Replay-Nonce": {nonce}, + "Link": genAcmeLinkHeader(ctx), + }, + Data: map[string]interface{}{ + logical.HTTPStatusCode: httpStatus, + // Get around Vault limitation of requiring a body set if the status is not http.StatusNoContent + // for our HEAD request responses. + logical.HTTPContentType: "", + }, + }, nil +} + +func genAcmeLinkHeader(ctx *acmeContext) []string { + path := fmt.Sprintf("<%s>;rel=\"index\"", ctx.baseUrl.JoinPath("directory").String()) + return []string{path} +} diff --git a/builtin/logical/pki/path_acme_order.go b/builtin/logical/pki/path_acme_order.go new file mode 100644 index 000000000000..197b7d09161f --- /dev/null +++ b/builtin/logical/pki/path_acme_order.go @@ -0,0 +1,1085 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "net" + "net/http" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/net/idna" +) + +func pathAcmeListOrders(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeListOrders(b, baseUrl+"/orders", opts) +} + +func pathAcmeGetOrder(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeGetOrder(b, baseUrl+"/order/"+uuidNameRegex("order_id"), opts) +} + +func pathAcmeNewOrder(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNewOrder(b, baseUrl+"/new-order", opts) +} + +func pathAcmeFinalizeOrder(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeFinalizeOrder(b, baseUrl+"/order/"+uuidNameRegex("order_id")+"/finalize", opts) +} + +func pathAcmeFetchOrderCert(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeFetchOrderCert(b, baseUrl+"/order/"+uuidNameRegex("order_id")+"/cert", opts) +} + +func patternAcmeNewOrder(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeNewOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeListOrders(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeListOrdersHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeGetOrder(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeGetOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeFinalizeOrder(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return 
&framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeFinalizeOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeFetchOrderCert(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeFetchCertOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func addFieldsForACMEOrder(fields map[string]*framework.FieldSchema) { + fields["order_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The ACME order identifier to fetch`, + Required: true, + } +} + +func (b *backend) acmeFetchCertOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + if order.Status != ACMEOrderValid { + return nil, fmt.Errorf("%w: order is status %s, needs to be in valid state", ErrOrderNotReady, order.Status) + } + + if len(order.IssuerId) == 0 || len(order.CertificateSerialNumber) == 0 { + return nil, fmt.Errorf("order is missing required fields to load certificate") + } + + certEntry, err := fetchCertBySerial(ac.sc, issuing.PathCerts, order.CertificateSerialNumber) + if err != nil { + return nil, fmt.Errorf("failed reading certificate %s from storage: %w", order.CertificateSerialNumber, err) + } + if certEntry == nil || len(certEntry.Value) == 0 { + return nil, fmt.Errorf("missing certificate %s from storage", order.CertificateSerialNumber) + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("failed parsing certificate %s: %w", order.CertificateSerialNumber, err) + } + + issuer, err := ac.sc.fetchIssuerById(order.IssuerId) + if err != nil { + return nil, fmt.Errorf("failed loading certificate issuer %s from storage: %w", order.IssuerId, err) + } + + allPems, err := func() ([]byte, error) { + leafPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }) + + chains := []byte(issuer.Certificate) + for _, chainVal := range issuer.CAChain { + if chainVal == issuer.Certificate { + continue + } + chains = append(chains, []byte(chainVal)...) 
+ } + + return append(leafPEM, chains...), nil + }() + if err != nil { + return nil, fmt.Errorf("failed encoding certificate ca chain: %w", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: "application/pem-certificate-chain", + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: allPems, + }, + }, nil +} + +func (b *backend) acmeFinalizeOrderHandler(ac *acmeContext, r *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + csr, err := parseCsrFromFinalize(data) + if err != nil { + return nil, err + } + + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + order.Status, err = computeOrderStatus(ac, uc, order) + if err != nil { + return nil, err + } + + if order.Status != ACMEOrderReady { + return nil, fmt.Errorf("%w: order is status %s, needs to be in ready state", ErrOrderNotReady, order.Status) + } + + now := time.Now() + if !order.Expires.IsZero() && now.After(order.Expires) { + return nil, fmt.Errorf("%w: order %s is expired", ErrMalformed, orderId) + } + + if err = validateCsrMatchesOrder(csr, order); err != nil { + return nil, err + } + + if err = validateCsrNotUsingAccountKey(csr, uc); err != nil { + return nil, err + } + + var signedCertBundle *certutil.ParsedCertBundle + var issuerId issuing.IssuerID + if ac.runtimeOpts.isCiepsEnabled { + // Note that issueAcmeCertUsingCieps enforces storage requirements and + // does the certificate storage for us + signedCertBundle, issuerId, err = issueAcmeCertUsingCieps(b, ac, r, fields, uc, account, order, csr) + if err != nil { + return nil, err + } + } else { + signedCertBundle, issuerId, err = issueCertFromCsr(ac, csr) + if err != nil { + return nil, err + } + + err = issuing.StoreCertificate(ac.sc.Context, ac.sc.Storage, ac.sc.GetCertificateCounter(), signedCertBundle) + if err != nil { + return nil, err + } + } + hyphenSerialNumber := normalizeSerialFromBigInt(signedCertBundle.Certificate.SerialNumber) + + if err := b.GetAcmeState().TrackIssuedCert(ac, order.AccountId, hyphenSerialNumber, order.OrderId); err != nil { + b.Logger().Warn("orphaned generated ACME certificate due to error saving account->cert->order reference", "serial_number", hyphenSerialNumber, "error", err) + return nil, err + } + + order.Status = ACMEOrderValid + order.CertificateSerialNumber = hyphenSerialNumber + order.CertificateExpiry = signedCertBundle.Certificate.NotAfter + order.IssuerId = issuerId + + err = b.GetAcmeState().SaveOrder(ac, order) + if err != nil { + b.Logger().Warn("orphaned generated ACME certificate due to error saving order", "serial_number", hyphenSerialNumber, "error", err) + return nil, fmt.Errorf("failed saving updated order: %w", err) + } + + if err := b.doTrackBilling(ac.sc.Context, order.Identifiers); err != nil { + b.Logger().Error("failed to track billing for order", "order", orderId, "error", err) + err = nil + } + + return formatOrderResponse(ac, order), nil +} + +func computeOrderStatus(ac *acmeContext, uc *jwsCtx, order *acmeOrder) (ACMEOrderStatusType, error) { + // If we reached a final stage, no use computing anything else + if order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid { + return order.Status, nil + } + + // We aren't in a final state yet, check for expiry + if time.Now().After(order.Expires) { + return ACMEOrderInvalid, nil + } + + // Intermediary steps passed 
authorizations should short circuit us as well + if order.Status == ACMEOrderReady || order.Status == ACMEOrderProcessing { + return order.Status, nil + } + + // If we have no authorizations attached to the order, nothing to compute either + if len(order.AuthorizationIds) == 0 { + return ACMEOrderPending, nil + } + + anyFailed := false + allPassed := true + for _, authId := range order.AuthorizationIds { + authorization, err := ac.getAcmeState().LoadAuthorization(ac, uc, authId) + if err != nil { + return order.Status, fmt.Errorf("failed loading authorization: %s: %w", authId, err) + } + + if authorization.Status == ACMEAuthorizationPending { + allPassed = false + continue + } + + if authorization.Status != ACMEAuthorizationValid { + // Per RFC 8555 - 7.1.6. Status Changes + // The order also moves to the "invalid" state if it expires or + // one of its authorizations enters a final state other than + // "valid" ("expired", "revoked", or "deactivated"). + allPassed = false + anyFailed = true + break + } + } + + if anyFailed { + return ACMEOrderInvalid, nil + } + + if allPassed { + return ACMEOrderReady, nil + } + + // The order has not expired, no authorizations have yet to be marked as failed + // nor have we passed them all. + return ACMEOrderPending, nil +} + +func validateCsrNotUsingAccountKey(csr *x509.CertificateRequest, uc *jwsCtx) error { + csrKey := csr.PublicKey + userKey := uc.Key.Public().Key + + sameKey, err := certutil.ComparePublicKeysAndType(csrKey, userKey) + if err != nil { + return err + } + + if sameKey { + return fmt.Errorf("%w: certificate public key must not match account key", ErrBadCSR) + } + + return nil +} + +func validateCsrMatchesOrder(csr *x509.CertificateRequest, order *acmeOrder) error { + csrDNSIdentifiers, csrIPIdentifiers := getIdentifiersFromCSR(csr) + orderDNSIdentifiers := strutil.RemoveDuplicates(order.getIdentifierDNSValues(), true) + orderIPIdentifiers := removeDuplicatesAndSortIps(order.getIdentifierIPValues()) + + if len(orderDNSIdentifiers) == 0 && len(orderIPIdentifiers) == 0 { + return fmt.Errorf("%w: order did not include any identifiers", ErrServerInternal) + } + + if len(orderDNSIdentifiers) != len(csrDNSIdentifiers) { + return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of DNS identifiers", ErrBadCSR, len(orderDNSIdentifiers), len(csrDNSIdentifiers)) + } + + if len(orderIPIdentifiers) != len(csrIPIdentifiers) { + return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of IP identifiers", ErrBadCSR, len(orderIPIdentifiers), len(csrIPIdentifiers)) + } + + for i, identifier := range orderDNSIdentifiers { + if identifier != csrDNSIdentifiers[i] { + return fmt.Errorf("%w: CSR is missing order DNS identifier %s", ErrBadCSR, identifier) + } + } + + for i, identifier := range orderIPIdentifiers { + if !identifier.Equal(csrIPIdentifiers[i]) { + return fmt.Errorf("%w: CSR is missing order IP identifier %s", ErrBadCSR, identifier.String()) + } + } + + // Since we do not support NotBefore/NotAfter dates at this time no need to validate CSR/Order match. 
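+
+	// As a rough client-side counterpart (illustrative; certKey is a
+	// hypothetical leaf key, which must differ from the ACME account key per
+	// validateCsrNotUsingAccountKey above): a CSR that passes these checks
+	// enumerates exactly the order's identifiers, e.g.
+	//
+	//	tmpl := &x509.CertificateRequest{
+	//		DNSNames:    []string{"www.example.com"},        // the order's DNS identifiers
+	//		IPAddresses: []net.IP{net.ParseIP("192.0.2.1")}, // the order's IP identifiers
+	//	}
+	//	der, err := x509.CreateCertificateRequest(rand.Reader, tmpl, certKey)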
+ + return nil +} + +func (b *backend) validateIdentifiersAgainstRole(role *issuing.RoleEntry, identifiers []*ACMEIdentifier) error { + for _, identifier := range identifiers { + switch identifier.Type { + case ACMEDNSIdentifier: + data := &inputBundle{ + role: role, + req: &logical.Request{}, + apiData: &framework.FieldData{}, + } + + if validateNames(b, data, []string{identifier.OriginalValue}) != "" { + return fmt.Errorf("%w: role (%s) will not issue certificate for name %v", + ErrRejectedIdentifier, role.Name, identifier.OriginalValue) + } + case ACMEIPIdentifier: + if !role.AllowIPSANs { + return fmt.Errorf("%w: role (%s) does not allow IP sans, so cannot issue certificate for %v", + ErrRejectedIdentifier, role.Name, identifier.OriginalValue) + } + default: + return fmt.Errorf("unknown type of identifier: %v for %v", identifier.Type, identifier.OriginalValue) + } + } + + return nil +} + +func getIdentifiersFromCSR(csr *x509.CertificateRequest) ([]string, []net.IP) { + dnsIdentifiers := append([]string(nil), csr.DNSNames...) + ipIdentifiers := append([]net.IP(nil), csr.IPAddresses...) + + if csr.Subject.CommonName != "" { + ip := net.ParseIP(csr.Subject.CommonName) + if ip != nil { + ipIdentifiers = append(ipIdentifiers, ip) + } else { + dnsIdentifiers = append(dnsIdentifiers, csr.Subject.CommonName) + } + } + + return strutil.RemoveDuplicates(dnsIdentifiers, true), removeDuplicatesAndSortIps(ipIdentifiers) +} + +func removeDuplicatesAndSortIps(ipIdentifiers []net.IP) []net.IP { + var uniqueIpIdentifiers []net.IP + for _, ip := range ipIdentifiers { + found := false + for _, curIp := range uniqueIpIdentifiers { + if curIp.Equal(ip) { + found = true + } + } + + if !found { + uniqueIpIdentifiers = append(uniqueIpIdentifiers, ip) + } + } + + sort.Slice(uniqueIpIdentifiers, func(i, j int) bool { + return uniqueIpIdentifiers[i].String() < uniqueIpIdentifiers[j].String() + }) + return uniqueIpIdentifiers +} + +func maybeAugmentReqDataWithSuitableCN(ac *acmeContext, csr *x509.CertificateRequest, data *framework.FieldData) { + // Role doesn't require a CN, so we don't care. + if !ac.Role.RequireCN { + return + } + + // CSR contains a CN, so use that one. + if csr.Subject.CommonName != "" { + return + } + + // Choose a CN in the order wildcard -> DNS -> IP -> fail. + for _, name := range csr.DNSNames { + if strings.Contains(name, "*") { + data.Raw["common_name"] = name + return + } + } + if len(csr.DNSNames) > 0 { + data.Raw["common_name"] = csr.DNSNames[0] + return + } + if len(csr.IPAddresses) > 0 { + data.Raw["common_name"] = csr.IPAddresses[0].String() + return + } +} + +func issueCertFromCsr(ac *acmeContext, csr *x509.CertificateRequest) (*certutil.ParsedCertBundle, issuing.IssuerID, error) { + pemBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Headers: nil, + Bytes: csr.Raw, + } + pemCsr := string(pem.EncodeToMemory(pemBlock)) + + data := &framework.FieldData{ + Raw: map[string]interface{}{ + "csr": pemCsr, + }, + Schema: getCsrSignVerbatimSchemaFields(), + } + + // XXX: Usability hack: by default, minimalist roles have require_cn=true, + // but some ACME clients do not provision one in the certificate as modern + // (TLS) clients are mostly verifying against server's DNS SANs. 
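+	// For example (illustrative, when the role sets require_cn): a CN-less
+	// CSR with DNSNames ["*.example.com", "a.example.com"] has
+	// common_name="*.example.com" injected below (wildcards win), a CSR
+	// carrying only IPAddresses falls back to the first IP, and a CSR that
+	// already has a CN is left untouched:
+	//
+	//	data.Raw["common_name"] = "*.example.com"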
+ maybeAugmentReqDataWithSuitableCN(ac, csr, data) + + signingBundle, issuerId, err := ac.sc.fetchCAInfoWithIssuer(ac.Issuer.ID.String(), issuing.IssuanceUsage) + if err != nil { + return nil, "", fmt.Errorf("failed loading CA %s: %w", ac.Issuer.ID.String(), err) + } + + // ACME issued cert will override the TTL values to truncate to the issuer's + // expiration if we go beyond, no matter the setting + if signingBundle.LeafNotAfterBehavior == certutil.ErrNotAfterBehavior { + signingBundle.LeafNotAfterBehavior = certutil.TruncateNotAfterBehavior + } + + input := &inputBundle{ + req: &logical.Request{}, + apiData: data, + role: ac.Role, + } + + normalNotAfter, _, err := getCertificateNotAfter(ac.sc.System(), input, signingBundle) + if err != nil { + return nil, "", fmt.Errorf("failed computing certificate TTL from role/mount: %v: %w", err, ErrMalformed) + } + + // We only allow ServerAuth key usage from ACME issued certs + // when configuration does not allow usage of ExtKeyusage field. + config, err := ac.acmeState.getConfigWithUpdate(ac.sc) + if err != nil { + return nil, "", fmt.Errorf("failed to fetch ACME configuration: %w", err) + } + + // Force our configured max acme TTL + if time.Now().Add(config.MaxTTL).Before(normalNotAfter) { + input.apiData.Raw["ttl"] = config.MaxTTL.Seconds() + } + + if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { + return nil, "", fmt.Errorf("%w: Refusing to sign CSR with empty PublicKey", ErrBadCSR) + } + + // UseCSRValues as defined in certutil/helpers.go accepts the following + // fields off of the CSR: + // + // 1. Subject fields, + // 2. SANs, + // 3. Extensions (except for a BasicConstraint extension) + // + // Because we have stricter validation of subject parameters, and no way + // to validate or allow extensions, we do not wish to use the CSR's + // parameters for these values. If a CSR sets, e.g., an organizational + // unit, we have no way of validating this (via ACME here, without perhaps + // an external policy engine), and thus should not be setting it on our + // final issued certificate. 
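+	// An illustrative consequence of the note above: a CSR built with, say,
+	//
+	//	tmpl.Subject = pkix.Name{OrganizationalUnit: []string{"ops"}} // hypothetical client-supplied subject
+	//	tmpl.ExtraExtensions = []pkix.Extension{{Id: someOID}}        // hypothetical custom extension
+	//
+	// is still accepted, but neither the OU nor the extension survives into
+	// the issued certificate, because use_csr_values is false below.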
+ parsedBundle, _, err := signCert(ac.sc.System(), input, signingBundle, false /* is_ca=false */, false /* use_csr_values */) + if err != nil { + return nil, "", fmt.Errorf("%w: refusing to sign CSR: %s", ErrBadCSR, err.Error()) + } + + if err = parsedBundle.Verify(); err != nil { + return nil, "", fmt.Errorf("verification of parsed bundle failed: %w", err) + } + + if !config.AllowRoleExtKeyUsage { + for _, usage := range parsedBundle.Certificate.ExtKeyUsage { + if usage != x509.ExtKeyUsageServerAuth { + return nil, "", fmt.Errorf("%w: ACME certs only allow ServerAuth key usage", ErrBadCSR) + } + } + } + + return parsedBundle, issuerId, err +} + +func parseCsrFromFinalize(data map[string]interface{}) (*x509.CertificateRequest, error) { + csrInterface, present := data["csr"] + if !present { + return nil, fmt.Errorf("%w: missing csr in payload", ErrMalformed) + } + + base64Csr, ok := csrInterface.(string) + if !ok { + return nil, fmt.Errorf("%w: csr in payload not the expected type: %T", ErrMalformed, csrInterface) + } + + derCsr, err := base64.RawURLEncoding.DecodeString(base64Csr) + if err != nil { + return nil, fmt.Errorf("%w: failed base64 decoding csr: %s", ErrMalformed, err.Error()) + } + + csr, err := x509.ParseCertificateRequest(derCsr) + if err != nil { + return nil, fmt.Errorf("%w: failed to parse csr: %s", ErrMalformed, err.Error()) + } + + if csr.PublicKey == nil || csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm { + return nil, fmt.Errorf("%w: failed to parse csr no public key info or unknown key algorithm used", ErrBadCSR) + } + + for _, ext := range csr.Extensions { + if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { + isCa, _, err := certutil.ParseBasicConstraintExtension(ext) + if err != nil { + return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension: %v", ErrBadCSR, err.Error()) + } + + if isCa { + return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension with CA set to true", ErrBadCSR) + } + } + } + + return csr, nil +} + +func (b *backend) acmeGetOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + order.Status, err = computeOrderStatus(ac, uc, order) + if err != nil { + return nil, err + } + + // Per RFC 8555 -> 7.1.3. Order Objects + // For final orders (in the "valid" or "invalid" state), the authorizations that were completed. + // + // Otherwise, for "pending" orders we will return our list as it was originally saved. 
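+	// For example (illustrative): an order that went "invalid" because
+	// authorization B was deactivated after authorization A had already
+	// passed is returned listing only A, while the same order, when still
+	// "pending", lists both A and B.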
+ requiresFiltering := order.Status == ACMEOrderValid || order.Status == ACMEOrderInvalid + if requiresFiltering { + filteredAuthorizationIds := []string{} + + for _, authId := range order.AuthorizationIds { + authorization, err := b.GetAcmeState().LoadAuthorization(ac, uc, authId) + if err != nil { + return nil, err + } + + if (order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid) && + authorization.Status == ACMEAuthorizationValid { + filteredAuthorizationIds = append(filteredAuthorizationIds, authId) + } + } + + order.AuthorizationIds = filteredAuthorizationIds + } + + return formatOrderResponse(ac, order), nil +} + +func (b *backend) acmeListOrdersHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, acct *acmeAccount) (*logical.Response, error) { + orderIds, err := b.GetAcmeState().ListOrderIds(ac.sc, acct.KeyId) + if err != nil { + return nil, err + } + + orderUrls := []string{} + for _, orderId := range orderIds { + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + if order.Status == ACMEOrderInvalid { + // Per RFC8555 -> 7.1.2.1 - Orders List + // The server SHOULD include pending orders and SHOULD NOT + // include orders that are invalid in the array of URLs. + continue + } + + orderUrls = append(orderUrls, buildOrderUrl(ac, orderId)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "orders": orderUrls, + }, + } + + return resp, nil +} + +func (b *backend) acmeNewOrderHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, _ *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { + identifiers, err := parseOrderIdentifiers(data) + if err != nil { + return nil, err + } + + notBefore, err := parseOptRFC3339Field(data, "notBefore") + if err != nil { + return nil, err + } + + notAfter, err := parseOptRFC3339Field(data, "notAfter") + if err != nil { + return nil, err + } + + if !notBefore.IsZero() || !notAfter.IsZero() { + return nil, fmt.Errorf("%w: NotBefore and NotAfter are not supported", ErrMalformed) + } + + err = validateAcmeProvidedOrderDates(notBefore, notAfter) + if err != nil { + return nil, err + } + + err = b.validateIdentifiersAgainstRole(ac.Role, identifiers) + if err != nil { + return nil, err + } + + // Per RFC 8555 -> 7.1.3. Order Objects + // For pending orders, the authorizations that the client needs to complete before the + // requested certificate can be issued (see Section 7.5), including + // unexpired authorizations that the client has completed in the past + // for identifiers specified in the order. + // + // Since we are generating all authorizations here, there is no need to filter them out + // IF/WHEN we support pre-authz workflows and associate existing authorizations to this + // order they will need filtering. 
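+	// From the client's perspective (an illustrative sketch using
+	// golang.org/x/crypto/acme, as the tests in this change do; "client" is
+	// a configured *acme.Client):
+	//
+	//	order, err := client.AuthorizeOrder(ctx, []acme.AuthzID{
+	//		{Type: "dns", Value: "www.example.com"},
+	//	})
+	//	// order.AuthzURLs then points at the authorizations generated below.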
+ var authorizations []*ACMEAuthorization + var authorizationIds []string + for _, identifier := range identifiers { + authz, err := generateAuthorization(account, identifier) + if err != nil { + return nil, fmt.Errorf("error generating authorizations: %w", err) + } + authorizations = append(authorizations, authz) + + err = b.GetAcmeState().SaveAuthorization(ac, authz) + if err != nil { + return nil, fmt.Errorf("failed storing authorization: %w", err) + } + + authorizationIds = append(authorizationIds, authz.Id) + } + + order := &acmeOrder{ + OrderId: genUuid(), + AccountId: account.KeyId, + Status: ACMEOrderPending, + Expires: time.Now().Add(24 * time.Hour), // TODO: Readjust this based on authz and/or config + Identifiers: identifiers, + AuthorizationIds: authorizationIds, + } + + err = b.GetAcmeState().SaveOrder(ac, order) + if err != nil { + return nil, fmt.Errorf("failed storing order: %w", err) + } + + resp := formatOrderResponse(ac, order) + + // Per RFC 8555 Section 7.4. Applying for Certificate Issuance: + // + // > If the server is willing to issue the requested certificate, it + // > responds with a 201 (Created) response. + resp.Data[logical.HTTPStatusCode] = http.StatusCreated + return resp, nil +} + +func validateAcmeProvidedOrderDates(notBefore time.Time, notAfter time.Time) error { + if !notBefore.IsZero() && !notAfter.IsZero() { + if notBefore.Equal(notAfter) { + return fmt.Errorf("%w: provided notBefore and notAfter dates can not be equal", ErrMalformed) + } + + if notBefore.After(notAfter) { + return fmt.Errorf("%w: provided notBefore can not be greater than notAfter", ErrMalformed) + } + } + + if !notAfter.IsZero() { + if time.Now().After(notAfter) { + return fmt.Errorf("%w: provided notAfter can not be in the past", ErrMalformed) + } + } + + return nil +} + +func formatOrderResponse(acmeCtx *acmeContext, order *acmeOrder) *logical.Response { + baseOrderUrl := buildOrderUrl(acmeCtx, order.OrderId) + + var authorizationUrls []string + for _, authId := range order.AuthorizationIds { + authorizationUrls = append(authorizationUrls, buildAuthorizationUrl(acmeCtx, authId)) + } + + var identifiers []map[string]interface{} + for _, identifier := range order.Identifiers { + identifiers = append(identifiers, identifier.NetworkMarshal( /* use original value */ true)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "status": order.Status, + "expires": order.Expires.Format(time.RFC3339), + "identifiers": identifiers, + "authorizations": authorizationUrls, + "finalize": baseOrderUrl + "/finalize", + }, + Headers: map[string][]string{ + "Location": {baseOrderUrl}, + }, + } + + // Only reply with the certificate URL if we are in a valid order state. + if order.Status == ACMEOrderValid { + resp.Data["certificate"] = baseOrderUrl + "/cert" + } + + return resp +} + +func buildAuthorizationUrl(acmeCtx *acmeContext, authId string) string { + return acmeCtx.baseUrl.JoinPath("authorization", authId).String() +} + +func buildOrderUrl(acmeCtx *acmeContext, orderId string) string { + return acmeCtx.baseUrl.JoinPath("order", orderId).String() +} + +func generateAuthorization(acct *acmeAccount, identifier *ACMEIdentifier) (*ACMEAuthorization, error) { + authId := genUuid() + + // Certain challenges have certain restrictions: DNS challenges cannot + // be used to validate IP addresses, and only DNS challenges can be used + // to validate wildcards. 
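+	// For reference, per RFC 8555 Section 8.1 a challenge's key authorization
+	// is token || "." || base64url(account JWK thumbprint). An http-01
+	// responder serves that string at
+	// http://<identifier>/.well-known/acme-challenge/<token>, while dns-01
+	// publishes its SHA-256 digest in a TXT record at
+	// _acme-challenge.<identifier>; sketch:
+	//
+	//	keyAuthz := token + "." + thumbprint
+	//	digest := sha256.Sum256([]byte(keyAuthz))
+	//	txtValue := base64.RawURLEncoding.EncodeToString(digest[:])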
+ allowedChallenges := []ACMEChallengeType{ACMEHTTPChallenge, ACMEDNSChallenge, ACMEALPNChallenge} + if identifier.Type == ACMEIPIdentifier { + allowedChallenges = []ACMEChallengeType{ACMEHTTPChallenge} + } else if identifier.IsWildcard { + allowedChallenges = []ACMEChallengeType{ACMEDNSChallenge} + } + + var challenges []*ACMEChallenge + for _, challengeType := range allowedChallenges { + token, err := getACMEToken() + if err != nil { + return nil, err + } + + challenge := &ACMEChallenge{ + Type: challengeType, + Status: ACMEChallengePending, + ChallengeFields: map[string]interface{}{ + "token": token, + }, + } + + challenges = append(challenges, challenge) + } + + return &ACMEAuthorization{ + Id: authId, + AccountId: acct.KeyId, + Identifier: identifier, + Status: ACMEAuthorizationPending, + Expires: "", // only populated when it switches to valid. + Challenges: challenges, + Wildcard: identifier.IsWildcard, + }, nil +} + +func parseOptRFC3339Field(data map[string]interface{}, keyName string) (time.Time, error) { + var timeVal time.Time + var err error + + rawBefore, present := data[keyName] + if present { + beforeStr, ok := rawBefore.(string) + if !ok { + return timeVal, fmt.Errorf("invalid type (%T) for field '%s': %w", rawBefore, keyName, ErrMalformed) + } + timeVal, err = time.Parse(time.RFC3339, beforeStr) + if err != nil { + return timeVal, fmt.Errorf("failed parsing field '%s' (%s): %s: %w", keyName, rawBefore, err.Error(), ErrMalformed) + } + + if timeVal.IsZero() { + return timeVal, fmt.Errorf("provided time value is invalid '%s' (%s): %w", keyName, rawBefore, ErrMalformed) + } + } + + return timeVal, nil +} + +func parseOrderIdentifiers(data map[string]interface{}) ([]*ACMEIdentifier, error) { + rawIdentifiers, present := data["identifiers"] + if !present { + return nil, fmt.Errorf("missing required identifiers argument: %w", ErrMalformed) + } + + listIdentifiers, ok := rawIdentifiers.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid type (%T) for field 'identifiers': %w", rawIdentifiers, ErrMalformed) + } + + var identifiers []*ACMEIdentifier + for _, rawIdentifier := range listIdentifiers { + mapIdentifier, ok := rawIdentifier.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid type (%T) for value in 'identifiers': %w", rawIdentifier, ErrMalformed) + } + + typeVal, present := mapIdentifier["type"] + if !present { + return nil, fmt.Errorf("missing type argument for value in 'identifiers': %w", ErrMalformed) + } + typeStr, ok := typeVal.(string) + if !ok { + return nil, fmt.Errorf("invalid type for type argument (%T) for value in 'identifiers': %w", typeStr, ErrMalformed) + } + + valueVal, present := mapIdentifier["value"] + if !present { + return nil, fmt.Errorf("missing value argument for value in 'identifiers': %w", ErrMalformed) + } + valueStr, ok := valueVal.(string) + if !ok { + return nil, fmt.Errorf("invalid type for value argument (%T) for value in 'identifiers': %w", valueStr, ErrMalformed) + } + + if len(valueStr) == 0 { + return nil, fmt.Errorf("value argument for value in 'identifiers' can not be blank: %w", ErrMalformed) + } + + identifier := &ACMEIdentifier{ + Value: valueStr, + OriginalValue: valueStr, + } + + switch typeStr { + case string(ACMEIPIdentifier): + identifier.Type = ACMEIPIdentifier + ip := net.ParseIP(valueStr) + if ip == nil { + return nil, fmt.Errorf("value argument (%s) failed validation: failed parsing as IP: %w", valueStr, ErrMalformed) + } + case string(ACMEDNSIdentifier): + identifier.Type = ACMEDNSIdentifier + + 
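+
+		// An IDNA round-trip example for the validation below (illustrative):
+		//
+		//	p := idna.New(idna.ValidateForRegistration())
+		//	ascii, _ := p.ToASCII("münchen.example") // "xn--mnchen-3ya.example"
+		//
+		// i.e. unicode names must be submitted already in their "xn--" ACE
+		// form, or the equality check below will reject them.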
// This check modifies the identifier if it is a wildcard, + // removing the wildcard prefix and keeping the non-wildcard portion. We do this before the + // IP address checks, in case of an attempt to bypass the IP/DNS + // check via including a leading wildcard (e.g., *.127.0.0.1). + // + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > Wildcard domain names (with "*" as the first label) MUST NOT + // > be included in authorization objects. + if _, _, err := identifier.MaybeParseWildcard(); err != nil { + return nil, fmt.Errorf("value argument (%s) failed validation: invalid wildcard: %v: %w", valueStr, err, ErrMalformed) + } + + if isIP := net.ParseIP(identifier.Value); isIP != nil { + return nil, fmt.Errorf("refusing to accept argument (%s) as DNS type identifier: parsed OK as IP address: %w", valueStr, ErrMalformed) + } + + // Use the reduced (identifier.Value) in case this was a wildcard + // domain. + p := idna.New(idna.ValidateForRegistration()) + converted, err := p.ToASCII(identifier.Value) + if err != nil { + return nil, fmt.Errorf("value argument (%s) failed validation: %s: %w", valueStr, err.Error(), ErrMalformed) + } + + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > The domain name MUST be encoded in the form in which it + // > would appear in a certificate. That is, it MUST be encoded + // > according to the rules in Section 7 of [RFC5280]. Servers + // > MUST verify any identifier values that begin with the + // > ASCII-Compatible Encoding prefix "xn--" as defined in + // > [RFC5890] are properly encoded. + if identifier.Value != converted { + return nil, fmt.Errorf("value argument (%s) failed IDNA round-tripping to ASCII: %w", valueStr, ErrMalformed) + } + default: + return nil, fmt.Errorf("unsupported identifier type %s: %w", typeStr, ErrUnsupportedIdentifier) + } + + identifiers = append(identifiers, identifier) + } + + return identifiers, nil +} + +func (b *backend) acmeTidyOrder(sc *storageContext, accountId string, orderPath string, certTidyBuffer time.Duration) (bool, time.Time, error) { + // First we get the order; note that the orderPath includes the account. + // It's only accessed at acme/orders/<order_id> with the account context; + // it's saved at acme/<account_id>/orders/<order_id>. + entry, err := sc.Storage.Get(sc.Context, orderPath) + if err != nil { + return false, time.Time{}, fmt.Errorf("error loading order: %w", err) + } + if entry == nil { + return false, time.Time{}, fmt.Errorf("order does not exist: %w", ErrMalformed) + } + var order acmeOrder + err = entry.DecodeJSON(&order) + if err != nil { + return false, time.Time{}, fmt.Errorf("error decoding order: %w", err) + } + + // Determine whether we should tidy this order + shouldTidy := false + + // Track either the order expiry or certificate expiry to return to the caller; this + // can be used to influence the account's expiry + orderExpiry := order.CertificateExpiry + + // It is faster to check certificate information on the order entry rather than fetch the cert entry to parse: + if !order.CertificateExpiry.IsZero() { + // This implies that a certificate exists + // When a certificate exists, we want to expire and tidy the order when we tidy the certificate: + if time.Now().After(order.CertificateExpiry.Add(certTidyBuffer)) { // It's time to clean + shouldTidy = true + } + } else { + // This implies that no certificate exists + // In this case, we want to expire the order after it has expired (+ some safety buffer) + if time.Now().After(order.Expires) { + shouldTidy = true + } + orderExpiry = order.Expires + } + if shouldTidy
== false { + return shouldTidy, orderExpiry, nil + } + + // Tidy this Order + // That includes any certificate acme//orders/orderPath/cert + // That also includes any related authorizations: acme//authorizations/ + + // First Authorizations + for _, authorizationId := range order.AuthorizationIds { + err = sc.Storage.Delete(sc.Context, getAuthorizationPath(accountId, authorizationId)) + if err != nil { + return false, orderExpiry, err + } + } + + // Normal Tidy will Take Care of the Certificate, we need to clean up the certificate to account tracker though + err = sc.Storage.Delete(sc.Context, getAcmeSerialToAccountTrackerPath(accountId, order.CertificateSerialNumber)) + if err != nil { + return false, orderExpiry, err + } + + // And Finally, the order: + err = sc.Storage.Delete(sc.Context, orderPath) + if err != nil { + return false, orderExpiry, err + } + b.tidyStatusIncDelAcmeOrderCount() + + return true, orderExpiry, nil +} diff --git a/builtin/logical/pki/path_acme_order_test.go b/builtin/logical/pki/path_acme_order_test.go new file mode 100644 index 000000000000..01472536ce33 --- /dev/null +++ b/builtin/logical/pki/path_acme_order_test.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "net" + "testing" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestACME_ValidateIdentifiersAgainstRole Verify the ACME order creation +// function verifies somewhat the identifiers that were provided have a +// decent chance of being allowed by the selected role. +func TestACME_ValidateIdentifiersAgainstRole(t *testing.T) { + b, _ := CreateBackendWithStorage(t) + + tests := []struct { + name string + role *issuing.RoleEntry + identifiers []*ACMEIdentifier + expectErr bool + }{ + { + name: "verbatim-role-allows-dns-ip", + role: issuing.SignVerbatimRole(), + identifiers: _buildACMEIdentifiers("test.com", "127.0.0.1"), + expectErr: false, + }, + { + name: "default-role-does-not-allow-dns", + role: buildTestRole(t, nil), + identifiers: _buildACMEIdentifiers("www.test.com"), + expectErr: true, + }, + { + name: "default-role-allows-ip", + role: buildTestRole(t, nil), + identifiers: _buildACMEIdentifiers("192.168.0.1"), + expectErr: false, + }, + { + name: "disable-ip-sans-forbids-ip", + role: buildTestRole(t, map[string]interface{}{"allow_ip_sans": false}), + identifiers: _buildACMEIdentifiers("192.168.0.1"), + expectErr: true, + }, + { + name: "role-no-wildcards-allowed-without", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allow_bare_domains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": false, + }), + identifiers: _buildACMEIdentifiers("www.test.com", "test.com"), + expectErr: false, + }, + { + name: "role-no-wildcards-allowed-with-wildcard", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": false, + }), + identifiers: _buildACMEIdentifiers("*.test.com"), + expectErr: true, + }, + { + name: "role-wildcards-allowed-with-wildcard", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": true, + }), + identifiers: _buildACMEIdentifiers("*.test.com"), + expectErr: false, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + err := b.validateIdentifiersAgainstRole(tt.role, tt.identifiers) + + if tt.expectErr { + require.Error(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) + // If we did return an error if should be classified as a ErrRejectedIdentifier + require.ErrorIs(t, err, ErrRejectedIdentifier) + } else { + require.NoError(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) + } + }) + } +} + +func _buildACMEIdentifiers(values ...string) []*ACMEIdentifier { + var identifiers []*ACMEIdentifier + + for _, value := range values { + identifiers = append(identifiers, _buildACMEIdentifier(value)) + } + + return identifiers +} + +func _buildACMEIdentifier(val string) *ACMEIdentifier { + ip := net.ParseIP(val) + if ip == nil { + identifier := &ACMEIdentifier{Type: "dns", Value: val, OriginalValue: val, IsWildcard: false} + _, _, _ = identifier.MaybeParseWildcard() + return identifier + } + + return &ACMEIdentifier{Type: "ip", Value: val, OriginalValue: val, IsWildcard: false} +} + +// Easily allow tests to create valid roles with proper defaults, since we don't have an easy +// way to generate roles with proper defaults, go through the createRole handler with the handlers +// field data so we pickup all the defaults specified there. +func buildTestRole(t *testing.T, config map[string]interface{}) *issuing.RoleEntry { + b, s := CreateBackendWithStorage(t) + + path := pathRoles(b) + fields := path.Fields + if config == nil { + config = map[string]interface{}{} + } + + if _, exists := config["name"]; !exists { + config["name"] = genUuid() + } + + _, err := b.pathRoleCreate(ctx, &logical.Request{Storage: s}, &framework.FieldData{Raw: config, Schema: fields}) + require.NoError(t, err, "failed generating role with config %v", config) + + role, err := b.GetRole(ctx, s, config["name"].(string)) + require.NoError(t, err, "failed loading stored role") + + return role +} diff --git a/builtin/logical/pki/path_acme_revoke.go b/builtin/logical/pki/path_acme_revoke.go new file mode 100644 index 000000000000..70d700d7d4b9 --- /dev/null +++ b/builtin/logical/pki/path_acme_revoke.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "fmt" + "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeRevoke(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeRevoke(b, baseUrl+"/revoke-cert", opts) +} + +func patternAcmeRevoke(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeParsedWrapper(opts, b.acmeRevocationHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeRevocationHandler(acmeCtx *acmeContext, _ *logical.Request, _ *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) { + var cert *x509.Certificate + + rawCertificate, present := data["certificate"] + if present { + certBase64, ok := rawCertificate.(string) + if !ok { + return nil, fmt.Errorf("invalid type (%T; expected string) for field 'certificate': %w", rawCertificate, ErrMalformed) + } + + certBytes, err := base64.RawURLEncoding.DecodeString(certBase64) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode certificate: %v: %w", err, ErrMalformed) + } + + cert, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %v: %w", err, ErrMalformed) + } + } else { + return nil, fmt.Errorf("bad request was lacking required field 'certificate': %w", ErrMalformed) + } + + rawReason, present := data["reason"] + if present { + reason, ok := rawReason.(float64) + if !ok { + return nil, fmt.Errorf("invalid type (%T; expected float64) for field 'reason': %w", rawReason, ErrMalformed) + } + + if int(reason) != 0 { + return nil, fmt.Errorf("Vault does not support revocation reasons (got %v; expected omitted or 0/unspecified): %w", int(reason), ErrBadRevocationReason) + } + } + + // If the certificate expired, there's no point in revoking it. + if cert.NotAfter.Before(time.Now()) { + return nil, fmt.Errorf("refusing to revoke expired certificate: %w", ErrMalformed) + } + + // Fetch the CRL config as we need it to ultimately do the + // revocation. This should be cached and thus relatively fast. + config, err := b.CrlBuilder().GetConfigWithUpdate(acmeCtx.sc) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: failed reading revocation config: %v: %w", err, ErrServerInternal) + } + + // Load our certificate from storage to ensure it exists and matches + // what was given to us. 
+ serial := serialFromCert(cert) + certEntry, err := fetchCertBySerial(acmeCtx.sc, issuing.PathCerts, serial) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: err reading global cert entry: %v: %w", err, ErrServerInternal) + } + if certEntry == nil { + return nil, fmt.Errorf("unable to revoke certificate: no global cert entry found: %w", ErrServerInternal) + } + + // Validate that the provided certificate matches the stored + // certificate. This completes the chain of: + // + // provided_auth -> provided_cert == stored cert. + // + // Allowing revocation to be safe. + // + // We use the non-subtle unsafe bytes equality check here as we have + // already fetched this certificate from storage, thus already leaking + // timing information that this cert exists. The user could thus simply + // fetch the cert from Vault matching this serial number via the unauthed + // pki/certs/:serial API endpoint. + if !bytes.Equal(certEntry.Value, cert.Raw) { + return nil, fmt.Errorf("unable to revoke certificate: supplied certificate does not match CA's stored value: %w", ErrMalformed) + } + + // Check if it was already revoked; in this case, we do not need to + // revoke it again and want to respond with an appropriate error message. + revEntry, err := fetchCertBySerial(acmeCtx.sc, "revoked/", serial) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: err reading revocation entry: %v: %w", err, ErrServerInternal) + } + if revEntry != nil { + return nil, fmt.Errorf("unable to revoke certificate: %w", ErrAlreadyRevoked) + } + + // Finally, do the relevant permissions/authorization check as + // appropriate based on the type of revocation happening. + if !userCtx.Existing { + return b.acmeRevocationByPoP(acmeCtx, userCtx, cert, config) + } + + return b.acmeRevocationByAccount(acmeCtx, userCtx, cert, config) +} + +func (b *backend) acmeRevocationByPoP(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *pki_backend.CrlConfig) (*logical.Response, error) { + // Since this account does not exist, ensure we've gotten a private key + // matching the certificate's public key. This private key isn't + // explicitly provided, but instead provided by proxy (public key, + // signature over message). That signature is validated by an earlier + // wrapper (VerifyJWS called by ParseRequestParams). What still remains + // is validating that this implicit private key (with given public key + // and valid JWS signature) matches the certificate's public key. + givenPublic, ok := userCtx.Key.Key.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("unable to revoke certificate: unable to parse message header's JWS key of type (%T): %w", userCtx.Key.Key, ErrMalformed) + } + + // Ensure that our PoP's implicit private key matches this certificate's + // public key. + if err := validatePublicKeyMatchesCert(givenPublic, cert); err != nil { + return nil, fmt.Errorf("unable to revoke certificate: unable to verify proof of possession of private key provided by proxy: %v: %w", err, ErrMalformed) + } + + // Now it is safe to revoke. + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() + + return revokeCert(acmeCtx.sc, config, cert) +} + +func (b *backend) acmeRevocationByAccount(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *pki_backend.CrlConfig) (*logical.Response, error) { + // Fetch the account; disallow revocations from non-valid-status accounts. 
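+
+	// A client typically reaches these two paths via something like
+	// golang.org/x/crypto/acme (an illustrative sketch; "client" is a
+	// configured *acme.Client and certKey the certificate's own key):
+	//
+	//	// nil key: the request is signed with the account key, landing here
+	//	err := client.RevokeCert(ctx, nil, cert.Raw, acme.CRLReasonUnspecified)
+	//	// certKey: proof-of-possession, handled by acmeRevocationByPoP above
+	//	err = client.RevokeCert(ctx, certKey, cert.Raw, acme.CRLReasonUnspecified)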
+	_, err := requireValidAcmeAccount(acmeCtx, userCtx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to lookup account: %w", err)
+	}
+
+	// We only support certificates issued by this user; we don't support
+	// cross-account revocations.
+	serial := serialFromCert(cert)
+	acmeEntry, err := b.GetAcmeState().GetIssuedCert(acmeCtx, userCtx.Kid, serial)
+	if err != nil || acmeEntry == nil {
+		return nil, fmt.Errorf("unable to revoke certificate: %v: %w", err, ErrMalformed)
+	}
+
+	// Now it is safe to revoke.
+	b.GetRevokeStorageLock().Lock()
+	defer b.GetRevokeStorageLock().Unlock()
+
+	return revokeCert(acmeCtx.sc, config, cert)
+}
diff --git a/builtin/logical/pki/path_acme_test.go b/builtin/logical/pki/path_acme_test.go
new file mode 100644
index 000000000000..c1c751f69d8e
--- /dev/null
+++ b/builtin/logical/pki/path_acme_test.go
@@ -0,0 +1,1921 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"path"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/pki/dnstest"
+	"github.com/hashicorp/vault/helper/constants"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/crypto/acme"
+	"golang.org/x/net/http2"
+)
+
+// TestAcmeBasicWorkflow validates a basic ACME workflow using the Go ACME client.
+func TestAcmeBasicWorkflow(t *testing.T) {
+	t.Parallel()
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+	cases := []struct {
+		name      string
+		prefixUrl string
+	}{
+		{"root", "acme/"},
+		{"role", "roles/test-role/acme/"},
+		{"issuer", "issuer/int-ca/acme/"},
+		{"issuer_role", "issuer/int-ca/roles/test-role/acme/"},
+	}
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			baseAcmeURL := "/v1/pki/" + tc.prefixUrl
+			accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+			require.NoError(t, err, "failed creating rsa key")
+
+			acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+			t.Logf("Testing discover on %s", baseAcmeURL)
+			discovery, err := acmeClient.Discover(testCtx)
+			require.NoError(t, err, "failed acme discovery call")
+
+			discoveryBaseUrl := client.Address() + baseAcmeURL
+			require.Equal(t, discoveryBaseUrl+"new-nonce", discovery.NonceURL)
+			require.Equal(t, discoveryBaseUrl+"new-account", discovery.RegURL)
+			require.Equal(t, discoveryBaseUrl+"new-order", discovery.OrderURL)
+			require.Equal(t, discoveryBaseUrl+"revoke-cert", discovery.RevokeURL)
+			require.Equal(t, discoveryBaseUrl+"key-change", discovery.KeyChangeURL)
+			require.False(t, discovery.ExternalAccountRequired, "bad value for external account required in directory")
+
+			// Attempt to update prior to creating an account
+			t.Logf("Testing updates with no proper account fail on %s", baseAcmeURL)
+			_, err = acmeClient.UpdateReg(testCtx, &acme.Account{Contact: []string{"mailto:shouldfail@example.com"}})
+			require.ErrorIs(t, err, acme.ErrNoAccount, "expected failure attempting to update prior to account registration")
+
+			// Create new account
+			t.Logf("Testing register on %s", baseAcmeURL)
+			acct, err := acmeClient.Register(testCtx, &acme.Account{
+				Contact: []string{"mailto:test@example.com", "mailto:test2@test.com"},
+			}, func(tosURL string) bool { return true })
+			require.NoError(t, err, "failed registering account")
+			require.Equal(t, acme.StatusValid, acct.Status)
+			require.Contains(t, acct.Contact, "mailto:test@example.com")
+			require.Contains(t, acct.Contact, "mailto:test2@test.com")
+			require.Len(t, acct.Contact, 2)
+
+			// Call register again; we should get the existing account back.
+			t.Logf("Testing duplicate register returns existing account on %s", baseAcmeURL)
+			_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
+			require.ErrorIs(t, err, acme.ErrAccountAlreadyExists,
+				"We should have returned a 200 status code which would have triggered an error in the golang acme"+
+					" library")
+
+			// Update contact
+			t.Logf("Testing Update account contacts on %s", baseAcmeURL)
+			acct.Contact = []string{"mailto:test3@example.com"}
+			acct2, err := acmeClient.UpdateReg(testCtx, acct)
+			require.NoError(t, err, "failed updating account")
+			require.Equal(t, acme.StatusValid, acct2.Status)
+			// We should get this back, not the original values.
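+			// For reference, the request underlying UpdateReg is a POST to
+			// the account URL whose JWS-signed payload looks roughly like
+			// this (field name per RFC 8555; sketch only):
+			//
+			//	{"contact": ["mailto:test3@example.com"]}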
+			require.Contains(t, acct2.Contact, "mailto:test3@example.com")
+			require.Len(t, acct2.Contact, 1)
+
+			// Make sure orders do not accept dates
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
+				acme.WithOrderNotBefore(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with NotBefore set")
+
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
+				acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with NotAfter set")
+
+			// Make sure DNS identifiers cannot include IP addresses
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "127.0.0.1"}},
+				acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "*.127.0.0.1"}},
+				acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
+
+			// Create an order
+			t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+			identifiers := []string{"localhost.localdomain", "*.localdomain"}
+			createOrder, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+				{Type: "dns", Value: identifiers[0]},
+				{Type: "dns", Value: identifiers[1]},
+			})
+			require.NoError(t, err, "failed creating order")
+			require.Equal(t, acme.StatusPending, createOrder.Status)
+			require.Empty(t, createOrder.CertURL)
+			require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
+			require.Len(t, createOrder.AuthzURLs, 2, "expected two authzurls")
+
+			// Get order
+			t.Logf("Testing GetOrder on %s", baseAcmeURL)
+			getOrder, err := acmeClient.GetOrder(testCtx, createOrder.URI)
+			require.NoError(t, err, "failed fetching order")
+			require.Equal(t, acme.StatusPending, getOrder.Status)
+			if diffs := deep.Equal(createOrder, getOrder); diffs != nil {
+				t.Fatalf("Differences exist between create and get order: \n%v", strings.Join(diffs, "\n"))
+			}
+
+			// Make sure the identifiers returned in the order contain the original values
+			var ids []string
+			for _, id := range getOrder.Identifiers {
+				require.Equal(t, "dns", id.Type)
+				ids = append(ids, id.Value)
+			}
+			require.ElementsMatch(t, identifiers, ids, "order responses should have all original identifiers")
+
+			// Load authorizations
+			var authorizations []*acme.Authorization
+			for _, authUrl := range getOrder.AuthzURLs {
+				auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
+				require.NoError(t, err, "failed fetching authorization: %s", authUrl)
+
+				authorizations = append(authorizations, auth)
+			}
+
+			// We should have 2 separate auth challenges as we have two separate identifiers
+			require.Len(t, authorizations, 2, "expected 2 authorizations in order")
+
+			var wildcardAuth *acme.Authorization
+			var domainAuth *acme.Authorization
+			for _, auth := range authorizations {
+				if auth.Wildcard {
+					wildcardAuth = auth
+				} else {
+					domainAuth = auth
+				}
+			}
+
+			// Test the values for the domain authorization
+			require.Equal(t, acme.StatusPending, domainAuth.Status)
+			require.Equal(t, "dns", domainAuth.Identifier.Type)
+			require.Equal(t, "localhost.localdomain", domainAuth.Identifier.Value)
+			require.False(t, domainAuth.Wildcard, "should not be a wildcard")
+			require.True(t, domainAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
status") + + require.Len(t, domainAuth.Challenges, 3, "expected three challenges") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status) + require.True(t, domainAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "http-01", domainAuth.Challenges[0].Type) + require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[1].Status) + require.True(t, domainAuth.Challenges[1].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "dns-01", domainAuth.Challenges[1].Type) + require.NotEmpty(t, domainAuth.Challenges[1].Token, "missing challenge token") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[2].Status) + require.True(t, domainAuth.Challenges[2].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "tls-alpn-01", domainAuth.Challenges[2].Type) + require.NotEmpty(t, domainAuth.Challenges[2].Token, "missing challenge token") + + // Test the values for the wildcard authentication + require.Equal(t, acme.StatusPending, wildcardAuth.Status) + require.Equal(t, "dns", wildcardAuth.Identifier.Type) + require.Equal(t, "localdomain", wildcardAuth.Identifier.Value) // Make sure we strip the *. in auth responses + require.True(t, wildcardAuth.Wildcard, "should be a wildcard") + require.True(t, wildcardAuth.Expires.IsZero(), "authorization should only have expiry set on valid status") + + require.Len(t, wildcardAuth.Challenges, 1, "expected one challenge") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status) + require.True(t, wildcardAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "dns-01", wildcardAuth.Challenges[0].Type) + require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token") + + // Make sure that getting a challenge does not start it. + challenge, err := acmeClient.GetChallenge(testCtx, domainAuth.Challenges[0].URI) + require.NoError(t, err, "failed to load challenge") + require.Equal(t, acme.StatusPending, challenge.Status) + require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "http-01", challenge.Type) + + // Accept a challenge; this triggers validation to start. + challenge, err = acmeClient.Accept(testCtx, domainAuth.Challenges[0]) + require.NoError(t, err, "failed to load challenge") + require.Equal(t, acme.StatusProcessing, challenge.Status) + require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "http-01", challenge.Type) + + require.NotEmpty(t, challenge.Token, "missing challenge token") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, getOrder) + + // Make sure sending a CSR with the account key gets rejected. + goodCr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: identifiers[1]}, + DNSNames: []string{identifiers[0], identifiers[1]}, + } + t.Logf("csr: %v", goodCr) + + // We want to make sure people are not using the same keys for CSR/Certs and their ACME account. 
+			csrSignedWithAccountKey, err := x509.CreateCertificateRequest(rand.Reader, goodCr, accountKey)
+			require.NoError(t, err, "failed generating csr")
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrSignedWithAccountKey, true)
+			require.Error(t, err, "should not be allowed to use the account key for a CSR")
+
+			csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+			require.NoError(t, err, "failed generating key for CSR")
+
+			// Validate we reject CSRs that contain a CN that isn't in the original order
+			badCr := &x509.CertificateRequest{
+				Subject:  pkix.Name{CommonName: "not-in-original-order.com"},
+				DNSNames: []string{identifiers[0], identifiers[1]},
+			}
+			t.Logf("csr: %v", badCr)
+
+			csrWithBadCName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad common name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadCName, true)
+			require.Error(t, err, "should not be allowed to use a CSR with a different common name than the order")
+
+			// Validate we reject CSRs that contain DNS names that aren't in the original order
+			badCr = &x509.CertificateRequest{
+				Subject:  pkix.Name{CommonName: createOrder.Identifiers[0].Value},
+				DNSNames: []string{"www.notinorder.com"},
+			}
+
+			csrWithBadName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
+			require.Error(t, err, "should not be allowed to use a CSR with different names than the order")
+
+			// Validate we reject CSRs that contain IP addresses that weren't in the original order
+			badCr = &x509.CertificateRequest{
+				Subject:     pkix.Name{CommonName: createOrder.Identifiers[0].Value},
+				IPAddresses: []net.IP{{127, 0, 0, 1}},
+			}
+
+			csrWithBadIP, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadIP, true)
+			require.Error(t, err, "should not be allowed to use a CSR with a different IP address than the order")
+
+			// Validate we reject CSRs that contain fewer names than in the original order.
+			badCr = &x509.CertificateRequest{
+				Subject: pkix.Name{CommonName: identifiers[0]},
+			}
+
+			csrWithBadName, err = x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
+			require.Error(t, err, "should not be allowed to use a CSR with fewer names than the order")
+
+			// Finally, test that a proper CSR with the correct names, signed with a different key, works.
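+			// With the root-ca -> int-ca -> leaf hierarchy that
+			// setupAcmeBackend creates, finalizing and downloading the order
+			// returns the full chain, leaf first; the length assertion below
+			// relies on that.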
+			csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+			require.NoError(t, err, "failed generating csr")
+
+			certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
+			require.NoError(t, err, "failed finalizing order")
+			require.Len(t, certs, 3, "expected three items within the returned certs")
+
+			testAcmeCertSignedByCa(t, client, certs, "int-ca")
+
+			// Make sure the certificate has a NotAfter date of a maximum of 90 days
+			acmeCert, err := x509.ParseCertificate(certs[0])
+			require.NoError(t, err, "failed parsing acme cert bytes")
+			maxAcmeNotAfter := time.Now().Add(defaultAcmeMaxTTL)
+			if maxAcmeNotAfter.Before(acmeCert.NotAfter) {
+				require.Fail(t, fmt.Sprintf("certificate has a NotAfter value %v greater than ACME max ttl %v", acmeCert.NotAfter, maxAcmeNotAfter))
+			}
+
+			// Can we revoke it using account-key revocation?
+			err = acmeClient.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
+			require.NoError(t, err, "failed to revoke certificate through account key")
+
+			// Make sure it was actually revoked
+			certResp, err := client.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
+			require.NoError(t, err, "failed to read certificate status")
+			require.NotNil(t, certResp, "certificate status response was nil")
+			revocationTime := certResp.Data["revocation_time"].(json.Number)
+			revocationTimeInt, err := revocationTime.Int64()
+			require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
+			require.Greater(t, revocationTimeInt, int64(0),
+				"revocation time was not greater than 0; revocation did not work, value was: %v", revocationTimeInt)
+
+			// Make sure we can revoke an authorization as a client
+			err = acmeClient.RevokeAuthorization(ctx, authorizations[0].URI)
+			require.NoError(t, err, "failed revoking authorization status")
+
+			revokedAuth, err := acmeClient.GetAuthorization(ctx, authorizations[0].URI)
+			require.NoError(t, err, "failed fetching authorization")
+			require.Equal(t, acme.StatusDeactivated, revokedAuth.Status)
+
+			// Deactivate account
+			t.Logf("Testing deactivate account on %s", baseAcmeURL)
+			err = acmeClient.DeactivateReg(testCtx)
+			require.NoError(t, err, "failed deactivating account")
+
+			// Make sure we get an unauthorized error trying to update the account again.
+			t.Logf("Testing update on deactivated account fails on %s", baseAcmeURL)
+			_, err = acmeClient.UpdateReg(testCtx, acct)
+			require.Error(t, err, "expected account to be deactivated")
+			require.IsType(t, &acme.Error{}, err, "expected acme error type")
+			acmeErr := err.(*acme.Error)
+			require.Equal(t, "urn:ietf:params:acme:error:unauthorized", acmeErr.ProblemType)
+		})
+	}
+}
+
+// TestAcmeBasicWorkflowWithEab verifies that new accounts require an EAB when enforced by configuration.
+func TestAcmeBasicWorkflowWithEab(t *testing.T) { + t.Parallel() + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Enable EAB + _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "always-required", + }) + require.NoError(t, err) + + cases := []struct { + name string + prefixUrl string + }{ + {"root", "acme/"}, + {"role", "roles/test-role/acme/"}, + {"issuer", "issuer/int-ca/acme/"}, + {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed creating ec key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + t.Logf("Testing discover on %s", baseAcmeURL) + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + require.True(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") + + // Create new account without EAB, should fail + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.ErrorContains(t, err, "urn:ietf:params:acme:error:externalAccountRequired", + "expected failure creating an account without eab") + + // Test fetch, list, delete workflow + kid, _ := getEABKey(t, client, tc.prefixUrl) + resp, err := client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.NotNil(t, resp, "list response for eab tokens should not be nil") + require.Contains(t, resp.Data, "keys") + require.Contains(t, resp.Data, "key_info") + require.Len(t, resp.Data["keys"], 1) + require.Contains(t, resp.Data["keys"], kid) + + _, err = client.Logical().DeleteWithContext(testCtx, "pki/eab/"+kid) + require.NoError(t, err, "failed to delete eab") + + // List eabs should return zero results + resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.Nil(t, resp, "list response for eab tokens should have been nil") + + // fetch a new EAB + kid, eabKeyBytes := getEABKey(t, client, tc.prefixUrl) + acct := &acme.Account{ + ExternalAccountBinding: &acme.ExternalAccountBinding{ + KID: kid, + Key: eabKeyBytes, + }, + } + + // Make sure we can list our key + resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.NotNil(t, resp, "list response for eab tokens should not be nil") + require.Contains(t, resp.Data, "keys") + require.Contains(t, resp.Data, "key_info") + require.Len(t, resp.Data["keys"], 1) + require.Contains(t, resp.Data["keys"], kid) + + keyInfo := resp.Data["key_info"].(map[string]interface{}) + require.Contains(t, keyInfo, kid) + + infoForKid := keyInfo[kid].(map[string]interface{}) + require.Equal(t, "hs", infoForKid["key_type"]) + require.Equal(t, tc.prefixUrl+"directory", infoForKid["acme_directory"]) + + // Create new account with EAB + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering new account with eab") + + // Make sure our EAB is 
no longer available
+			resp, err = client.Logical().ListWithContext(context.Background(), "pki/eab")
+			require.NoError(t, err, "failed to list eab tokens")
+			require.Nil(t, resp, "list response for eab tokens should have been nil due to empty list")
+
+			// Attempt to create another account with the same EAB as before -- should fail
+			accountKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+			require.NoError(t, err, "failed creating ec key")
+
+			acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
+			acct2 := &acme.Account{
+				ExternalAccountBinding: &acme.ExternalAccountBinding{
+					KID: kid,
+					Key: eabKeyBytes,
+				},
+			}
+
+			_, err = acmeClient2.Register(testCtx, acct2, func(tosURL string) bool { return true })
+			require.ErrorContains(t, err, "urn:ietf:params:acme:error:unauthorized", "should fail due to EAB re-use")
+
+			// We can look up an existing account without an EAB if we have the account key
+			_, err = acmeClient.GetReg(testCtx /* unused url */, "")
+			require.NoError(t, err, "expected to lookup existing account without eab")
+		})
+	}
+}
+
+// TestAcmeNonce validates that we get back a nonce with the proper status codes
+// based on the HTTP method used (GET vs HEAD).
+func TestAcmeNonce(t *testing.T) {
+	t.Parallel()
+	cluster, client, pathConfig := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	cases := []struct {
+		name         string
+		prefixUrl    string
+		directoryUrl string
+	}{
+		{"root", "", "pki/acme/new-nonce"},
+		{"role", "/roles/test-role", "pki/roles/test-role/acme/new-nonce"},
+		{"issuer", "/issuer/default", "pki/issuer/default/acme/new-nonce"},
+		{"issuer_role", "/issuer/default/roles/test-role", "pki/issuer/default/roles/test-role/acme/new-nonce"},
+	}
+
+	for _, tc := range cases {
+		for _, httpOp := range []string{"get", "header"} {
+			t.Run(fmt.Sprintf("%s-%s", tc.name, httpOp), func(t *testing.T) {
+				var req *api.Request
+				switch httpOp {
+				case "get":
+					req = client.NewRequest(http.MethodGet, "/v1/"+tc.directoryUrl)
+				case "header":
+					req = client.NewRequest(http.MethodHead, "/v1/"+tc.directoryUrl)
+				}
+				res, err := client.RawRequestWithContext(ctx, req)
+				require.NoError(t, err, "failed sending raw request")
+				_ = res.Body.Close()
+
+				// Proper Status Code
+				switch httpOp {
+				case "get":
+					require.Equal(t, http.StatusNoContent, res.StatusCode)
+				case "header":
+					require.Equal(t, http.StatusOK, res.StatusCode)
+				}
+
+				// Make sure we don't have a Content-Type header.
+				require.Equal(t, "", res.Header.Get("Content-Type"))
+
+				// Make sure we return the Cache-Control header
+				require.Contains(t, res.Header.Get("Cache-Control"), "no-store",
+					"missing Cache-Control header with no-store header value")
+
+				// Test for our nonce header value
+				require.NotEmpty(t, res.Header.Get("Replay-Nonce"), "missing Replay-Nonce header with an actual value")
+
+				// Test Link header value
+				expectedLinkHeader := fmt.Sprintf("<%s>;rel=\"index\"", pathConfig+tc.prefixUrl+"/acme/directory")
+				require.Contains(t, res.Header.Get("Link"), expectedLinkHeader,
+					"different value for link header than expected")
+			})
+		}
+	}
+}
+
+// TestAcmeClusterPathNotConfigured performs basic testing of the ACME error handler.
+func TestAcmeClusterPathNotConfigured(t *testing.T) {
+	t.Parallel()
+	cluster, client := setupTestPkiCluster(t)
+	defer cluster.Cleanup()
+
+	// Go sneaky, sneaky and update the acme configuration through sys/raw to bypass config/cluster path checks
+	pkiMount := findStorageMountUuid(t, client, "pki")
+	rawPath := path.Join("/sys/raw/logical/", pkiMount, storageAcmeConfig)
+	_, err := client.Logical().WriteWithContext(context.Background(), rawPath, map[string]interface{}{
+		"value": "{\"enabled\": true, \"eab_policy_name\": \"not-required\"}",
+	})
+	require.NoError(t, err, "failed updating acme config through sys/raw")
+
+	// Force reload the plugin so we read the new config we slipped in.
+	_, err = client.Sys().ReloadPluginWithContext(context.Background(), &api.ReloadPluginInput{Mounts: []string{"pki"}})
+	require.NoError(t, err, "failed reloading plugin")
+
+	// Do not fill in the path option within the local cluster configuration
+	cases := []struct {
+		name         string
+		directoryUrl string
+	}{
+		{"root", "pki/acme/directory"},
+		{"role", "pki/roles/test-role/acme/directory"},
+		{"issuer", "pki/issuer/default/acme/directory"},
+		{"issuer_role", "pki/issuer/default/roles/test-role/acme/directory"},
+	}
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			dirResp, err := client.Logical().ReadRawWithContext(testCtx, tc.directoryUrl)
+			require.Error(t, err, "expected failure reading ACME directory configuration, got none")
+
+			require.Equal(t, "application/problem+json", dirResp.Header.Get("Content-Type"))
+			require.Equal(t, http.StatusInternalServerError, dirResp.StatusCode)
+
+			rawBodyBytes, err := io.ReadAll(dirResp.Body)
+			require.NoError(t, err, "failed reading from directory response body")
+			_ = dirResp.Body.Close()
+
+			respType := map[string]interface{}{}
+			err = json.Unmarshal(rawBodyBytes, &respType)
+			require.NoError(t, err, "failed unmarshalling ACME directory response body")
+
+			require.Equal(t, "urn:ietf:params:acme:error:serverInternal", respType["type"])
+			require.NotEmpty(t, respType["detail"])
+		})
+	}
+}
+
+// TestAcmeAccountsCrossingDirectoryPath makes sure that we get an error if an account attempts
+// to use a different ACME directory path than the one it was registered under.
+func TestAcmeAccountsCrossingDirectoryPath(t *testing.T) {
+	t.Parallel()
+	cluster, _, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Create new account
+	acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+
+	// Try to update the account under another ACME directory
+	baseAcmeURL2 := "/v1/pki/roles/test-role/acme/"
+	acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL2, accountKey)
+	acct.Contact = []string{"mailto:test3@example.com"}
+	_, err = acmeClient2.UpdateReg(testCtx, acct)
+	require.Error(t, err, "successfully updated account when we should have failed due to different directory")
+	// We don't test for the specific error about using the wrong directory, as the Go library
+	// swallows the error we send back into a generic no-account error.
+}
+
+// TestAcmeEabCrossingDirectoryPath makes sure that we get an error if an account attempts to use
+// an EAB created under a different ACME directory path.
+func TestAcmeEabCrossingDirectoryPath(t *testing.T) {
+	t.Parallel()
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	// Enable EAB
+	_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+		"enabled":    true,
+		"eab_policy": "always-required",
+	})
+	require.NoError(t, err)
+
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Fetch a new EAB
+	kid, eabKeyBytes := getEABKey(t, client, "roles/test-role/acme/")
+	acct := &acme.Account{
+		ExternalAccountBinding: &acme.ExternalAccountBinding{
+			KID: kid,
+			Key: eabKeyBytes,
+		},
+	}
+
+	// Create new account
+	_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
+	require.ErrorContains(t, err, "failed to verify eab", "should have failed as EAB is for a different directory")
+}
+
+// TestAcmeDisabledWithEnvVar verifies that setting VAULT_DISABLE_PUBLIC_ACME completely
+// disables the ACME service.
+func TestAcmeDisabledWithEnvVar(t *testing.T) {
+	// Set up a cluster with the EAB policy set to not-required initially, as the
+	// configuration write validates against the environment variable.
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	// Seal, set the environment variable, and unseal; we now have a cluster whose ACME
+	// configuration says it is enabled with a now-disallowed EAB policy.
+	cluster.EnsureCoresSealed(t)
+	t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
+	cluster.UnsealCores(t)
+
+	// Make sure that ACME is disabled now.
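+	// A sketch of the gate being exercised (assumption; the real check lives
+	// in the backend, not in this test):
+	//
+	//	if disabled, _ := strconv.ParseBool(os.Getenv("VAULT_DISABLE_PUBLIC_ACME")); disabled {
+	//		// treat every ACME path as unsupported, surfacing a 404
+	//	}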
+	for _, method := range []string{http.MethodHead, http.MethodGet} {
+		t.Run(method, func(t *testing.T) {
+			req := client.NewRequest(method, "/v1/pki/acme/new-nonce")
+			_, err := client.RawRequestWithContext(ctx, req)
+			require.Error(t, err, "should have received an error as ACME should have been disabled")
+
+			if apiError, ok := err.(*api.ResponseError); ok {
+				require.Equal(t, 404, apiError.StatusCode)
+			}
+		})
+	}
+}
+
+// TestAcmeConfigChecksPublicAcmeEnv verifies that certain EAB policy values cannot be set if the env var is set
+func TestAcmeConfigChecksPublicAcmeEnv(t *testing.T) {
+	t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
+	cluster, client := setupTestPkiCluster(t)
+	defer cluster.Cleanup()
+
+	_, err := client.Logical().WriteWithContext(context.Background(), "pki/config/cluster", map[string]interface{}{
+		"path": "https://dadgarcorp.com/v1/pki",
+	})
+	require.NoError(t, err)
+
+	_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+		"enabled":    true,
+		"eab_policy": string(eabPolicyAlwaysRequired),
+	})
+	require.NoError(t, err)
+
+	for _, policyName := range []EabPolicyName{eabPolicyNewAccountRequired, eabPolicyNotRequired} {
+		_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+			"enabled":    true,
+			"eab_policy": string(policyName),
+		})
+		require.Error(t, err, "eab policy %s should not have been allowed to be set", policyName)
+	}
+
+	// Make sure we can disable ACME and the eab policy is not checked
+	_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+		"enabled":    false,
+		"eab_policy": string(eabPolicyNotRequired),
+	})
+	require.NoError(t, err)
+}
+
+// TestAcmeTruncatesToIssuerExpiry makes sure that if the selected issuer's expiry is shorter than the
+// CSR's selected TTL value in ACME, and the issuer's leaf_not_after_behavior setting is set to Err,
+// we will override the configured behavior and truncate to the issuer's NotAfter
+func TestAcmeTruncatesToIssuerExpiry(t *testing.T) {
+	t.Parallel()
+
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	mount := "pki"
+	resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
+		map[string]interface{}{
+			"key_name":    "short-key",
+			"key_type":    "ec",
+			"common_name": "test.com",
+		})
+	require.NoError(t, err, "failed creating intermediary CSR")
+	intermediateCSR := resp.Data["csr"].(string)
+
+	// Sign the intermediate CSR using /pki
+	resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
+		"csr":     intermediateCSR,
+		"ttl":     "10m",
+		"max_ttl": "1h",
+	})
+	require.NoError(t, err, "failed signing intermediary CSR")
+	intermediateCertPEM := resp.Data["certificate"].(string)
+
+	shortCa := parseCert(t, intermediateCertPEM)
+
+	// Import the signed intermediate cert back into the mount
+	resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
+		"pem_bundle": intermediateCertPEM,
+	})
+	require.NoError(t, err, "failed importing intermediary cert")
+	importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
+	require.Len(t, importedIssuersRaw, 1)
+	shortCaUuid := importedIssuersRaw[0].(string)
+
+	_, err = client.Logical().Write(mount+"/issuer/"+shortCaUuid, map[string]interface{}{
"leaf_not_after_behavior": "err", + "issuer_name": "short-ca", + }) + require.NoError(t, err, "failed updating issuer name") + + baseAcmeURL := "/v1/pki/issuer/short-ca/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, order) + + // Build a proper CSR, with the correct name and signed with a different key works. + goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "failed finalizing order") + require.Len(t, certs, 3, "expected full acme chain") + + testAcmeCertSignedByCa(t, client, certs, "short-ca") + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, shortCa.NotAfter, acmeCert.NotAfter, "certificate times aren't the same") +} + +// TestAcmeRoleExtKeyUsage verify that ACME by default ignores the role's various ExtKeyUsage flags, +// but if the ACME configuration override of allow_role_ext_key_usage is set that we then honor +// the role's flag. 
+func TestAcmeRoleExtKeyUsage(t *testing.T) {
+	t.Parallel()
+
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	roleName := "test-role"
+
+	roleOpt := map[string]interface{}{
+		"ttl":                         "365h",
+		"max_ttl":                     "720h",
+		"key_type":                    "any",
+		"allowed_domains":             "localdomain",
+		"allow_subdomains":            "true",
+		"allow_wildcard_certificates": "true",
+		"require_cn":                  "true", /* explicit default */
+		"server_flag":                 "true",
+		"client_flag":                 "true",
+		"code_signing_flag":           "true",
+		"email_protection_flag":       "true",
+	}
+
+	_, err := client.Logical().Write("pki/roles/"+roleName, roleOpt)
+	require.NoError(t, err, "failed creating role test-role")
+
+	baseAcmeURL := "/v1/pki/roles/" + roleName + "/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Create new account
+	t.Logf("Testing register on %s", baseAcmeURL)
+	acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+
+	// Create an order
+	t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+	identifiers := []string{"*.localdomain"}
+	order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+		{Type: "dns", Value: identifiers[0]},
+	})
+	require.NoError(t, err, "failed creating order")
+
+	// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
+	markAuthorizationSuccess(t, client, acmeClient, acct, order)
+
+	// Build a proper CSR with the correct name, signed with a different key.
+	goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating key for CSR")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+	require.NoError(t, err, "order finalization failed")
+	require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
+	acmeCert, err := x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert")
+
+	require.Equal(t, 1, len(acmeCert.ExtKeyUsage), "mismatch on expected ExtKeyUsages")
+	require.ElementsMatch(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, acmeCert.ExtKeyUsage,
+		"mismatch of ExtKeyUsage flags")
+
+	// Now turn on the ACME configuration option allow_role_ext_key_usage and retest to make sure
+	// we get a certificate with all of them
+	_, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+		"enabled":                  true,
+		"eab_policy":               "not-required",
+		"allow_role_ext_key_usage": true,
+	})
+	require.NoError(t, err, "failed updating ACME configuration")
+
+	t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+	order, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+		{Type: "dns", Value: identifiers[0]},
+	})
+	require.NoError(t, err, "failed creating order")
+
+	// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
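+	// (markAuthorizationSuccess, defined later in this file, rewrites the
+	// stored authorization through sys/raw; the JSON value it patches looks
+	// roughly like the following — field names assumed from ACMEAuthorization:
+	//
+	//	{"status": "valid", "challenges": [{"type": "http-01", "status": "valid"}, ...]}
+	// )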
+ markAuthorizationSuccess(t, client, acmeClient, acct, order) + + certs, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization failed") + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + acmeCert, err = x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, 4, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages") + require.ElementsMatch(t, []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageCodeSigning, x509.ExtKeyUsageEmailProtection, + }, + acmeCert.ExtKeyUsage, "mismatch of ExtKeyUsage flags") +} + +func TestIssuerRoleDirectoryAssociations(t *testing.T) { + t.Parallel() + + // This creates two issuers for us (root-ca, int-ca) and two + // roles (test-role, acme) that we can use with various directory + // configurations. + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Setup DNS for validations. + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to specify dns resolver") + + // 1. Use a forbidden role should fail. + resp, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "enabled": true, + "allowed_roles": []string{"acme"}, + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under root-ca issuer") + + // 2. Use a forbidden issuer should fail. 
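+	// The enforcement presumably mirrors the role check above: the directory
+	// handler resolves the issuer from the URL and rejects it unless the
+	// allow-list matches (sketch; config field and helper names assumed):
+	//
+	//	if !isAllowed(config.AllowedIssuers, issuerName) {
+	//		// the directory read fails, as asserted below
+	//	}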
+ resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"acme"}, + "allowed_issuers": []string{"int-ca"}, + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") + require.Error(t, err, "failed to forbid usage of acme under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") + + // 3. Setting the default directory to be a sign-verbatim policy and + // using two different CAs should result in certs signed by each CA. + resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"*"}, + "allowed_issuers": []string{"*"}, + "default_directory_policy": "sign-verbatim", + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + // default == int-ca + acmeClientDefault := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/default/acme/", nil) + defaultLeafCert := doACMEForDomainWithDNS(t, dns, acmeClientDefault, []string{"default-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, defaultLeafCert, "pki/issuer/int-ca") + + acmeClientIntCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/int-ca/acme/", nil) + intCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientIntCA, []string{"int-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, intCALeafCert, "pki/issuer/int-ca") + + acmeClientRootCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/root-ca/acme/", nil) + rootCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientRootCA, []string{"root-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, rootCALeafCert, "pki/issuer/root-ca") + + // 4. Using a role-based default directory should allow us to control leaf + // issuance on the base and issuer-specific directories. 
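+	// default_directory_policy takes (per the writes in this test) either
+	// "sign-verbatim" or "role:<name>"; parsing the latter is presumably
+	// along the lines of (sketch):
+	//
+	//	if strings.HasPrefix(policy, "role:") {
+	//		roleName := strings.TrimPrefix(policy, "role:")
+	//		// issue under that role's constraints
+	//	}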
+ resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"*"}, + "allowed_issuers": []string{"*"}, + "default_directory_policy": "role:acme", + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + resp, err = client.Logical().JSONMergePatch(testCtx, "pki/roles/acme", map[string]interface{}{ + "ou": "IT Security", + "organization": []string{"Dadgar Corporation, Limited"}, + "allow_any_name": true, + }) + require.NoError(t, err, "failed to write role differentiator") + require.NotNil(t, resp) + + for _, issuer := range []string{"", "default", "int-ca", "root-ca"} { + // Path should override role. + directory := "/v1/pki/issuer/" + issuer + "/acme/" + issuerPath := "/pki/issuer/" + issuer + if issuer == "" { + directory = "/v1/pki/acme/" + issuerPath = "/pki/issuer/int-ca" + } else if issuer == "default" { + issuerPath = "/pki/issuer/int-ca" + } + + t.Logf("using directory: %v / issuer: %v", directory, issuerPath) + + acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) + leafCert := doACMEForDomainWithDNS(t, dns, acmeClient, []string{"role-restricted.dadgarcorp.com"}) + require.Contains(t, leafCert.Subject.Organization, "Dadgar Corporation, Limited", "on directory: %v", directory) + require.Contains(t, leafCert.Subject.OrganizationalUnit, "IT Security", "on directory: %v", directory) + requireSignedByAtPath(t, client, leafCert, issuerPath) + } +} + +func TestACMESubjectFieldsAndExtensionsIgnored(t *testing.T) { + t.Parallel() + + // This creates two issuers for us (root-ca, int-ca) and two + // roles (test-role, acme) that we can use with various directory + // configurations. + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Setup DNS for validations. + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to specify dns resolver") + + // Use the default sign-verbatim policy and ensure OU does not get set. + directory := "/v1/pki/acme/" + domains := []string{"no-ou.dadgarcorp.com"} + acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0], OrganizationalUnit: []string{"DadgarCorp IT"}}, + DNSNames: domains, + } + cert := doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) + t.Logf("Got certificate: %v", cert) + require.Empty(t, cert.Subject.OrganizationalUnit) + + // Use the default sign-verbatim policy and ensure extension does not get set. 
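+	// The delta CRL indicator used below is the standard extension with OID
+	// 2.5.29.27 (certutil.DeltaCRLIndicatorOID); a CSR carrying it should
+	// have it stripped during ACME issuance, which the loop below asserts.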
+	domains = []string{"no-ext.dadgarcorp.com"}
+	extension, err := certutil.CreateDeltaCRLIndicatorExt(12345)
+	require.NoError(t, err)
+	cr = &x509.CertificateRequest{
+		Subject:         pkix.Name{CommonName: domains[0]},
+		DNSNames:        domains,
+		ExtraExtensions: []pkix.Extension{extension},
+	}
+	cert = doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr)
+	t.Logf("Got certificate: %v", cert)
+	for _, ext := range cert.Extensions {
+		require.False(t, ext.Id.Equal(certutil.DeltaCRLIndicatorOID))
+	}
+	require.NotEmpty(t, cert.Extensions)
+}
+
+// TestAcmeWithCsrIncludingBasicConstraintExtension verifies that we error out for a CSR whose basic
+// constraints extension requests a certificate with IsCA set to true (false is okay), and that in
+// either case the extension is not present on the returned certificate.
+func TestAcmeWithCsrIncludingBasicConstraintExtension(t *testing.T) {
+	t.Parallel()
+
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Create new account
+	t.Logf("Testing register on %s", baseAcmeURL)
+	acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+
+	// Create an order
+	t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+	identifiers := []string{"*.localdomain"}
+	order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+		{Type: "dns", Value: identifiers[0]},
+	})
+	require.NoError(t, err, "failed creating order")
+
+	// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
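+	// Basic Constraints is the standard extension with OID 2.5.29.19
+	// (certutil.ExtensionBasicConstraintsOID below); an ACME server must
+	// never honor a client CSR requesting a CA certificate, so isCA=true must
+	// be rejected while isCA=false is merely ignored and stripped.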
+	markAuthorizationSuccess(t, client, acmeClient, acct, order)
+
+	// Build a CSR with IsCA set to true, making sure we reject it
+	extension, err := certutil.CreateBasicConstraintExtension(true, -1)
+	require.NoError(t, err, "failed generating basic constraint extension")
+
+	isCATrueCSR := &x509.CertificateRequest{
+		DNSNames:        []string{identifiers[0]},
+		ExtraExtensions: []pkix.Extension{extension},
+	}
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating key for CSR")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, isCATrueCSR, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	_, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+	require.Error(t, err, "order finalization should have failed with IsCA set to true")
+
+	extension, err = certutil.CreateBasicConstraintExtension(false, -1)
+	require.NoError(t, err, "failed generating basic constraint extension")
+	isCAFalseCSR := &x509.CertificateRequest{
+		DNSNames:        []string{identifiers[0]},
+		ExtraExtensions: []pkix.Extension{extension},
+	}
+
+	csr, err = x509.CreateCertificateRequest(rand.Reader, isCAFalseCSR, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+	require.NoError(t, err, "order finalization should have succeeded with IsCA set to false")
+
+	require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
+	acmeCert, err := x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert")
+
+	// Make sure we don't have any basic constraint extension within the returned cert
+	for _, ext := range acmeCert.Extensions {
+		if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) {
+			// We shouldn't have this extension in our cert
+			t.Fatalf("acme csr contained a basic constraints extension")
+		}
+	}
+}
+
+func markAuthorizationSuccess(t *testing.T, client *api.Client, acmeClient *acme.Client, acct *acme.Account, order *acme.Order) {
+	testCtx := context.Background()
+
+	pkiMount := findStorageMountUuid(t, client, "pki")
+
+	// Delete any and all challenge validation entries to stop the engine from overwriting our hack here
+	i := 0
+	for {
+		deleteCvEntries(t, client, pkiMount)
+
+		accountId := acct.URI[strings.LastIndex(acct.URI, "/"):]
+		for _, authURI := range order.AuthzURLs {
+			authId := authURI[strings.LastIndex(authURI, "/"):]
+
+			// sys/raw does not work with namespaces
+			baseClient := client.WithNamespace("")
+
+			values, err := baseClient.Logical().ListWithContext(testCtx, "sys/raw/logical/")
+			require.NoError(t, err)
+			require.NotNil(t, values, "expected sys/raw list to return values")
+
+			rawPath := path.Join("sys/raw/logical/", pkiMount, getAuthorizationPath(accountId, authId))
+			resp, err := baseClient.Logical().ReadWithContext(testCtx, rawPath)
+			require.NoError(t, err, "failed looking up authorization storage")
+			require.NotNil(t, resp, "sys raw response was nil")
+			require.NotEmpty(t, resp.Data["value"], "no value field in sys raw response")
+
+			var authz ACMEAuthorization
+			err = jsonutil.DecodeJSON([]byte(resp.Data["value"].(string)), &authz)
+			require.NoError(t, err, "error decoding authorization: %v", err)
+			authz.Status = ACMEAuthorizationValid
+			for _, challenge := range authz.Challenges {
+				challenge.Status = ACMEChallengeValid
+			}
+
+			encodeJSON, err := jsonutil.EncodeJSON(authz)
+			require.NoError(t, err, "failed encoding authz json")
+			_, err = baseClient.Logical().WriteWithContext(testCtx, rawPath,
map[string]interface{}{
+					"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+					"encoding": "base64",
+				})
+			require.NoError(t, err, "failed writing authorization storage")
+		}
+
+		// Give the engine some time to run
+		time.Sleep(200 * time.Millisecond)
+
+		// Check to see if we have fixed up the status and no new entries have appeared.
+		if !deleteCvEntries(t, client, pkiMount) {
+			// No entries found
+			// Look to see if we raced against the engine
+			orderLookup, err := acmeClient.GetOrder(testCtx, order.URI)
+			require.NoError(t, err, "failed loading order status after manually updating the authorization")
+
+			if orderLookup.Status == string(ACMEOrderReady) {
+				// Our order seems to be in the proper status, should be safe-ish to go ahead now
+				break
+			} else {
+				t.Logf("order status was not ready, retrying")
+			}
+		} else {
+			t.Logf("new challenge entries appeared after deletion, retrying")
+		}
+
+		if i > 5 {
+			t.Fatalf("We are constantly deleting cv entries or the order status is not changing; something is wrong")
+		}
+
+		i++
+	}
+}
+
+func deleteCvEntries(t *testing.T, client *api.Client, pkiMount string) bool {
+	testCtx := context.Background()
+
+	baseClient := client.WithNamespace("")
+
+	cvPath := path.Join("sys/raw/logical/", pkiMount, acmeValidationPrefix)
+	resp, err := baseClient.Logical().ListWithContext(testCtx, cvPath)
+	require.NoError(t, err, "failed listing cv path items")
+
+	deletedEntries := false
+	if resp != nil {
+		cvEntries := resp.Data["keys"].([]interface{})
+		for _, cvEntry := range cvEntries {
+			cvEntryPath := path.Join(cvPath, cvEntry.(string))
+			_, err = baseClient.Logical().DeleteWithContext(testCtx, cvEntryPath)
+			require.NoError(t, err, "failed to delete cv entry")
+			deletedEntries = true
+		}
+	}
+
+	return deletedEntries
+}
+
+func setupAcmeBackend(t *testing.T) (*vault.TestCluster, *api.Client, string) {
+	cluster, client := setupTestPkiCluster(t)
+
+	return setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki")
+}
+
+func setupAcmeBackendOnClusterAtPath(t *testing.T, cluster *vault.TestCluster, client *api.Client, mount string) (*vault.TestCluster, *api.Client, string) {
+	mount = strings.Trim(mount, "/")
+
+	// Setting templated AIAs should succeed.
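+	// pathConfig becomes the mount's cluster "path" (e.g.
+	// https://127.0.0.1:8200/v1/pki), written to <mount>/config/cluster
+	// below; ACME derives its directory and Link header URLs from it, which
+	// is why TestAcmeNonce compares headers against pathConfig.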
+	pathConfig := client.Address() + "/v1/" + mount
+
+	namespace := ""
+	mountName := mount
+	if mount != "pki" {
+		if strings.Contains(mount, "/") && constants.IsEnterprise {
+			ns_pieces := strings.Split(mount, "/")
+			c := len(ns_pieces)
+			// The mount name is the last piece (index c-1); the namespace is everything before it.
+			ns_name := ns_pieces[c-2]
+			if len(ns_pieces) > 2 {
+				// Create the parent namespaces first
+				parent := strings.Join(ns_pieces[0:c-2], "/")
+				_, err := client.WithNamespace(parent).Logical().Write("/sys/namespaces/"+ns_name, nil)
+				require.NoError(t, err, "failed to create nested namespaces "+parent+" -> "+ns_name)
+			} else {
+				_, err := client.Logical().Write("/sys/namespaces/"+ns_name, nil)
+				require.NoError(t, err, "failed to create nested namespace "+ns_name)
+			}
+			namespace = strings.Join(ns_pieces[0:c-1], "/")
+			mountName = ns_pieces[c-1]
+		}
+
+		err := client.WithNamespace(namespace).Sys().Mount(mountName, &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "3000h",
+				MaxLeaseTTL:     "600000h",
+			},
+		})
+		require.NoError(t, err, "failed to mount new PKI instance at "+mount)
+	}
+
+	err := client.Sys().TuneMountWithContext(ctx, mount, api.MountConfigInput{
+		DefaultLeaseTTL: "3000h",
+		MaxLeaseTTL:     "600000h",
+	})
+	require.NoError(t, err, "failed updating mount lease times "+mount)
+
+	_, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/cluster", map[string]interface{}{
+		"path":     pathConfig,
+		"aia_path": "http://localhost:8200/cdn/" + mount,
+	})
+	require.NoError(t, err)
+
+	_, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/acme", map[string]interface{}{
+		"enabled":    true,
+		"eab_policy": "not-required",
+	})
+	require.NoError(t, err)
+
+	// Allow certain headers to pass through for ACME support
+	_, err = client.WithNamespace(namespace).Logical().WriteWithContext(context.Background(), "sys/mounts/"+mountName+"/tune", map[string]interface{}{
+		"allowed_response_headers": []string{"Last-Modified", "Replay-Nonce", "Link", "Location"},
+		"max_lease_ttl":            "920000h",
+	})
+	require.NoError(t, err, "failed tuning mount response headers")
+
+	_, err = client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/root/internal",
+		map[string]interface{}{
+			"issuer_name": "root-ca",
+			"key_name":    "root-key",
+			"key_type":    "ec",
+			"common_name": "Test Root R1 " + mount,
+			"ttl":         "7200h",
+			"max_ttl":     "920000h",
+		})
+	require.NoError(t, err, "failed creating root CA")
+
+	resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
+		map[string]interface{}{
+			"key_name":    "int-key",
+			"key_type":    "ec",
+			"common_name": "Test Int X1 " + mount,
+		})
+	require.NoError(t, err, "failed creating intermediary CSR")
+	intermediateCSR := resp.Data["csr"].(string)
+
+	// Sign the intermediate CSR using the root issuer
+	resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
+		"csr":     intermediateCSR,
+		"ttl":     "7100h",
+		"max_ttl": "910000h",
+	})
+	require.NoError(t, err, "failed signing intermediary CSR")
+	intermediateCertPEM := resp.Data["certificate"].(string)
+
+	// Import the signed intermediate cert into the mount
+	resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
+		"pem_bundle": intermediateCertPEM,
+	})
+	require.NoError(t, err, "failed importing intermediary cert")
+	importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
+	require.Len(t, importedIssuersRaw, 1)
+	intCaUuid :=
importedIssuersRaw[0].(string)
+
+	_, err = client.Logical().Write(mount+"/issuer/"+intCaUuid, map[string]interface{}{
+		"issuer_name": "int-ca",
+	})
+	require.NoError(t, err, "failed updating issuer name")
+
+	_, err = client.Logical().Write(mount+"/config/issuers", map[string]interface{}{
+		"default": "int-ca",
+	})
+	require.NoError(t, err, "failed updating default issuer")
+
+	_, err = client.Logical().Write(mount+"/roles/test-role", map[string]interface{}{
+		"ttl":                         "168h",
+		"max_ttl":                     "168h",
+		"key_type":                    "any",
+		"allowed_domains":             "localdomain",
+		"allow_subdomains":            "true",
+		"allow_wildcard_certificates": "true",
+	})
+	require.NoError(t, err, "failed creating role test-role")
+
+	_, err = client.Logical().Write(mount+"/roles/acme", map[string]interface{}{
+		"ttl":      "3650h",
+		"max_ttl":  "7200h",
+		"key_type": "any",
+	})
+	require.NoError(t, err, "failed creating role acme")
+
+	return cluster, client, pathConfig
+}
+
+func testAcmeCertSignedByCa(t *testing.T, client *api.Client, derCerts [][]byte, issuerRef string) *x509.Certificate {
+	t.Helper()
+	require.NotEmpty(t, derCerts)
+	acmeCert, err := x509.ParseCertificate(derCerts[0])
+	require.NoError(t, err, "failed parsing acme cert bytes")
+
+	resp, err := client.Logical().ReadWithContext(context.Background(), "pki/issuer/"+issuerRef)
+	require.NoError(t, err, "failed reading issuer with name %s", issuerRef)
+	issuerCert := parseCert(t, resp.Data["certificate"].(string))
+	issuerChainRaw := resp.Data["ca_chain"].([]interface{})
+
+	err = acmeCert.CheckSignatureFrom(issuerCert)
+	require.NoError(t, err, "issuer %s did not sign provided cert", issuerRef)
+
+	expectedCerts := [][]byte{derCerts[0]}
+
+	for _, entry := range issuerChainRaw {
+		chainCert := parseCert(t, entry.(string))
+		expectedCerts = append(expectedCerts, chainCert.Raw)
+	}
+
+	if diffs := deep.Equal(expectedCerts, derCerts); diffs != nil {
+		t.Fatalf("diffs were found between the acme chain returned and the expected value: \n%v", diffs)
+	}
+
+	return acmeCert
+}
+
+// TestAcmeValidationError makes sure that we properly return errors on validation failures;
+// the sketch that follows shows the polling pattern the test relies on.
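The test below polls the challenge until the server surfaces an RFC 8555 problem document. A hedged sketch of that polling loop using golang.org/x/crypto/acme, as the test does; the helper name and package are illustrative, not part of this diff:

```go
// Sketch: poll an ACME challenge until its Error field carries an
// *acme.Error problem document, as the test below waits for.
package sketch

import (
	"context"
	"time"

	"golang.org/x/crypto/acme"
)

func waitForChallengeError(ctx context.Context, client *acme.Client, challengeURI string) (*acme.Error, error) {
	for {
		chal, err := client.GetChallenge(ctx, challengeURI)
		if err != nil {
			return nil, err
		}
		if acmeErr, ok := chal.Error.(*acme.Error); ok {
			// e.g. acmeErr.ProblemType == "urn:ietf:params:acme:error:incorrectResponse"
			return acmeErr, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(time.Second):
			// Not populated yet; poll again.
		}
	}
}
```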
+func TestAcmeValidationError(t *testing.T) {
+	t.Parallel()
+	cluster, _, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Create new account
+	t.Logf("Testing register on %s", baseAcmeURL)
+	_, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+
+	// Create an order
+	t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+	identifiers := []string{"www.dadgarcorp.com"}
+	order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+		{Type: "dns", Value: identifiers[0]},
+	})
+	require.NoError(t, err, "failed creating order")
+
+	// Load authorizations
+	var authorizations []*acme.Authorization
+	for _, authUrl := range order.AuthzURLs {
+		auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
+		require.NoError(t, err, "failed fetching authorization: %s", authUrl)
+
+		authorizations = append(authorizations, auth)
+	}
+	require.Len(t, authorizations, 1, "expected exactly one authorization")
+	require.Len(t, authorizations[0].Challenges, 3, "expected three challenges associated with the authorization")
+
+	acceptedAuth, err := acmeClient.Accept(testCtx, authorizations[0].Challenges[0])
+	require.NoError(t, err, "should have been allowed to accept challenge 1")
+	require.Equal(t, string(ACMEChallengeProcessing), acceptedAuth.Status)
+
+	_, err = acmeClient.Accept(testCtx, authorizations[0].Challenges[1])
+	require.Error(t, err, "should have been prevented from accepting challenge 2")
+
+	// Make sure our challenge returns errors
+	testhelpers.RetryUntil(t, 30*time.Second, func() error {
+		challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
+		if err != nil {
+			return err
+		}
+
+		if challenge.Error == nil {
+			return fmt.Errorf("no error set in challenge yet")
+		}
+
+		acmeError, ok := challenge.Error.(*acme.Error)
+		if !ok {
+			return fmt.Errorf("unexpected error type back: %v", challenge.Error)
+		}
+
+		if acmeError.ProblemType != "urn:ietf:params:acme:error:incorrectResponse" {
+			return fmt.Errorf("unexpected ACME error back: %v", acmeError)
+		}
+
+		return nil
+	})
+
+	// Make sure our challenge, auth, and order statuses change.
+	// This takes a little too long to run reliably in CI; we need the ability to influence
+	// how long the validations take before CI can exercise this portion.
+	if os.Getenv("CI") == "" {
+		testhelpers.RetryUntil(t, 10*time.Minute, func() error {
+			challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
+			if err != nil {
+				return fmt.Errorf("failed to load challenge: %w", err)
+			}
+
+			if challenge.Status != string(ACMEChallengeInvalid) {
+				return fmt.Errorf("challenge state was not changed to invalid: %v", challenge)
+			}
+
+			authz, err := acmeClient.GetAuthorization(testCtx, authorizations[0].URI)
+			if err != nil {
+				return fmt.Errorf("failed to load authorization: %w", err)
+			}
+
+			if authz.Status != string(ACMEAuthorizationInvalid) {
+				return fmt.Errorf("authz state was not changed to invalid: %v", authz)
+			}
+
+			myOrder, err := acmeClient.GetOrder(testCtx, order.URI)
+			if err != nil {
+				return fmt.Errorf("failed to load order: %w", err)
+			}
+
+			if myOrder.Status != string(ACMEOrderInvalid) {
+				return fmt.Errorf("order state was not changed to invalid: %v", myOrder)
+			}
+
+			return nil
+		})
+	}
+}
+
+// TestAcmeRevocationAcrossAccounts makes sure that we can revoke a certificate from a different
+// ACME account, or with no account at all, so long as the request is signed with the certificate's
+// private key. It also verifies we cannot revoke certificates across accounts using account keys alone.
+func TestAcmeRevocationAcrossAccounts(t *testing.T) {
+	t.Parallel()
+
+	cluster, vaultClient, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey1, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	acmeClient1 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey1)
+
+	leafKey, certs := doACMEWorkflow(t, vaultClient, acmeClient1)
+	acmeCert, err := x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert bytes")
+
+	// Make sure our cert is not revoked
+	certResp, err := vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
+	require.NoError(t, err, "failed to read certificate status")
+	require.NotNil(t, certResp, "certificate status response was nil")
+	revocationTime := certResp.Data["revocation_time"].(json.Number)
+	revocationTimeInt, err := revocationTime.Int64()
+	require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
+	require.Equal(t, revocationTimeInt, int64(0),
+		"revocation time was not 0, cert was already revoked: %v", revocationTimeInt)
+
+	// Test that we can't revoke the certificate with another account's key
+	accountKey2, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+	require.NoError(t, err, "failed creating ec key")
+
+	acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
+	_, err = acmeClient2.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering second account")
+
+	err = acmeClient2.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
+	require.Error(t, err, "should have failed revoking the certificate with a different account")
+
+	// Make sure our cert is not revoked
+	certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
+	require.NoError(t, err, "failed to read certificate status")
+	require.NotNil(t, certResp, "certificate status response was nil")
+	revocationTime = certResp.Data["revocation_time"].(json.Number)
+	revocationTimeInt, err = revocationTime.Int64()
+	require.NoError(t, err, "failed converting
revocation_time value: %v", revocationTime)
+	require.Equal(t, revocationTimeInt, int64(0),
+		"revocation time was not 0, cert was already revoked: %v", revocationTimeInt)
+
+	// But we can revoke if we sign the request with the certificate's key and a different account
+	err = acmeClient2.RevokeCert(ctx, leafKey, certs[0], acme.CRLReasonUnspecified)
+	require.NoError(t, err, "should have been allowed to revoke certificate with csr key across accounts")
+
+	// Make sure our cert is now revoked
+	certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
+	require.NoError(t, err, "failed to read certificate status")
+	require.NotNil(t, certResp, "certificate status response was nil")
+	revocationTime = certResp.Data["revocation_time"].(json.Number)
+	revocationTimeInt, err = revocationTime.Int64()
+	require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
+	require.Greater(t, revocationTimeInt, int64(0),
+		"revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt)
+
+	// Make sure we can revoke a certificate without a registered ACME account
+	leafKey2, certs2 := doACMEWorkflow(t, vaultClient, acmeClient1)
+
+	acmeClient3 := getAcmeClientForCluster(t, cluster, baseAcmeURL, nil)
+	err = acmeClient3.RevokeCert(ctx, leafKey2, certs2[0], acme.CRLReasonUnspecified)
+	require.NoError(t, err, "should be allowed to revoke a cert with no ACME account but with cert key")
+
+	// Make sure our cert is now revoked
+	acmeCert2, err := x509.ParseCertificate(certs2[0])
+	require.NoError(t, err, "failed parsing acme cert 2 bytes")
+
+	certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert2))
+	require.NoError(t, err, "failed to read certificate status")
+	require.NotNil(t, certResp, "certificate status response was nil")
+	revocationTime = certResp.Data["revocation_time"].(json.Number)
+	revocationTimeInt, err = revocationTime.Int64()
+	require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
+	require.Greater(t, revocationTimeInt, int64(0),
+		"revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt)
+}
+
+// TestAcmeMaxTTL verifies that we can update the ACME configuration's max_ttl value and
+// get a certificate whose notAfter extends beyond the original 90 day limit.
+func TestAcmeMaxTTL(t *testing.T) {
+	t.Parallel()
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	numHours := 140 * 24 // 140 days; this caps the acme role's 3650h (~152 day) TTL
+	acmeConfig := map[string]interface{}{
+		"enabled":                  true,
+		"allowed_issuers":          "*",
+		"allowed_roles":            "*",
+		"default_directory_policy": "role:acme",
+		"dns_resolver":             "",
+		"eab_policy_name":          "",
+		"max_ttl":                  fmt.Sprintf("%dh", numHours),
+	}
+	_, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig)
+	require.NoError(t, err, "error configuring acme")
+
+	// First create our client
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+	acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey)
+
+	discovery, err := acmeClient.Discover(testCtx)
+	require.NoError(t, err, "failed acme discovery call")
+	t.Logf("%v", discovery)
+
+	acct, err := acmeClient.Register(testCtx, &acme.Account{
+		Contact: []string{"mailto:test@example.com"},
+	}, func(tosURL string) bool {
return true })
+	require.NoError(t, err, "failed registering account")
+	require.Equal(t, acme.StatusValid, acct.Status)
+	require.Contains(t, acct.Contact, "mailto:test@example.com")
+	require.Len(t, acct.Contact, 1)
+
+	authorizations := []acme.AuthzID{
+		{"dns", "localhost"},
+	}
+	// Create an order
+	identifiers := make([]string, len(authorizations))
+	for index, auth := range authorizations {
+		identifiers[index] = auth.Value
+	}
+
+	createOrder, err := acmeClient.AuthorizeOrder(testCtx, authorizations)
+	require.NoError(t, err, "failed creating order")
+	require.Equal(t, acme.StatusPending, createOrder.Status)
+	require.Empty(t, createOrder.CertURL)
+	require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
+	require.Len(t, createOrder.AuthzURLs, len(authorizations), "expected same number of authzurls as identifiers")
+
+	// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
+	// test.
+	markAuthorizationSuccess(t, client, acmeClient, acct, createOrder)
+
+	// Submit the CSR
+	requestCSR := x509.CertificateRequest{
+		Subject: pkix.Name{CommonName: "localhost"},
+	}
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating key for CSR")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, &requestCSR, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
+	require.NoError(t, err, "failed finalizing order")
+
+	// Validate we get a signed cert back
+	acmeCert := testAcmeCertSignedByCa(t, client, certs, "int-ca")
+	duration := time.Duration(numHours) * time.Hour
+	maxTTL := time.Now().Add(duration)
+	buffer := time.Duration(24) * time.Hour
+	dayTruncate := time.Duration(24) * time.Hour
+
+	acmeCertNotAfter := acmeCert.NotAfter.Truncate(dayTruncate)
+
+	// Make sure we are in the ballpark of our max_ttl value.
+	require.Greaterf(t, acmeCertNotAfter, maxTTL.Add(-1*buffer), "ACME cert notAfter %v should not have been more than a day before the max TTL %v", acmeCert.NotAfter, maxTTL)
+	require.Less(t, acmeCertNotAfter, maxTTL.Add(buffer), "ACME cert notAfter %v should not have been more than a day past the max TTL %v", acmeCert.NotAfter, maxTTL)
+}
+
+func doACMEWorkflow(t *testing.T, vaultClient *api.Client, acmeClient *acme.Client) (*ecdsa.PrivateKey, [][]byte) {
+	testCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	// Create new account
+	acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	if err != nil {
+		if strings.Contains(err.Error(), "acme: account already exists") {
+			acct, err = acmeClient.GetReg(testCtx, "")
+			require.NoError(t, err, "failed looking up account after account exists error?")
+		} else {
+			require.NoError(t, err, "failed registering account")
+		}
+	}
+
+	// Create an order
+	identifiers := []string{"*.localdomain"}
+	order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+		{Type: "dns", Value: identifiers[0]},
+	})
+	require.NoError(t, err, "failed creating order")
+
+	// HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
+	// test.
+	markAuthorizationSuccess(t, vaultClient, acmeClient, acct, order)
+
+	// Build a proper CSR with the correct name, signed by a key distinct from the account key;
+	// this should work (see the sketch below).
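As a self-contained illustration of this step, independent of the test harness (the values here are hypothetical), a finalize-ready CSR for a wildcard DNS identifier can be built as follows. Note RFC 8555 requires the CSR be signed with the certificate key rather than the ACME account key:

```go
// Sketch: build a CSR suitable for an ACME finalize call.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// A fresh key for the certificate itself, distinct from the account key.
	certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.CreateCertificateRequest(rand.Reader,
		&x509.CertificateRequest{DNSNames: []string{"*.localdomain"}}, certKey)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})))
}
```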
+	goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating key for CSR")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+	require.NoError(t, err, "failed finalizing order")
+	require.Len(t, certs, 3, "expected full acme chain")
+
+	return csrKey, certs
+}
+
+func setupTestPkiCluster(t *testing.T) (*vault.TestCluster, *api.Client) {
+	coreConfig := &vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"pki": Factory,
+		},
+		EnableRaw: true,
+	}
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	client := cluster.Cores[0].Client
+	mountPKIEndpoint(t, client, "pki")
+	return cluster, client
+}
+
+func getAcmeClientForCluster(t *testing.T, cluster *vault.TestCluster, baseUrl string, key crypto.Signer) *acme.Client {
+	coreAddr := cluster.Cores[0].Listeners[0].Address
+	tlsConfig := cluster.Cores[0].TLSConfig()
+
+	transport := cleanhttp.DefaultPooledTransport()
+	transport.TLSClientConfig = tlsConfig.Clone()
+	if err := http2.ConfigureTransport(transport); err != nil {
+		t.Fatal(err)
+	}
+	httpClient := &http.Client{Transport: transport}
+	if baseUrl[0] == '/' {
+		baseUrl = baseUrl[1:]
+	}
+	if !strings.HasPrefix(baseUrl, "v1/") {
+		baseUrl = "v1/" + baseUrl
+	}
+	if !strings.HasSuffix(baseUrl, "/") {
+		baseUrl = baseUrl + "/"
+	}
+	baseAcmeURL := fmt.Sprintf("https://%s/%s", coreAddr.String(), baseUrl)
+	return &acme.Client{
+		Key:          key,
+		HTTPClient:   httpClient,
+		DirectoryURL: baseAcmeURL + "directory",
+	}
+}
+
+func getEABKey(t *testing.T, client *api.Client, baseUrl string) (string, []byte) {
+	t.Helper()
+
+	resp, err := client.Logical().WriteWithContext(ctx, path.Join("pki/", baseUrl, "/new-eab"), map[string]interface{}{})
+	require.NoError(t, err, "failed getting eab key")
+	require.NotNil(t, resp, "eab key returned nil response")
+	require.NotEmpty(t, resp.Data["id"], "eab key response missing id field")
+	kid := resp.Data["id"].(string)
+
+	require.NotEmpty(t, resp.Data["key"], "eab key response missing key field")
+	base64Key := resp.Data["key"].(string)
+	require.True(t, strings.HasPrefix(base64Key, "vault-eab-0-"), "%s should have had a prefix of vault-eab-0-", base64Key)
+	privateKeyBytes, err := base64.RawURLEncoding.DecodeString(base64Key)
+	require.NoError(t, err, "failed base64 decoding eab key response")
+
+	require.Equal(t, "hs", resp.Data["key_type"], "eab key_type field mis-match")
+	require.Equal(t, baseUrl+"directory", resp.Data["acme_directory"], "eab acme_directory field mis-match")
+	require.NotEmpty(t, resp.Data["created_on"], "empty created_on field")
+	_, err = time.Parse(time.RFC3339, resp.Data["created_on"].(string))
+	require.NoError(t, err, "failed parsing eab created_on field")
+
+	return kid, privateKeyBytes
+}
+
+func TestACMEClientRequestLimits(t *testing.T) {
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	cases := []struct {
+		name           string
+		authorizations []acme.AuthzID
+		requestCSR     x509.CertificateRequest
+		valid          bool
+	}{
+		{
+			"validate-only-cn",
+			[]acme.AuthzID{
+				{"dns", "localhost"},
+			},
+			x509.CertificateRequest{
+				Subject: pkix.Name{CommonName: "localhost"},
+			},
+			true,
+		},
+		{
+			"validate-only-san",
+			[]acme.AuthzID{
+				{"dns",
"localhost"}, + }, + x509.CertificateRequest{ + DNSNames: []string{"localhost"}, + }, + true, + }, + { + "validate-only-ip-address", + []acme.AuthzID{ + {"ip", "127.0.0.1"}, + }, + x509.CertificateRequest{ + IPAddresses: []net.IP{{127, 0, 0, 1}}, + }, + true, + }, + } + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + acmeConfig := map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "sign-verbatim", + "dns_resolver": "", + "eab_policy_name": "", + } + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig) + require.NoError(t, err, "error configuring acme") + + for _, tc := range cases { + + // First Create Our Client + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey) + + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + t.Logf("%v", discovery) + + acct, err := acmeClient.Register(testCtx, &acme.Account{ + Contact: []string{"mailto:test@example.com"}, + }, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + require.Equal(t, acme.StatusValid, acct.Status) + require.Contains(t, acct.Contact, "mailto:test@example.com") + require.Len(t, acct.Contact, 1) + + // Create an order + t.Logf("Testing Authorize Order on %s", "pki/acme") + identifiers := make([]string, len(tc.authorizations)) + for index, auth := range tc.authorizations { + identifiers[index] = auth.Value + } + + createOrder, err := acmeClient.AuthorizeOrder(testCtx, tc.authorizations) + require.NoError(t, err, "failed creating order") + require.Equal(t, acme.StatusPending, createOrder.Status) + require.Empty(t, createOrder.CertURL) + require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL) + require.Len(t, createOrder.AuthzURLs, len(tc.authorizations), "expected same number of authzurls as identifiers") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, createOrder) + + // Submit the CSR + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, &tc.requestCSR, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true) + + if tc.valid { + require.NoError(t, err, "failed finalizing order") + + // Validate we get a signed cert back + testAcmeCertSignedByCa(t, client, certs, "int-ca") + } else { + require.Error(t, err, "Not a valid CSR, should err") + } + } +} diff --git a/builtin/logical/pki/path_config_acme.go b/builtin/logical/pki/path_config_acme.go new file mode 100644 index 000000000000..2a233cad7425 --- /dev/null +++ b/builtin/logical/pki/path_config_acme.go @@ -0,0 +1,428 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/helper/constants"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	storageAcmeConfig      = "config/acme"
+	pathConfigAcmeHelpSyn  = "Configuration of ACME Endpoints"
+	pathConfigAcmeHelpDesc = "Here we configure:\n\nenabled=false, whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support,\nallowed_issuers=\"default\", which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer,\nallowed_roles=\"*\", which roles are allowed for use with ACME; by default these will be all roles matching our selection criteria,\ndefault_directory_policy=\"\", either \"forbid\", preventing the default directory from being used at all, \"role:<role_name>\" which is the role to be used for non-role-qualified ACME requests; or \"sign-verbatim\", the default meaning ACME issuance will be equivalent to sign-verbatim,\ndns_resolver=\"\", which specifies a custom DNS resolver to use for all ACME-related DNS lookups"
+	disableAcmeEnvVar      = "VAULT_DISABLE_PUBLIC_ACME"
+	defaultAcmeMaxTTL      = 90 * (24 * time.Hour)
+)
+
+type acmeConfigEntry struct {
+	Enabled                bool          `json:"enabled"`
+	AllowedIssuers         []string      `json:"allowed_issuers"`
+	AllowedRoles           []string      `json:"allowed_roles"`
+	AllowRoleExtKeyUsage   bool          `json:"allow_role_ext_key_usage"`
+	DefaultDirectoryPolicy string        `json:"default_directory_policy"`
+	DNSResolver            string        `json:"dns_resolver"`
+	EabPolicyName          EabPolicyName `json:"eab_policy_name"`
+	MaxTTL                 time.Duration `json:"max_ttl"`
+}
+
+var defaultAcmeConfig = acmeConfigEntry{
+	Enabled:                false,
+	AllowedIssuers:         []string{"*"},
+	AllowedRoles:           []string{"*"},
+	AllowRoleExtKeyUsage:   false,
+	DefaultDirectoryPolicy: "sign-verbatim",
+	DNSResolver:            "",
+	EabPolicyName:          eabPolicyNotRequired,
+	MaxTTL:                 defaultAcmeMaxTTL,
+}
+
+var (
+	extPolicyPrefix       = "external-policy"
+	extPolicyPrefixLength = len(extPolicyPrefix)
+	extPolicyRegex        = regexp.MustCompile(framework.GenericNameRegex("policy"))
+	rolePrefix            = "role:"
+	rolePrefixLength      = len(rolePrefix)
+)
+
+func getAcmeConfig(sc *storageContext) (*acmeConfigEntry, error) {
+	entry, err := sc.Storage.Get(sc.Context, storageAcmeConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	var mapping acmeConfigEntry
+	if entry == nil {
+		mapping = defaultAcmeConfig
+		return &mapping, nil
+	}
+
+	if err := entry.DecodeJSON(&mapping); err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode ACME configuration: %v", err)}
+	}
+
+	// Upgrade previously stored configurations to the default max TTL we used to implicitly enforce
+	if mapping.MaxTTL == 0 {
+		mapping.MaxTTL = defaultAcmeMaxTTL
+	}
+
+	return &mapping, nil
+}
+
+func (sc *storageContext) setAcmeConfig(entry *acmeConfigEntry) error {
+	json, err := logical.StorageEntryJSON(storageAcmeConfig, entry)
+	if err != nil {
+		return fmt.Errorf("failed creating storage entry: %w", err)
+	}
+
+	if err := sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("failed writing storage entry: %w", err)
+	}
+
+	return nil
+}
+
+func pathAcmeConfig(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config/acme",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixPKI,
+		},
+
+		Fields: map[string]*framework.FieldSchema{
"enabled": { + Type: framework.TypeBool, + Description: `whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support`, + Default: false, + }, + "allowed_issuers": { + Type: framework.TypeCommaStringSlice, + Description: `which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer`, + Default: []string{"*"}, + }, + "allowed_roles": { + Type: framework.TypeCommaStringSlice, + Description: `which roles are allowed for use with ACME; by default via '*', these will be all roles including sign-verbatim; when concrete role names are specified, any default_directory_policy role must be included to allow usage of the default acme directories under /pki/acme/directory and /pki/issuer/:issuer_id/acme/directory.`, + Default: []string{"*"}, + }, + "allow_role_ext_key_usage": { + Type: framework.TypeBool, + Description: `whether the ExtKeyUsage field from a role is used, defaults to false meaning that certificate will be signed with ServerAuth.`, + Default: false, + }, + "default_directory_policy": { + Type: framework.TypeString, + Description: `the policy to be used for non-role-qualified ACME requests; by default ACME issuance will be otherwise unrestricted, equivalent to the sign-verbatim endpoint; one may also specify a role to use as this policy, as "role:", the specified role must be allowed by allowed_roles`, + Default: "sign-verbatim", + }, + "dns_resolver": { + Type: framework.TypeString, + Description: `DNS resolver to use for domain resolution on this mount. Defaults to using the default system resolver. Must be in the format :, with both parts mandatory.`, + Default: "", + }, + "eab_policy": { + Type: framework.TypeString, + Description: `Specify the policy to use for external account binding behaviour, 'not-required', 'new-account-required' or 'always-required'`, + Default: "always-required", + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: `Specify the maximum TTL for ACME certificates. Role TTL values will be limited to this value`, + Default: defaultAcmeMaxTTL.Seconds(), + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "acme-configuration", + }, + Callback: b.pathAcmeRead, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathAcmeWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "acme", + }, + // Read more about why these flags are set in backend.go. 
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigAcmeHelpSyn, + HelpDescription: pathConfigAcmeHelpDesc, + } +} + +func (b *backend) pathAcmeRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := b.GetAcmeState().getConfigWithForcedUpdate(sc) + if err != nil { + return nil, err + } + + var warnings []string + if config.Enabled { + _, err := getBasePathFromClusterConfig(sc) + if err != nil { + warnings = append(warnings, err.Error()) + } + } + + return genResponseFromAcmeConfig(config, warnings), nil +} + +func genResponseFromAcmeConfig(config *acmeConfigEntry, warnings []string) *logical.Response { + response := &logical.Response{ + Data: map[string]interface{}{ + "allowed_roles": config.AllowedRoles, + "allow_role_ext_key_usage": config.AllowRoleExtKeyUsage, + "allowed_issuers": config.AllowedIssuers, + "default_directory_policy": config.DefaultDirectoryPolicy, + "enabled": config.Enabled, + "dns_resolver": config.DNSResolver, + "eab_policy": config.EabPolicyName, + "max_ttl": config.MaxTTL.Seconds(), + }, + Warnings: warnings, + } + + // TODO: Add some nice warning if we are on a replication cluster and path isn't set + + return response +} + +func (b *backend) pathAcmeWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + + config, err := b.GetAcmeState().getConfigWithForcedUpdate(sc) + if err != nil { + return nil, err + } + + if enabledRaw, ok := d.GetOk("enabled"); ok { + config.Enabled = enabledRaw.(bool) + } + + if allowedRolesRaw, ok := d.GetOk("allowed_roles"); ok { + config.AllowedRoles = allowedRolesRaw.([]string) + if len(config.AllowedRoles) == 0 { + return nil, fmt.Errorf("allowed_roles must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") + } + } + + if allowRoleExtKeyUsageRaw, ok := d.GetOk("allow_role_ext_key_usage"); ok { + config.AllowRoleExtKeyUsage = allowRoleExtKeyUsageRaw.(bool) + } + + if defaultDirectoryPolicyRaw, ok := d.GetOk("default_directory_policy"); ok { + config.DefaultDirectoryPolicy = defaultDirectoryPolicyRaw.(string) + } + + if allowedIssuersRaw, ok := d.GetOk("allowed_issuers"); ok { + config.AllowedIssuers = allowedIssuersRaw.([]string) + if len(config.AllowedIssuers) == 0 { + return nil, fmt.Errorf("allowed_issuers must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") + } + } + + if dnsResolverRaw, ok := d.GetOk("dns_resolver"); ok { + config.DNSResolver = dnsResolverRaw.(string) + if config.DNSResolver != "" { + addr, _, err := net.SplitHostPort(config.DNSResolver) + if err != nil { + return nil, fmt.Errorf("failed to parse DNS resolver address: %w", err) + } + if addr == "" { + return nil, fmt.Errorf("failed to parse DNS resolver address: got empty address") + } + if net.ParseIP(addr) == nil { + return nil, fmt.Errorf("failed to parse DNS resolver address: expected IPv4/IPv6 address, likely got hostname") + } + } + } + + if eabPolicyRaw, ok := d.GetOk("eab_policy"); ok { + eabPolicy, err := getEabPolicyByString(eabPolicyRaw.(string)) + if err != nil { + return nil, fmt.Errorf("invalid eab policy name provided, valid values are '%s', '%s', '%s'", + eabPolicyNotRequired, eabPolicyNewAccountRequired, eabPolicyAlwaysRequired) + } + 
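The dns_resolver check a few lines above combines net.SplitHostPort with net.ParseIP to accept only IP:port pairs. A standalone sketch of which values pass that validation, under the same rules; the helper name and sample values are illustrative:

```go
// Sketch: validate a dns_resolver value the same way the handler above does.
package main

import (
	"fmt"
	"net"
)

func validResolver(v string) bool {
	host, _, err := net.SplitHostPort(v)
	if err != nil || host == "" {
		return false
	}
	// Hostnames are rejected; only IPv4/IPv6 literals pass.
	return net.ParseIP(host) != nil
}

func main() {
	for _, v := range []string{"1.1.1.1:53", "[::1]:53", "dns.example.com:53", "1.1.1.1"} {
		fmt.Println(v, validResolver(v))
	}
	// 1.1.1.1:53         true
	// [::1]:53           true
	// dns.example.com:53 false (hostname, not an IP)
	// 1.1.1.1            false (missing port)
}
```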
config.EabPolicyName = eabPolicy.Name
+	}
+
+	if maxTTLRaw, ok := d.GetOk("max_ttl"); ok {
+		maxTTL := time.Second * time.Duration(maxTTLRaw.(int))
+		if maxTTL <= 0 {
+			return nil, fmt.Errorf("invalid max_ttl value, must be greater than 0")
+		}
+		config.MaxTTL = maxTTL
+	}
+
+	// Validate Default Directory Behavior:
+	defaultDirectoryPolicyType, extraInfo, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy)
+	if err != nil {
+		return nil, fmt.Errorf("invalid default_directory_policy: %w", err)
+	}
+	defaultDirectoryRoleName := ""
+	switch defaultDirectoryPolicyType {
+	case Forbid:
+	case SignVerbatim:
+	case ExternalPolicy:
+		if !constants.IsEnterprise {
+			return nil, fmt.Errorf("external-policy is only available in enterprise versions of Vault")
+		}
+	case Role:
+		defaultDirectoryRoleName = extraInfo
+
+		_, err := getAndValidateAcmeRole(sc, defaultDirectoryRoleName)
+		if err != nil {
+			return nil, fmt.Errorf("default directory policy role %v is not a valid ACME role: %w", defaultDirectoryRoleName, err)
+		}
+	default:
+		return nil, fmt.Errorf("validation for the type of policy defined by %v is undefined", config.DefaultDirectoryPolicy)
+	}
+
+	// Validate Allowed Roles
+	allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*"
+	foundDefault := false
+	if !allowAnyRole {
+		for index, name := range config.AllowedRoles {
+			if name == "*" {
+				return nil, fmt.Errorf("cannot use '*' as role name at index %d", index)
+			}
+
+			_, err := getAndValidateAcmeRole(sc, name)
+			if err != nil {
+				return nil, fmt.Errorf("allowed_role %v is not a valid acme role: %w", name, err)
+			}
+
+			if defaultDirectoryPolicyType == Role && name == defaultDirectoryRoleName {
+				foundDefault = true
+			}
+		}
+
+		if !foundDefault && defaultDirectoryPolicyType == Role {
+			return nil, fmt.Errorf("default directory policy %v was not specified in allowed_roles: %v", config.DefaultDirectoryPolicy, config.AllowedRoles)
+		}
+	}
+
+	allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*"
+	if !allowAnyIssuer {
+		for index, name := range config.AllowedIssuers {
+			if name == "*" {
+				return nil, fmt.Errorf("cannot use '*' as issuer name at index %d", index)
+			}
+
+			_, err := sc.resolveIssuerReference(name)
+			if err != nil {
+				return nil, fmt.Errorf("failed validating allowed_issuers: unable to fetch issuer: %v: %w", name, err)
+			}
+		}
+	}
+
+	// Check to make sure that we have a proper value for the cluster path which ACME requires
+	if config.Enabled {
+		_, err = getBasePathFromClusterConfig(sc)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var warnings []string
+	// Lastly, let's verify that the configuration is honored/invalidated by the public ACME env var.
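The env var guard relies on strconv.ParseBool, implemented in isPublicACMEDisabledByEnv just below. A small runnable sketch of how a few hypothetical VAULT_DISABLE_PUBLIC_ACME values are interpreted:

```go
// Sketch: strconv.ParseBool semantics as used by isPublicACMEDisabledByEnv.
// Unparseable values return an error; the handler below then assumes the
// operator wanted public ACME disabled.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, v := range []string{"1", "true", "FALSE", "yes"} {
		b, err := strconv.ParseBool(v)
		fmt.Printf("%-5s -> parsed=%v err=%v\n", v, b, err != nil)
	}
	// 1     -> parsed=true err=false
	// true  -> parsed=true err=false
	// FALSE -> parsed=false err=false
	// yes   -> parsed=false err=true (treated as "disabled" by the handler)
}
```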
+	isPublicAcmeDisabledByEnv, err := isPublicACMEDisabledByEnv()
+	if err != nil {
+		warnings = append(warnings, err.Error())
+	}
+	if isPublicAcmeDisabledByEnv && config.Enabled {
+		eabPolicy := getEabPolicyByName(config.EabPolicyName)
+		if !eabPolicy.OverrideEnvDisablingPublicAcme() {
+			resp := logical.ErrorResponse("%s env var is enabled, ACME EAB policy needs to be '%s' with ACME enabled",
+				disableAcmeEnvVar, eabPolicyAlwaysRequired)
+			resp.Warnings = warnings
+			return resp, nil
+		}
+	}
+
+	if _, err := b.GetAcmeState().writeConfig(sc, config); err != nil {
+		return nil, fmt.Errorf("failed persisting: %w", err)
+	}
+
+	return genResponseFromAcmeConfig(config, warnings), nil
+}
+
+func isPublicACMEDisabledByEnv() (bool, error) {
+	disableAcmeRaw, ok := os.LookupEnv(disableAcmeEnvVar)
+	if !ok {
+		return false, nil
+	}
+
+	disableAcme, err := strconv.ParseBool(disableAcmeRaw)
+	if err != nil {
+		// The environment variable was set but we couldn't parse the value as a boolean; assume
+		// the operator wanted public ACME disabled.
+		return true, fmt.Errorf("failed parsing environment variable %s: %w", disableAcmeEnvVar, err)
+	}
+
+	return disableAcme, nil
+}
+
+func getDefaultDirectoryPolicyType(defaultDirectoryPolicy string) (DefaultDirectoryPolicyType, string, error) {
+	switch {
+	case defaultDirectoryPolicy == "forbid":
+		return Forbid, "", nil
+	case defaultDirectoryPolicy == "sign-verbatim":
+		return SignVerbatim, "", nil
+	case strings.HasPrefix(defaultDirectoryPolicy, rolePrefix):
+		if len(defaultDirectoryPolicy) == rolePrefixLength {
+			return Forbid, "", fmt.Errorf("no role specified by policy %v", defaultDirectoryPolicy)
+		}
+		roleName := defaultDirectoryPolicy[rolePrefixLength:]
+		return Role, roleName, nil
+	case strings.HasPrefix(defaultDirectoryPolicy, extPolicyPrefix):
+		if len(defaultDirectoryPolicy) == extPolicyPrefixLength {
+			// default external-policy case without a specified policy
+			return ExternalPolicy, "", nil
+		}
+
+		if strings.HasPrefix(defaultDirectoryPolicy, extPolicyPrefix+":") &&
+			len(defaultDirectoryPolicy) == extPolicyPrefixLength+1 {
+			// end user set 'external-policy:', so no policy which is acceptable
+			return ExternalPolicy, "", nil
+		}
+
+		policyName := defaultDirectoryPolicy[extPolicyPrefixLength+1:]
+		if ok := extPolicyRegex.MatchString(policyName); !ok {
+			return Forbid, "", fmt.Errorf("invalid characters within external-policy name: %s", defaultDirectoryPolicy)
+		}
+		return ExternalPolicy, policyName, nil
+	default:
+		return Forbid, "", fmt.Errorf("string %v not a valid Default Directory Policy", defaultDirectoryPolicy)
+	}
+}
+
+//go:generate enumer -type=DefaultDirectoryPolicyType
+type DefaultDirectoryPolicyType int
+
+const (
+	Forbid DefaultDirectoryPolicyType = iota
+	SignVerbatim
+	Role
+	ExternalPolicy
+)
diff --git a/builtin/logical/pki/path_config_acme_test.go b/builtin/logical/pki/path_config_acme_test.go
new file mode 100644
index 000000000000..47ba1f817dec
--- /dev/null
+++ b/builtin/logical/pki/path_config_acme_test.go
@@ -0,0 +1,155 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/helper/constants"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAcmeConfig(t *testing.T) {
+	t.Parallel()
+
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	cases := []struct {
+		name        string
+		AcmeConfig  map[string]interface{}
+		prefixUrl   string
+		validConfig bool
+		works       bool
+	}{
+		{"unspecified-root", map[string]interface{}{
+			"enabled":         true,
+			"allowed_issuers": "*",
+			"allowed_roles":   "*",
+			"dns_resolver":    "",
+			"eab_policy_name": "",
+		}, "acme/", true, true},
+		{"bad-policy-root", map[string]interface{}{
+			"enabled":                  true,
+			"allowed_issuers":          "*",
+			"allowed_roles":            "*",
+			"default_directory_policy": "bad",
+			"dns_resolver":             "",
+			"eab_policy_name":          "",
+		}, "acme/", false, false},
+		{"forbid-root", map[string]interface{}{
+			"enabled":                  true,
+			"allowed_issuers":          "*",
+			"allowed_roles":            "*",
+			"default_directory_policy": "forbid",
+			"dns_resolver":             "",
+			"eab_policy_name":          "",
+		}, "acme/", true, false},
+		{"sign-verbatim-root", map[string]interface{}{
+			"enabled":                  true,
+			"allowed_issuers":          "*",
+			"allowed_roles":            "*",
+			"default_directory_policy": "sign-verbatim",
+			"dns_resolver":             "",
+			"eab_policy_name":          "",
+		}, "acme/", true, true},
+		{"role-root", map[string]interface{}{
+			"enabled":                  true,
+			"allowed_issuers":          "*",
+			"allowed_roles":            "*",
+			"default_directory_policy": "role:exists",
+			"dns_resolver":             "",
+			"eab_policy_name":          "",
+		}, "acme/", true, true},
+		{"bad-role-root", map[string]interface{}{
+			"enabled":                  true,
+			"allowed_issuers":          "*",
+			"allowed_roles":            "*",
+			"default_directory_policy": "role:notgood",
+			"dns_resolver":             "",
+			"eab_policy_name":          "",
+		}, "acme/", false, true},
+		{"disallowed-role-root", map[string]interface{}{
+			"enabled":                  true,
+			"allowed_issuers":          "*",
+			"allowed_roles":            "good",
+			"default_directory_policy": "role:exists",
+			"dns_resolver":             "",
+			"eab_policy_name":          "",
+		}, "acme/", false, false},
+	}
+
+	roleConfig := map[string]interface{}{
+		"issuer_ref":       "default",
+		"allowed_domains":  "example.com",
+		"allow_subdomains": true,
+		"max_ttl":          "720h",
+	}
+
+	testCtx := context.Background()
+
+	for _, tc := range cases {
+		deadline := time.Now().Add(1 * time.Minute)
+		subTestCtx, cancel := context.WithDeadline(testCtx, deadline)
+		defer cancel()
+
+		_, err := client.Logical().WriteWithContext(subTestCtx, "pki/roles/exists", roleConfig)
+		require.NoError(t, err)
+		_, err = client.Logical().WriteWithContext(subTestCtx, "pki/roles/good", roleConfig)
+		require.NoError(t, err)
+
+		t.Run(tc.name, func(t *testing.T) {
+			_, err := client.Logical().WriteWithContext(subTestCtx, "pki/config/acme", tc.AcmeConfig)
+
+			if tc.validConfig {
+				require.NoError(t, err)
+			} else {
+				require.Error(t, err)
+				return
+			}
+
+			_, err = client.Logical().ReadWithContext(subTestCtx, "pki/acme/directory")
+			if tc.works {
+				require.NoError(t, err)
+
+				baseAcmeURL := "/v1/pki/" + tc.prefixUrl
+				accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+				require.NoError(t, err, "failed creating rsa key")
+
+				acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+				// Verify the directory is usable by a real ACME client
+				_, err = acmeClient.Discover(subTestCtx)
+				require.NoError(t, err, "failed acme discovery call")
+			} else {
+				require.Error(t, err, "Acme Configuration should prevent usage")
+			}
+		})
+	}
+}
+
+// TestAcmeExternalPolicyOss makes sure that setting external-policy within the acme configuration fails on OSS
+func
TestAcmeExternalPolicyOss(t *testing.T) { + if constants.IsEnterprise { + t.Skip("this test is only valid on OSS") + } + + t.Parallel() + b, s := CreateBackendWithStorage(t) + + values := []string{"external-policy", "external-policy:", "external-policy:test"} + for _, value := range values { + t.Run(value, func(st *testing.T) { + _, err := CBWrite(b, s, "config/acme", map[string]interface{}{ + "enabled": true, + "default_directory_policy": value, + }) + + require.Error(st, err, "should have failed setting acme config") + }) + } +} diff --git a/builtin/logical/pki/path_config_ca.go b/builtin/logical/pki/path_config_ca.go index 3c4927bf1f57..a898b811befb 100644 --- a/builtin/logical/pki/path_config_ca.go +++ b/builtin/logical/pki/path_config_ca.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -7,6 +7,7 @@ import ( "context" "net/http" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -14,6 +15,13 @@ import ( func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "configure", + OperationSuffix: "ca", + }, + Fields: map[string]*framework.FieldSchema{ "pem_bundle": { Type: framework.TypeString, @@ -44,6 +52,16 @@ secret key and certificate.`, Description: "Net-new issuers imported as a part of this request", Required: true, }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, }, }}, }, @@ -73,6 +91,11 @@ For security reasons, the secret key cannot be retrieved later. 
func pathConfigIssuers(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/issuers", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ defaultRef: { Type: framework.TypeString, @@ -87,6 +110,9 @@ func pathConfigIssuers(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathCAIssuersRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "issuers-configuration", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -107,6 +133,10 @@ func pathConfigIssuers(b *backend) *framework.Path { }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAIssuersWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "issuers", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -136,6 +166,13 @@ func pathConfigIssuers(b *backend) *framework.Path { func pathReplaceRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "root/replace", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "replace", + OperationSuffix: "root", + }, + Fields: map[string]*framework.FieldSchema{ "default": { Type: framework.TypeString, @@ -176,7 +213,7 @@ func pathReplaceRoot(b *backend) *framework.Path { } func (b *backend) pathCAIssuersRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot read defaults until migration has completed"), nil } @@ -189,7 +226,7 @@ func (b *backend) pathCAIssuersRead(ctx context.Context, req *logical.Request, _ return b.formatCAIssuerConfigRead(config), nil } -func (b *backend) formatCAIssuerConfigRead(config *issuerConfigEntry) *logical.Response { +func (b *backend) formatCAIssuerConfigRead(config *issuing.IssuerConfigEntry) *logical.Response { return &logical.Response{ Data: map[string]interface{}{ defaultRef: config.DefaultIssuerId, @@ -204,7 +241,7 @@ func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot update defaults until migration has completed"), nil } @@ -273,6 +310,11 @@ value of the issuer with the name "next", if it exists. 
func pathConfigKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/keys", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ defaultRef: { Type: framework.TypeString, @@ -283,6 +325,10 @@ func pathConfigKeys(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathKeyDefaultWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "keys", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -300,6 +346,9 @@ func pathConfigKeys(b *backend) *framework.Path { }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathKeyDefaultRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "keys-configuration", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -322,7 +371,7 @@ func pathConfigKeys(b *backend) *framework.Path { } func (b *backend) pathKeyDefaultRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot read key defaults until migration has completed"), nil } @@ -345,7 +394,7 @@ func (b *backend) pathKeyDefaultWrite(ctx context.Context, req *logical.Request, b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot update key defaults until migration has completed"), nil } diff --git a/builtin/logical/pki/path_config_cluster.go b/builtin/logical/pki/path_config_cluster.go index a9ea8cb787d3..a97769831b1f 100644 --- a/builtin/logical/pki/path_config_cluster.go +++ b/builtin/logical/pki/path_config_cluster.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -16,6 +16,11 @@ import ( func pathConfigCluster(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/cluster", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ "path": { Type: framework.TypeString, @@ -44,6 +49,10 @@ For example: http://cdn.example.com/pr1/pki`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "cluster", + }, Callback: b.pathWriteCluster, Responses: map[int][]framework.Response{ http.StatusOK: {{ @@ -78,6 +87,9 @@ For example: http://cdn.example.com/pr1/pki`, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathReadCluster, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "cluster-configuration", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -143,6 +155,9 @@ func (b *backend) pathWriteCluster(ctx context.Context, req *logical.Request, da if value, ok := data.GetOk("path"); ok { cfg.Path = value.(string) + + // This field is required by ACME, if ever we allow un-setting in the + // future, this code will need to verify that ACME is not enabled. 
if !govalidator.IsURL(cfg.Path) { return nil, fmt.Errorf("invalid, non-URL path given to cluster: %v", cfg.Path) } diff --git a/builtin/logical/pki/path_config_crl.go b/builtin/logical/pki/path_config_crl.go index 14623f9e1b4b..81815ff21592 100644 --- a/builtin/logical/pki/path_config_crl.go +++ b/builtin/logical/pki/path_config_crl.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -7,51 +7,23 @@ import ( "context" "fmt" "net/http" - "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) -const latestCrlConfigVersion = 1 - -// CRLConfig holds basic CRL configuration information -type crlConfig struct { - Version int `json:"version"` - Expiry string `json:"expiry"` - Disable bool `json:"disable"` - OcspDisable bool `json:"ocsp_disable"` - AutoRebuild bool `json:"auto_rebuild"` - AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` - OcspExpiry string `json:"ocsp_expiry"` - EnableDelta bool `json:"enable_delta"` - DeltaRebuildInterval string `json:"delta_rebuild_interval"` - UseGlobalQueue bool `json:"cross_cluster_revocation"` - UnifiedCRL bool `json:"unified_crl"` - UnifiedCRLOnExistingPaths bool `json:"unified_crl_on_existing_paths"` -} - -// Implicit default values for the config if it does not exist. -var defaultCrlConfig = crlConfig{ - Version: latestCrlConfigVersion, - Expiry: "72h", - Disable: false, - OcspDisable: false, - OcspExpiry: "12h", - AutoRebuild: false, - AutoRebuildGracePeriod: "12h", - EnableDelta: false, - DeltaRebuildInterval: "15m", - UseGlobalQueue: false, - UnifiedCRL: false, - UnifiedCRLOnExistingPaths: false, -} - func pathConfigCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/crl", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ "expiry": { Type: framework.TypeString, @@ -113,6 +85,9 @@ existing CRL and OCSP paths will return the unified CRL instead of a response ba Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "crl-configuration", + }, Callback: b.pathCRLRead, Responses: map[int][]framework.Response{ http.StatusOK: {{ @@ -185,6 +160,10 @@ existing CRL and OCSP paths will return the unified CRL instead of a response ba }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCRLWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "crl", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -262,9 +241,10 @@ existing CRL and OCSP paths will return the unified CRL instead of a response ba func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getRevocationConfig() + + config, err := b.CrlBuilder().getConfigWithForcedUpdate(sc) if err != nil { - return nil, err + return nil, fmt.Errorf("failed fetching CRL config: %w", err) } return genResponseFromCrlConfig(config), nil @@ -272,14 +252,14 @@ func (b *backend) pathCRLRead(ctx context.Context, req 
*logical.Request, _ *fram func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getRevocationConfig() + config, err := b.CrlBuilder().getConfigWithForcedUpdate(sc) if err != nil { return nil, err } if expiryRaw, ok := d.GetOk("expiry"); ok { expiry := expiryRaw.(string) - _, err := time.ParseDuration(expiry) + _, err := parseutil.ParseDurationSecond(expiry) if err != nil { return logical.ErrorResponse(fmt.Sprintf("given expiry could not be decoded: %s", err)), nil } @@ -297,7 +277,7 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra if expiryRaw, ok := d.GetOk("ocsp_expiry"); ok { expiry := expiryRaw.(string) - duration, err := time.ParseDuration(expiry) + duration, err := parseutil.ParseDurationSecond(expiry) if err != nil { return logical.ErrorResponse(fmt.Sprintf("given ocsp_expiry could not be decoded: %s", err)), nil } @@ -314,7 +294,7 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra if autoRebuildGracePeriodRaw, ok := d.GetOk("auto_rebuild_grace_period"); ok { autoRebuildGracePeriod := autoRebuildGracePeriodRaw.(string) - if _, err := time.ParseDuration(autoRebuildGracePeriod); err != nil { + if _, err := parseutil.ParseDurationSecond(autoRebuildGracePeriod); err != nil { return logical.ErrorResponse(fmt.Sprintf("given auto_rebuild_grace_period could not be decoded: %s", err)), nil } config.AutoRebuildGracePeriod = autoRebuildGracePeriod @@ -327,7 +307,7 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra if deltaRebuildIntervalRaw, ok := d.GetOk("delta_rebuild_interval"); ok { deltaRebuildInterval := deltaRebuildIntervalRaw.(string) - if _, err := time.ParseDuration(deltaRebuildInterval); err != nil { + if _, err := parseutil.ParseDurationSecond(deltaRebuildInterval); err != nil { return logical.ErrorResponse(fmt.Sprintf("given delta_rebuild_interval could not be decoded: %s", err)), nil } config.DeltaRebuildInterval = deltaRebuildInterval @@ -350,16 +330,16 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra return logical.ErrorResponse("unified_crl_on_existing_paths cannot be enabled if unified_crl is disabled"), nil } - expiry, _ := time.ParseDuration(config.Expiry) + expiry, _ := parseutil.ParseDurationSecond(config.Expiry) if config.AutoRebuild { - gracePeriod, _ := time.ParseDuration(config.AutoRebuildGracePeriod) + gracePeriod, _ := parseutil.ParseDurationSecond(config.AutoRebuildGracePeriod) if gracePeriod >= expiry { return logical.ErrorResponse(fmt.Sprintf("CRL auto-rebuilding grace period (%v) must be strictly shorter than CRL expiry (%v) value when auto-rebuilding of CRLs is enabled", config.AutoRebuildGracePeriod, config.Expiry)), nil } } if config.EnableDelta { - deltaRebuildInterval, _ := time.ParseDuration(config.DeltaRebuildInterval) + deltaRebuildInterval, _ := parseutil.ParseDurationSecond(config.DeltaRebuildInterval) if deltaRebuildInterval >= expiry { return logical.ErrorResponse(fmt.Sprintf("CRL delta rebuild window (%v) must be strictly shorter than CRL expiry (%v) value when delta CRLs are enabled", config.DeltaRebuildInterval, config.Expiry)), nil } @@ -397,27 +377,21 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra return logical.ErrorResponse("unified_crl=true requires auto_rebuild=true, as unified CRLs cannot be rebuilt on every revocation."), nil } - 
entry, err := logical.StorageEntryJSON("config/crl", config) - if err != nil { - return nil, err - } - err = req.Storage.Put(ctx, entry) - if err != nil { - return nil, err + if _, err := b.CrlBuilder().writeConfig(sc, config); err != nil { + return nil, fmt.Errorf("failed persisting CRL config: %w", err) } - b.crlBuilder.markConfigDirty() - b.crlBuilder.reloadConfigIfRequired(sc) + resp := genResponseFromCrlConfig(config) // Note this only affects/happens on the main cluster node, if you need to // notify something based on a configuration change on all server types - // have a look at crlBuilder::reloadConfigIfRequired + // have a look at CrlBuilder::reloadConfigIfRequired if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) || (oldEnableDelta != config.EnableDelta) || (oldUnifiedCRL != config.UnifiedCRL) { // It wasn't disabled but now it is (or equivalently, we were set to // auto-rebuild and we aren't now or equivalently, we changed our // mind about delta CRLs and need a new complete one or equivalently, // we changed our mind about unified CRLs), rotate the CRLs. - crlErr := b.crlBuilder.rebuild(sc, true) + warnings, crlErr := b.CrlBuilder().Rebuild(sc, true) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -426,12 +400,15 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } } - return genResponseFromCrlConfig(config), nil + return resp, nil } -func genResponseFromCrlConfig(config *crlConfig) *logical.Response { +func genResponseFromCrlConfig(config *pki_backend.CrlConfig) *logical.Response { return &logical.Response{ Data: map[string]interface{}{ "expiry": config.Expiry, diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go index f6c15c742f4a..c79102b0350a 100644 --- a/builtin/logical/pki/path_config_urls.go +++ b/builtin/logical/pki/path_config_urls.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -7,9 +7,8 @@ import ( "context" "fmt" "net/http" - "strings" - "github.com/asaskevich/govalidator" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -17,6 +16,11 @@ import ( func pathConfigURLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/urls", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ "issuing_certificates": { Type: framework.TypeCommaStringSlice, @@ -51,6 +55,10 @@ to be set on all PR secondary clusters.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "urls", + }, Callback: b.pathWriteURL, Responses: map[int][]framework.Response{ http.StatusOK: {{ @@ -86,6 +94,9 @@ set on all PR Secondary clusters.`, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathReadURL, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "urls-configuration", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", @@ -128,23 +139,13 @@ set on all PR Secondary clusters.`, } } -func validateURLs(urls []string) string { - for _, curr := range urls { - if !govalidator.IsURL(curr) || strings.Contains(curr, "{{issuer_id}}") || strings.Contains(curr, "{{cluster_path}}") || strings.Contains(curr, "{{cluster_aia_path}}") { - return curr - } - } - - return "" -} - -func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigEntry, error) { +func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*issuing.AiaConfigEntry, error) { entry, err := storage.Get(ctx, "urls") if err != nil { return nil, err } - entries := &aiaConfigEntry{ + entries := &issuing.AiaConfigEntry{ IssuingCertificates: []string{}, CRLDistributionPoints: []string{}, OCSPServers: []string{}, @@ -162,7 +163,7 @@ func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigE return entries, nil } -func writeURLs(ctx context.Context, storage logical.Storage, entries *aiaConfigEntry) error { +func writeURLs(ctx context.Context, storage logical.Storage, entries *issuing.AiaConfigEntry) error { entry, err := logical.StorageEntryJSON("urls", entries) if err != nil { return err @@ -225,7 +226,7 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * }, } - if entries.EnableTemplating && !b.useLegacyBundleCaStorage() { + if entries.EnableTemplating && !b.UseLegacyBundleCaStorage() { sc := b.makeStorageContext(ctx, req.Storage) issuers, err := sc.listIssuers() if err != nil { @@ -238,23 +239,23 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * return nil, fmt.Errorf("unable to read issuer to validate templated URIs: %w", err) } - _, err = entries.toURLEntries(sc, issuer.ID) + _, err = ToURLEntries(sc, issuer.ID, entries) if err != nil { resp.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", err)) } } } else if !entries.EnableTemplating { - if badURL := validateURLs(entries.IssuingCertificates); badURL != "" { + if badURL := issuing.ValidateURLs(entries.IssuingCertificates); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority 
Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil } - if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" { + if badURL := issuing.ValidateURLs(entries.CRLDistributionPoints); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil } - if badURL := validateURLs(entries.OCSPServers); badURL != "" { + if badURL := issuing.ValidateURLs(entries.OCSPServers); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil } diff --git a/builtin/logical/pki/path_fetch.go b/builtin/logical/pki/path_fetch.go index 4f214ee7f275..c18c73a1cebb 100644 --- a/builtin/logical/pki/path_fetch.go +++ b/builtin/logical/pki/path_fetch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -11,8 +11,9 @@ import ( "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" "github.com/hashicorp/vault/helper/constants" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" @@ -28,7 +29,7 @@ var pathFetchReadSchema = map[int][]framework.Response{ Required: false, }, "revocation_time": { - Type: framework.TypeString, + Type: framework.TypeInt64, Description: `Revocation time`, Required: false, }, @@ -43,7 +44,7 @@ var pathFetchReadSchema = map[int][]framework.Response{ Required: false, }, "ca_chain": { - Type: framework.TypeStringSlice, + Type: framework.TypeString, Description: `Issuing CA Chain`, Required: false, }, @@ -56,6 +57,11 @@ func pathFetchCA(b *backend) *framework.Path { return &framework.Path{ Pattern: `ca(/pem)?`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "ca-der|ca-pem", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -73,6 +79,11 @@ func pathFetchCAChain(b *backend) *framework.Path { return &framework.Path{ Pattern: `(cert/)?ca_chain`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "ca-chain-pem|cert-ca-chain", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -90,6 +101,11 @@ func pathFetchCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl(/pem|/delta(/pem)?)?`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "crl-der|crl-pem|crl-delta|crl-delta-pem", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -107,6 +123,11 @@ func pathFetchUnifiedCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `unified-crl(/pem|/delta(/pem)?)?`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "unified-crl-der|unified-crl-pem|unified-crl-delta|unified-crl-delta-pem", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -122,6 +143,12 @@ func pathFetchUnifiedCRL(b *backend) *framework.Path 
{ func pathFetchValidRaw(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P[0-9A-Fa-f-:]+)/raw(/pem)?`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "cert-raw-der|cert-raw-pem", + }, + Fields: map[string]*framework.FieldSchema{ "serial": { Type: framework.TypeString, @@ -147,6 +174,12 @@ hyphen-separated octal`, func pathFetchValid(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P[0-9A-Fa-f-:]+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "cert", + }, + Fields: map[string]*framework.FieldSchema{ "serial": { Type: framework.TypeString, @@ -177,6 +210,11 @@ func pathFetchCRLViaCertPath(b *backend) *framework.Path { return &framework.Path{ Pattern: pattern, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "cert-crl|cert-delta-crl|cert-unified-crl|cert-unified-delta-crl", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -194,21 +232,14 @@ func pathFetchListCerts(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "certs", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathFetchCertList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `A list of keys`, - Required: true, - }, - }, - }}, - }, }, }, @@ -218,7 +249,7 @@ func pathFetchListCerts(b *backend) *framework.Path { } func (b *backend) pathFetchCertList(ctx context.Context, req *logical.Request, _ *framework.FieldData) (response *logical.Response, retErr error) { - entries, err := req.Storage.List(ctx, "certs/") + entries, err := req.Storage.List(ctx, issuing.PathCerts) if err != nil { return nil, err } @@ -276,7 +307,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data contentType = "application/pkix-cert" } case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl" || req.Path == "cert/delta-crl/raw" || req.Path == "cert/delta-crl/raw/pem" || req.Path == "unified-crl" || req.Path == "unified-crl/pem" || req.Path == "unified-crl/delta" || req.Path == "unified-crl/delta/pem" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-crl/raw" || req.Path == "cert/unified-crl/raw/pem" || req.Path == "cert/unified-delta-crl" || req.Path == "cert/unified-delta-crl/raw" || req.Path == "cert/unified-delta-crl/raw/pem": - config, err := b.crlBuilder.getConfigWithUpdate(sc) + config, err := b.CrlBuilder().GetConfigWithUpdate(sc) if err != nil { retErr = err goto reply @@ -330,7 +361,9 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data contentType = "application/pem-certificate-chain" } default: - serial = data.Get("serial").(string) + if ser, ok := data.GetOk("serial"); ok { + serial = ser.(string) + } pemType = "CERTIFICATE" } if len(serial) == 0 { @@ -340,7 +373,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data // 
Prefer fetchCAInfo to fetchCertBySerial for CA certificates. if serial == "ca_chain" || serial == "ca" { - caInfo, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + caInfo, err := sc.fetchCAInfo(defaultRef, issuing.ReadOnlyUsage) if err != nil { switch err.(type) { case errutil.UserError: @@ -422,7 +455,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data } } if revokedEntry != nil { - var revInfo revocationInfo + var revInfo revocation.RevocationInfo err := revokedEntry.DecodeJSON(&revInfo) if err != nil { return logical.ErrorResponse(fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)), nil diff --git a/builtin/logical/pki/path_fetch_issuers.go b/builtin/logical/pki/path_fetch_issuers.go index 400a3eed99e7..6e6cf42010aa 100644 --- a/builtin/logical/pki/path_fetch_issuers.go +++ b/builtin/logical/pki/path_fetch_issuers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -21,6 +22,11 @@ func pathListIssuers(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuers/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuers", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListIssuersHandler, @@ -50,7 +56,7 @@ func pathListIssuers(b *backend) *framework.Path { } func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not list issuers until migration has completed"), nil } @@ -79,8 +85,28 @@ func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Reque responseKeys = append(responseKeys, string(identifier)) responseInfo[string(identifier)] = map[string]interface{}{ - "issuer_name": issuer.Name, - "is_default": identifier == config.DefaultIssuerId, + "issuer_name": issuer.Name, + "is_default": identifier == config.DefaultIssuerId, + "serial_number": issuer.SerialNumber, + + // While nominally this could be considered sensitive information + // to be returned on an unauthed endpoint, there are two mitigating + // circumstances: + // + // 1. Key IDs are purely random numbers generated by Vault and + // have no relationship to the actual key material. + // 2. They also don't _do_ anything by themselves. There is no + // modification of KeyIDs allowed, you need to be authenticated + // to Vault to understand what they mean, you _essentially_ + // get the same information from looking at/comparing various + // certs' SubjectPublicKeyInfo fields, and there's the `default` + // reference that anyone with issuer generation capabilities + // can use even if they can't access any of the other /key/* + // endpoints. + // + // So all in all, exposing this value is not a security risk and + // is otherwise beneficial for the UI, hence its inclusion. + "key_id": issuer.KeyID, } } @@ -96,11 +122,28 @@ their identifier and their name (if set).
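A note on the `DisplayAttrs` blocks this diff adds to nearly every path: the prefix/verb/suffix triple is what the framework uses to derive OpenAPI operation IDs, and a pipe-separated `OperationSuffix` appears to line up, one entry per alternative, with the alternations in the path regex. A condensed sketch of the convention as I read it (the example operation-ID strings are my inference, not taken from the framework docs):

```go
package pki

import (
	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// pathFetchCASketch condenses the convention used throughout this diff:
// one path regex with two concrete shapes ("ca" and "ca/pem") carries two
// pipe-separated suffix entries, one per alternative, so each shape gets
// its own operation ID (roughly "pki-read-ca-der" / "pki-read-ca-pem").
func pathFetchCASketch(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: `ca(/pem)?`,

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "ca-der|ca-pem", // one entry per pattern alternative
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathFetchRead,
			},
		},
	}
}
```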
) func pathGetIssuer(b *backend) *framework.Path { - pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "(/der|/pem|/json)?" - return buildPathGetIssuer(b, pattern) + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "$" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuer", + } + + return buildPathIssuer(b, pattern, displayAttrs) +} + +func pathGetUnauthedIssuer(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/(json|der|pem)$" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuer-json|issuer-der|issuer-pem", + } + + return buildPathGetIssuer(b, pattern, displayAttrs) } -func buildPathGetIssuer(b *backend, pattern string) *framework.Path { +func buildPathIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) @@ -207,7 +250,7 @@ to be set on all PR secondary clusters.`, Required: false, }, "usage": { - Type: framework.TypeStringSlice, + Type: framework.TypeString, Description: `Usage`, Required: false, }, @@ -241,7 +284,12 @@ to be set on all PR secondary clusters.`, }, "ocsp_servers": { Type: framework.TypeStringSlice, - Description: `OSCP Servers`, + Description: `OCSP Servers`, + Required: false, + }, + "enable_aia_url_templating": { + Type: framework.TypeBool, + Description: `Whether or not templating is enabled for AIA fields`, Required: false, }, }, @@ -250,8 +298,9 @@ to be set on all PR secondary clusters.`, return &framework.Path{ // Returns a JSON entry. - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ @@ -291,17 +340,70 @@ to be set on all PR secondary clusters.`, } } +func buildPathGetIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + fields := map[string]*framework.FieldSchema{} + fields = addIssuerRefField(fields) + + getIssuerSchema := map[int][]framework.Response{ + http.StatusNotModified: {{ + Description: "Not Modified", + }}, + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuer_id": { + Type: framework.TypeString, + Description: `Issuer Id`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `Issuer Name`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeStringSlice, + Description: `CA Chain`, + Required: true, + }, + }, + }}, + } + + return &framework.Path{ + // Returns a JSON entry. + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathGetIssuer, + Responses: getIssuerSchema, + }, + }, + + HelpSynopsis: pathGetIssuerHelpSyn, + HelpDescription: pathGetIssuerHelpDesc, + } +} + func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { // Handle raw issuers first. 
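The split above (authenticated `pathGetIssuer` versus unauthenticated `pathGetUnauthedIssuer`) hinges on anchoring: the old single pattern `issuer/<ref>(/der|/pem|/json)?` matched both shapes, while the new pair uses `$` so each handler sees exactly one shape. A quick standalone check, with a simplified name regex standing in for `framework.GenericNameRegex`:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Simplified stand-ins for the two new patterns; the real
	// framework.GenericNameRegex(issuerRefParam) is stricter than [\w-]+.
	authed := regexp.MustCompile(`^issuer/(?P<issuer_ref>[\w-]+)$`)
	unauthed := regexp.MustCompile(`^issuer/(?P<issuer_ref>[\w-]+)/(json|der|pem)$`)

	for _, path := range []string{"issuer/my-root", "issuer/my-root/pem"} {
		fmt.Printf("%-20s authed=%-5v unauthed=%v\n",
			path, authed.MatchString(path), unauthed.MatchString(path))
	}
	// issuer/my-root       authed=true  unauthed=false
	// issuer/my-root/pem   authed=false unauthed=true
}
```

Splitting the patterns also lets the unauthed raw variants carry their own trimmed response schema, as the new `buildPathGetIssuer` shows.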
if strings.HasSuffix(req.Path, "/der") || strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/json") { return b.pathGetRawIssuer(ctx, req, data) } - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -323,7 +425,7 @@ func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data return respondReadIssuer(issuer) } -func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { +func respondReadIssuer(issuer *issuing.IssuerEntry) (*logical.Response, error) { var respManualChain []string for _, entity := range issuer.ManualChain { respManualChain = append(respManualChain, string(entity)) @@ -362,6 +464,7 @@ func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { data["issuing_certificates"] = issuer.AIAURIs.IssuingCertificates data["crl_distribution_points"] = issuer.AIAURIs.CRLDistributionPoints data["ocsp_servers"] = issuer.AIAURIs.OCSPServers + data["enable_aia_url_templating"] = issuer.AIAURIs.EnableTemplating } response := &logical.Response{ @@ -381,11 +484,11 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not update issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -435,9 +538,9 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } rawUsage := data.Get("usage").([]string) - newUsage, err := NewIssuerUsageFromNames(rawUsage) + newUsage, err := issuing.NewIssuerUsageFromNames(rawUsage) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil + return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, issuing.AllIssuerUsages.Names())), nil } // Revocation signature algorithm changes @@ -460,15 +563,15 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da // AIA access changes enableTemplating := data.Get("enable_aia_url_templating").(bool) issuerCertificates := data.Get("issuing_certificates").([]string) - if badURL := validateURLs(issuerCertificates); !enableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(issuerCertificates); !enableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil } crlDistributionPoints := data.Get("crl_distribution_points").([]string) - if badURL := validateURLs(crlDistributionPoints); !enableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(crlDistributionPoints); !enableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil } ocspServers := data.Get("ocsp_servers").([]string) - if badURL := validateURLs(ocspServers); !enableTemplating && badURL != "" { + if badURL := 
issuing.ValidateURLs(ocspServers); !enableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil } @@ -480,8 +583,8 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da issuer.Name = newName issuer.LastModified = time.Now().UTC() // See note in updateDefaultIssuerId about why this is necessary. - b.crlBuilder.invalidateCRLBuildTime() - b.crlBuilder.flushCRLBuildTimeInvalidation(sc) + b.CrlBuilder().invalidateCRLBuildTime() + b.CrlBuilder().flushCRLBuildTimeInvalidation(sc) modified = true } @@ -491,7 +594,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } if newUsage != issuer.Usage { - if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { + if issuer.Revoked && newUsage.HasUsage(issuing.IssuanceUsage) { // Forbid allowing cert signing on its usage. return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. Reissue this certificate (preferably with a new key) and modify that entry instead."), nil } @@ -502,7 +605,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da if err != nil { return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) } - if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { + if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(issuing.CRLSigningUsage) { return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil } @@ -516,7 +619,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } if issuer.AIAURIs == nil && (len(issuerCertificates) > 0 || len(crlDistributionPoints) > 0 || len(ocspServers) > 0) { - issuer.AIAURIs = &aiaConfigEntry{} + issuer.AIAURIs = &issuing.AiaConfigEntry{} } if issuer.AIAURIs != nil { // Associative mapping from data source to destination on the @@ -563,7 +666,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da // it'll write it out to disk for us. We'd hate to then modify the issuer // again and write it a second time. var updateChain bool - var constructedChain []issuerID + var constructedChain []issuing.IssuerID for index, newPathRef := range newPath { // Allow self for the first entry. 
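The usage guard just above relies on `x509.KeyUsage` being a bit set. A tiny runnable illustration of the certificate-side check (the issuer-side `issuing.CRLSigningUsage` flag is Vault's own bookkeeping; the bit test itself is standard `crypto/x509`):

```go
package main

import (
	"crypto/x509"
	"fmt"
)

func main() {
	// A certificate that can sign leaf certs but was never marked for
	// CRL signing when it was issued.
	ku := x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature

	// The guard in pathUpdateIssuer/pathPatchIssuer: granting crl-signing
	// usage is refused when the underlying certificate lacks this bit.
	fmt.Println(ku&x509.KeyUsageCRLSign != 0) // false -> request rejected
}
```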
if index == 0 && newPathRef == "self" { @@ -613,7 +716,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da addWarningOnDereferencing(sc, oldName, response) } if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { - _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) + _, aiaErr := ToURLEntries(sc, issuer.ID, issuer.AIAURIs) if aiaErr != nil { response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) } @@ -628,12 +731,12 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not patch issuer until migration has completed"), nil } // First we fetch the issuer - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -680,14 +783,14 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat issuer.Name = newName issuer.LastModified = time.Now().UTC() // See note in updateDefaultIssuerId about why this is necessary. - b.crlBuilder.invalidateCRLBuildTime() - b.crlBuilder.flushCRLBuildTimeInvalidation(sc) + b.CrlBuilder().invalidateCRLBuildTime() + b.CrlBuilder().flushCRLBuildTimeInvalidation(sc) modified = true } } // Leaf Not After Changes - rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behaivor") + rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behavior") if ok { rawLeafBehavior := rawLeafBehaviorData.(string) var newLeafBehavior certutil.NotAfterBehavior @@ -711,12 +814,12 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat rawUsageData, ok := data.GetOk("usage") if ok { rawUsage := rawUsageData.([]string) - newUsage, err := NewIssuerUsageFromNames(rawUsage) + newUsage, err := issuing.NewIssuerUsageFromNames(rawUsage) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil + return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, issuing.AllIssuerUsages.Names())), nil } if newUsage != issuer.Usage { - if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { + if issuer.Revoked && newUsage.HasUsage(issuing.IssuanceUsage) { // Forbid allowing cert signing on its usage. return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. Reissue this certificate (preferably with a new key) and modify that entry instead."), nil } @@ -725,7 +828,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat if err != nil { return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) } - if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { + if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(issuing.CRLSigningUsage) { return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil } @@ -762,7 +865,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat // AIA access changes. 
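Both the update and patch handlers build `constructedChain` with the same rule, visible in the loops around this point: the literal `self` is honored only as the first entry and resolves to the issuer being edited. A compact sketch of that rule, with a hypothetical resolver in place of `sc.resolveIssuerReference`:

```go
package pki

import "fmt"

// buildManualChain is a sketch, not the backend's actual helper: "self" is
// only meaningful in position 0; every other entry must resolve to a known
// issuer ID or the whole manual-chain update is rejected.
func buildManualChain(selfID string, refs []string, resolve func(string) (string, error)) ([]string, error) {
	var chain []string
	for index, ref := range refs {
		if index == 0 && ref == "self" {
			chain = append(chain, selfID)
			continue
		}
		id, err := resolve(ref)
		if err != nil {
			return nil, fmt.Errorf("unable to resolve manual chain reference %q: %w", ref, err)
		}
		chain = append(chain, id)
	}
	return chain, nil
}
```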
if issuer.AIAURIs == nil { - issuer.AIAURIs = &aiaConfigEntry{} + issuer.AIAURIs = &issuing.AiaConfigEntry{} } // Associative mapping from data source to destination on the @@ -801,7 +904,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat rawURLsValue, ok := data.GetOk(pair.Source) if ok { urlsValue := rawURLsValue.([]string) - if badURL := validateURLs(urlsValue); !issuer.AIAURIs.EnableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(urlsValue); !issuer.AIAURIs.EnableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter %v: %s", pair.Source, badURL)), nil } @@ -823,7 +926,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat if ok { newPath := newPathData.([]string) var updateChain bool - var constructedChain []issuerID + var constructedChain []issuing.IssuerID for index, newPathRef := range newPath { // Allow self for the first entry. if index == 0 && newPathRef == "self" { @@ -874,7 +977,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat addWarningOnDereferencing(sc, oldName, response) } if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { - _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) + _, aiaErr := ToURLEntries(sc, issuer.ID, issuer.AIAURIs) if aiaErr != nil { response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) } @@ -884,11 +987,11 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat } func (b *backend) pathGetRawIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -967,11 +1070,11 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not delete issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -980,7 +1083,7 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da ref, err := sc.resolveIssuerReference(issuerName) if err != nil { // Return as if we deleted it if we fail to lookup the issuer. - if ref == IssuerRefNotFound { + if ref == issuing.IssuerRefNotFound { return &logical.Response{}, nil } return nil, err @@ -1015,6 +1118,18 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da response.AddWarning(msg) } + // Finally, we need to rebuild both the local and the unified CRLs. This + // will free up any now unnecessary space used in both the CRL config + // and for the underlying CRL. + warnings, err := b.CrlBuilder().Rebuild(sc, true) + if err != nil { + return nil, err + } + + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + return response, nil } @@ -1053,22 +1168,35 @@ the certificate. 
func pathGetIssuerCRL(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?" - return buildPathGetIssuerCRL(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationSuffix: "crl|crl-pem|crl-der|crl-delta|crl-delta-pem|crl-delta-der", + } + + return buildPathGetIssuerCRL(b, pattern, displayAttrs) } func pathGetIssuerUnifiedCRL(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/unified-crl(/pem|/der|/delta(/pem|/der)?)?" - return buildPathGetIssuerCRL(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationSuffix: "unified-crl|unified-crl-pem|unified-crl-der|unified-crl-delta|unified-crl-delta-pem|unified-crl-delta-der", + } + + return buildPathGetIssuerCRL(b, pattern, displayAttrs) } -func buildPathGetIssuerCRL(b *backend, pattern string) *framework.Path { +func buildPathGetIssuerCRL(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) return &framework.Path{ // Returns raw values. - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ @@ -1093,19 +1221,30 @@ func buildPathGetIssuerCRL(b *backend, pattern string) *framework.Path { } func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get issuer's CRL until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } sc := b.makeStorageContext(ctx, req.Storage) - if err := b.crlBuilder.rebuildIfForced(sc); err != nil { + warnings, err := b.CrlBuilder().RebuildIfForced(sc) + if err != nil { return nil, err } + if len(warnings) > 0 { + // Since this is a fetch of a specific CRL, this most likely comes + // from an automated system of some sort; these warnings would be + // ignored and likely meaningless. Log them instead. + msg := "During rebuild of CRL on issuer CRL fetch, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } var certificate []byte var contentType string @@ -1132,7 +1271,7 @@ func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, da return response, nil } - crlPath, err := sc.resolveIssuerCRLPath(issuerName, isUnified) + crlPath, err := issuing.ResolveIssuerCRLPath(sc.GetContext(), sc.GetStorage(), sc.UseLegacyBundleCaStorage(), issuerName, isUnified) if err != nil { return nil, err } diff --git a/builtin/logical/pki/path_fetch_keys.go b/builtin/logical/pki/path_fetch_keys.go index af88bc595e41..b0d1d9d680a6 100644 --- a/builtin/logical/pki/path_fetch_keys.go +++ b/builtin/logical/pki/path_fetch_keys.go @@ -1,16 +1,19 @@ // Copyright (c) HashiCorp, Inc. 
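Contrast with the config-write handler earlier: on the fetch path below, rebuild warnings are folded into a single log line rather than attached to the response, on the stated theory that CRL fetches are mostly automated and response warnings would go unread. The aggregation, lifted into a helper for clarity (the function name is mine, the message format is from the hunk):

```go
package pki

import "fmt"

// formatRebuildWarnings mirrors the message assembly in pathGetIssuerCRL:
// one numbered line per warning, later emitted via b.Logger().Warn(msg).
func formatRebuildWarnings(warnings []string) string {
	msg := "During rebuild of CRL on issuer CRL fetch, got the following warnings:"
	for index, warning := range warnings {
		msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning)
	}
	return msg
}
```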
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "context" + "crypto" "fmt" "net/http" - "github.com/hashicorp/vault/sdk/helper/errutil" - + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -18,6 +21,11 @@ func pathListKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "keys", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListKeysHandler, @@ -55,7 +63,7 @@ their identifier and their name (if set).` ) func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not list keys until migration has completed"), nil } @@ -91,12 +99,19 @@ func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, func pathKey(b *backend) *framework.Path { pattern := "key/" + framework.GenericNameRegex(keyRefParam) - return buildPathKey(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "key", + } + + return buildPathKey(b, pattern, displayAttrs) } -func buildPathKey(b *backend, pattern string) *framework.Path { +func buildPathKey(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { return &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Fields: map[string]*framework.FieldSchema{ keyRefParam: { @@ -132,6 +147,11 @@ func buildPathKey(b *backend, pattern string) *framework.Path { Description: `Key Type`, Required: true, }, + "subject_key_id": { + Type: framework.TypeString, + Description: `RFC 5280 Subject Key Identifier of the public counterpart`, + Required: false, + }, "managed_key_id": { Type: framework.TypeString, Description: `Managed Key Id`, @@ -206,7 +226,7 @@ the certificate. 
) func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get keys until migration has completed"), nil } @@ -235,23 +255,40 @@ func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, d keyTypeParam: string(key.PrivateKeyType), } - if key.isManagedPrivateKey() { - managedKeyUUID, err := key.getManagedKeyUUID() + var pkForSkid crypto.PublicKey + if key.IsManagedPrivateKey() { + managedKeyUUID, err := issuing.GetManagedKeyUUID(key) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key id %s (%s): %v", key.ID, key.Name, err)} } - keyInfo, err := getManagedKeyInfo(ctx, b, managedKeyUUID) + keyInfo, err := managed_key.GetManagedKeyInfo(ctx, b, managedKeyUUID) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("failed fetching managed key info from key id %s (%s): %v", key.ID, key.Name, err)} } + pkForSkid, err = managed_key.GetManagedKeyPublicKey(sc.Context, sc.GetPkiManagedView(), managedKeyUUID) + if err != nil { + return nil, err + } + // To remain consistent across the api responses (mainly generate root/intermediate calls), return the actual // type of key, not that it is a managed key. - respData[keyTypeParam] = string(keyInfo.keyType) - respData[managedKeyIdArg] = string(keyInfo.uuid) - respData[managedKeyNameArg] = string(keyInfo.name) + respData[keyTypeParam] = string(keyInfo.KeyType) + respData[managedKeyIdArg] = string(keyInfo.Uuid) + respData[managedKeyNameArg] = string(keyInfo.Name) + } else { + pkForSkid, err = getPublicKeyFromBytes([]byte(key.PrivateKey)) + if err != nil { + return nil, err + } + } + + skid, err := certutil.GetSubjectKeyID(pkForSkid) + if err != nil { + return nil, err } + respData[skidParam] = certutil.GetHexFormatted([]byte(skid), ":") return &logical.Response{Data: respData}, nil } @@ -262,7 +299,7 @@ func (b *backend) pathUpdateKeyHandler(ctx context.Context, req *logical.Request b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not update keys until migration has completed"), nil } @@ -320,7 +357,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not delete keys until migration has completed"), nil } @@ -332,7 +369,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request sc := b.makeStorageContext(ctx, req.Storage) keyId, err := sc.resolveKeyReference(keyRef) if err != nil { - if keyId == KeyRefNotFound { + if keyId == issuing.KeyRefNotFound { // We failed to lookup the key, we should ignore any error here and reply as if it was deleted. return nil, nil } diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index 6ba1dfe23324..60742a3d8c3b 100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
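For reviewers unfamiliar with the new `subject_key_id` response field: `certutil.GetSubjectKeyID` computes, to my understanding, the RFC 5280 section 4.2.1.2 method-1 identifier, i.e. the SHA-1 hash of the DER-encoded `subjectPublicKey` BIT STRING, and `GetHexFormatted` renders it colon-separated. A self-contained approximation of both steps:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha1"
	"crypto/x509"
	"encoding/asn1"
	"fmt"
	"strings"
)

// subjectKeyID approximates certutil.GetSubjectKeyID: marshal the public
// key to SubjectPublicKeyInfo, then hash only the subjectPublicKey BIT
// STRING contents (RFC 5280 method 1).
func subjectKeyID(pub any) ([]byte, error) {
	spkiDER, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return nil, err
	}
	var spki struct {
		Algorithm        asn1.RawValue
		SubjectPublicKey asn1.BitString
	}
	if _, err := asn1.Unmarshal(spkiDER, &spki); err != nil {
		return nil, err
	}
	sum := sha1.Sum(spki.SubjectPublicKey.Bytes)
	return sum[:], nil
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	skid, err := subjectKeyID(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	// Mimic certutil.GetHexFormatted(skid, ":").
	parts := make([]string, len(skid))
	for i, b := range skid {
		parts[i] = fmt.Sprintf("%02x", b)
	}
	fmt.Println(strings.Join(parts, ":"))
}
```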
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -15,13 +15,27 @@ import ( ) func pathGenerateIntermediate(b *backend) *framework.Path { - return buildPathGenerateIntermediate(b, "intermediate/generate/"+framework.GenericNameRegex("exported")) + pattern := "intermediate/generate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) } func pathSetSignedIntermediate(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "intermediate/set-signed", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "set-signed", + OperationSuffix: "intermediate", + }, + Fields: map[string]*framework.FieldSchema{ "certificate": { Type: framework.TypeString, @@ -54,6 +68,16 @@ appended to the bundle.`, Description: "Net-new issuers imported as a part of this request", Required: true, }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, }, }}, }, @@ -78,7 +102,7 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req var err error - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not create intermediate until migration has completed"), nil } diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index f685e9154ea7..57ae2caef766 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
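The response schemas in the next hunks flip `expiration` from `TypeString` to `TypeInt64`; that matches what the handlers actually place in the response, `Certificate.NotAfter.Unix()`, so the field is Unix seconds rather than a formatted timestamp. A two-line illustration of the difference:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	notAfter := time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC)

	// What the old schema type implied (a string) versus what is returned
	// (int64 Unix seconds, as in respData["expiration"] = ...NotAfter.Unix()).
	fmt.Println(notAfter.Format(time.RFC3339)) // 2025-06-01T00:00:00Z
	fmt.Println(notAfter.Unix())               // 1748736000
}
```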
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -23,17 +24,32 @@ import ( func pathIssue(b *backend) *framework.Path { pattern := "issue/" + framework.GenericNameRegex("role") - return buildPathIssue(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "issue", + OperationSuffix: "with-role", + } + + return buildPathIssue(b, pattern, displayAttrs) } func pathIssuerIssue(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/issue/" + framework.GenericNameRegex("role") - return buildPathIssue(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "issue", + OperationSuffix: "with-role", + } + + return buildPathIssue(b, pattern, displayAttrs) } -func buildPathIssue(b *backend, pattern string) *framework.Path { +func buildPathIssue(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ @@ -60,12 +76,12 @@ func buildPathIssue(b *backend, pattern string) *framework.Path { "serial_number": { Type: framework.TypeString, Description: `Serial Number`, - Required: false, + Required: true, }, "expiration": { - Type: framework.TypeString, + Type: framework.TypeInt64, Description: `Time of expiration`, - Required: false, + Required: true, }, "private_key": { Type: framework.TypeString, @@ -93,17 +109,32 @@ func buildPathIssue(b *backend, pattern string) *framework.Path { func pathSign(b *backend) *framework.Path { pattern := "sign/" + framework.GenericNameRegex("role") - return buildPathSign(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "sign", + OperationSuffix: "with-role", + } + + return buildPathSign(b, pattern, displayAttrs) } func pathIssuerSign(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign/" + framework.GenericNameRegex("role") - return buildPathSign(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "with-role", + } + + return buildPathSign(b, pattern, displayAttrs) } -func buildPathSign(b *backend, pattern string) *framework.Path { +func buildPathSign(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ @@ -133,20 +164,10 @@ func buildPathSign(b *backend, pattern string) *framework.Path { Required: true, }, "expiration": { - Type: framework.TypeString, + Type: framework.TypeInt64, Description: `Time of expiration`, Required: true, }, - "private_key": { - Type: framework.TypeString, - Description: `Private key`, - Required: false, - }, - "private_key_type": { - Type: framework.TypeString, - Description: `Private key type`, - Required: 
false, - }, }, }}, }, @@ -170,18 +191,33 @@ func buildPathSign(b *backend, pattern string) *framework.Path { func pathIssuerSignVerbatim(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-verbatim" + framework.OptionalParamRegex("role") - return buildPathIssuerSignVerbatim(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "verbatim|verbatim-with-role", + } + + return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) } func pathSignVerbatim(b *backend) *framework.Path { pattern := "sign-verbatim" + framework.OptionalParamRegex("role") - return buildPathIssuerSignVerbatim(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "sign", + OperationSuffix: "verbatim|verbatim-with-role", + } + + return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) } -func buildPathIssuerSignVerbatim(b *backend, pattern string) *framework.Path { +func buildPathIssuerSignVerbatim(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - Fields: map[string]*framework.FieldSchema{}, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: getCsrSignVerbatimSchemaFields(), Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ @@ -208,22 +244,12 @@ func buildPathIssuerSignVerbatim(b *backend, pattern string) *framework.Path { "serial_number": { Type: framework.TypeString, Description: `Serial Number`, - Required: false, + Required: true, }, "expiration": { - Type: framework.TypeString, + Type: framework.TypeInt64, Description: `Time of expiration`, - Required: false, - }, - "private_key": { - Type: framework.TypeString, - Description: `Private key`, - Required: false, - }, - "private_key_type": { - Type: framework.TypeString, - Description: `Private key type`, - Required: false, + Required: true, }, }, }}, @@ -235,61 +261,6 @@ func buildPathIssuerSignVerbatim(b *backend, pattern string) *framework.Path { HelpDescription: pathIssuerSignVerbatimHelpDesc, } - ret.Fields = addNonCACommonFields(ret.Fields) - - ret.Fields["csr"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "", - Description: `PEM-format CSR to be signed. Values will be -taken verbatim from the CSR, except for -basic constraints.`, - } - - ret.Fields["key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, - Description: `A comma-separated string or list of key usages (not extended -key usages). Valid values can be found at -https://golang.org/pkg/crypto/x509/#KeyUsage --- simply drop the "KeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list.`, - } - - ret.Fields["ext_key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{}, - Description: `A comma-separated string or list of extended key usages. Valid values can be found at -https://golang.org/pkg/crypto/x509/#ExtKeyUsage --- simply drop the "ExtKeyUsage" part of the name. 
-To remove all key usages from being set, set -this value to an empty list.`, - } - - ret.Fields["ext_key_usage_oids"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `A comma-separated string or list of extended key usage oids.`, - } - - ret.Fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - ret.Fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - } - return ret } @@ -315,7 +286,7 @@ See the API documentation for more information about required parameters. // pathIssue issues a certificate and private key from given parameters, // subject to role restrictions -func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { +func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) { if role.KeyType == "any" { return logical.ErrorResponse("role key type \"any\" not allowed for issuing certificates, only signing"), nil } @@ -325,66 +296,64 @@ func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *fra // pathSign issues a certificate from a submitted CSR, subject to role // restrictions -func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { +func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) { return b.pathIssueSignCert(ctx, req, data, role, true, false) } // pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to // role restrictions -func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { - entry := &roleEntry{ - AllowLocalhost: true, - AllowAnyName: true, - AllowIPSANs: true, - AllowWildcardCertificates: new(bool), - EnforceHostnames: false, - KeyType: "any", - UseCSRCommonName: true, - UseCSRSANs: true, - AllowedOtherSANs: []string{"*"}, - AllowedSerialNumbers: []string{"*"}, - AllowedURISANs: []string{"*"}, - AllowedUserIDs: []string{"*"}, - CNValidations: []string{"disabled"}, - GenerateLease: new(bool), - KeyUsage: data.Get("key_usage").([]string), - ExtKeyUsage: data.Get("ext_key_usage").([]string), - ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), - SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), - } - *entry.AllowWildcardCertificates = true - - *entry.GenerateLease = false +func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) { + opts := []issuing.RoleModifier{ + issuing.WithKeyUsage(data.Get("key_usage").([]string)), + issuing.WithExtKeyUsage(data.Get("ext_key_usage").([]string)), + issuing.WithExtKeyUsageOIDs(data.Get("ext_key_usage_oids").([]string)), + 
issuing.WithSignatureBits(data.Get("signature_bits").(int)), + issuing.WithUsePSS(data.Get("use_pss").(bool)), + } + // if we did receive a role parameter value with a valid role, use some of its values + // to populate and influence the sign-verbatim behavior. if role != nil { + opts = append(opts, issuing.WithNoStore(role.NoStore)) + opts = append(opts, issuing.WithNoStoreMetadata(role.NoStoreMetadata)) + opts = append(opts, issuing.WithIssuer(role.Issuer)) + if role.TTL > 0 { - entry.TTL = role.TTL + opts = append(opts, issuing.WithTTL(role.TTL)) } + if role.MaxTTL > 0 { - entry.MaxTTL = role.MaxTTL + opts = append(opts, issuing.WithMaxTTL(role.MaxTTL)) } + if role.GenerateLease != nil { - *entry.GenerateLease = *role.GenerateLease + opts = append(opts, issuing.WithGenerateLease(*role.GenerateLease)) } + if role.NotBeforeDuration > 0 { - entry.NotBeforeDuration = role.NotBeforeDuration + opts = append(opts, issuing.WithNotBeforeDuration(role.NotBeforeDuration)) } - entry.NoStore = role.NoStore - entry.Issuer = role.Issuer - } - - if len(entry.Issuer) == 0 { - entry.Issuer = defaultRef } + entry := issuing.SignVerbatimRoleWithOpts(opts...) return b.pathIssueSignCert(ctx, req, data, entry, true, true) } -func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry, useCSR, useCSRValues bool) (*logical.Response, error) { - // If storing the certificate and on a performance standby, forward this request on to the primary - // Allow performance secondaries to generate and store certificates locally to them. - if !role.NoStore && b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { +func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry, useCSR, useCSRValues bool) (*logical.Response, error) { + // Error out early if incompatible fields set: + certMetadata, metadataInRequest := data.GetOk("cert_metadata") + if metadataInRequest { + err := validateCertMetadataConfiguration(role) + if err != nil { + return nil, err + } + } + + // If storing the certificate or certMetadata about this certificate and on a performance standby, forward this request + // on to the primary + // Allow performance secondaries to generate and store certificates and certMetadata locally to them. + needsStorage := !role.NoStore || (metadataInRequest && !role.NoStoreMetadata && issuing.MetadataPermitted) + if needsStorage && b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { return nil, logical.ErrReadOnly } @@ -407,7 +376,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } else { // Otherwise, we must have a newer API which requires an issuer // reference. 
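The sign-verbatim rewrite above replaces a hand-built permissive `roleEntry` literal with the functional-options style (`issuing.RoleModifier`, `SignVerbatimRoleWithOpts`). For readers new to the pattern, a generic sketch; the type and option names here are illustrative, not the real `issuing` package API:

```go
package main

import (
	"fmt"
	"time"
)

type roleEntry struct {
	KeyUsage []string
	TTL      time.Duration
	NoStore  bool
}

// roleModifier mirrors the shape of issuing.RoleModifier: a closure that
// adjusts one field of the entry under construction.
type roleModifier func(*roleEntry)

func withKeyUsage(u []string) roleModifier   { return func(r *roleEntry) { r.KeyUsage = u } }
func withTTL(ttl time.Duration) roleModifier { return func(r *roleEntry) { r.TTL = ttl } }
func withNoStore(noStore bool) roleModifier  { return func(r *roleEntry) { r.NoStore = noStore } }

// signVerbatimRoleWithOpts starts from permissive sign-verbatim defaults
// and layers the caller's options on top, which is the flow pathSignVerbatim
// now follows when an explicit role is supplied.
func signVerbatimRoleWithOpts(opts ...roleModifier) *roleEntry {
	entry := &roleEntry{
		KeyUsage: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"},
	}
	for _, opt := range opts {
		opt(entry)
	}
	return entry
}

func main() {
	entry := signVerbatimRoleWithOpts(withTTL(time.Hour), withNoStore(true))
	fmt.Printf("%+v\n", *entry)
}
```

The upside over the old literal is that optional role-derived settings (TTL, lease generation, not-before duration) compose as conditional appends instead of a block of field-by-field copies.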
Fetch it in this case - issuerName = getIssuerRef(data) + issuerName = GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -421,7 +390,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d var caErr error sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, issuing.IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -432,17 +401,23 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d "error fetching CA certificate: %s", caErr)} } } - + issuerId, err := issuing.ResolveIssuerReference(ctx, req.Storage, role.Issuer) + if err != nil { + if issuerId == issuing.IssuerRefNotFound { + b.Logger().Warn("could not resolve issuer reference, may be using a legacy CA bundle") + } else { + return nil, err + } + } input := &inputBundle{ req: req, apiData: data, role: role, } var parsedBundle *certutil.ParsedCertBundle - var err error var warnings []string if useCSR { - parsedBundle, warnings, err = signCert(b, input, signingBundle, false, useCSRValues) + parsedBundle, warnings, err = signCert(b.System(), input, signingBundle, false, useCSRValues) } else { parsedBundle, warnings, err = generateCert(sc, input, signingBundle, false, rand.Reader) } @@ -457,99 +432,34 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } } - signingCB, err := signingBundle.ToCertBundle() - if err != nil { - return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + generateLease := false + if role.GenerateLease != nil && *role.GenerateLease { + generateLease = true } - cb, err := parsedBundle.ToCertBundle() + resp, err := signIssueApiResponse(b, data, parsedBundle, signingBundle, generateLease, warnings) if err != nil { - return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) - } - - caChainGen := newCaChainOutput(parsedBundle, data) - - respData := map[string]interface{}{ - "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()), - "serial_number": cb.SerialNumber, - } - - switch format { - case "pem": - respData["issuing_ca"] = signingCB.Certificate - respData["certificate"] = cb.Certificate - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.pemEncodedChain() - } - if !useCSR { - respData["private_key"] = cb.PrivateKey - respData["private_key_type"] = cb.PrivateKeyType - } - - case "pem_bundle": - respData["issuing_ca"] = signingCB.Certificate - respData["certificate"] = cb.ToPEMBundle() - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.pemEncodedChain() - } - if !useCSR { - respData["private_key"] = cb.PrivateKey - respData["private_key_type"] = cb.PrivateKeyType - } - - case "der": - respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) - respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) - - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.derEncodedChain() - } - - if !useCSR { - respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) - respData["private_key_type"] = cb.PrivateKeyType - } - default: - return nil, fmt.Errorf("unsupported format: %s", format) + return nil, err } - var resp *logical.Response - switch { - case role.GenerateLease == nil: - return nil, fmt.Errorf("generate lease in role is 
nil") - case !*role.GenerateLease: - // If lease generation is disabled do not populate `Secret` field in - // the response - resp = &logical.Response{ - Data: respData, + if !role.NoStore { + err = issuing.StoreCertificate(ctx, req.Storage, b.GetCertificateCounter(), parsedBundle) + if err != nil { + return nil, err } - default: - resp = b.Secret(SecretCertsType).Response( - respData, - map[string]interface{}{ - "serial_number": cb.SerialNumber, - }) - resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now()) } - if data.Get("private_key_format").(string) == "pkcs8" { - err = convertRespToPKCS8(resp) + if metadataInRequest { + metadataBytes, err := base64.StdEncoding.DecodeString(certMetadata.(string)) if err != nil { + // TODO: Should we clean up the original cert here? return nil, err } - } - - if !role.NoStore { - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, - Value: parsedBundle.CertificateBytes, - }) + err = storeCertMetadata(ctx, req.Storage, issuerId, role.Name, parsedBundle.Certificate, metadataBytes) if err != nil { - return nil, fmt.Errorf("unable to store certificate locally: %w", err) + // TODO: Should we clean up the original cert here? + return nil, err } - b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) } if useCSR { @@ -610,6 +520,93 @@ func (cac *caChainOutput) derEncodedChain() []string { return derCaChain } +func signIssueApiResponse(b *backend, data *framework.FieldData, parsedBundle *certutil.ParsedCertBundle, signingBundle *certutil.CAInfoBundle, generateLease bool, warnings []string) (*logical.Response, error) { + cb, err := parsedBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + signingCB, err := signingBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + } + + caChainGen := newCaChainOutput(parsedBundle, data) + includeKey := parsedBundle.PrivateKey != nil + + respData := map[string]interface{}{ + "expiration": parsedBundle.Certificate.NotAfter.Unix(), + "serial_number": cb.SerialNumber, + } + + format := getFormat(data) + switch format { + case "pem": + respData["issuing_ca"] = signingCB.Certificate + respData["certificate"] = cb.Certificate + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.pemEncodedChain() + } + if includeKey { + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + case "pem_bundle": + respData["issuing_ca"] = signingCB.Certificate + respData["certificate"] = cb.ToPEMBundle() + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.pemEncodedChain() + } + if includeKey { + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + case "der": + respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) + respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) + + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.derEncodedChain() + } + + if includeKey { + respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) + respData["private_key_type"] = cb.PrivateKeyType + } + default: + return nil, fmt.Errorf("unsupported format: %s", format) + } + + var resp *logical.Response + if generateLease { + resp = 
b.Secret(SecretCertsType).Response( + respData, + map[string]interface{}{ + "serial_number": cb.SerialNumber, + }) + resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now()) + } else { + resp = &logical.Response{ + Data: respData, + } + } + + if includeKey { + if keyFormat := data.Get("private_key_format"); keyFormat == "pkcs8" { + err := convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + } + + resp = addWarnings(resp, warnings) + + return resp, nil +} + const pathIssueHelpSyn = ` Request a certificate using a certain role with the provided details. ` diff --git a/builtin/logical/pki/path_manage_issuers.go b/builtin/logical/pki/path_manage_issuers.go index 7d778dedd1ce..01c0d6653063 100644 --- a/builtin/logical/pki/path_manage_issuers.go +++ b/builtin/logical/pki/path_manage_issuers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -13,22 +13,41 @@ import ( "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) func pathIssuerGenerateRoot(b *backend) *framework.Path { - return buildPathGenerateRoot(b, "issuers/generate/root/"+framework.GenericNameRegex("exported")) + pattern := "issuers/generate/root/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "generate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) } func pathRotateRoot(b *backend) *framework.Path { - return buildPathGenerateRoot(b, "root/rotate/"+framework.GenericNameRegex("exported")) + pattern := "root/rotate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) } -func buildPathGenerateRoot(b *backend, pattern string) *framework.Path { +func buildPathGenerateRoot(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ @@ -38,8 +57,8 @@ func buildPathGenerateRoot(b *backend, pattern string) *framework.Path { Description: "OK", Fields: map[string]*framework.FieldSchema{ "expiration": { - Type: framework.TypeString, - Description: `The expiration of the given.`, + Type: framework.TypeInt64, + Description: `The expiration of the given issuer.`, Required: true, }, "serial_number": { @@ -98,21 +117,38 @@ func buildPathGenerateRoot(b *backend, pattern string) *framework.Path { ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{}) ret.Fields = addCAKeyGenerationFields(ret.Fields) ret.Fields = addCAIssueFields(ret.Fields) + ret.Fields = addCACertKeyUsage(ret.Fields) return ret } func pathIssuerGenerateIntermediate(b *backend) *framework.Path { - return buildPathGenerateIntermediate(b, - "issuers/generate/intermediate/"+framework.GenericNameRegex("exported")) + pattern := "issuers/generate/intermediate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: 
operationPrefixPKIIssuers, + OperationVerb: "generate", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) } func pathCrossSignIntermediate(b *backend) *framework.Path { - return buildPathGenerateIntermediate(b, "intermediate/cross-sign") + pattern := "intermediate/cross-sign" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "cross-sign", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) } -func buildPathGenerateIntermediate(b *backend, pattern string) *framework.Path { +func buildPathGenerateIntermediate(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathGenerateIntermediate, @@ -162,6 +198,7 @@ extension with CA: true. Only needed as a workaround in some compatibility scenarios with Active Directory Certificate Services.`, } + ret.Fields = addCaCsrKeyUsage(ret.Fields) // At this time Go does not support signing CSRs using PSS signatures, see // https://github.com/golang/go/issues/45990 @@ -173,6 +210,13 @@ with Active Directory Certificate Services.`, func pathImportIssuer(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuers/import/(cert|bundle)", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "import", + OperationSuffix: "cert|bundle", + }, + Fields: map[string]*framework.FieldSchema{ "pem_bundle": { Type: framework.TypeString, @@ -203,6 +247,16 @@ secret-key (optional) and certificates.`, Description: "Net-new issuers imported as a part of this request", Required: true, }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, }, }}, }, @@ -225,7 +279,7 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d keysAllowed := strings.HasSuffix(req.Path, "bundle") || req.Path == "config/ca" - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not import issuers until migration has completed"), nil } @@ -268,6 +322,8 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d var createdKeys []string var createdIssuers []string + var existingKeys []string + var existingIssuers []string issuerKeyMap := make(map[string]string) // Rather than using certutil.ParsePEMBundle (which restricts the @@ -324,6 +380,8 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d if !existing { createdKeys = append(createdKeys, key.ID.String()) + } else { + existingKeys = append(existingKeys, key.ID.String()) } } @@ -336,6 +394,8 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d issuerKeyMap[cert.ID.String()] = cert.KeyID.String() if !existing { createdIssuers = append(createdIssuers, cert.ID.String()) + } else { + existingIssuers = append(existingIssuers, cert.ID.String()) } } @@ -344,11 +404,13 @@ func (b *backend) pathImportIssuers(ctx context.Context, 
req *logical.Request, d "mapping": issuerKeyMap, "imported_keys": createdKeys, "imported_issuers": createdIssuers, + "existing_keys": existingKeys, + "existing_issuers": existingIssuers, }, } if len(createdIssuers) > 0 { - err := b.crlBuilder.rebuild(sc, true) + warnings, err := b.CrlBuilder().Rebuild(sc, true) if err != nil { // Before returning, check if the error message includes the // string "PSS". If so, it indicates we might've wanted to modify @@ -363,6 +425,9 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d return nil, err } + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } var issuersWithKeys []string for _, issuer := range createdIssuers { @@ -377,7 +442,7 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d response.AddWarning("Unable to fetch default issuers configuration to update default issuer if necessary: " + err.Error()) } else if config.DefaultFollowsLatestIssuer { if len(issuersWithKeys) == 1 { - if err := sc.updateDefaultIssuerId(issuerID(issuersWithKeys[0])); err != nil { + if err := sc.updateDefaultIssuerId(issuing.IssuerID(issuersWithKeys[0])); err != nil { response.AddWarning("Unable to update this new root as the default issuer: " + err.Error()) } } else if len(issuersWithKeys) > 1 { @@ -454,7 +519,14 @@ func pathRevokeIssuer(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/revoke", - Fields: fields, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + OperationSuffix: "issuer", + }, + + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ @@ -559,11 +631,11 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da defer b.issuersLock.Unlock() // Issuer revocation can't work on the legacy cert bundle. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("cannot revoke issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -593,8 +665,8 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // new revocations of leaves issued by this issuer to trigger a CRL // rebuild still. issuer.Revoked = true - if issuer.Usage.HasUsage(IssuanceUsage) { - issuer.Usage.ToggleUsage(IssuanceUsage) + if issuer.Usage.HasUsage(issuing.IssuanceUsage) { + issuer.Usage.ToggleUsage(issuing.IssuanceUsage) } currTime := time.Now() @@ -608,7 +680,7 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // Now, if the parent issuer exists within this mount, we'd have written // a storage entry for this certificate, making it appear as any other - // leaf. We need to add a revocationInfo entry for this into storage, + // leaf. We need to add a RevocationInfo entry for this into storage, // so that it appears as if it was revoked. // // This is a _necessary_ but not necessarily _sufficient_ step to @@ -619,7 +691,7 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // include both in two separate CRLs. Hence, the former is the condition // we check in CRL building, but this step satisfies other guarantees // within Vault. 
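Aside (illustrative, not part of this change): the revocation handler above treats issuer usages as bit flags, where HasUsage tests a bit and ToggleUsage flips it, so clearing IssuanceUsage leaves CRLSigningUsage and OCSPSigningUsage intact. A minimal, self-contained sketch of that pattern, using hypothetical names standing in for the issuing package's real internals:

package main

import "fmt"

// issuerUsage is a hypothetical stand-in for the issuing package's usage type.
type issuerUsage uint

const (
	issuanceUsage issuerUsage = 1 << iota
	crlSigningUsage
	ocspSigningUsage
)

// HasUsage reports whether the given flag bit is set.
func (u issuerUsage) HasUsage(flag issuerUsage) bool { return u&flag != 0 }

// ToggleUsage flips the given flag bit, leaving all other bits untouched.
func (u *issuerUsage) ToggleUsage(flag issuerUsage) { *u ^= flag }

func main() {
	usage := issuanceUsage | crlSigningUsage | ocspSigningUsage
	if usage.HasUsage(issuanceUsage) {
		usage.ToggleUsage(issuanceUsage) // a revoked issuer may no longer issue
	}
	fmt.Println(usage.HasUsage(issuanceUsage))   // false
	fmt.Println(usage.HasUsage(crlSigningUsage)) // true: CRL signing survives revocation
}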
- certEntry, err := fetchCertBySerial(sc, "certs/", issuer.SerialNumber) + certEntry, err := fetchCertBySerial(sc, issuing.PathCerts, issuer.SerialNumber) if err == nil && certEntry != nil { // We've inverted this error check as it doesn't matter; we already // consider this certificate revoked. @@ -643,7 +715,7 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // // We'll let a cleanup pass or CRL build identify the issuer for // us. - revInfo := revocationInfo{ + revInfo := revocation.RevocationInfo{ CertificateBytes: issuerCert.Raw, RevocationTime: issuer.RevocationTime, RevocationTimeUTC: issuer.RevocationTimeUTC, @@ -662,7 +734,7 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da } // Rebuild the CRL to include the newly revoked issuer. - crlErr := b.crlBuilder.rebuild(sc, false) + warnings, crlErr := b.CrlBuilder().Rebuild(sc, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -678,6 +750,9 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // Impossible. return nil, err } + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } // For sanity, we'll add a warning message here if there's no other // issuer which verifies this issuer. diff --git a/builtin/logical/pki/path_manage_keys.go b/builtin/logical/pki/path_manage_keys.go index 4d694204ae5f..a7727d9d6d8b 100644 --- a/builtin/logical/pki/path_manage_keys.go +++ b/builtin/logical/pki/path_manage_keys.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -10,6 +10,7 @@ import ( "net/http" "strings" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -19,6 +20,12 @@ func pathGenerateKey(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/generate/(internal|exported|kms)", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "internal-key|exported-key|kms-key", + }, + Fields: map[string]*framework.FieldSchema{ keyNameParam: { Type: framework.TypeString, @@ -38,9 +45,8 @@ func pathGenerateKey(b *backend) *framework.Path { Type: framework.TypeInt, Default: 0, Description: `The number of bits to use. 
Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, +0 (universal default); with rsa key_type: 2048 (default), 3072, 4096 or 8192; +with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.`, }, "managed_key_name": { Type: framework.TypeString, @@ -109,7 +115,7 @@ func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Reque b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not generate keys until migration has completed"), nil } @@ -148,7 +154,7 @@ func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Reque return nil, err } - keyBundle, actualPrivateKeyType, err = createKmsKeyBundle(ctx, b, keyId) + keyBundle, actualPrivateKeyType, err = managed_key.CreateKmsKeyBundle(ctx, b, keyId) if err != nil { return nil, err } @@ -182,6 +188,12 @@ func pathImportKey(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/import", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "import", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ keyNameParam: { Type: framework.TypeString, @@ -241,7 +253,7 @@ func (b *backend) pathImportKeyHandler(ctx context.Context, req *logical.Request b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot import keys until migration has completed"), nil } diff --git a/builtin/logical/pki/path_manage_keys_test.go b/builtin/logical/pki/path_manage_keys_test.go index 3c5708a8bb04..58c4222260b1 100644 --- a/builtin/logical/pki/path_manage_keys_test.go +++ b/builtin/logical/pki/path_manage_keys_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -12,10 +12,9 @@ import ( "fmt" "testing" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/certutil" - + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/require" ) @@ -31,7 +30,7 @@ func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { wantLogicalErr bool }{ {"all-defaults", "", []int{0}, false}, - {"rsa", "rsa", []int{0, 2048, 3072, 4096}, false}, + {"rsa", "rsa", []int{0, 2048, 3072, 4096, 8192}, false}, {"ec", "ec", []int{0, 224, 256, 384, 521}, false}, {"ed25519", "ed25519", []int{0}, false}, {"error-rsa", "rsa", []int{-1, 343444}, true}, @@ -152,7 +151,7 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { require.NotEmpty(t, resp.Data["key_id"], "key id for ec import response was empty") require.Equal(t, "my-ec-key", resp.Data["key_name"], "key_name was incorrect for ec key") require.Equal(t, certutil.ECPrivateKey, resp.Data["key_type"]) - keyId1 := resp.Data["key_id"].(keyID) + keyId1 := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -170,7 +169,7 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) - keyId2 := resp.Data["key_id"].(keyID) + keyId2 := resp.Data["key_id"].(issuing.KeyID) require.NotEqual(t, keyId1, keyId2) @@ -251,7 +250,7 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) - keyId2Reimport := resp.Data["key_id"].(keyID) + keyId2Reimport := resp.Data["key_id"].(issuing.KeyID) require.NotEqual(t, keyId2, keyId2Reimport, "re-importing key 2 did not generate a new key id") } @@ -270,7 +269,7 @@ func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { require.NoError(t, err, "Failed generating key") require.NotNil(t, resp, "Got nil response generating key") require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.DeleteOperation, @@ -298,7 +297,7 @@ func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { require.NoError(t, err, "Failed generating issuer") require.NotNil(t, resp, "Got nil response generating issuer") require.False(t, resp.IsError(), "resp contained errors generating issuer: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.DeleteOperation, @@ -325,7 +324,7 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { require.NoError(t, err, "Failed generating key") require.NotNil(t, resp, "Got nil response generating key") require.False(t, resp.IsError(), "resp contained errors generating key: %#v", 
resp.Error()) - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, diff --git a/builtin/logical/pki/path_ocsp.go b/builtin/logical/pki/path_ocsp.go index ad181cf4108b..ea93b5da4ac3 100644 --- a/builtin/logical/pki/path_ocsp.go +++ b/builtin/logical/pki/path_ocsp.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -19,13 +19,16 @@ import ( "strings" "time" - "github.com/hashicorp/vault/sdk/helper/errutil" - - "golang.org/x/crypto/ocsp" - + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ocsp" ) const ( @@ -38,7 +41,7 @@ type ocspRespInfo struct { serialNumber *big.Int ocspStatus int revocationTimeUTC *time.Time - issuerID issuerID + issuerID issuing.IssuerID } // These response variables should not be mutated, instead treat them as constants @@ -71,16 +74,33 @@ var ( ) func buildPathOcspGet(b *backend) *framework.Path { - return buildOcspGetWithPath(b, "ocsp/"+framework.MatchAllRegex(ocspReqParam)) + pattern := "ocsp/" + framework.MatchAllRegex(ocspReqParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "ocsp-with-get-req", + } + + return buildOcspGetWithPath(b, pattern, displayAttrs) } func buildPathUnifiedOcspGet(b *backend) *framework.Path { - return buildOcspGetWithPath(b, "unified-ocsp/"+framework.MatchAllRegex(ocspReqParam)) + pattern := "unified-ocsp/" + framework.MatchAllRegex(ocspReqParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "unified-ocsp-with-get-req", + } + + return buildOcspGetWithPath(b, pattern, displayAttrs) } -func buildOcspGetWithPath(b *backend, pattern string) *framework.Path { +func buildOcspGetWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { return &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Fields: map[string]*framework.FieldSchema{ ocspReqParam: { Type: framework.TypeString, @@ -99,16 +119,33 @@ func buildOcspGetWithPath(b *backend, pattern string) *framework.Path { } func buildPathOcspPost(b *backend) *framework.Path { - return buildOcspPostWithPath(b, "ocsp") + pattern := "ocsp" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "ocsp", + } + + return buildOcspPostWithPath(b, pattern, displayAttrs) } func buildPathUnifiedOcspPost(b *backend) *framework.Path { - return buildOcspPostWithPath(b, "unified-ocsp") + pattern := "unified-ocsp" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "unified-ocsp", + } + + return buildOcspPostWithPath(b, pattern, displayAttrs) } -func buildOcspPostWithPath(b *backend, pattern string) *framework.Path { +func buildOcspPostWithPath(b *backend, pattern 
string, displayAttrs *framework.DisplayAttributes) *framework.Path { return &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.ocspHandler, @@ -122,7 +159,7 @@ func buildOcspPostWithPath(b *backend, pattern string) *framework.Path { func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, request.Storage) - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) + cfg, err := b.CrlBuilder().GetConfigWithUpdate(sc) if err != nil || cfg.OcspDisable || (isUnifiedOcspPath(request) && !cfg.UnifiedCRL) { return OcspUnauthorizedResponse, nil } @@ -141,7 +178,7 @@ func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, dat ocspStatus, err := getOcspStatus(sc, ocspReq, useUnifiedStorage) if err != nil { - return logAndReturnInternalError(b, err), nil + return logAndReturnInternalError(b.Logger(), err), nil } caBundle, issuer, err := lookupOcspIssuer(sc, ocspReq, ocspStatus.issuerID) @@ -159,12 +196,12 @@ func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, dat // https://www.rfc-editor.org/rfc/rfc5019#section-2.2.3 return OcspUnauthorizedResponse, nil } - return logAndReturnInternalError(b, err), nil + return logAndReturnInternalError(b.Logger(), err), nil } byteResp, err := genResponse(cfg, caBundle, ocspStatus, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) if err != nil { - return logAndReturnInternalError(b, err), nil + return logAndReturnInternalError(b.Logger(), err), nil } return &logical.Response{ @@ -176,7 +213,7 @@ func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, dat }, nil } -func canUseUnifiedStorage(req *logical.Request, cfg *crlConfig) bool { +func canUseUnifiedStorage(req *logical.Request, cfg *pki_backend.CrlConfig) bool { if isUnifiedOcspPath(req) { return true } @@ -190,13 +227,13 @@ func isUnifiedOcspPath(req *logical.Request) bool { return strings.HasPrefix(req.Path, "unified-ocsp") } -func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { +func generateUnknownResponse(cfg *pki_backend.CrlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did // not match the request's issuer. If no default issuer can be used, return with Unauthorized as there // isn't much else we can do at this point. config, err := sc.getIssuersConfig() if err != nil { - return logAndReturnInternalError(sc.Backend, err) + return logAndReturnInternalError(sc.Logger(), err) } if config.DefaultIssuerId == "" { @@ -211,10 +248,10 @@ func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.R // no way to sign a response so Unauthorized it is. return OcspUnauthorizedResponse } - return logAndReturnInternalError(sc.Backend, err) + return logAndReturnInternalError(sc.Logger(), err) } - if !issuer.Usage.HasUsage(OCSPSigningUsage) { + if !issuer.Usage.HasUsage(issuing.OCSPSigningUsage) { // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. 
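Aside (illustrative, not part of this change): the Unauthorized fallbacks throughout this handler follow RFC 6960/5019, where an "unauthorized" OCSP error is an unsigned OCSPResponse consisting solely of responseStatus unauthorized(6); its DER encoding is a fixed five bytes. A hedged sketch of that constant, which the OcspUnauthorizedResponse variable referenced above presumably wraps:

package main

import "fmt"

func main() {
	// SEQUENCE { ENUMERATED 6 } -- the complete body of an "unauthorized"
	// OCSP error response; there is no signature to build or verify.
	unauthorized := []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
	fmt.Printf("% X\n", unauthorized) // 30 03 0A 01 06
}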
return OcspUnauthorizedResponse } @@ -226,7 +263,7 @@ func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.R byteResp, err := genResponse(cfg, caBundle, info, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) if err != nil { - return logAndReturnInternalError(sc.Backend, err) + return logAndReturnInternalError(sc.Logger(), err) } return &logical.Response{ @@ -279,12 +316,12 @@ func fetchDerEncodedRequest(request *logical.Request, data *framework.FieldData) } } -func logAndReturnInternalError(b *backend, err error) *logical.Response { +func logAndReturnInternalError(logger hclog.Logger, err error) *logical.Response { // Since OCSP might be a high traffic endpoint, we will log at debug level only // any internal errors we do get. There is no way for us to return to the end-user // errors, so we rely on the log statement to help in debugging possible // issues in the field. - b.Logger().Debug("OCSP internal error", "error", err) + logger.Debug("OCSP internal error", "error", err) return OcspInternalErrorResponse } @@ -300,7 +337,7 @@ func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage } if revEntryRaw != nil { - var revEntry revocationInfo + var revEntry revocation.RevocationInfo if err := revEntryRaw.DecodeJSON(&revEntry); err != nil { return nil, err } @@ -325,7 +362,7 @@ func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage return &info, nil } -func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { +func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuing.IssuerID) (*certutil.ParsedCertBundle, *issuing.IssuerEntry, error) { reqHash := req.HashAlgorithm if !reqHash.Available() { return nil, nil, x509.ErrUnsupportedAlgorithm @@ -362,7 +399,7 @@ func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer is } if matches { - if !issuer.Usage.HasUsage(OCSPSigningUsage) { + if !issuer.Usage.HasUsage(issuing.OCSPSigningUsage) { matchedButNoUsage = true // We found a matching issuer, but it's not allowed to sign the // response, there might be another issuer that we rotated @@ -382,7 +419,7 @@ func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer is return nil, nil, ErrUnknownIssuer } -func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { +func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuing.IssuerID) (*certutil.ParsedCertBundle, *issuing.IssuerEntry, error) { issuer, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) if err != nil { switch err.(type) { @@ -399,7 +436,7 @@ func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil return nil, nil, ErrIssuerHasNoKey } - caBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + caBundle, err := parseCABundle(sc.Context, sc.GetPkiManagedView(), bundle) if err != nil { return nil, nil, err } @@ -407,13 +444,13 @@ func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil return caBundle, issuer, nil } -func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuerID) ([]issuerID, error) { +func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuing.IssuerID) ([]issuing.IssuerID, error) { if optRevokedIssuer != "" { - return []issuerID{optRevokedIssuer}, nil + return []issuing.IssuerID{optRevokedIssuer}, nil } - if sc.Backend.useLegacyBundleCaStorage() { - return 
[]issuerID{legacyBundleShimID}, nil + if sc.UseLegacyBundleCaStorage() { + return []issuing.IssuerID{legacyBundleShimID}, nil } return sc.listIssuers() @@ -440,9 +477,9 @@ func doesRequestMatchIssuer(parsedBundle *certutil.ParsedCertBundle, req *ocsp.R return bytes.Equal(req.IssuerKeyHash, issuerKeyHash) && bytes.Equal(req.IssuerNameHash, issuerNameHash), nil } -func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash, revSigAlg x509.SignatureAlgorithm) ([]byte, error) { +func genResponse(cfg *pki_backend.CrlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash, revSigAlg x509.SignatureAlgorithm) ([]byte, error) { curTime := time.Now() - duration, err := time.ParseDuration(cfg.OcspExpiry) + duration, err := parseutil.ParseDurationSecond(cfg.OcspExpiry) if err != nil { return nil, err } @@ -465,17 +502,26 @@ revSigAlg = x509.SHA512WithRSA } + // Due to a bug in Go's ocsp.ParseResponse(...), we no longer set the + // Certificate field on the response, to help Go-based OCSP clients. + // Providing it was technically unnecessary anyway, as the Certificate given here + // both signed the OCSP response and issued the leaf cert, and so + // should already be trusted by the client. + // + // See also: https://github.com/golang/go/issues/59641 template := ocsp.Response{ IssuerHash: reqHash, Status: info.ocspStatus, SerialNumber: info.serialNumber, ThisUpdate: curTime, - NextUpdate: curTime.Add(duration), - Certificate: caBundle.Certificate, ExtraExtensions: []pkix.Extension{}, SignatureAlgorithm: revSigAlg, } + if duration > 0 { + template.NextUpdate = curTime.Add(duration) + } + if info.ocspStatus == ocsp.Revoked { template.RevokedAt = *info.revocationTimeUTC template.RevocationReason = ocsp.Unspecified diff --git a/builtin/logical/pki/path_ocsp_test.go b/builtin/logical/pki/path_ocsp_test.go index 577e4f98409c..517b55de79d7 100644 --- a/builtin/logical/pki/path_ocsp_test.go +++ b/builtin/logical/pki/path_ocsp_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -17,12 +17,14 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/vault" - + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" "github.com/stretchr/testify/require" "golang.org/x/crypto/ocsp" ) @@ -163,7 +165,7 @@ func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { // Twiddle the entry so that the issuer id is no longer valid.
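Aside (illustrative, not part of this change): genResponse above switches from time.ParseDuration to parseutil.ParseDurationSecond from go-secure-stdlib, which accepts both Go duration strings and bare integers (treated as seconds). A small sketch of the difference:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// Suffixed duration strings parse identically either way.
	d1, _ := time.ParseDuration("12h")
	d2, _ := parseutil.ParseDurationSecond("12h")
	fmt.Println(d1 == d2) // true

	// A bare integer is rejected by time.ParseDuration ("missing unit")...
	_, err := time.ParseDuration("30")
	fmt.Println(err != nil) // true

	// ...but ParseDurationSecond reads it as 30 seconds.
	d3, _ := parseutil.ParseDurationSecond("30")
	fmt.Println(d3) // 30s
}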
storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo + var revInfo revocation.RevocationInfo revEntry, err := s.Get(ctx, storagePath) require.NoError(t, err, "failed looking up storage path: %s", storagePath) err = revEntry.DecodeJSON(&revInfo) @@ -259,11 +261,11 @@ func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { requireFieldsSetInResp(t, resp, "usage") // Do not assume a specific ordering for usage... - usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) + usages, err := issuing.NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) + require.True(t, usages.HasUsage(issuing.IssuanceUsage)) + require.True(t, usages.HasUsage(issuing.CRLSigningUsage)) + require.False(t, usages.HasUsage(issuing.OCSPSigningUsage)) // Request an OCSP request from it, we should get an Unauthorized response back resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) @@ -291,7 +293,7 @@ func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { resp, err = CBRead(b, s, "issuer/"+testEnv.issuerId1.String()) requireSuccessNonNilResponse(t, resp, err, "failed reading issuer") requireFieldsSetInResp(t, resp, "key_id") - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) // This is a bit naughty but allow me to delete the key... sc := b.makeStorageContext(context.Background(), s) @@ -344,11 +346,11 @@ func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") requireFieldsSetInResp(t, resp, "usage") // Do not assume a specific ordering for usage... 
- usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) + usages, err := issuing.NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) + require.True(t, usages.HasUsage(issuing.IssuanceUsage)) + require.True(t, usages.HasUsage(issuing.CRLSigningUsage)) + require.False(t, usages.HasUsage(issuing.OCSPSigningUsage)) // Request an OCSP request from it, we should get a Good response back, from the rotated cert resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) @@ -365,7 +367,6 @@ func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - require.Equal(t, rotatedCert, ocspResp.Certificate) requireOcspSignatureAlgoForKey(t, rotatedCert.SignatureAlgorithm, ocspResp.SignatureAlgorithm) requireOcspResponseSignedBy(t, ocspResp, rotatedCert) @@ -442,11 +443,15 @@ func TestOcsp_HigherLevel(t *testing.T) { require.NoError(t, err, "parsing ocsp get response") require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, issuerCert, ocspResp.Certificate) require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) // Test OCSP Get request for ocsp urlEncoded := base64.StdEncoding.EncodeToString(ocspReq) + if strings.Contains(urlEncoded, "//") { + // workaround known redirect bug that is difficult to fix + t.Skipf("VAULT-13630 - Skipping GET OCSP test with encoded issuer cert containing // triggering redirection bug") + } + ocspGetReq := client.NewRequest(http.MethodGet, "/v1/pki/ocsp/"+urlEncoded) ocspGetReq.Headers.Set("Content-Type", "application/ocsp-request") rawResp, err = client.RawRequest(ocspGetReq) @@ -463,10 +468,21 @@ require.NoError(t, err, "parsing ocsp get response") require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, issuerCert, ocspResp.Certificate) require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) } +// TestOcsp_NextUpdate makes sure that we are setting the appropriate values +// for the NextUpdate field within our responses.
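Aside (illustrative, not part of this change): the TestOcsp_NextUpdate test below relies on x/crypto/ocsp leaving Response.NextUpdate at Go's zero time.Time when the optional nextUpdate field is absent from the DER, so IsZero distinguishes "no freshness bound advertised" from a real timestamp. A minimal illustration of that zero-value convention:

package main

import (
	"fmt"
	"time"
)

func main() {
	var nextUpdate time.Time // as the parser leaves it when nextUpdate is absent
	fmt.Println(nextUpdate.IsZero()) // true: no NextUpdate was advertised

	nextUpdate = time.Now().Add(24 * time.Hour)
	fmt.Println(nextUpdate.IsZero()) // false: a freshness bound is present
}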
+func TestOcsp_NextUpdate(t *testing.T) { + // Within the runOcspRequestTest, with an ocspExpiry of 0, + // we will validate that NextUpdate was not set in the response + runOcspRequestTest(t, "POST", "ec", 0, 0, crypto.SHA256, 0) + + // Within the runOcspRequestTest, with an ocspExpiry of 24 hours, we will validate + // that NextUpdate is set to a time 24 hours after ThisUpdate + runOcspRequestTest(t, "POST", "ec", 0, 0, crypto.SHA256, 24*time.Hour) +} + func TestOcsp_ValidRequests(t *testing.T) { type caKeyConf struct { keyType string @@ -506,13 +522,15 @@ func TestOcsp_ValidRequests(t *testing.T) { localTT.reqHash) t.Run(testName, func(t *testing.T) { runOcspRequestTest(t, localTT.reqType, localTT.keyConf.keyType, localTT.keyConf.keyBits, - localTT.keyConf.sigBits, localTT.reqHash) + localTT.keyConf.sigBits, localTT.reqHash, 12*time.Hour) }) } } -func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKeyBits int, caKeySigBits int, requestHash crypto.Hash) { - b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits) +func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, + caKeyBits int, caKeySigBits int, requestHash crypto.Hash, ocspExpiry time.Duration, +) { + b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits, ocspExpiry) // Non-revoked cert resp, err := SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) @@ -527,7 +545,6 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe require.Equal(t, ocsp.Good, ocspResp.Status) require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) @@ -552,7 +569,6 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe require.Equal(t, ocsp.Revoked, ocspResp.Status) require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) @@ -572,22 +588,32 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe require.Equal(t, ocsp.Good, ocspResp.Status) require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer2, ocspResp.Certificate) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) // Verify that our thisUpdate and nextUpdate fields are updated as expected - thisUpdate := ocspResp.ThisUpdate - nextUpdate := ocspResp.NextUpdate - require.True(t, thisUpdate.Before(nextUpdate), - fmt.Sprintf("thisUpdate %s, should have been before nextUpdate: %s", thisUpdate, nextUpdate)) - nextUpdateDiff := nextUpdate.Sub(thisUpdate) - expectedDiff, err := time.ParseDuration(defaultCrlConfig.OcspExpiry) + resp, err = CBRead(b, s, "config/crl") + requireSuccessNonNilResponse(t, resp, err, "failed reading from config/crl") + requireFieldsSetInResp(t, resp, "ocsp_expiry") + ocspExpiryRaw := resp.Data["ocsp_expiry"].(string) + expectedDiff, err := parseutil.ParseDurationSecond(ocspExpiryRaw) require.NoError(t, err, "failed to parse default ocsp expiry value") - require.Equal(t, expectedDiff, nextUpdateDiff, - fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was
%s", - thisUpdate, nextUpdate, defaultCrlConfig.OcspExpiry, nextUpdateDiff)) + thisUpdate := ocspResp.ThisUpdate + require.Less(t, time.Since(thisUpdate), 10*time.Second, "expected ThisUpdate field to be within the last 10 seconds") + if expectedDiff != 0 { + nextUpdate := ocspResp.NextUpdate + require.False(t, nextUpdate.IsZero(), "nextUpdate field value should have been a non-zero time") + require.True(t, thisUpdate.Before(nextUpdate), + fmt.Sprintf("thisUpdate %s, should have been before nextUpdate: %s", thisUpdate, nextUpdate)) + nextUpdateDiff := nextUpdate.Sub(thisUpdate) + require.Equal(t, expectedDiff, nextUpdateDiff, + fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was %s", + thisUpdate, nextUpdate, pki_backend.DefaultCrlConfig.OcspExpiry, nextUpdateDiff)) + } else { + // With the config value set to 0, we shouldn't have a NextUpdate field set + require.True(t, ocspResp.NextUpdate.IsZero(), "nextUpdate value was not zero as expected was: %v", ocspResp.NextUpdate) + } requireOcspSignatureAlgoForKey(t, testEnv.issuer2.SignatureAlgorithm, ocspResp.SignatureAlgorithm) requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer2) } @@ -602,26 +628,32 @@ type ocspTestEnv struct { issuer1 *x509.Certificate issuer2 *x509.Certificate - issuerId1 issuerID - issuerId2 issuerID + issuerId1 issuing.IssuerID + issuerId2 issuing.IssuerID leafCertIssuer1 *x509.Certificate leafCertIssuer2 *x509.Certificate - keyId1 keyID - keyId2 keyID + keyId1 issuing.KeyID + keyId2 issuing.KeyID } func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocspTestEnv) { - return setupOcspEnvWithCaKeyConfig(t, keyType, 0, 0) + return setupOcspEnvWithCaKeyConfig(t, keyType, 0, 0, 12*time.Hour) } -func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int) (*backend, logical.Storage, *ocspTestEnv) { +func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int, ocspExpiry time.Duration) (*backend, logical.Storage, *ocspTestEnv) { b, s := CreateBackendWithStorage(t) var issuerCerts []*x509.Certificate var leafCerts []*x509.Certificate - var issuerIds []issuerID - var keyIds []keyID + var issuerIds []issuing.IssuerID + var keyIds []issuing.KeyID + + resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "ocsp_enable": true, + "ocsp_expiry": fmt.Sprintf("%ds", int(ocspExpiry.Seconds())), + }) + requireSuccessNonNilResponse(t, resp, err, "config/crl failed") for i := 0; i < 2; i++ { resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -633,8 +665,8 @@ func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, ca }) requireSuccessNonNilResponse(t, resp, err, "root/generate/internal") requireFieldsSetInResp(t, resp, "issuer_id", "key_id") - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) + issuerId := resp.Data["issuer_id"].(issuing.IssuerID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = CBWrite(b, s, "roles/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ "allow_bare_domains": true, @@ -711,19 +743,3 @@ func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*lo return resp, err } - -func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { - t.Helper() - - opts := &ocsp.RequestOptions{Hash: requestHash} - ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) - require.NoError(t, 
err, "Failed generating OCSP request") - return ocspRequestDer -} - -func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { - t.Helper() - - err := ocspResp.CheckSignatureFrom(issuer) - require.NoError(t, err, "Failed signature verification of ocsp response: %w", err) -} diff --git a/builtin/logical/pki/path_resign_crls.go b/builtin/logical/pki/path_resign_crls.go index 93b0837092ba..65563dc7f77a 100644 --- a/builtin/logical/pki/path_resign_crls.go +++ b/builtin/logical/pki/path_resign_crls.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -21,6 +21,8 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -43,6 +45,13 @@ var ( func pathResignCrls(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/resign-crls", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "resign", + OperationSuffix: "crls", + }, + Fields: map[string]*framework.FieldSchema{ issuerRefParam: { Type: framework.TypeString, @@ -65,7 +74,7 @@ to the issuer.`, Type: framework.TypeString, Description: `The amount of time the generated CRL should be valid; defaults to 72 hours.`, - Default: defaultCrlConfig.Expiry, + Default: pki_backend.DefaultCrlConfig.Expiry, }, crlsParam: { Type: framework.TypeStringSlice, @@ -105,6 +114,13 @@ base64 encoded. Defaults to "pem".`, func pathSignRevocationList(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-revocation-list", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "revocation-list", + }, + Fields: map[string]*framework.FieldSchema{ issuerRefParam: { Type: framework.TypeString, @@ -127,7 +143,7 @@ to the issuer.`, Type: framework.TypeString, Description: `The amount of time the generated CRL should be valid; defaults to 72 hours.`, - Default: defaultCrlConfig.Expiry, + Default: pki_backend.DefaultCrlConfig.Expiry, }, formatParam: { Type: framework.TypeString, @@ -171,11 +187,11 @@ return a signed CRL based on the parameter values.`, } func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil } - issuerRef := getIssuerRef(data) + issuerRef := GetIssuerRef(data) crlNumber := data.Get(crlNumberParam).(int) deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) nextUpdateStr := data.Get(nextUpdateParam).(string) @@ -238,7 +254,7 @@ func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logi if deltaCrlBaseNumber > -1 { ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) + return nil, fmt.Errorf("could not create crl delta indicator extension: %w", err) } template.ExtraExtensions = 
[]pkix.Extension{ext} } @@ -259,11 +275,11 @@ func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logi } func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil } - issuerRef := getIssuerRef(data) + issuerRef := GetIssuerRef(data) crlNumber := data.Get(crlNumberParam).(int) deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) nextUpdateStr := data.Get(nextUpdateParam).(string) @@ -311,7 +327,7 @@ func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, reque if deltaCrlBaseNumber > -1 { ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) + return nil, fmt.Errorf("could not create crl delta indicator extension: %w", err) } crlExtensions = append(crlExtensions, ext) } @@ -537,6 +553,10 @@ func parseSerialNum(cert map[string]interface{}) (*big.Int, error) { if !serialExists { return nil, errors.New("missing 'serial_number' field") } + return parseSerialNumStr(serialNumRaw) +} + +func parseSerialNumStr(serialNumRaw interface{}) (*big.Int, error) { serialNumStr, err := parseutil.ParseString(serialNumRaw) if err != nil { return nil, fmt.Errorf("'serial_number' field value was not a string: %w", err) @@ -635,7 +655,7 @@ func getCaBundle(sc *storageContext, issuerRef string) (*certutil.CAInfoBundle, return nil, fmt.Errorf("failed to resolve issuer %s: %w", issuerRefParam, err) } - return sc.fetchCAInfoByIssuerId(issuerId, CRLSigningUsage) + return sc.fetchCAInfoByIssuerId(issuerId, issuing.CRLSigningUsage) } func decodePemCrls(rawCrls []string) ([]*x509.RevocationList, error) { diff --git a/builtin/logical/pki/path_resign_crls_test.go b/builtin/logical/pki/path_resign_crls_test.go index f1ee1152c041..2b2e1ede137c 100644 --- a/builtin/logical/pki/path_resign_crls_test.go +++ b/builtin/logical/pki/path_resign_crls_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -13,13 +13,11 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/vault" - + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" "github.com/stretchr/testify/require" ) diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go index fa6f3b648700..f6c35e67f732 100644 --- a/builtin/logical/pki/path_revoke.go +++ b/builtin/logical/pki/path_revoke.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "context" + "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" @@ -16,10 +17,11 @@ import ( "strings" "time" - "github.com/hashicorp/vault/sdk/helper/consts" - + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -28,21 +30,14 @@ func pathListCertsRevoked(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/revoked/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "revoked-certs", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListRevokedCertsHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - Description: `List of Keys`, - Required: false, - }, - }, - }}, - }, }, }, @@ -55,6 +50,11 @@ func pathListCertsRevocationQueue(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/revocation-queue/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "certs-revocation-queue", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListRevocationQueueHandler, @@ -69,6 +69,12 @@ func pathListCertsRevocationQueue(b *backend) *framework.Path { func pathRevoke(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + }, + Fields: map[string]*framework.FieldSchema{ "serial_number": { Type: framework.TypeString, @@ -94,7 +100,7 @@ signed by an issuer in this mount.`, Description: "OK", Fields: map[string]*framework.FieldSchema{ "revocation_time": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Description: `Revocation Time`, Required: false, }, @@ -122,6 +128,13 @@ signed by an issuer in this mount.`, func pathRevokeWithKey(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke-with-key`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + OperationSuffix: "with-key", + }, + Fields: map[string]*framework.FieldSchema{ "serial_number": { Type: framework.TypeString, @@ -152,7 +165,7 @@ be in PEM format.`, Description: "OK", Fields: map[string]*framework.FieldSchema{ "revocation_time": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Description: `Revocation Time`, Required: false, }, @@ -181,6 +194,12 @@ func pathRotateCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl/rotate`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "crl", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRotateCRLRead, @@ -212,6 +231,12 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl/rotate-delta`, + DisplayAttrs: 
&framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "delta-crl", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRotateDeltaCRLRead, @@ -243,6 +268,11 @@ func pathListUnifiedRevoked(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/unified-revoked/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "unified-revoked-certs", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListUnifiedRevokedCertsHandler, @@ -277,7 +307,7 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log // // We return the parsed serial number, an optionally-nil byte array to // write out to disk, and an error if one occurred. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { // We require listing all issuers from the 1.11 method. If we're // still using the legacy CA bundle but with the newer certificate // attribute, we err and require the operator to upgrade and migrate @@ -317,7 +347,7 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log // Start with the latter since its cheaper. Fetch the cert (by serial) // and if it exists, compare the contents. sc := b.makeStorageContext(ctx, req.Storage) - certEntry, err := fetchCertBySerial(sc, "certs/", serial) + certEntry, err := fetchCertBySerial(sc, issuing.PathCerts, serial) if err != nil { return serial, false, nil, err } @@ -406,6 +436,28 @@ func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference * return fmt.Errorf("failed to parse provided private key: %w", err) } + return validatePrivateKeyMatchesCert(signer, certReference) +} + +func validatePrivateKeyMatchesCert(signer crypto.Signer, certReference *x509.Certificate) error { + public := signer.Public() + + switch certReference.PublicKey.(type) { + case *rsa.PublicKey: + rsaPriv, ok := signer.(*rsa.PrivateKey) + if !ok { + return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} + } + + if err := rsaPriv.Validate(); err != nil { + return errutil.UserError{Err: fmt.Sprintf("error validating integrity of private key: %v", err)} + } + } + + return validatePublicKeyMatchesCert(public, certReference) +} + +func validatePublicKeyMatchesCert(verifier crypto.PublicKey, certReference *x509.Certificate) error { // Finally, verify if the cert and key match. This code has been // cribbed from the Go TLS config code, with minor modifications. // @@ -413,22 +465,18 @@ func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference * // components and ensure we validate exponent and curve information // as well. 
// - // // See: https://github.com/golang/go/blob/c6a2dada0df8c2d75cf3ae599d7caed77d416fa2/src/crypto/tls/tls.go#L304-L331 switch certPub := certReference.PublicKey.(type) { case *rsa.PublicKey: - privPub, ok := signer.Public().(*rsa.PublicKey) + privPub, ok := verifier.(*rsa.PublicKey) if !ok { return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} } - if err := signer.(*rsa.PrivateKey).Validate(); err != nil { - return err - } if certPub.N.Cmp(privPub.N) != 0 || certPub.E != privPub.E { return errutil.UserError{Err: "provided private key does not match certificate's public key"} } case *ecdsa.PublicKey: - privPub, ok := signer.Public().(*ecdsa.PublicKey) + privPub, ok := verifier.(*ecdsa.PublicKey) if !ok { return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} } @@ -436,7 +484,7 @@ func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference * return errutil.UserError{Err: "provided private key does not match certificate's public key"} } case ed25519.PublicKey: - privPub, ok := signer.Public().(ed25519.PublicKey) + privPub, ok := verifier.(ed25519.PublicKey) if !ok { return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} } @@ -450,7 +498,7 @@ func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference * return nil } -func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *crlConfig, serial string, havePrivateKey bool) (*logical.Response, error) { +func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *pki_backend.CrlConfig, serial string, havePrivateKey bool) (*logical.Response, error) { if !config.UseGlobalQueue { return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found.", serial)), nil } @@ -475,7 +523,7 @@ func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *crlConfig, } if err := sc.Storage.Put(sc.Context, reqEntry); err != nil { - return nil, fmt.Errorf("error persisting cross-cluster revocation request: %w\nThis may occur when the active node of the primary performance replication cluster is unavailable.", err) + return nil, fmt.Errorf("error persisting cross-cluster revocation request: %w", err) } resp := &logical.Response{ @@ -487,7 +535,7 @@ func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *crlConfig, return resp, nil } -func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *roleEntry) (*logical.Response, error) { +func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *issuing.RoleEntry) (*logical.Response, error) { rawSerial, haveSerial := data.GetOk("serial_number") rawCertificate, haveCert := data.GetOk("certificate") sc := b.makeStorageContext(ctx, req.Storage) @@ -516,7 +564,7 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat var cert *x509.Certificate var serial string - config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + config, err := sc.CrlBuilder().GetConfigWithUpdate(sc) if err != nil { return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) } @@ -533,7 +581,7 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat return logical.ErrorResponse("The serial number must be provided"), nil } - certEntry, err := fetchCertBySerial(sc, "certs/", serial) + certEntry, err := fetchCertBySerial(sc, 
issuing.PathCerts, serial) if err != nil { switch err.(type) { case errutil.UserError: @@ -585,7 +633,7 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat // disk. if writeCert { err := req.Storage.Put(ctx, &logical.StorageEntry{ - Key: "certs/" + normalizeSerial(serial), + Key: issuing.PathCerts + normalizeSerial(serial), Value: cert.Raw, }) if err != nil { @@ -600,18 +648,18 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat return nil, logical.ErrReadOnly } - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() return revokeCert(sc, config, cert) } func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - b.revokeStorageLock.RLock() - defer b.revokeStorageLock.RUnlock() + b.GetRevokeStorageLock().RLock() + defer b.GetRevokeStorageLock().RUnlock() sc := b.makeStorageContext(ctx, req.Storage) - crlErr := b.crlBuilder.rebuild(sc, false) + warnings, crlErr := b.CrlBuilder().Rebuild(sc, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -621,24 +669,30 @@ func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ } } - return &logical.Response{ + resp := &logical.Response{ Data: map[string]interface{}{ "success": true, }, - }, nil + } + + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + return resp, nil } func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, req.Storage) - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) + cfg, err := b.CrlBuilder().GetConfigWithUpdate(sc) if err != nil { return nil, fmt.Errorf("error fetching CRL configuration: %w", err) } isEnabled := cfg.EnableDelta - crlErr := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, true) + warnings, crlErr := b.CrlBuilder().rebuildDeltaCRLsIfForced(sc, true) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -657,6 +711,9 @@ func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Reque if !isEnabled { resp.AddWarning("requested rebuild of delta CRL when delta CRL is not enabled; this is a no-op") } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } return resp, nil } diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go index 1fd2cc5826fc..4416203f8ff8 100644 --- a/builtin/logical/pki/path_roles.go +++ b/builtin/logical/pki/path_roles.go @@ -1,18 +1,18 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "context" - "crypto/x509" "encoding/json" + "errors" "fmt" "net/http" "strings" "time" - "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -24,21 +24,14 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "roles", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeMap, - Description: `List of keys`, - Required: false, - }, - }, - }}, - }, }, }, @@ -48,9 +41,9 @@ func pathListRoles(b *backend) *framework.Path { } func pathRoles(b *backend) *framework.Path { - pathRolesResponse := map[string]*framework.FieldSchema{ + pathRolesResponseFields := map[string]*framework.FieldSchema{ "ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: `The lease duration (validity period of the certificate) if no specific lease duration is requested. @@ -60,7 +53,7 @@ value or the value of max_ttl, whichever is shorter.`, }, "max_ttl": { - Type: framework.TypeDurationSecond, + Type: framework.TypeInt64, Required: true, Description: `The maximum allowed lease duration. If not set, defaults to the system maximum lease TTL.`, @@ -379,8 +372,8 @@ information, which must include an oid, and may include a notice and/or cps url, Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, }, "not_before_duration": { - Type: framework.TypeDurationSecond, - Description: `The duration before now which the certificate needs to be backdated by.`, + Type: framework.TypeInt64, + Description: `The duration in seconds before now which the certificate needs to be backdated by.`, }, "not_after": { Type: framework.TypeString, @@ -394,9 +387,17 @@ serviced by this role.`, }, } + issuing.AddNoStoreMetadataRoleField(pathRolesResponseFields) + return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "role", + }, + + Fields: issuing.AddNoStoreMetadataRoleField(map[string]*framework.FieldSchema{ "backend": { Type: framework.TypeString, Description: "Backend Type", @@ -807,7 +808,7 @@ The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.`, serviced by this role.`, Default: defaultRef, }, - }, + }), Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ @@ -815,7 +816,7 @@ serviced by this role.`, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", - Fields: pathRolesResponse, + Fields: pathRolesResponseFields, }}, }, }, @@ -824,7 +825,7 @@ serviced by this role.`, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", - Fields: pathRolesResponse, + Fields: pathRolesResponseFields, }}, }, // Read more about why these flags are set in backend.go. 
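The role path above now builds its read, update, and patch response schemas from one shared field map (`pathRolesResponseFields`) instead of per-operation copies. As a minimal sketch of that pattern, assuming the Vault plugin SDK (`sdk/framework`); the path pattern, field set, and callbacks below are illustrative stand-ins, not part of this change:

```
package pki

import (
	"net/http"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// examplePathWithSharedSchema is a hypothetical path whose operations all
// advertise the same response schema, so the documented shapes cannot drift.
func examplePathWithSharedSchema(b *backend) *framework.Path {
	// Declared once, referenced from every operation's Responses block.
	sharedResponseFields := map[string]*framework.FieldSchema{
		"ttl": {
			Type:        framework.TypeInt64, // seconds, mirroring the TypeDurationSecond -> TypeInt64 change above
			Description: "Lease duration in seconds",
			Required:    true,
		},
	}

	okResponse := map[int][]framework.Response{
		http.StatusOK: {{Description: "OK", Fields: sharedResponseFields}},
	}

	return &framework.Path{
		Pattern: "example/" + framework.GenericNameRegex("name"),
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation:   &framework.PathOperation{Callback: b.pathRoleRead, Responses: okResponse},
			logical.UpdateOperation: &framework.PathOperation{Callback: b.pathRoleCreate, Responses: okResponse},
		},
	}
}
```

Because the map is shared by reference, a field that is added or retyped once (for example, `ttl` moving from `TypeDurationSecond` to `TypeInt64`) shows up in every operation's generated OpenAPI output at the same time.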
@@ -847,7 +848,7 @@ serviced by this role.`, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", - Fields: pathRolesResponse, + Fields: pathRolesResponseFields, }}, }, // Read more about why these flags are set in backend.go. @@ -861,137 +862,26 @@ serviced by this role.`, } } -func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+n) +// GetRole loads a role from storage and validates it, returning an error if +// the stored entry is invalid; any upgrades applied while loading are stored back when possible. +// If the role does not exist, a nil, nil response is returned. +func (b *backend) GetRole(ctx context.Context, s logical.Storage, n string) (*issuing.RoleEntry, error) { + result, err := issuing.GetRole(ctx, s, n) if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result roleEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - // Migrate existing saved entries and save back if changed - modified := false - if len(result.DeprecatedTTL) == 0 && len(result.Lease) != 0 { - result.DeprecatedTTL = result.Lease - result.Lease = "" - modified = true - } - if result.TTL == 0 && len(result.DeprecatedTTL) != 0 { - parsed, err := parseutil.ParseDurationSecond(result.DeprecatedTTL) - if err != nil { - return nil, err - } - result.TTL = parsed - result.DeprecatedTTL = "" - modified = true - } - if len(result.DeprecatedMaxTTL) == 0 && len(result.LeaseMax) != 0 { - result.DeprecatedMaxTTL = result.LeaseMax - result.LeaseMax = "" - modified = true - } - if result.MaxTTL == 0 && len(result.DeprecatedMaxTTL) != 0 { - parsed, err := parseutil.ParseDurationSecond(result.DeprecatedMaxTTL) - if err != nil { - return nil, err + if errors.Is(err, issuing.ErrRoleNotFound) { + return nil, nil } - result.MaxTTL = parsed - result.DeprecatedMaxTTL = "" - modified = true - } - if result.AllowBaseDomain { - result.AllowBaseDomain = false - result.AllowBareDomains = true - modified = true - } - if result.AllowedDomainsOld != "" { - result.AllowedDomains = strings.Split(result.AllowedDomainsOld, ",") - result.AllowedDomainsOld = "" - modified = true - } - if result.AllowedBaseDomain != "" { - found := false - for _, v := range result.AllowedDomains { - if v == result.AllowedBaseDomain { - found = true - break - } - } - if !found { - result.AllowedDomains = append(result.AllowedDomains, result.AllowedBaseDomain) - } - result.AllowedBaseDomain = "" - modified = true - } - if result.AllowWildcardCertificates == nil { - // While not the most secure default, when AllowWildcardCertificates isn't - // explicitly specified in the stored Role, we automatically upgrade it to - // true to preserve compatibility with previous versions of Vault. Once this - // field is set, this logic will not be triggered any more. - result.AllowWildcardCertificates = new(bool) - *result.AllowWildcardCertificates = true - modified = true - } - - // Upgrade generate_lease in role - if result.GenerateLease == nil { - // All the new roles will have GenerateLease always set to a value. A - // nil value indicates that this role needs an upgrade. Set it to - // `true` to not alter its current behavior. 
- result.GenerateLease = new(bool) - *result.GenerateLease = true - modified = true - } - - // Upgrade key usages - if result.KeyUsageOld != "" { - result.KeyUsage = strings.Split(result.KeyUsageOld, ",") - result.KeyUsageOld = "" - modified = true - } - - // Upgrade OU - if result.OUOld != "" { - result.OU = strings.Split(result.OUOld, ",") - result.OUOld = "" - modified = true - } - - // Upgrade Organization - if result.OrganizationOld != "" { - result.Organization = strings.Split(result.OrganizationOld, ",") - result.OrganizationOld = "" - modified = true - } - - // Set the issuer field to default if not set. We want to do this - // unconditionally as we should probably never have an empty issuer - // on a stored roles. - if len(result.Issuer) == 0 { - result.Issuer = defaultRef - modified = true - } - - // Update CN Validations to be the present default, "email,hostname" - if len(result.CNValidations) == 0 { - result.CNValidations = []string{"email", "hostname"} - modified = true + return nil, err } // Ensure the role is valid after updating. - _, err = validateRole(b, &result, ctx, s) + _, err = validateRole(b, result, ctx, s) if err != nil { return nil, err } - if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result) + if result.WasModified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + jsonEntry, err := logical.StorageEntryJSON("role/"+n, result) if err != nil { return nil, err } @@ -1001,9 +891,10 @@ func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*ro return nil, err } } + result.WasModified = false } - return &result, nil + return result, nil } func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -1021,7 +912,7 @@ func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data * return logical.ErrorResponse("missing role name"), nil } - role, err := b.getRole(ctx, req.Storage, roleName) + role, err := b.GetRole(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -1048,7 +939,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data var err error name := data.Get("name").(string) - entry := &roleEntry{ + entry := &issuing.RoleEntry{ MaxTTL: time.Duration(data.Get("max_ttl").(int)) * time.Second, TTL: time.Duration(data.Get("ttl").(int)) * time.Second, AllowLocalhost: data.Get("allow_localhost").(bool), @@ -1085,6 +976,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data PostalCode: data.Get("postal_code").([]string), GenerateLease: new(bool), NoStore: data.Get("no_store").(bool), + NoStoreMetadata: issuing.GetNoStoreMetadata(data), RequireCN: data.Get("require_cn").(bool), CNValidations: data.Get("cn_validations").([]string), AllowedSerialNumbers: data.Get("allowed_serial_numbers").([]string), @@ -1094,6 +986,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, NotAfter: data.Get("not_after").(string), Issuer: data.Get("issuer_ref").(string), + Name: name, } allowedOtherSANs := data.Get("allowed_other_sans").([]string) @@ -1154,7 +1047,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data return resp, nil } -func validateRole(b *backend, 
entry *roleEntry, ctx context.Context, s logical.Storage) (*logical.Response, error) { +func validateRole(b *backend, entry *issuing.RoleEntry, ctx context.Context, s logical.Storage) (*logical.Response, error) { resp := &logical.Response{} var err error @@ -1192,11 +1085,11 @@ func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.S entry.Issuer = defaultRef } // Check that the issuers reference set resolves to something - if !b.useLegacyBundleCaStorage() { + if !b.UseLegacyBundleCaStorage() { sc := b.makeStorageContext(ctx, s) issuerId, err := sc.resolveIssuerReference(entry.Issuer) if err != nil { - if issuerId == IssuerRefNotFound { + if issuerId == issuing.IssuerRefNotFound { resp = &logical.Response{} if entry.Issuer == defaultRef { resp.AddWarning("Issuing Certificate was set to default, but no default issuing certificate (configurable at /config/issuers) is currently set") @@ -1239,7 +1132,7 @@ func getTimeWithExplicitDefault(data *framework.FieldData, field string, default func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { name := data.Get("name").(string) - oldEntry, err := b.getRole(ctx, req.Storage, name) + oldEntry, err := b.GetRole(ctx, req.Storage, name) if err != nil { return nil, err } @@ -1247,7 +1140,7 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data return logical.ErrorResponse("Unable to fetch role entry to patch"), nil } - entry := &roleEntry{ + entry := &issuing.RoleEntry{ MaxTTL: getTimeWithExplicitDefault(data, "max_ttl", oldEntry.MaxTTL), TTL: getTimeWithExplicitDefault(data, "ttl", oldEntry.TTL), AllowLocalhost: getWithExplicitDefault(data, "allow_localhost", oldEntry.AllowLocalhost).(bool), @@ -1284,6 +1177,7 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data PostalCode: getWithExplicitDefault(data, "postal_code", oldEntry.PostalCode).([]string), GenerateLease: new(bool), NoStore: getWithExplicitDefault(data, "no_store", oldEntry.NoStore).(bool), + NoStoreMetadata: issuing.NoStoreMetadataValue(getWithExplicitDefault(data, "no_store_metadata", oldEntry.NoStoreMetadata).(bool)), RequireCN: getWithExplicitDefault(data, "require_cn", oldEntry.RequireCN).(bool), CNValidations: getWithExplicitDefault(data, "cn_validations", oldEntry.CNValidations).([]string), AllowedSerialNumbers: getWithExplicitDefault(data, "allowed_serial_numbers", oldEntry.AllowedSerialNumbers).([]string), @@ -1361,204 +1255,6 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data return resp, nil } -func parseKeyUsages(input []string) int { - var parsedKeyUsages x509.KeyUsage - for _, k := range input { - switch strings.ToLower(strings.TrimSpace(k)) { - case "digitalsignature": - parsedKeyUsages |= x509.KeyUsageDigitalSignature - case "contentcommitment": - parsedKeyUsages |= x509.KeyUsageContentCommitment - case "keyencipherment": - parsedKeyUsages |= x509.KeyUsageKeyEncipherment - case "dataencipherment": - parsedKeyUsages |= x509.KeyUsageDataEncipherment - case "keyagreement": - parsedKeyUsages |= x509.KeyUsageKeyAgreement - case "certsign": - parsedKeyUsages |= x509.KeyUsageCertSign - case "crlsign": - parsedKeyUsages |= x509.KeyUsageCRLSign - case "encipheronly": - parsedKeyUsages |= x509.KeyUsageEncipherOnly - case "decipheronly": - parsedKeyUsages |= x509.KeyUsageDecipherOnly - } - } - - return int(parsedKeyUsages) -} - -func parseExtKeyUsages(role *roleEntry) certutil.CertExtKeyUsage { - var 
parsedKeyUsages certutil.CertExtKeyUsage - - if role.ServerFlag { - parsedKeyUsages |= certutil.ServerAuthExtKeyUsage - } - - if role.ClientFlag { - parsedKeyUsages |= certutil.ClientAuthExtKeyUsage - } - - if role.CodeSigningFlag { - parsedKeyUsages |= certutil.CodeSigningExtKeyUsage - } - - if role.EmailProtectionFlag { - parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage - } - - for _, k := range role.ExtKeyUsage { - switch strings.ToLower(strings.TrimSpace(k)) { - case "any": - parsedKeyUsages |= certutil.AnyExtKeyUsage - case "serverauth": - parsedKeyUsages |= certutil.ServerAuthExtKeyUsage - case "clientauth": - parsedKeyUsages |= certutil.ClientAuthExtKeyUsage - case "codesigning": - parsedKeyUsages |= certutil.CodeSigningExtKeyUsage - case "emailprotection": - parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage - case "ipsecendsystem": - parsedKeyUsages |= certutil.IpsecEndSystemExtKeyUsage - case "ipsectunnel": - parsedKeyUsages |= certutil.IpsecTunnelExtKeyUsage - case "ipsecuser": - parsedKeyUsages |= certutil.IpsecUserExtKeyUsage - case "timestamping": - parsedKeyUsages |= certutil.TimeStampingExtKeyUsage - case "ocspsigning": - parsedKeyUsages |= certutil.OcspSigningExtKeyUsage - case "microsoftservergatedcrypto": - parsedKeyUsages |= certutil.MicrosoftServerGatedCryptoExtKeyUsage - case "netscapeservergatedcrypto": - parsedKeyUsages |= certutil.NetscapeServerGatedCryptoExtKeyUsage - } - } - - return parsedKeyUsages -} - -type roleEntry struct { - LeaseMax string `json:"lease_max"` - Lease string `json:"lease"` - DeprecatedMaxTTL string `json:"max_ttl"` - DeprecatedTTL string `json:"ttl"` - TTL time.Duration `json:"ttl_duration"` - MaxTTL time.Duration `json:"max_ttl_duration"` - AllowLocalhost bool `json:"allow_localhost"` - AllowedBaseDomain string `json:"allowed_base_domain"` - AllowedDomainsOld string `json:"allowed_domains,omitempty"` - AllowedDomains []string `json:"allowed_domains_list"` - AllowedDomainsTemplate bool `json:"allowed_domains_template"` - AllowBaseDomain bool `json:"allow_base_domain"` - AllowBareDomains bool `json:"allow_bare_domains"` - AllowTokenDisplayName bool `json:"allow_token_displayname"` - AllowSubdomains bool `json:"allow_subdomains"` - AllowGlobDomains bool `json:"allow_glob_domains"` - AllowWildcardCertificates *bool `json:"allow_wildcard_certificates,omitempty"` - AllowAnyName bool `json:"allow_any_name"` - EnforceHostnames bool `json:"enforce_hostnames"` - AllowIPSANs bool `json:"allow_ip_sans"` - ServerFlag bool `json:"server_flag"` - ClientFlag bool `json:"client_flag"` - CodeSigningFlag bool `json:"code_signing_flag"` - EmailProtectionFlag bool `json:"email_protection_flag"` - UseCSRCommonName bool `json:"use_csr_common_name"` - UseCSRSANs bool `json:"use_csr_sans"` - KeyType string `json:"key_type"` - KeyBits int `json:"key_bits"` - UsePSS bool `json:"use_pss"` - SignatureBits int `json:"signature_bits"` - MaxPathLength *int `json:",omitempty"` - KeyUsageOld string `json:"key_usage,omitempty"` - KeyUsage []string `json:"key_usage_list"` - ExtKeyUsage []string `json:"extended_key_usage_list"` - OUOld string `json:"ou,omitempty"` - OU []string `json:"ou_list"` - OrganizationOld string `json:"organization,omitempty"` - Organization []string `json:"organization_list"` - Country []string `json:"country"` - Locality []string `json:"locality"` - Province []string `json:"province"` - StreetAddress []string `json:"street_address"` - PostalCode []string `json:"postal_code"` - GenerateLease *bool `json:"generate_lease,omitempty"` - NoStore 
bool `json:"no_store"` - RequireCN bool `json:"require_cn"` - CNValidations []string `json:"cn_validations"` - AllowedOtherSANs []string `json:"allowed_other_sans"` - AllowedSerialNumbers []string `json:"allowed_serial_numbers"` - AllowedUserIDs []string `json:"allowed_user_ids"` - AllowedURISANs []string `json:"allowed_uri_sans"` - AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` - PolicyIdentifiers []string `json:"policy_identifiers"` - ExtKeyUsageOIDs []string `json:"ext_key_usage_oids"` - BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca"` - NotBeforeDuration time.Duration `json:"not_before_duration"` - NotAfter string `json:"not_after"` - Issuer string `json:"issuer"` -} - -func (r *roleEntry) ToResponseData() map[string]interface{} { - responseData := map[string]interface{}{ - "ttl": int64(r.TTL.Seconds()), - "max_ttl": int64(r.MaxTTL.Seconds()), - "allow_localhost": r.AllowLocalhost, - "allowed_domains": r.AllowedDomains, - "allowed_domains_template": r.AllowedDomainsTemplate, - "allow_bare_domains": r.AllowBareDomains, - "allow_token_displayname": r.AllowTokenDisplayName, - "allow_subdomains": r.AllowSubdomains, - "allow_glob_domains": r.AllowGlobDomains, - "allow_wildcard_certificates": r.AllowWildcardCertificates, - "allow_any_name": r.AllowAnyName, - "allowed_uri_sans_template": r.AllowedURISANsTemplate, - "enforce_hostnames": r.EnforceHostnames, - "allow_ip_sans": r.AllowIPSANs, - "server_flag": r.ServerFlag, - "client_flag": r.ClientFlag, - "code_signing_flag": r.CodeSigningFlag, - "email_protection_flag": r.EmailProtectionFlag, - "use_csr_common_name": r.UseCSRCommonName, - "use_csr_sans": r.UseCSRSANs, - "key_type": r.KeyType, - "key_bits": r.KeyBits, - "signature_bits": r.SignatureBits, - "use_pss": r.UsePSS, - "key_usage": r.KeyUsage, - "ext_key_usage": r.ExtKeyUsage, - "ext_key_usage_oids": r.ExtKeyUsageOIDs, - "ou": r.OU, - "organization": r.Organization, - "country": r.Country, - "locality": r.Locality, - "province": r.Province, - "street_address": r.StreetAddress, - "postal_code": r.PostalCode, - "no_store": r.NoStore, - "allowed_other_sans": r.AllowedOtherSANs, - "allowed_serial_numbers": r.AllowedSerialNumbers, - "allowed_user_ids": r.AllowedUserIDs, - "allowed_uri_sans": r.AllowedURISANs, - "require_cn": r.RequireCN, - "cn_validations": r.CNValidations, - "policy_identifiers": r.PolicyIdentifiers, - "basic_constraints_valid_for_non_ca": r.BasicConstraintsValidForNonCA, - "not_before_duration": int64(r.NotBeforeDuration.Seconds()), - "not_after": r.NotAfter, - "issuer_ref": r.Issuer, - } - if r.MaxPathLength != nil { - responseData["max_path_length"] = r.MaxPathLength - } - if r.GenerateLease != nil { - responseData["generate_lease"] = r.GenerateLease - } - return responseData -} - func checkCNValidations(validations []string) ([]string, error) { var haveDisabled bool var haveEmail bool diff --git a/builtin/logical/pki/path_roles_test.go b/builtin/logical/pki/path_roles_test.go index fb729e83619b..3b3d911c8fbd 100644 --- a/builtin/logical/pki/path_roles_test.go +++ b/builtin/logical/pki/path_roles_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -12,10 +12,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/go-errors/errors" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -70,7 +70,7 @@ func TestPki_RoleGenerateLease(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -146,14 +146,14 @@ func TestPki_RoleKeyUsage(t *testing.T) { } resp, err = b.HandleRequest(context.Background(), roleReq) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("roles/testrole"), logical.UpdateOperation), resp, true) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.UpdateOperation), resp, true) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v resp: %#v", err, resp) } roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("roles/testrole"), logical.ReadOperation), resp, true) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.ReadOperation), resp, true) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v resp: %#v", err, resp) } @@ -171,7 +171,7 @@ func TestPki_RoleKeyUsage(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -206,7 +206,7 @@ func TestPki_RoleKeyUsage(t *testing.T) { if entry == nil { t.Fatalf("role should not be nil") } - var result roleEntry + var result issuing.RoleEntry if err := entry.DecodeJSON(&result); err != nil { t.Fatalf("err: %v", err) } @@ -266,7 +266,7 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -306,7 +306,7 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { if entry == nil { t.Fatalf("role should not be nil") } - var result roleEntry + var result issuing.RoleEntry if err := entry.DecodeJSON(&result); err != nil { t.Fatalf("err: %v", err) } @@ -366,7 +366,7 @@ func TestPki_RoleAllowedDomains(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -400,7 +400,7 @@ func TestPki_RoleAllowedDomains(t *testing.T) { if entry == nil { t.Fatalf("role should not be nil") } - var result roleEntry + var result issuing.RoleEntry if err := entry.DecodeJSON(&result); err != nil { t.Fatalf("err: %v", err) } diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index abcc0672b238..625bcb2946e3 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -20,22 +20,38 @@ import ( "strings" "time" - "golang.org/x/crypto/ed25519" - - "github.com/hashicorp/vault/sdk/helper/certutil" - + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ed25519" ) +const intCaTruncatationWarning = "the signed intermediary CA certificate's notAfter was truncated to the issuer's notAfter" + func pathGenerateRoot(b *backend) *framework.Path { - return buildPathGenerateRoot(b, "root/generate/"+framework.GenericNameRegex("exported")) + pattern := "root/generate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) } func pathDeleteRoot(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "root", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathCADeleteRoot, @@ -64,7 +80,7 @@ func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ defer b.issuersLock.Unlock() sc := b.makeStorageContext(ctx, req.Storage) - if !b.useLegacyBundleCaStorage() { + if !b.UseLegacyBundleCaStorage() { issuers, err := sc.listIssuers() if err != nil { return nil, err @@ -118,7 +134,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, var err error - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not create root CA until migration has completed"), nil } @@ -271,22 +287,19 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, // Also store it as just the certificate identified by serial number, so it // can be revoked - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, - Value: parsedBundle.CertificateBytes, - }) + err = issuing.StoreCertificate(ctx, req.Storage, b.GetCertificateCounter(), parsedBundle) if err != nil { - return nil, fmt.Errorf("unable to store certificate locally: %w", err) + return nil, err } - b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) // Build a fresh CRL - err = b.crlBuilder.rebuild(sc, true) + warnings, err = b.CrlBuilder().Rebuild(sc, true) if err != nil { return nil, err } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the generated certificate is zero. 
This certificate cannot be used to issue intermediate CA certificates.") @@ -310,19 +323,17 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { var err error - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } format := getFormat(data) if format == "" { - return logical.ErrorResponse( - `The "format" path parameter must be "pem" or "der"`, - ), nil + return logical.ErrorResponse(`The "format" path parameter must be "pem", "der" or "pem_bundle"`), nil } - role := &roleEntry{ + role := &issuing.RoleEntry{ OU: data.Get("ou").([]string), Organization: data.Get("organization").([]string), Country: data.Get("country").([]string), @@ -345,6 +356,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R NotAfter: data.Get("not_after").(string), NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, CNValidations: []string{"disabled"}, + KeyUsage: data.Get("key_usage").([]string), } *role.AllowWildcardCertificates = true @@ -354,7 +366,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R var caErr error sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, issuing.IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -366,10 +378,14 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R } } - // Since we are signing an intermediate, we explicitly want to override - // the leaf NotAfterBehavior to permit issuing intermediates longer than - // the life of this issuer. - signingBundle.LeafNotAfterBehavior = certutil.PermitNotAfterBehavior + warnAboutTruncate := false + if enforceLeafNotAfter := data.Get("enforce_leaf_not_after_behavior").(bool); !enforceLeafNotAfter { + // Since we are signing an intermediate, we will by default truncate the + // signed intermediary in order to generate a valid intermediary chain. 
This + // was changed in 1.17.x, as the prior default was PermitNotAfterBehavior. + warnAboutTruncate = true + signingBundle.LeafNotAfterBehavior = certutil.TruncateNotAfterBehavior + } useCSRValues := data.Get("use_csr_values").(bool) @@ -384,7 +400,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R apiData: data, role: role, } - parsedBundle, warnings, err := signCert(b, input, signingBundle, true, useCSRValues) + parsedBundle, warnings, err := signCert(b.System(), input, signingBundle, true, useCSRValues) if err != nil { switch err.(type) { case errutil.UserError: @@ -399,6 +415,25 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R return nil, fmt.Errorf("verification of parsed bundle failed: %w", err) } + resp, err := signIntermediateResponse(signingBundle, parsedBundle, format, warnings) + if err != nil { + return nil, err + } + + err = issuing.StoreCertificate(ctx, req.Storage, b.GetCertificateCounter(), parsedBundle) + if err != nil { + return nil, err + } + + if warnAboutTruncate && + signingBundle.Certificate.NotAfter.Equal(parsedBundle.Certificate.NotAfter) { + resp.AddWarning(intCaTruncatationWarning) + } + + return resp, nil +} + +func signIntermediateResponse(signingBundle *certutil.CAInfoBundle, parsedBundle *certutil.ParsedCertBundle, format string, warnings []string) (*logical.Response, error) { signingCB, err := signingBundle.ToCertBundle() if err != nil { return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) @@ -468,40 +503,22 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R return nil, fmt.Errorf("unsupported format argument: %s", format) } - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, - Value: parsedBundle.CertificateBytes, - }) - if err != nil { - return nil, fmt.Errorf("unable to store certificate locally: %w", err) - } - b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) - if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the signed certificate is zero. 
This certificate cannot be used to issue intermediate CA certificates.") } resp = addWarnings(resp, warnings) - return resp, nil } func (b *backend) pathIssuerSignSelfIssued(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - var err error - - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } certPem := data.Get("certificate").(string) - block, _ := pem.Decode([]byte(certPem)) - if block == nil || len(block.Bytes) == 0 { - return logical.ErrorResponse("certificate could not be PEM-decoded"), nil - } - certs, err := x509.ParseCertificates(block.Bytes) + certs, err := parsing.ParseCertificatesFromString(certPem) if err != nil { return logical.ErrorResponse(fmt.Sprintf("error parsing certificate: %s", err)), nil } @@ -517,9 +534,8 @@ func (b *backend) pathIssuerSignSelfIssued(ctx context.Context, req *logical.Req return logical.ErrorResponse("given certificate is not self-issued"), nil } - var caErr error sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, issuing.IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: diff --git a/builtin/logical/pki/path_sign_issuers.go b/builtin/logical/pki/path_sign_issuers.go index 0e478834d353..b620ac4bbfb0 100644 --- a/builtin/logical/pki/path_sign_issuers.go +++ b/builtin/logical/pki/path_sign_issuers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -12,19 +12,39 @@ import ( func pathIssuerSignIntermediate(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-intermediate" - return buildPathIssuerSignIntermediateRaw(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "intermediate", + } + + return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) } func pathSignIntermediate(b *backend) *framework.Path { pattern := "root/sign-intermediate" - return buildPathIssuerSignIntermediateRaw(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIRoot, + OperationVerb: "sign", + OperationSuffix: "intermediate", + } + + return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) } -func buildPathIssuerSignIntermediateRaw(b *backend, pattern string) *framework.Path { +func buildPathIssuerSignIntermediateRaw(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := addIssuerRefField(map[string]*framework.FieldSchema{}) + fields["enforce_leaf_not_after_behavior"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: "Do not truncate the NotAfter field, use the issuer's configured leaf_not_after_behavior", + } path := &framework.Path{ - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssuerSignIntermediate, @@ -40,7 +60,7 @@ func buildPathIssuerSignIntermediateRaw(b *backend, pattern string) *framework.P "serial_number": { Type: framework.TypeString, Description: `Serial Number`, - Required: false, + Required: true, }, 
"certificate": { Type: framework.TypeString, @@ -90,7 +110,7 @@ certs signed by this path; for instance, the non-repudiation flag; 3) Extensions requested in the CSR will be copied into the issued certificate.`, - } + } // TODO: Re-Write This (!) fields["signature_bits"] = &framework.FieldSchema{ Type: framework.TypeInt, @@ -129,6 +149,8 @@ in the above RFC section.`, RSA key-type issuer. Defaults to false.`, } + fields = addCACertKeyUsage(fields) + return path } @@ -150,15 +172,29 @@ See the API documentation for more information about required parameters. func pathIssuerSignSelfIssued(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-self-issued" - return buildPathIssuerSignSelfIssued(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "self-issued", + } + + return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) } func pathSignSelfIssued(b *backend) *framework.Path { pattern := "root/sign-self-issued" - return buildPathIssuerSignSelfIssued(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIRoot, + OperationVerb: "sign", + OperationSuffix: "self-issued", + } + + return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) } -func buildPathIssuerSignSelfIssued(b *backend, pattern string) *framework.Path { +func buildPathIssuerSignSelfIssued(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := map[string]*framework.FieldSchema{ "certificate": { Type: framework.TypeString, @@ -172,8 +208,9 @@ func buildPathIssuerSignSelfIssued(b *backend, pattern string) *framework.Path { } fields = addIssuerRefField(fields) path := &framework.Path{ - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssuerSignSelfIssued, diff --git a/builtin/logical/pki/path_tidy.go b/builtin/logical/pki/path_tidy.go index 330f6ca97d1d..5e7a4b037681 100644 --- a/builtin/logical/pki/path_tidy.go +++ b/builtin/logical/pki/path_tidy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -8,13 +8,17 @@ import ( "crypto/x509" "errors" "fmt" + "math/rand/v2" "net/http" "sync/atomic" "time" "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -22,21 +26,25 @@ import ( var tidyCancelledError = errors.New("tidy operation cancelled") +//go:generate enumer -type=tidyStatusState -trimprefix=tidyStatus type tidyStatusState int const ( - tidyStatusInactive tidyStatusState = iota - tidyStatusStarted = iota - tidyStatusFinished = iota - tidyStatusError = iota - tidyStatusCancelling = iota - tidyStatusCancelled = iota + tidyStatusInactive tidyStatusState = iota + tidyStatusStarted + tidyStatusFinished + tidyStatusError + tidyStatusCancelling + tidyStatusCancelled ) type tidyStatus struct { // Parameters used to initiate the operation - safetyBuffer int - issuerSafetyBuffer int + safetyBuffer int + issuerSafetyBuffer int + revQueueSafetyBuffer int + acmeAccountSafetyBuffer int + tidyCertStore bool tidyRevokedCerts bool tidyRevokedAssocs bool @@ -44,6 +52,9 @@ type tidyStatus struct { tidyBackupBundle bool tidyRevocationQueue bool tidyCrossRevokedCerts bool + tidyAcme bool + tidyCertMetadata bool + tidyCMPV2NonceStore bool pauseDuration string // Status @@ -60,48 +71,101 @@ type tidyStatus struct { missingIssuerCertCount uint revQueueDeletedCount uint crossRevokedDeletedCount uint + certMetadataDeletedCount uint + cmpv2NonceDeletedCount uint + + acmeAccountsCount uint + acmeAccountsRevokedCount uint + acmeAccountsDeletedCount uint + acmeOrdersDeletedCount uint } type tidyConfig struct { - Enabled bool `json:"enabled"` - Interval time.Duration `json:"interval_duration"` - CertStore bool `json:"tidy_cert_store"` - RevokedCerts bool `json:"tidy_revoked_certs"` - IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` - ExpiredIssuers bool `json:"tidy_expired_issuers"` - BackupBundle bool `json:"tidy_move_legacy_ca_bundle"` - SafetyBuffer time.Duration `json:"safety_buffer"` - IssuerSafetyBuffer time.Duration `json:"issuer_safety_buffer"` - PauseDuration time.Duration `json:"pause_duration"` - MaintainCount bool `json:"maintain_stored_certificate_counts"` - PublishMetrics bool `json:"publish_stored_certificate_count_metrics"` - RevocationQueue bool `json:"tidy_revocation_queue"` - QueueSafetyBuffer time.Duration `json:"revocation_queue_safety_buffer"` - CrossRevokedCerts bool `json:"tidy_cross_cluster_revoked_certs"` + // AutoTidy config + Enabled bool `json:"enabled"` + Interval time.Duration `json:"interval_duration"` + MinStartupBackoff time.Duration `json:"min_startup_backoff_duration"` + MaxStartupBackoff time.Duration `json:"max_startup_backoff_duration"` + + // Tidy Operations + CertStore bool `json:"tidy_cert_store"` + RevokedCerts bool `json:"tidy_revoked_certs"` + IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` + ExpiredIssuers bool `json:"tidy_expired_issuers"` + BackupBundle bool `json:"tidy_move_legacy_ca_bundle"` + RevocationQueue bool `json:"tidy_revocation_queue"` + CrossRevokedCerts bool `json:"tidy_cross_cluster_revoked_certs"` + TidyAcme bool `json:"tidy_acme"` + CertMetadata bool 
`json:"tidy_cert_metadata"` + CMPV2NonceStore bool `json:"tidy_cmpv2_nonce_store"` + + // Safety Buffers + SafetyBuffer time.Duration `json:"safety_buffer"` + IssuerSafetyBuffer time.Duration `json:"issuer_safety_buffer"` + QueueSafetyBuffer time.Duration `json:"revocation_queue_safety_buffer"` + AcmeAccountSafetyBuffer time.Duration `json:"acme_account_safety_buffer"` + PauseDuration time.Duration `json:"pause_duration"` + + // Metrics. + MaintainCount bool `json:"maintain_stored_certificate_counts"` + PublishMetrics bool `json:"publish_stored_certificate_count_metrics"` +} + +func (tc *tidyConfig) IsAnyTidyEnabled() bool { + return tc.CertStore || tc.RevokedCerts || tc.IssuerAssocs || tc.ExpiredIssuers || tc.BackupBundle || tc.TidyAcme || tc.CrossRevokedCerts || tc.RevocationQueue || tc.CertMetadata || tc.CMPV2NonceStore +} + +func (tc *tidyConfig) AnyTidyConfig() string { + return "tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations / tidy_expired_issuers / tidy_move_legacy_ca_bundle / tidy_revocation_queue / tidy_cross_cluster_revoked_certs / tidy_acme" +} + +func (tc *tidyConfig) CalculateStartupBackoff(mountStartup time.Time) time.Time { + minBackoff := int64(tc.MinStartupBackoff.Seconds()) + maxBackoff := int64(tc.MaxStartupBackoff.Seconds()) + + maxNumber := maxBackoff - minBackoff + if maxNumber <= 0 { + return mountStartup.Add(tc.MinStartupBackoff) + } + + backoffSecs := rand.Int64N(maxNumber) + minBackoff + return mountStartup.Add(time.Duration(backoffSecs) * time.Second) } var defaultTidyConfig = tidyConfig{ - Enabled: false, - Interval: 12 * time.Hour, - CertStore: false, - RevokedCerts: false, - IssuerAssocs: false, - ExpiredIssuers: false, - BackupBundle: false, - SafetyBuffer: 72 * time.Hour, - IssuerSafetyBuffer: 365 * 24 * time.Hour, - PauseDuration: 0 * time.Second, - MaintainCount: false, - PublishMetrics: false, - RevocationQueue: false, - QueueSafetyBuffer: 48 * time.Hour, - CrossRevokedCerts: false, + Enabled: false, + Interval: 12 * time.Hour, + MinStartupBackoff: 5 * time.Minute, + MaxStartupBackoff: 15 * time.Minute, + CertStore: false, + RevokedCerts: false, + IssuerAssocs: false, + ExpiredIssuers: false, + BackupBundle: false, + TidyAcme: false, + SafetyBuffer: 72 * time.Hour, + IssuerSafetyBuffer: 365 * 24 * time.Hour, + AcmeAccountSafetyBuffer: 30 * 24 * time.Hour, + PauseDuration: 0 * time.Second, + MaintainCount: false, + PublishMetrics: false, + RevocationQueue: false, + QueueSafetyBuffer: 48 * time.Hour, + CrossRevokedCerts: false, + CertMetadata: false, + CMPV2NonceStore: false, } func pathTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy$", - Fields: addTidyFields(map[string]*framework.FieldSchema{}), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + }, + + Fields: addTidyFields(map[string]*framework.FieldSchema{}), Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathTidyWrite, @@ -122,6 +186,13 @@ func pathTidy(b *backend) *framework.Path { func pathTidyCancel(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy-cancel$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + OperationSuffix: "cancel", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathTidyCancelWrite, @@ -139,6 +210,11 @@ func 
pathTidyCancel(b *backend) *framework.Path { Description: `Issuer safety buffer`, Required: false, }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Description: `Revocation queue safety buffer`, + Required: true, + }, "tidy_cert_store": { Type: framework.TypeBool, Description: `Tidy certificate store`, @@ -154,11 +230,31 @@ func pathTidyCancel(b *backend) *framework.Path { Description: `Tidy revoked certificate issuer associations`, Required: false, }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy Unused Acme Accounts, and Orders`, + Required: false, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after creation after which accounts lacking orders are revoked`, + Required: false, + }, "tidy_expired_issuers": { Type: framework.TypeBool, Description: `Tidy expired issuers`, Required: false, }, + "tidy_cert_metadata": { + Type: framework.TypeBool, + Description: `Tidy cert metadata`, + Required: false, + }, + "tidy_cmpv2_nonce_store": { + Type: framework.TypeBool, + Description: `Tidy CMPv2 nonce store`, + Required: false, + }, "pause_duration": { Type: framework.TypeString, Description: `Duration to pause between tidying certificates`, @@ -184,6 +280,11 @@ func pathTidyCancel(b *backend) *framework.Path { Description: `Time the operation finished`, Required: false, }, + "last_auto_tidy_finished": { + Type: framework.TypeString, + Description: `Time the last auto-tidy operation finished`, + Required: true, + }, "message": { Type: framework.TypeString, Description: `Message of the operation`, @@ -218,8 +319,9 @@ func pathTidyCancel(b *backend) *framework.Path { Required: false, }, "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeBool, - Required: false, + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, + Required: false, }, "tidy_revocation_queue": { Type: framework.TypeBool, @@ -237,6 +339,36 @@ func pathTidyCancel(b *backend) *framework.Path { Type: framework.TypeString, Required: false, }, + "total_acme_account_count": { + Type: framework.TypeInt, + Description: `Total number of acme accounts iterated over`, + Required: false, + }, + "acme_account_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked acme accounts removed`, + Required: false, + }, + "acme_account_revoked_count": { + Type: framework.TypeInt, + Description: `The number of unused acme accounts revoked`, + Required: false, + }, + "acme_orders_deleted_count": { + Type: framework.TypeInt, + Description: `The number of expired, unused acme orders removed`, + Required: false, + }, + "cert_metadata_deleted_count": { + Type: framework.TypeInt, + Description: `The number of metadata entries removed`, + Required: false, + }, + "cmpv2_nonce_deleted_count": { + Type: framework.TypeInt, + Description: `The number of CMPv2 nonces removed`, + Required: false, + }, }, }}, }, @@ -251,6 +383,13 @@ func pathTidyCancel(b *backend) *framework.Path { func pathTidyStatus(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy-status$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + OperationSuffix: "status", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathTidyStatusRead, @@ -268,6 +407,16 @@ func pathTidyStatus(b *backend) *framework.Path { Description: `Issuer safety buffer`, Required: true, }, + 
"revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Description: `Revocation queue safety buffer`, + Required: true, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after creation after which accounts lacking orders are revoked`, + Required: false, + }, "tidy_cert_store": { Type: framework.TypeBool, Description: `Tidy certificate store`, @@ -289,10 +438,25 @@ func pathTidyStatus(b *backend) *framework.Path { Required: true, }, "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeString, - Description: ``, + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, Required: false, }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy Unused Acme Accounts, and Orders`, + Required: true, + }, + "tidy_cert_metadata": { + Type: framework.TypeBool, + Description: `Tidy cert metadata`, + Required: true, + }, + "tidy_cmpv2_nonce_store": { + Type: framework.TypeBool, + Description: `Tidy CMPv2 nonce store`, + Required: true, + }, "pause_duration": { Type: framework.TypeString, Description: `Duration to pause between tidying certificates`, @@ -316,6 +480,11 @@ func pathTidyStatus(b *backend) *framework.Path { "time_finished": { Type: framework.TypeString, Description: `Time the operation finished`, + Required: false, + }, + "last_auto_tidy_finished": { + Type: framework.TypeString, + Description: `Time the last auto-tidy operation finished`, Required: true, }, "message": { @@ -368,6 +537,36 @@ func pathTidyStatus(b *backend) *framework.Path { Type: framework.TypeString, Required: true, }, + "total_acme_account_count": { + Type: framework.TypeInt, + Description: `Total number of acme accounts iterated over`, + Required: false, + }, + "acme_account_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked acme accounts removed`, + Required: false, + }, + "acme_account_revoked_count": { + Type: framework.TypeInt, + Description: `The number of unused acme accounts revoked`, + Required: false, + }, + "acme_orders_deleted_count": { + Type: framework.TypeInt, + Description: `The number of expired, unused acme orders removed`, + Required: false, + }, + "cert_metadata_deleted_count": { + Type: framework.TypeInt, + Description: `The number of metadata entries removed`, + Required: false, + }, + "cmpv2_nonce_deleted_count": { + Type: framework.TypeInt, + Description: `The number of CMPv2 nonces removed`, + Required: false, + }, }, }}, }, @@ -380,167 +579,172 @@ func pathTidyStatus(b *backend) *framework.Path { } func pathConfigAutoTidy(b *backend) *framework.Path { + autoTidyResponseFields := map[string]*framework.FieldSchema{ + "enabled": { + Type: framework.TypeBool, + Description: `Specifies whether automatic tidy is enabled or not`, + Required: true, + }, + "min_startup_backoff_duration": { + Type: framework.TypeInt, + Description: `The minimum amount of time in seconds auto-tidy will be delayed after startup`, + Required: true, + }, + "max_startup_backoff_duration": { + Type: framework.TypeInt, + Description: `The maximum amount of time in seconds auto-tidy will be delayed after startup`, + Required: true, + }, + "interval_duration": { + Type: framework.TypeInt, + Description: `Specifies the duration between automatic tidy operation`, + Required: true, + }, + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Specifies whether to tidy up the certificate store`, + Required: true, + }, + "tidy_revoked_certs": { + Type: framework.TypeBool, + 
Description: `Specifies whether to remove all invalid and expired certificates from storage`, + Required: true, + }, + "tidy_revoked_cert_issuer_associations": { + Type: framework.TypeBool, + Description: `Specifies whether to associate revoked certificates with their corresponding issuers`, + Required: true, + }, + "tidy_expired_issuers": { + Type: framework.TypeBool, + Description: `Specifies whether to tidy expired issuers`, + Required: true, + }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy unused ACME accounts and orders`, + Required: true, + }, + "tidy_cert_metadata": { + Type: framework.TypeBool, + Description: `Tidy cert metadata`, + Required: true, + }, + "tidy_cmpv2_nonce_store": { + Type: framework.TypeBool, + Description: `Tidy CMPv2 nonce store`, + Required: true, + }, + "safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer time duration`, + Required: true, + }, + "issuer_safety_buffer": { + Type: framework.TypeInt, + Description: `Issuer safety buffer`, + Required: true, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after account creation, after which accounts lacking orders are revoked`, + Required: true, + }, + "pause_duration": { + Type: framework.TypeString, + Description: `Duration to pause between tidying certificates`, + Required: true, + }, + "tidy_cross_cluster_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, + Required: true, + }, + "tidy_revocation_queue": { + Type: framework.TypeBool, + Required: true, + }, + "tidy_move_legacy_ca_bundle": { + Type: framework.TypeBool, + Required: true, + }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Required: true, + }, + "publish_stored_certificate_count_metrics": { + Type: framework.TypeBool, + Required: true, + }, + "maintain_stored_certificate_counts": { + Type: framework.TypeBool, + Required: true, + }, + } return &framework.Path{ Pattern: "config/auto-tidy", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, Fields: addTidyFields(map[string]*framework.FieldSchema{ "enabled": { Type: framework.TypeBool, Description: `Set to true to enable automatic tidy operations.`, }, + "min_startup_backoff_duration": { + Type: framework.TypeDurationSecond, + Description: `The minimum amount of time in seconds auto-tidy will be delayed after startup.`, + Default: int(defaultTidyConfig.MinStartupBackoff.Seconds()), + }, + "max_startup_backoff_duration": { + Type: framework.TypeDurationSecond, + Description: `The maximum amount of time in seconds auto-tidy will be delayed after startup.`, + Default: int(defaultTidyConfig.MaxStartupBackoff.Seconds()), + }, "interval_duration": { Type: framework.TypeDurationSecond, Description: `Interval at which to run an auto-tidy operation. This is the time between tidy invocations (after one finishes to the start of the next). Running a manual tidy will reset this duration.`, Default: int(defaultTidyConfig.Interval / time.Second), // TypeDurationSecond currently requires the default to be an int.
}, + "maintain_stored_certificate_counts": { + Type: framework.TypeBool, + Description: `This configures whether stored certificates +are counted upon initialization of the backend, and whether during +normal operation, a running count of certificates stored is maintained.`, + Default: false, + }, + "publish_stored_certificate_count_metrics": { + Type: framework.TypeBool, + Description: `This configures whether the stored certificate +count is published to the metrics consumer. It does not affect if the +stored certificate count is maintained, and if maintained, it will be +available on the tidy-status endpoint.`, + Default: false, + }, }), Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigAutoTidyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "auto-tidy-configuration", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "enabled": { - Type: framework.TypeBool, - Description: `Specifies whether automatic tidy is enabled or not`, - Required: true, - }, - "interval_duration": { - Type: framework.TypeInt, - Description: `Specifies the duration between automatic tidy operation`, - Required: true, - }, - "tidy_cert_store": { - Type: framework.TypeBool, - Description: `Specifies whether to tidy up the certificate store`, - Required: true, - }, - "tidy_revoked_certs": { - Type: framework.TypeBool, - Description: `Specifies whether to remove all invalid and expired certificates from storage`, - Required: true, - }, - "tidy_revoked_cert_issuer_associations": { - Type: framework.TypeBool, - Description: `Specifies whether to associate revoked certificates with their corresponding issuers`, - Required: true, - }, - "tidy_expired_issuers": { - Type: framework.TypeBool, - Description: `Specifies whether tidy expired issuers`, - Required: true, - }, - "safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer time duration`, - Required: true, - }, - "issuer_safety_buffer": { - Type: framework.TypeInt, - Description: `Issuer safety buffer`, - Required: true, - }, - "pause_duration": { - Type: framework.TypeString, - Description: `Duration to pause between tidying certificates`, - Required: true, - }, - "tidy_move_legacy_ca_bundle": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_revocation_queue": { - Type: framework.TypeBool, - Required: true, - }, - "revocation_queue_safety_buffer": { - Type: framework.TypeDurationSecond, - Required: true, - }, - "publish_stored_certificate_count_metrics": { - Type: framework.TypeBool, - Required: true, - }, - "maintain_stored_certificate_counts": { - Type: framework.TypeBool, - Required: true, - }, - }, + Fields: autoTidyResponseFields, }}, }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigAutoTidyWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "auto-tidy", + }, Responses: map[int][]framework.Response{ http.StatusOK: {{ Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "enabled": { - Type: framework.TypeBool, - Description: `Specifies whether automatic tidy is enabled or not`, - Required: true, - }, - "interval_duration": { - Type: framework.TypeInt, - Description: `Specifies the duration between automatic tidy operation`, - Required: true, - }, - "tidy_cert_store": { - 
Type: framework.TypeBool, - Description: `Specifies whether to tidy up the certificate store`, - Required: true, - }, - "tidy_revoked_certs": { - Type: framework.TypeBool, - Description: `Specifies whether to remove all invalid and expired certificates from storage`, - Required: true, - }, - "tidy_revoked_cert_issuer_associations": { - Type: framework.TypeBool, - Description: `Specifies whether to associate revoked certificates with their corresponding issuers`, - Required: true, - }, - "tidy_expired_issuers": { - Type: framework.TypeBool, - Description: `Specifies whether tidy expired issuers`, - Required: true, - }, - "safety_buffer": { - Type: framework.TypeInt, - Description: `Safety buffer time duration`, - Required: true, - }, - "issuer_safety_buffer": { - Type: framework.TypeInt, - Description: `Issuer safety buffer`, - Required: true, - }, - "pause_duration": { - Type: framework.TypeString, - Description: `Duration to pause between tidying certificates`, - Required: true, - }, - "tidy_cross_cluster_revoked_certs": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_revocation_queue": { - Type: framework.TypeBool, - Required: true, - }, - "tidy_move_legacy_ca_bundle": { - Type: framework.TypeBool, - Required: true, - }, - "revocation_queue_safety_buffer": { - Type: framework.TypeDurationSecond, - Required: true, - }, - }, + Fields: autoTidyResponseFields, }}, }, // Read more about why these flags are set in backend.go. @@ -566,6 +770,10 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr tidyRevocationQueue := d.Get("tidy_revocation_queue").(bool) queueSafetyBuffer := d.Get("revocation_queue_safety_buffer").(int) tidyCrossRevokedCerts := d.Get("tidy_cross_cluster_revoked_certs").(bool) + tidyAcme := d.Get("tidy_acme").(bool) + acmeAccountSafetyBuffer := d.Get("acme_account_safety_buffer").(int) + tidyCertMetadata := d.Get("tidy_cert_metadata").(bool) + tidyCMPV2NonceStore := d.Get("tidy_cmpv2_nonce_store").(bool) if safetyBuffer < 1 { return logical.ErrorResponse("safety_buffer must be greater than zero"), nil @@ -579,9 +787,13 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse("revocation_queue_safety_buffer must be greater than zero"), nil } + if acmeAccountSafetyBuffer < 1 { + return logical.ErrorResponse("acme_account_safety_buffer must be greater than zero"), nil + } + if pauseDurationStr != "" { var err error - pauseDuration, err = time.ParseDuration(pauseDurationStr) + pauseDuration, err = parseutil.ParseDurationSecond(pauseDurationStr) if err != nil { return logical.ErrorResponse(fmt.Sprintf("Error parsing pause_duration: %v", err)), nil } @@ -591,25 +803,34 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr } } + if tidyCertMetadata && !constants.IsEnterprise { + return logical.ErrorResponse("certificate metadata is only supported on Vault Enterprise"), nil + } + bufferDuration := time.Duration(safetyBuffer) * time.Second issuerBufferDuration := time.Duration(issuerSafetyBuffer) * time.Second queueSafetyBufferDuration := time.Duration(queueSafetyBuffer) * time.Second + acmeAccountSafetyBufferDuration := time.Duration(acmeAccountSafetyBuffer) * time.Second // Manual run with constructed configuration. 
config := &tidyConfig{ - Enabled: true, - Interval: 0 * time.Second, - CertStore: tidyCertStore, - RevokedCerts: tidyRevokedCerts, - IssuerAssocs: tidyRevokedAssocs, - ExpiredIssuers: tidyExpiredIssuers, - BackupBundle: tidyBackupBundle, - SafetyBuffer: bufferDuration, - IssuerSafetyBuffer: issuerBufferDuration, - PauseDuration: pauseDuration, - RevocationQueue: tidyRevocationQueue, - QueueSafetyBuffer: queueSafetyBufferDuration, - CrossRevokedCerts: tidyCrossRevokedCerts, + Enabled: true, + Interval: 0 * time.Second, + CertStore: tidyCertStore, + RevokedCerts: tidyRevokedCerts, + IssuerAssocs: tidyRevokedAssocs, + ExpiredIssuers: tidyExpiredIssuers, + BackupBundle: tidyBackupBundle, + SafetyBuffer: bufferDuration, + IssuerSafetyBuffer: issuerBufferDuration, + PauseDuration: pauseDuration, + RevocationQueue: tidyRevocationQueue, + QueueSafetyBuffer: queueSafetyBufferDuration, + CrossRevokedCerts: tidyCrossRevokedCerts, + TidyAcme: tidyAcme, + AcmeAccountSafetyBuffer: acmeAccountSafetyBufferDuration, + CertMetadata: tidyCertMetadata, + CMPV2NonceStore: tidyCMPV2NonceStore, } if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { @@ -624,18 +845,22 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr Storage: req.Storage, } + resp := &logical.Response{} // Mark the last tidy operation as relatively recent, to ensure we don't // try to trigger the periodic function. - b.tidyStatusLock.Lock() - b.lastTidy = time.Now() - b.tidyStatusLock.Unlock() + // NOTE: not sure this is correct as we are updating the auto tidy time with this manual run. Ideally we + could track when each type of tidy was last run, which would allow manual runs and auto + runs to properly impact each other. + sc := b.makeStorageContext(ctx, req.Storage) + if err := b.updateLastAutoTidyTime(sc, time.Now()); err != nil { + resp.AddWarning(fmt.Sprintf("failed persisting tidy last run time: %v", err)) + } // Kick off the actual tidy. b.startTidyOperation(req, config) - resp := &logical.Response{} - if !tidyCertStore && !tidyRevokedCerts && !tidyRevokedAssocs && !tidyExpiredIssuers && !tidyBackupBundle && !tidyRevocationQueue && !tidyCrossRevokedCerts { - resp.AddWarning("No targets to tidy; specify tidy_cert_store=true or tidy_revoked_certs=true or tidy_revoked_cert_issuer_associations=true or tidy_expired_issuers=true or tidy_move_legacy_ca_bundle=true or tidy_revocation_queue=true or tidy_cross_cluster_revoked_certs=true to start a tidy operation.") + if !config.IsAnyTidyEnabled() { + resp.AddWarning("Manual tidy requested but no tidy operations were set. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ").") } else { resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") } @@ -725,6 +950,39 @@ func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { } } + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.TidyAcme { + if err := b.doTidyAcme(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.CertMetadata { + if err := b.doTidyCertMetadata(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing.
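The cancel checkpoints above use a compare-and-swap on a shared flag: tidy-cancel stores 1, and each checkpoint atomically swaps it back to 0, so exactly one checkpoint observes a given cancellation request. A reduced sketch of the pattern (the phase runner below is invented for illustration; the real code inlines these checks between the doTidy* calls):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errCancelled = errors.New("tidy operation cancelled")

// runPhases executes each phase in order, checking a cancel flag between
// phases. CompareAndSwap both observes and clears the request, so a single
// cancellation stops at most one in-flight run.
func runPhases(cancelFlag *uint32, phases []func() error) error {
	for _, phase := range phases {
		if atomic.CompareAndSwapUint32(cancelFlag, 1, 0) {
			return errCancelled
		}
		if err := phase(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var cancel uint32
	phases := []func() error{
		func() error { fmt.Println("phase 1"); atomic.StoreUint32(&cancel, 1); return nil },
		func() error { fmt.Println("phase 2"); return nil }, // never reached
	}
	fmt.Println(runPhases(&cancel, phases)) // prints the cancellation error
}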
+ if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.CMPV2NonceStore { + if err := b.doTidyCMPV2NonceStore(ctx, req.Storage); err != nil { + return err + } + } + return nil } @@ -737,15 +995,16 @@ func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { // Since the tidy operation finished without an error, we don't // really want to start another tidy right away (if the interval // is too short). So mark the last tidy as now. - b.tidyStatusLock.Lock() - b.lastTidy = time.Now() - b.tidyStatusLock.Unlock() + sc := b.makeStorageContext(ctx, req.Storage) + if err := b.updateLastAutoTidyTime(sc, time.Now()); err != nil { + logger.Error("error persisting last tidy run time", "error", err) + } } }() } func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - serials, err := req.Storage.List(ctx, "certs/") + serials, err := req.Storage.List(ctx, issuing.PathCerts) if err != nil { return fmt.Errorf("error fetching list of certs: %w", err) } @@ -766,14 +1025,14 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log time.Sleep(config.PauseDuration) } - certEntry, err := req.Storage.Get(ctx, "certs/"+serial) + certEntry, err := req.Storage.Get(ctx, issuing.PathCerts+serial) if err != nil { return fmt.Errorf("error fetching certificate %q: %w", serial, err) } if certEntry == nil { logger.Warn("certificate entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + if err := req.Storage.Delete(ctx, issuing.PathCerts+serial); err != nil { return fmt.Errorf("error deleting nil entry with serial %s: %w", serial, err) } b.tidyStatusIncCertStoreCount() @@ -782,7 +1041,7 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log if certEntry.Value == nil || len(certEntry.Value) == 0 { logger.Warn("certificate entry has no value; tidying up since it is no longer useful for any server operations", "serial", serial) - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + if err := req.Storage.Delete(ctx, issuing.PathCerts+serial); err != nil { return fmt.Errorf("error deleting entry with nil value with serial %s: %w", serial, err) } b.tidyStatusIncCertStoreCount() @@ -795,7 +1054,7 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log } if time.Since(cert.NotAfter) > config.SafetyBuffer { - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + if err := req.Storage.Delete(ctx, issuing.PathCerts+serial); err != nil { return fmt.Errorf("error deleting serial %q from storage: %w", serial, err) } b.tidyStatusIncCertStoreCount() @@ -810,12 +1069,12 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log } func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() // Fetch and parse our issuers so we can associate them if necessary. 
sc := b.makeStorageContext(ctx, req.Storage) - issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) + issuerIDCertMap, err := revocation.FetchIssuerMapForRevocationChecking(sc) if err != nil { return err } @@ -832,7 +1091,7 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques fixedIssuers := 0 - var revInfo revocationInfo + var revInfo revocation.RevocationInfo for i, serial := range revokedSerials { b.tidyStatusMessage(fmt.Sprintf("Tidying revoked certificates: checking certificate %d of %d", i, len(revokedSerials))) metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_current_entry"}, float32(i)) @@ -844,9 +1103,9 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques // Check for pause duration to reduce resource consumption. if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Unlock() time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() + b.GetRevokeStorageLock().Lock() } revokedEntry, err := req.Storage.Get(ctx, "revoked/"+serial) @@ -889,9 +1148,9 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques if config.IssuerAssocs { if !isRevInfoIssuerValid(&revInfo, issuerIDCertMap) { b.tidyStatusIncMissingIssuerCertCount() - revInfo.CertificateIssuer = issuerID("") + revInfo.CertificateIssuer = issuing.IssuerID("") storeCert = true - if associateRevokedCertWithIsssuer(&revInfo, revokedCert, issuerIDCertMap) { + if revInfo.AssociateRevokedCertWithIsssuer(revokedCert, issuerIDCertMap) { fixedIssuers += 1 } } @@ -906,7 +1165,7 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { return fmt.Errorf("error deleting serial %q from revoked list: %w", serial, err) } - if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + if err := req.Storage.Delete(ctx, issuing.PathCerts+serial); err != nil { return fmt.Errorf("error deleting serial %q from store when tidying revoked: %w", serial, err) } rebuildCRL = true @@ -947,9 +1206,17 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques } if !config.AutoRebuild { - if err := b.crlBuilder.rebuild(sc, false); err != nil { + warnings, err := b.CrlBuilder().Rebuild(sc, false) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of CRL for tidy, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } } } @@ -969,7 +1236,7 @@ func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request // Short-circuit to avoid having to deal with the legacy mounts. While we // could handle this case and remove these issuers, its somewhat // unexpected behavior and we'd prefer to finish the migration first. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return nil } @@ -978,7 +1245,7 @@ func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request // Fetch and parse our issuers so we have their expiration date. 
sc := b.makeStorageContext(ctx, req.Storage) - issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) + issuerIDCertMap, err := revocation.FetchIssuerMapForRevocationChecking(sc) if err != nil { return err } @@ -1048,12 +1315,20 @@ func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request // Removal of issuers is generally a good reason to rebuild the CRL, // even if auto-rebuild is enabled. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() - if err := b.crlBuilder.rebuild(sc, false); err != nil { + warnings, err := b.CrlBuilder().Rebuild(sc, false) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of CRL for tidy, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } } return nil @@ -1071,7 +1346,7 @@ func (b *backend) doTidyMoveCABundle(ctx context.Context, req *logical.Request, // Short-circuit to avoid moving the legacy bundle from under a legacy // mount. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return nil } @@ -1134,8 +1409,8 @@ func (b *backend) doTidyRevocationQueue(ctx context.Context, req *logical.Reques } // Grab locks as we're potentially modifying revocation-related storage. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() for cIndex, cluster := range clusters { if cluster[len(cluster)-1] == '/' { @@ -1156,9 +1431,9 @@ func (b *backend) doTidyRevocationQueue(ctx context.Context, req *logical.Reques // Check for pause duration to reduce resource consumption. if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Unlock() time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() + b.GetRevokeStorageLock().Lock() } // Confirmation entries _should_ be handled by this cluster's @@ -1256,8 +1531,8 @@ func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.R } // Grab locks as we're potentially modifying revocation-related storage. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() for cIndex, cluster := range clusters { if cluster[len(cluster)-1] == '/' { @@ -1278,9 +1553,9 @@ func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.R // Check for pause duration to reduce resource consumption. 
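The pause_duration handling that follows this comment releases the revocation storage lock before sleeping and reacquires it afterwards, so a long pause does not block other revocation writers. In isolation the pattern looks like this (a minimal sketch with an invented mutex and work loop):

package main

import (
	"fmt"
	"sync"
	"time"
)

// pauseWithoutBlocking releases mu for the duration of the sleep so that
// other writers can make progress, then reacquires it before the caller
// resumes iterating. The caller must already hold mu.
func pauseWithoutBlocking(mu *sync.Mutex, d time.Duration) {
	mu.Unlock()
	time.Sleep(d)
	mu.Lock()
}

func main() {
	var mu sync.Mutex
	mu.Lock()
	defer mu.Unlock()
	for i := 0; i < 3; i++ {
		fmt.Println("processing entry", i) // work done under the lock
		pauseWithoutBlocking(&mu, 10*time.Millisecond)
	}
}

One consequence worth noting: anything guarded by the lock can change during the sleep, which is why the tidy loops tolerate entries that have disappeared by the time they are re-read.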
if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Unlock() time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() + b.GetRevokeStorageLock().Lock() } ePath := cPath + serial @@ -1292,7 +1567,7 @@ func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.R continue } - var details unifiedRevocationEntry + var details revocation.UnifiedRevocationEntry if err := entry.DecodeJSON(&details); err != nil { return fmt.Errorf("error decoding cross-cluster revocation entry (%v) to tidy: %w", ePath, err) } @@ -1313,6 +1588,80 @@ func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.R return nil } +func (b *backend) doTidyAcme(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + b.acmeAccountLock.Lock() + defer b.acmeAccountLock.Unlock() + + sc := b.makeStorageContext(ctx, req.Storage) + thumbprints, err := sc.Storage.List(ctx, acmeThumbprintPrefix) + if err != nil { + return err + } + + b.tidyStatusLock.Lock() + b.tidyStatus.acmeAccountsCount = uint(len(thumbprints)) + b.tidyStatusLock.Unlock() + + for _, thumbprint := range thumbprints { + err := b.tidyAcmeAccountByThumbprint(b.GetAcmeState(), sc, thumbprint, config.SafetyBuffer, config.AcmeAccountSafetyBuffer) + if err != nil { + logger.Warn("error tidying account", "thumbprint", thumbprint, "error", err) + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption. + if config.PauseDuration > (0 * time.Second) { + b.acmeAccountLock.Unlock() // Release the lock while we sleep so other ACME operations can proceed. + time.Sleep(config.PauseDuration) + b.acmeAccountLock.Lock() + } + + } + + // Clean up any unused EAB + eabIds, err := b.GetAcmeState().ListEabIds(sc) + if err != nil { + return fmt.Errorf("failed listing EAB ids: %w", err) + } + + for _, eabId := range eabIds { + eab, err := b.GetAcmeState().LoadEab(sc, eabId) + if err != nil { + if errors.Is(err, ErrStorageItemNotFound) { + // We don't need to worry about a consumed EAB + continue + } + return err + } + + eabExpiration := eab.CreatedOn.Add(config.AcmeAccountSafetyBuffer) + if time.Now().After(eabExpiration) { + _, err := b.GetAcmeState().DeleteEab(sc, eabId) + if err != nil { + return fmt.Errorf("failed to tidy eab %s: %w", eabId, err) + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption.
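The EAB cleanup in doTidyAcme above deletes unbound external-account-binding tokens once their creation time plus acme_account_safety_buffer has passed. The expiry test reduces to a one-line time comparison, sketched here standalone (names are illustrative):

package main

import (
	"fmt"
	"time"
)

// isExpiredPastBuffer reports whether an entry created at createdOn has
// outlived the safety buffer, matching the eabExpiration check in doTidyAcme.
func isExpiredPastBuffer(createdOn time.Time, safetyBuffer time.Duration, now time.Time) bool {
	return now.After(createdOn.Add(safetyBuffer))
}

func main() {
	buffer := 30 * 24 * time.Hour // the default acme_account_safety_buffer
	created := time.Now().Add(-31 * 24 * time.Hour)
	fmt.Println(isExpiredPastBuffer(created, buffer, time.Now())) // true
}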
+ if config.PauseDuration > (0 * time.Second) { + b.acmeAccountLock.Unlock() // Release the lock while we sleep so other ACME operations can proceed. + time.Sleep(config.PauseDuration) + b.acmeAccountLock.Lock() + } + } + + return nil +} + func (b *backend) pathTidyCancelWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { if atomic.LoadUint32(b.tidyCASGuard) == 0 { resp := &logical.Response{} @@ -1351,6 +1700,9 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f "tidy_move_legacy_ca_bundle": nil, "tidy_revocation_queue": nil, "tidy_cross_cluster_revoked_certs": nil, + "tidy_acme": nil, + "tidy_cert_metadata": nil, + "tidy_cmpv2_nonce_store": nil, "pause_duration": nil, "state": "Inactive", "error": nil, @@ -1365,20 +1717,30 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f "internal_backend_uuid": nil, "revocation_queue_deleted_count": nil, "cross_revoked_cert_deleted_count": nil, + "total_acme_account_count": nil, + "acme_account_deleted_count": nil, + "acme_account_revoked_count": nil, + "acme_orders_deleted_count": nil, + "acme_account_safety_buffer": nil, + "cert_metadata_deleted_count": nil, + "cmpv2_nonce_deleted_count": nil, + "last_auto_tidy_finished": b.getLastAutoTidyTimeWithoutLock(), // we acquired the tidyStatusLock above. }, } resp.Data["internal_backend_uuid"] = b.backendUUID - if b.certCountEnabled.Load() { - resp.Data["current_cert_store_count"] = b.certCount.Load() - resp.Data["current_revoked_cert_count"] = b.revokedCertCount.Load() - if !b.certsCounted.Load() { + certCounter := b.GetCertificateCounter() + if certCounter.IsEnabled() { + resp.Data["current_cert_store_count"] = certCounter.CertificateCount() + resp.Data["current_revoked_cert_count"] = certCounter.RevokedCount() + if !certCounter.IsInitialized() { resp.AddWarning("Certificates in storage are still being counted, current counts provided may be " + "inaccurate") } - if b.certCountError != "" { - resp.Data["certificate_counting_error"] = b.certCountError + certError := certCounter.Error() + if certError != nil { + resp.Data["certificate_counting_error"] = certError.Error() } } @@ -1395,6 +1757,9 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp.Data["tidy_move_legacy_ca_bundle"] = b.tidyStatus.tidyBackupBundle resp.Data["tidy_revocation_queue"] = b.tidyStatus.tidyRevocationQueue resp.Data["tidy_cross_cluster_revoked_certs"] = b.tidyStatus.tidyCrossRevokedCerts + resp.Data["tidy_acme"] = b.tidyStatus.tidyAcme + resp.Data["tidy_cert_metadata"] = b.tidyStatus.tidyCertMetadata + resp.Data["tidy_cmpv2_nonce_store"] = b.tidyStatus.tidyCMPV2NonceStore resp.Data["pause_duration"] = b.tidyStatus.pauseDuration resp.Data["time_started"] = b.tidyStatus.timeStarted resp.Data["message"] = b.tidyStatus.message @@ -1403,6 +1768,14 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp.Data["missing_issuer_cert_count"] = b.tidyStatus.missingIssuerCertCount resp.Data["revocation_queue_deleted_count"] = b.tidyStatus.revQueueDeletedCount resp.Data["cross_revoked_cert_deleted_count"] = b.tidyStatus.crossRevokedDeletedCount + resp.Data["revocation_queue_safety_buffer"] = b.tidyStatus.revQueueSafetyBuffer + resp.Data["total_acme_account_count"] = b.tidyStatus.acmeAccountsCount + resp.Data["acme_account_deleted_count"] = b.tidyStatus.acmeAccountsDeletedCount + resp.Data["acme_account_revoked_count"] = b.tidyStatus.acmeAccountsRevokedCount + resp.Data["acme_orders_deleted_count"] =
b.tidyStatus.acmeOrdersDeletedCount + resp.Data["acme_account_safety_buffer"] = b.tidyStatus.acmeAccountSafetyBuffer + resp.Data["cert_metadata_deleted_count"] = b.tidyStatus.certMetadataDeletedCount + resp.Data["cmpv2_nonce_deleted_count"] = b.tidyStatus.cmpv2NonceDeletedCount switch b.tidyStatus.state { case tidyStatusStarted: @@ -1435,23 +1808,7 @@ func (b *backend) pathConfigAutoTidyRead(ctx context.Context, req *logical.Reque } return &logical.Response{ - Data: map[string]interface{}{ - "enabled": config.Enabled, - "interval_duration": int(config.Interval / time.Second), - "tidy_cert_store": config.CertStore, - "tidy_revoked_certs": config.RevokedCerts, - "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, - "tidy_expired_issuers": config.ExpiredIssuers, - "safety_buffer": int(config.SafetyBuffer / time.Second), - "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), - "pause_duration": config.PauseDuration.String(), - "publish_stored_certificate_count_metrics": config.PublishMetrics, - "maintain_stored_certificate_counts": config.MaintainCount, - "tidy_move_legacy_ca_bundle": config.BackupBundle, - "tidy_revocation_queue": config.RevocationQueue, - "revocation_queue_safety_buffer": int(config.QueueSafetyBuffer / time.Second), - "tidy_cross_cluster_revoked_certs": config.CrossRevokedCerts, - }, + Data: getTidyConfigData(*config), }, nil } @@ -1462,8 +1819,44 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ return nil, err } + isAutoTidyBeingEnabled := false + if enabledRaw, ok := d.GetOk("enabled"); ok { - config.Enabled = enabledRaw.(bool) + enabled, err := parseutil.ParseBool(enabledRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse enabled flag as a boolean: %s", err.Error())), nil + } + if !config.Enabled && enabled { + // we are turning on auto-tidy; reset our persisted time to now + isAutoTidyBeingEnabled = true + } + config.Enabled = enabled + } + + if minStartupBackoffRaw, ok := d.GetOk("min_startup_backoff_duration"); ok { + minDuration, err := parseutil.ParseDurationSecond(minStartupBackoffRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse min_startup_backoff_duration flag as a duration: %s", err.Error())), nil + } + if minDuration.Seconds() < 1 { + return logical.ErrorResponse(fmt.Sprintf("min_startup_backoff_duration must be at least 1 second: parsed: %v", minDuration)), nil + } + config.MinStartupBackoff = minDuration + } + + if maxStartupBackoffRaw, ok := d.GetOk("max_startup_backoff_duration"); ok { + maxDuration, err := parseutil.ParseDurationSecond(maxStartupBackoffRaw) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse max_startup_backoff_duration flag as a duration: %s", err.Error())), nil + } + if maxDuration.Seconds() < 1 { + return logical.ErrorResponse(fmt.Sprintf("max_startup_backoff_duration must be at least 1 second: parsed: %v", maxDuration)), nil + } + config.MaxStartupBackoff = maxDuration + } + + if config.MinStartupBackoff > config.MaxStartupBackoff { + return logical.ErrorResponse(fmt.Sprintf("max_startup_backoff_duration %v must be greater than or equal to min_startup_backoff_duration %v", config.MaxStartupBackoff, config.MinStartupBackoff)), nil } if intervalRaw, ok := d.GetOk("interval_duration"); ok { @@ -1493,7 +1886,7 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ } if pauseDurationRaw, ok := d.GetOk("pause_duration"); ok { - config.PauseDuration, err =
time.ParseDuration(pauseDurationRaw.(string)) + config.PauseDuration, err = parseutil.ParseDurationSecond(pauseDurationRaw.(string)) if err != nil { return logical.ErrorResponse(fmt.Sprintf("unable to parse given pause_duration: %v", err)), nil } @@ -1533,8 +1926,27 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ config.CrossRevokedCerts = crossRevokedRaw.(bool) } - if config.Enabled && !(config.CertStore || config.RevokedCerts || config.IssuerAssocs || config.ExpiredIssuers || config.BackupBundle || config.RevocationQueue || config.CrossRevokedCerts) { - return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. Enable at least one tidy operation to be run (tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations / tidy_expired_issuers / tidy_move_legacy_ca_bundle / tidy_revocation_queue / tidy_cross_cluster_revoked_certs)."), nil + if tidyAcmeRaw, ok := d.GetOk("tidy_acme"); ok { + config.TidyAcme = tidyAcmeRaw.(bool) + } + + if acmeAccountSafetyBufferRaw, ok := d.GetOk("acme_account_safety_buffer"); ok { + config.AcmeAccountSafetyBuffer = time.Duration(acmeAccountSafetyBufferRaw.(int)) * time.Second + if config.AcmeAccountSafetyBuffer < 1*time.Second { + return logical.ErrorResponse(fmt.Sprintf("given acme_account_safety_buffer must be at least one second; got: %v", acmeAccountSafetyBufferRaw)), nil + } + } + + if tidyCertMetadataRaw, ok := d.GetOk("tidy_cert_metadata"); ok { + config.CertMetadata = tidyCertMetadataRaw.(bool) + + if config.CertMetadata && !constants.IsEnterprise { + return logical.ErrorResponse("certificate metadata is only supported on Vault Enterprise"), nil + } + } + + if config.Enabled && !config.IsAnyTidyEnabled() { + return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ")."), nil } if maintainCountEnabledRaw, ok := d.GetOk("maintain_stored_certificate_counts"); ok { @@ -1542,32 +1954,26 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ } if runningStorageMetricsEnabledRaw, ok := d.GetOk("publish_stored_certificate_count_metrics"); ok { - if config.MaintainCount == false { - return logical.ErrorResponse("Can not publish a running storage metrics count to metrics without first maintaining that count. Enable `maintain_stored_certificate_counts` to enable `publish_stored_certificate_count_metrics."), nil - } config.PublishMetrics = runningStorageMetricsEnabledRaw.(bool) } + if config.PublishMetrics && !config.MaintainCount { + return logical.ErrorResponse("Can not publish a running storage metrics count to metrics without first maintaining that count. 
Enable `maintain_stored_certificate_counts` to enable `publish_stored_certificate_count_metrics`."), nil + } + if err := sc.writeAutoTidyConfig(config); err != nil { return nil, err } + if isAutoTidyBeingEnabled { + if err := b.updateLastAutoTidyTime(sc, time.Now()); err != nil { + b.Logger().Warn("failed to update last auto tidy run time to now, the first auto-tidy "+ + "might run soon and not at the next delay provided", "error", err.Error()) + } + } + return &logical.Response{ - Data: map[string]interface{}{ - "enabled": config.Enabled, - "interval_duration": int(config.Interval / time.Second), - "tidy_cert_store": config.CertStore, - "tidy_revoked_certs": config.RevokedCerts, - "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, - "tidy_expired_issuers": config.ExpiredIssuers, - "tidy_move_legacy_ca_bundle": config.BackupBundle, - "safety_buffer": int(config.SafetyBuffer / time.Second), - "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), - "pause_duration": config.PauseDuration.String(), - "tidy_revocation_queue": config.RevocationQueue, - "revocation_queue_safety_buffer": int(config.QueueSafetyBuffer / time.Second), - "tidy_cross_cluster_revoked_certs": config.CrossRevokedCerts, - }, + Data: getTidyConfigData(*config), }, nil } @@ -1576,16 +1982,20 @@ func (b *backend) tidyStatusStart(config *tidyConfig) { defer b.tidyStatusLock.Unlock() b.tidyStatus = &tidyStatus{ - safetyBuffer: int(config.SafetyBuffer / time.Second), - issuerSafetyBuffer: int(config.IssuerSafetyBuffer / time.Second), - tidyCertStore: config.CertStore, - tidyRevokedCerts: config.RevokedCerts, - tidyRevokedAssocs: config.IssuerAssocs, - tidyExpiredIssuers: config.ExpiredIssuers, - tidyBackupBundle: config.BackupBundle, - tidyRevocationQueue: config.RevocationQueue, - tidyCrossRevokedCerts: config.CrossRevokedCerts, - pauseDuration: config.PauseDuration.String(), + safetyBuffer: int(config.SafetyBuffer / time.Second), + issuerSafetyBuffer: int(config.IssuerSafetyBuffer / time.Second), + revQueueSafetyBuffer: int(config.QueueSafetyBuffer / time.Second), + acmeAccountSafetyBuffer: int(config.AcmeAccountSafetyBuffer / time.Second), + tidyCertStore: config.CertStore, + tidyRevokedCerts: config.RevokedCerts, + tidyRevokedAssocs: config.IssuerAssocs, + tidyExpiredIssuers: config.ExpiredIssuers, + tidyBackupBundle: config.BackupBundle, + tidyRevocationQueue: config.RevocationQueue, + tidyCrossRevokedCerts: config.CrossRevokedCerts, + tidyAcme: config.TidyAcme, + tidyCertMetadata: config.CertMetadata, + pauseDuration: config.PauseDuration.String(), state: tidyStatusStarted, timeStarted: time.Now(), @@ -1633,7 +2043,7 @@ func (b *backend) tidyStatusIncCertStoreCount() { b.tidyStatus.certStoreDeletedCount++ - b.ifCountEnabledDecrementTotalCertificatesCountReport() + b.GetCertificateCounter().DecrementTotalCertificatesCountReport() } func (b *backend) tidyStatusIncRevokedCertCount() { @@ -1642,7 +2052,7 @@ func (b *backend) tidyStatusIncRevokedCertCount() { b.tidyStatus.revokedCertDeletedCount++ - b.ifCountEnabledDecrementTotalRevokedCertificatesCountReport() + b.GetCertificateCounter().DecrementTotalRevokedCertificatesCountReport() } func (b *backend) tidyStatusIncMissingIssuerCertCount() { @@ -1666,6 +2076,65 @@ func (b *backend) tidyStatusIncCrossRevCertCount() { b.tidyStatus.crossRevokedDeletedCount++ } +func (b *backend) tidyStatusIncRevAcmeAccountCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeAccountsRevokedCount++ +} + +func (b *backend) 
tidyStatusIncDeletedAcmeAccountCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeAccountsDeletedCount++ +} + +func (b *backend) tidyStatusIncDelAcmeOrderCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeOrdersDeletedCount++ +} + +func (b *backend) tidyStatusIncCertMetadataCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.certMetadataDeletedCount++ +} + +func (b *backend) tidyStatusIncCMPV2NonceDeletedCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.cmpv2NonceDeletedCount++ +} + +// updateLastAutoTidyTime should be used to update b.lastAutoTidy: it acquires the required +// locks and persists the given time to storage so the value survives restarts +func (b *backend) updateLastAutoTidyTime(sc *storageContext, lastRunTime time.Time) error { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.lastAutoTidy = lastRunTime + return sc.writeAutoTidyLastRun(lastRunTime) +} + +// getLastAutoTidyTime should be used to read b.lastAutoTidy, as it acquires the required +// locks prior to reading +func (b *backend) getLastAutoTidyTime() time.Time { + b.tidyStatusLock.RLock() + defer b.tidyStatusLock.RUnlock() + return b.getLastAutoTidyTimeWithoutLock() +} + +// getLastAutoTidyTimeWithoutLock should only be used to read b.lastAutoTidy when +// b.tidyStatusLock is already held; normally use getLastAutoTidyTime +func (b *backend) getLastAutoTidyTimeWithoutLock() time.Time { + return b.lastAutoTidy +} + const pathTidyHelpSyn = ` Tidy up the backend by removing expired certificates, revocation information, or both. @@ -1734,6 +2203,13 @@ The result includes the following fields: * 'revocation_queue_deleted_count': the number of revocation queue entries deleted * 'tidy_cross_cluster_revoked_certs': the value of this parameter when initiating the tidy operation * 'cross_revoked_cert_deleted_count': the number of cross-cluster revoked certificate entries deleted +* 'revocation_queue_safety_buffer': the value of this parameter when initiating the tidy operation +* 'tidy_acme': the value of this parameter when initiating the tidy operation +* 'acme_account_safety_buffer': the value of this parameter when initiating the tidy operation +* 'total_acme_account_count': the total number of acme accounts in the list to be iterated over +* 'acme_account_deleted_count': the number of revoked acme accounts deleted during the operation +* 'acme_account_revoked_count': the number of acme accounts revoked during the operation +* 'acme_orders_deleted_count': the number of acme orders deleted during the operation ` const pathConfigAutoTidySyn = ` @@ -1749,3 +2225,30 @@ controls the frequency of auto-tidy execution). Once enabled, a tidy operation will be kicked off automatically, as if it were executed with the posted configuration.
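As a usage sketch for the endpoint this help text describes, configuring auto-tidy through the Vault API client might look like the following; the mount path pki and the chosen values are assumptions for illustration, and VAULT_ADDR/VAULT_TOKEN are read from the environment:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // honors VAULT_ADDR / VAULT_TOKEN
	if err != nil {
		log.Fatal(err)
	}
	// Enable auto-tidy with a couple of operations; duration fields accept
	// duration strings via TypeDurationSecond.
	_, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
		"enabled":                      true,
		"tidy_cert_store":              true,
		"tidy_revoked_certs":           true,
		"safety_buffer":                "72h",
		"min_startup_backoff_duration": "5m",
		"max_startup_backoff_duration": "15m",
	})
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := client.Logical().Read("pki/config/auto-tidy")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("interval_duration:", cfg.Data["interval_duration"])
}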
` + +func getTidyConfigData(config tidyConfig) map[string]interface{} { + return map[string]interface{}{ + // This map is in the same order as tidyConfig to ensure that all fields are accounted for + "enabled": config.Enabled, + "interval_duration": int(config.Interval / time.Second), + "min_startup_backoff_duration": int(config.MinStartupBackoff.Seconds()), + "max_startup_backoff_duration": int(config.MaxStartupBackoff.Seconds()), + "tidy_cert_store": config.CertStore, + "tidy_revoked_certs": config.RevokedCerts, + "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, + "tidy_expired_issuers": config.ExpiredIssuers, + "tidy_move_legacy_ca_bundle": config.BackupBundle, + "tidy_acme": config.TidyAcme, + "safety_buffer": int(config.SafetyBuffer / time.Second), + "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), + "acme_account_safety_buffer": int(config.AcmeAccountSafetyBuffer / time.Second), + "pause_duration": config.PauseDuration.String(), + "publish_stored_certificate_count_metrics": config.PublishMetrics, + "maintain_stored_certificate_counts": config.MaintainCount, + "tidy_revocation_queue": config.RevocationQueue, + "revocation_queue_safety_buffer": int(config.QueueSafetyBuffer / time.Second), + "tidy_cross_cluster_revoked_certs": config.CrossRevokedCerts, + "tidy_cert_metadata": config.CertMetadata, + "tidy_cmpv2_nonce_store": config.CMPV2NonceStore, + } +} diff --git a/builtin/logical/pki/path_tidy_test.go b/builtin/logical/pki/path_tidy_test.go index d24555bddca2..f32bc880a59e 100644 --- a/builtin/logical/pki/path_tidy_test.go +++ b/builtin/logical/pki/path_tidy_test.go @@ -1,28 +1,172 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" "encoding/json" "errors" "fmt" + "path" + "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/armon/go-metrics" - "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/stretchr/testify/require" + "golang.org/x/crypto/acme" ) +func TestTidyConfigs(t *testing.T) { + t.Parallel() + + var cfg tidyConfig + operations := strings.Split(cfg.AnyTidyConfig(), " / ") + require.Greater(t, len(operations), 1, "expected more than one operation") + t.Logf("Got tidy operations: %v", operations) + + lastOp := operations[len(operations)-1] + + for _, operation := range operations { + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + operation: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to enable auto-tidy operation "+operation) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) + require.True(t, resp.Data[operation].(bool), "expected operation to be enabled after reading auto-tidy config "+operation) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + operation: false, + lastOp: true, + }) + 
requireSuccessNonNilResponse(t, resp, err, "expected to be able to disable auto-tidy operation "+operation) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) + require.False(t, resp.Data[operation].(bool), "expected operation to be disabled after reading auto-tidy config "+operation) + + resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ + operation: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+operation) + if len(resp.Warnings) > 0 { + t.Logf("got warnings while starting manual tidy: %v", resp.Warnings) + for _, warning := range resp.Warnings { + if strings.Contains(warning, "Manual tidy requested but no tidy operations were set.") { + t.Fatalf("expected to be able to enable tidy operation with just %v but got warning: %v / (resp=%v)", operation, warning, resp) + } + } + } + + lastOp = operation + } + + // pause_duration is tested elsewhere in other tests. + type configSafetyBufferValueStr struct { + Config string + FirstValue int + SecondValue int + DefaultValue int + } + configSafetyBufferValues := []configSafetyBufferValueStr{ + { + Config: "safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.SafetyBuffer / time.Second), + }, + { + Config: "issuer_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), + }, + { + Config: "acme_account_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), + }, + { + Config: "revocation_queue_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), + }, + } + + for _, flag := range configSafetyBufferValues { + b, s := CreateBackendWithStorage(t) + + resp, err := CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for flag "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.DefaultValue, "expected initial auto-tidy config to match default value for "+flag.Config) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + "tidy_cert_store": true, + flag.Config: flag.FirstValue, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to set auto-tidy config option "+flag.Config) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for config "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.FirstValue, "expected value to be set after reading auto-tidy config "+flag.Config) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + "tidy_cert_store": true, + flag.Config: flag.SecondValue, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to set auto-tidy config option "+flag.Config) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for config "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.SecondValue, "expected value to be set after reading auto-tidy config "+flag.Config) + + resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_cert_store": true, + flag.Config: flag.FirstValue, + }) + t.Logf("tidy run 
results: resp=%v/err=%v", resp, err) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+flag.Config) + if len(resp.Warnings) > 0 { + for _, warning := range resp.Warnings { + if strings.Contains(warning, "unrecognized parameter") && strings.Contains(warning, flag.Config) { + t.Fatalf("warning '%v' claims parameter '%v' is unknown", warning, flag.Config) + } + } + } + + time.Sleep(2 * time.Second) + + resp, err = CBRead(b, s, "tidy-status") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+flag.Config) + t.Logf("got response: %v for config: %v", resp, flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.FirstValue, "expected flag to be set in tidy-status for config "+flag.Config) + } +} + func TestAutoTidy(t *testing.T) { t.Parallel() @@ -92,11 +236,13 @@ func TestAutoTidy(t *testing.T) { // Write the auto-tidy config. _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "enabled": true, - "interval_duration": "1s", - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "safety_buffer": "1s", + "enabled": true, + "interval_duration": "1s", + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + "min_startup_backoff_duration": "1s", + "max_startup_backoff_duration": "1s", }) require.NoError(t, err) @@ -155,41 +301,9 @@ func TestAutoTidy(t *testing.T) { // Wait for cert to expire and the safety buffer to elapse. time.Sleep(time.Until(leafCert.NotAfter) + 3*time.Second) - // Wait for auto-tidy to run afterwards. - var foundTidyRunning string - var foundTidyFinished bool - timeoutChan := time.After(120 * time.Second) - for { - if foundTidyRunning != "" && foundTidyFinished { - break - } - - select { - case <-timeoutChan: - t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished) - default: - time.Sleep(250 * time.Millisecond) - - resp, err = client.Logical().Read("pki/tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["state"]) - require.NotEmpty(t, resp.Data["time_started"]) - state := resp.Data["state"].(string) - started := resp.Data["time_started"].(string) - t.Logf("Resp: %v", resp.Data) - - // We want the _next_ tidy run after the cert expires. This - // means if we're currently finished when we hit this the - // first time, we want to wait for the next run. - if foundTidyRunning == "" { - foundTidyRunning = started - } else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" { - foundTidyFinished = true - } - } - } + // We run this twice to make absolutely sure we didn't read a previous run of tidy + _, lastRun := waitForTidyToFinish(t, client, "pki") + waitForTidyToFinishWithLastRun(t, client, "pki", lastRun) // Cert should no longer exist. resp, err = client.Logical().Read("pki/cert/" + leafSerial) @@ -197,6 +311,67 @@ func TestAutoTidy(t *testing.T) { require.Nil(t, resp) } +// TestAutoTidyPersistsAcrossRestarts validates that on initial +// startup of a mount we persisted the current auto tidy time so that +// our counter that auto-tidy is based on isn't reset everytime Vault restarts +func TestAutoTidyPersistsAcrossRestarts(t *testing.T) { + t.Parallel() + + newPeriod := 1 * time.Second + + // This test requires the periodicFunc to trigger, which requires we stand + // up a full test cluster. 
+ coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + RollbackPeriod: newPeriod, + } + opts := &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + } + cluster := vault.NewTestCluster(t, coreConfig, opts) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + + // Mount PKI + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }) + require.NoError(t, err, "failed mounting pki") + + // Run a tidy that should set us up + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_cert_store": "true", + }) + require.NoError(t, err, "failed running tidy") + + waitForTidyToFinish(t, client, "pki") + + resp, err := client.Logical().Read("pki/tidy-status") + require.NoError(t, err, "failed reading tidy status") + require.NotNil(t, resp, "response from tidy-status was nil") + lastAutoTidy, exists := resp.Data["last_auto_tidy_finished"] + require.True(t, exists, "did not find last_auto_tidy_finished") + + cluster.StopCore(t, 0) + cluster.StartCore(t, 0, opts) + cluster.UnsealCore(t, cluster.Cores[0]) + vault.TestWaitActive(t, cluster.Cores[0].Core) + + client = cluster.Cores[0].Client + resp, err = client.Logical().Read("pki/tidy-status") + require.NoError(t, err, "failed reading tidy status") + require.NotNil(t, resp, "response from tidy-status was nil") + postRestartLastAutoTidy, exists := resp.Data["last_auto_tidy_finished"] + require.True(t, exists, "did not find last_auto_tidy_finished") + + require.Equal(t, lastAutoTidy, postRestartLastAutoTidy, "values for last_auto_tidy_finished did not match on restart") +} + func TestTidyCancellation(t *testing.T) { t.Parallel() @@ -408,6 +583,9 @@ func TestTidyIssuerConfig(t *testing.T) { defaultConfigMap["safety_buffer"] = int(time.Duration(defaultConfigMap["safety_buffer"].(float64)) / time.Second) defaultConfigMap["pause_duration"] = time.Duration(defaultConfigMap["pause_duration"].(float64)).String() defaultConfigMap["revocation_queue_safety_buffer"] = int(time.Duration(defaultConfigMap["revocation_queue_safety_buffer"].(float64)) / time.Second) + defaultConfigMap["acme_account_safety_buffer"] = int(time.Duration(defaultConfigMap["acme_account_safety_buffer"].(float64)) / time.Second) + defaultConfigMap["min_startup_backoff_duration"] = int(time.Duration(defaultConfigMap["min_startup_backoff_duration"].(float64)) / time.Second) + defaultConfigMap["max_startup_backoff_duration"] = int(time.Duration(defaultConfigMap["max_startup_backoff_duration"].(float64)) / time.Second) require.Equal(t, defaultConfigMap, resp.Data) @@ -459,6 +637,7 @@ func TestCertStorageMetrics(t *testing.T) { } cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + NumCores: 1, }) cluster.Start() defer cluster.Cleanup() @@ -501,9 +680,8 @@ func TestCertStorageMetrics(t *testing.T) { // Since certificate counts are off by default, we shouldn't see counts in the tidy status tidyStatus, err := client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "failed reading from tidy-status") + // backendUUID should exist, we need this for metrics backendUUID := tidyStatus.Data["internal_backend_uuid"].(string) // "current_cert_store_count", "current_revoked_cert_count" @@ -517,7 +695,8 @@ func TestCertStorageMetrics(t *testing.T) { } // Since certificate counts are off by default, those metrics should not exist yet - mostRecentInterval := 
inmemSink.Data()[len(inmemSink.Data())-1] + stableMetric := inmemSink.Data() + mostRecentInterval := stableMetric[len(stableMetric)-1] _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] if ok { t.Fatalf("Certificate counting should be off by default, but revoked cert count was emitted as a metric in an unconfigured mount") @@ -536,22 +715,22 @@ func TestCertStorageMetrics(t *testing.T) { "safety_buffer": "1s", "maintain_stored_certificate_counts": true, "publish_stored_certificate_count_metrics": false, + "min_startup_backoff_duration": "1s", + "max_startup_backoff_duration": "1s", }) require.NoError(t, err) // Reload the Mount - Otherwise Stored Certificate Counts Will Not Be Populated - _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ - "plugin": "pki", - }) + // Sealing cores as plugin reload triggers the race detector - VAULT-13635 + testhelpers.EnsureCoresSealed(t, cluster) + testhelpers.EnsureCoresUnsealed(t, cluster) - // By reading the auto-tidy endpoint, we ensure that initialize has completed (which has a write lock on auto-tidy) - _, err = client.Logical().Read("/pki/config/auto-tidy") - if err != nil { - t.Fatal(err) - } + // Wait until a tidy run has completed. + tidyStatus, _ = waitForTidyToFinish(t, client, "pki") // Since publish_stored_certificate_count_metrics is still false, these metrics should still not exist yet - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] if ok { t.Fatalf("Certificate counting should be off by default, but revoked cert count was emitted as a metric in an unconfigured mount") @@ -562,10 +741,6 @@ func TestCertStorageMetrics(t *testing.T) { } // But since certificate counting is on, the metrics should exist on tidyStatus endpoint: - tidyStatus, err = client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } // backendUUID should exist, we need this for metrics backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) // "current_cert_store_count", "current_revoked_cert_count" @@ -594,16 +769,16 @@ func TestCertStorageMetrics(t *testing.T) { "maintain_stored_certificate_counts": true, "publish_stored_certificate_count_metrics": true, }) - require.NoError(t, err) + require.NoError(t, err, "failed updating auto-tidy configuration") // Issue a cert and revoke it. 
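The metric assertions above were also reworked to capture `inmemSink.Data()` once instead of indexing it twice. A small sketch of why, using go-metrics directly; this assumes the armon/go-metrics package Vault vendors and a version whose gauge values are structs with a `Value` field:

```go
package main

import (
	"fmt"
	"log"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// An in-memory sink comparable to the one the test cluster wires up.
	inmemSink := metrics.NewInmemSink(10*time.Second, time.Minute)

	cfg := metrics.DefaultConfig("example")
	cfg.EnableHostname = false
	if _, err := metrics.NewGlobal(cfg, inmemSink); err != nil {
		log.Fatal(err)
	}

	metrics.SetGauge([]string{"secrets", "pki", "example_gauge"}, 42)

	// Capture Data() once. Each call returns a snapshot, so calling it twice
	// (once for len(), once for the index) can race with a new interval
	// rolling over in between and index past the end of the second slice.
	stableMetric := inmemSink.Data()
	mostRecentInterval := stableMetric[len(stableMetric)-1]
	for name, g := range mostRecentInterval.Gauges {
		fmt.Println(name, g.Value)
	}
}
```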
resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ "common_name": "example.com", "ttl": "10s", }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) + require.NoError(t, err, "failed to issue leaf certificate") + require.NotNil(t, resp, "nil response without error on issuing leaf certificate") + require.NotNil(t, resp.Data, "empty Data without error on issuing leaf certificate") require.NotEmpty(t, resp.Data["serial_number"]) require.NotEmpty(t, resp.Data["certificate"]) leafSerial := resp.Data["serial_number"].(string) @@ -620,22 +795,25 @@ func TestCertStorageMetrics(t *testing.T) { require.Empty(t, resp.Data["revocation_time_rfc3339"], "revocation_time_rfc3339 was not empty") require.Empty(t, resp.Data["issuer_id"], "issuer_id was not empty") - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + revokeResp, err := client.Logical().Write("pki/revoke", map[string]interface{}{ "serial_number": leafSerial, }) - require.NoError(t, err) + require.NoError(t, err, "failed revoking serial number: %s", leafSerial) + + for _, warning := range revokeResp.Warnings { + if strings.Contains(warning, "already expired; refusing to add to CRL") { + t.Skipf("Skipping test as we missed the revocation window of our leaf cert") + } + } // We read the auto-tidy endpoint again, to ensure any metrics logic has completed (lock on config) _, err = client.Logical().Read("/pki/config/auto-tidy") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "failed to read auto-tidy configuration") // Check Metrics After Cert Has Be Created and Revoked tidyStatus, err = client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "failed to read tidy-status") + backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] if !ok { @@ -649,7 +827,7 @@ func TestCertStorageMetrics(t *testing.T) { t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") } if revokedCertCount != json.Number("1") { - t.Fatalf("Revoked one certificate, but got a revoked cert store count of %v", revokedCertCount) + t.Fatalf("Revoked one certificate, but got a revoked cert store count of %v\n:%v", revokedCertCount, tidyStatus) } // This should now be initialized certCountError, ok := tidyStatus.Data["certificate_counting_error"] @@ -658,7 +836,8 @@ func TestCertStorageMetrics(t *testing.T) { } testhelpers.RetryUntil(t, newPeriod*5, func() error { - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] if !ok { return errors.New("turned on metrics, but revoked cert count was not emitted") @@ -677,50 +856,15 @@ func TestCertStorageMetrics(t *testing.T) { }) // Wait for cert to expire and the safety buffer to elapse. - time.Sleep(time.Until(leafCert.NotAfter) + 3*time.Second) + sleepFor := time.Until(leafCert.NotAfter) + 3*time.Second + t.Logf("%v: Sleeping for %v, leaf certificate expires: %v", time.Now().Format(time.RFC3339), sleepFor, leafCert.NotAfter) + time.Sleep(sleepFor) - // Wait for auto-tidy to run afterwards. 
- var foundTidyRunning string - var foundTidyFinished bool - timeoutChan := time.After(120 * time.Second) - for { - if foundTidyRunning != "" && foundTidyFinished { - break - } - - select { - case <-timeoutChan: - t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished) - default: - time.Sleep(250 * time.Millisecond) - - resp, err = client.Logical().Read("pki/tidy-status") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["state"]) - require.NotEmpty(t, resp.Data["time_started"]) - state := resp.Data["state"].(string) - started := resp.Data["time_started"].(string) - t.Logf("Resp: %v", resp.Data) - - // We want the _next_ tidy run after the cert expires. This - // means if we're currently finished when we hit this the - // first time, we want to wait for the next run. - if foundTidyRunning == "" { - foundTidyRunning = started - } else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" { - foundTidyFinished = true - } - } - } + _, lastRun := waitForTidyToFinish(t, client, "pki") + tidyStatus, _ = waitForTidyToFinishWithLastRun(t, client, "pki", lastRun) // After Tidy, Cert Store Count Should Still Be Available, and Be Updated: // Check Metrics After Cert Has Be Created and Revoked - tidyStatus, err = client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) // "current_cert_store_count", "current_revoked_cert_count" certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] @@ -739,7 +883,8 @@ func TestCertStorageMetrics(t *testing.T) { } testhelpers.RetryUntil(t, newPeriod*5, func() error { - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] if !ok { return errors.New("turned on metrics, but revoked cert count was not emitted") @@ -757,3 +902,423 @@ func TestCertStorageMetrics(t *testing.T) { return nil }) } + +// This test uses the default safety buffer with backdating. +func TestTidyAcmeWithBackdate(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx := context.Background() + + // Grab the mount UUID for sys/raw invocations. + pkiMount := findStorageMountUuid(t, client, "pki") + + // Register an Account, do nothing with it + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account with order/cert + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + t.Logf("got account URI: %v", acct.URI) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. 
+	markAuthorizationSuccess(t, client, acmeClient, acct, order)
+
+	goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating key for CSR")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+	require.NoError(t, err, "order finalization failed")
+	require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
+
+	acmeCert, err := x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert")
+
+	// -> Ensure we see it in storage. Since we don't have direct storage
+	// access, use sys/raw interface.
+	acmeThumbprintsPath := path.Join("sys/raw/logical", pkiMount, acmeThumbprintPrefix)
+	listResp, err := client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err, "failed listing ACME thumbprints")
+	require.NotEmpty(t, listResp.Data["keys"], "expected non-empty list response")
+
+	// Run Tidy
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	waitForTidyToFinish(t, client, "pki")
+
+	// Check that the Account is Still There, Still Valid.
+	account, err := acmeClient.GetReg(context.Background(), "" /* legacy unused param*/)
+	require.NoError(t, err, "failed looking up acme account")
+	require.Equal(t, acme.StatusValid, account.Status)
+
+	// Find the associated thumbprint
+	listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err)
+	require.NotNil(t, listResp)
+	thumbprintEntries := listResp.Data["keys"].([]interface{})
+	require.Equal(t, len(thumbprintEntries), 1)
+	thumbprint := thumbprintEntries[0].(string)
+
+	// Let "Time Pass"; this is a HACK: this function sys-writes to overwrite the date on objects in storage
+	duration := time.Until(acmeCert.NotAfter) + 31*24*time.Hour
+	accountId := acmeClient.KID[strings.LastIndex(string(acmeClient.KID), "/")+1:]
+	orderId := order.URI[strings.LastIndex(order.URI, "/")+1:]
+	backDateAcmeOrderSys(t, testCtx, client, string(accountId), orderId, duration, pkiMount)
+
+	// Run Tidy -> clean up order
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	tidyResp, _ := waitForTidyToFinish(t, client, "pki")
+
+	require.Equal(t, tidyResp.Data["acme_orders_deleted_count"], json.Number("1"),
+		"expected to delete a single ACME order: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_revoked_count"], json.Number("0"),
+		"no ACME account should have been revoked: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_deleted_count"], json.Number("0"),
+		"no ACME account should have been deleted: %v", tidyResp)
+
+	// Make sure our order is indeed deleted.
+	_, err = acmeClient.GetOrder(context.Background(), order.URI)
+	require.ErrorContains(t, err, "order does not exist")
+
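The counter checks here all follow the same shape: read `pki/tidy-status` and compare `json.Number` fields. A hypothetical helper (not part of this change) that collects the ACME counters in one place, assuming an authenticated `*api.Client`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

// acmeTidyCounts pulls the ACME-related counters out of pki/tidy-status.
// Vault's API layer decodes numbers as json.Number, hence the type asserts.
func acmeTidyCounts(client *api.Client) (map[string]json.Number, error) {
	resp, err := client.Logical().Read("pki/tidy-status")
	if err != nil {
		return nil, fmt.Errorf("reading tidy-status: %w", err)
	}
	if resp == nil {
		return nil, fmt.Errorf("nil response from tidy-status")
	}
	out := map[string]json.Number{}
	for _, field := range []string{
		"acme_orders_deleted_count",
		"acme_account_revoked_count",
		"acme_account_deleted_count",
	} {
		if n, ok := resp.Data[field].(json.Number); ok {
			out[field] = n
		}
	}
	return out, nil
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	counts, err := acmeTidyCounts(client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(counts)
}
```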
+	// Check that the Account is Still There, Still Valid.
+	account, err = acmeClient.GetReg(context.Background(), "" /* legacy unused param*/)
+	require.NoError(t, err, "failed looking up acme account")
+	require.Equal(t, acme.StatusValid, account.Status)
+
+	// Now back date the account to make sure we revoke it
+	backDateAcmeAccountSys(t, testCtx, client, thumbprint, duration, pkiMount)
+
+	// Run Tidy -> mark account revoked
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	tidyResp, _ = waitForTidyToFinish(t, client, "pki")
+	require.Equal(t, tidyResp.Data["acme_orders_deleted_count"], json.Number("0"),
+		"no ACME orders should have been deleted: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_revoked_count"], json.Number("1"),
+		"expected to revoke a single ACME account: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_deleted_count"], json.Number("0"),
+		"no ACME account should have been deleted: %v", tidyResp)
+
+	// Lookup our account to make sure we get the appropriate revoked status
+	account, err = acmeClient.GetReg(context.Background(), "" /* legacy unused param*/)
+	require.NoError(t, err, "failed looking up acme account")
+	require.Equal(t, acme.StatusRevoked, account.Status)
+
+	// Let "Time Pass"; this is a HACK: this function sys-writes to overwrite the date on objects in storage
+	backDateAcmeAccountSys(t, testCtx, client, thumbprint, duration, pkiMount)
+
+	// Run Tidy -> remove account
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	waitForTidyToFinish(t, client, "pki")
+
+	// Check Account No Longer Appears
+	listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err)
+	if listResp != nil {
+		thumbprintEntries = listResp.Data["keys"].([]interface{})
+		require.Equal(t, 0, len(thumbprintEntries))
+	}
+
+	// Nor Under Account
+	_, acctKID := path.Split(acct.URI)
+	acctPath := path.Join("sys/raw/logical", pkiMount, acmeAccountPrefix, acctKID)
+	t.Logf("account path: %v", acctPath)
+	getResp, err := client.Logical().ReadWithContext(testCtx, acctPath)
+	require.NoError(t, err)
+	require.Nil(t, getResp)
+}
+
+// This test uses a smaller safety buffer.
+func TestTidyAcmeWithSafetyBuffer(t *testing.T) {
+	t.Parallel()
+
+	// This would still be way easier if I could do both sides
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+	testCtx := context.Background()
+
+	// Grab the mount UUID for sys/raw invocations.
+	pkiMount := findStorageMountUuid(t, client, "pki")
+
+	// Register an Account, do nothing with it
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Create new account
+	t.Logf("Testing register on %s", baseAcmeURL)
+	acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+	t.Logf("got account URI: %v", acct.URI)
+
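For orientation, here is a rough standalone version of the ACME flow these tests drive through `getAcmeClientForCluster`, using `golang.org/x/crypto/acme` directly. The directory URL is illustrative, the server's TLS must be trusted by the process, and a real run needs the order's challenges completed before finalization succeeds:

```go
package main

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"log"

	"golang.org/x/crypto/acme"
)

func main() {
	ctx := context.Background()

	acctKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative directory URL for a PKI mount named "pki".
	client := &acme.Client{
		Key:          acctKey,
		DirectoryURL: "https://127.0.0.1:8200/v1/pki/acme/directory",
	}

	// Register an account, accepting the ToS unconditionally, as the tests do.
	acct, err := client.Register(ctx, &acme.Account{}, func(tosURL string) bool { return true })
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("account: %s", acct.URI)

	// Create an order for a wildcard identifier.
	order, err := client.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: "*.localdomain"}})
	if err != nil {
		log.Fatal(err)
	}

	// Once the authorizations are valid, finalize with a CSR signed by a
	// separate certificate key.
	certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	csr, err := x509.CreateCertificateRequest(rand.Reader,
		&x509.CertificateRequest{DNSNames: []string{"*.localdomain"}}, certKey)
	if err != nil {
		log.Fatal(err)
	}
	certs, _, err := client.CreateOrderCert(ctx, order.FinalizeURL, csr, true)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d certificates in bundle", len(certs))
}
```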
+	// -> Ensure we see it in storage. Since we don't have direct storage
+	// access, use sys/raw interface.
+	acmeThumbprintsPath := path.Join("sys/raw/logical", pkiMount, acmeThumbprintPrefix)
+	listResp, err := client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err, "failed listing ACME thumbprints")
+	require.NotEmpty(t, listResp.Data["keys"], "expected non-empty list response")
+	thumbprintEntries := listResp.Data["keys"].([]interface{})
+	require.Equal(t, len(thumbprintEntries), 1)
+
+	// Wait for the account to expire.
+	time.Sleep(2 * time.Second)
+
+	// Run Tidy -> mark account revoked
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme":                  true,
+		"acme_account_safety_buffer": "1s",
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	statusResp, _ := waitForTidyToFinish(t, client, "pki")
+	require.Equal(t, statusResp.Data["acme_account_revoked_count"], json.Number("1"), "expected to revoke a single ACME account")
+
+	// Wait for the account to expire.
+	time.Sleep(2 * time.Second)
+
+	// Run Tidy -> remove account
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme":                  true,
+		"acme_account_safety_buffer": "1s",
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	waitForTidyToFinish(t, client, "pki")
+
+	// Check Account No Longer Appears
+	listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err)
+	if listResp != nil {
+		thumbprintEntries = listResp.Data["keys"].([]interface{})
+		require.Equal(t, 0, len(thumbprintEntries))
+	}
+
+	// Nor Under Account
+	_, acctKID := path.Split(acct.URI)
+	acctPath := path.Join("sys/raw/logical", pkiMount, acmeAccountPrefix, acctKID)
+	t.Logf("account path: %v", acctPath)
+	getResp, err := client.Logical().ReadWithContext(testCtx, acctPath)
+	require.NoError(t, err)
+	require.Nil(t, getResp)
+}
+
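The backdating helpers that follow all perform the same read-decode-mutate-reencode-write cycle against `sys/raw/logical`. A generic sketch of that cycle; the helper name and the paths in `main` are hypothetical, and it assumes Vault was started with the raw storage endpoint enabled:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
	"path"

	"github.com/hashicorp/vault/api"
)

// rewriteRawEntry reads a storage entry through sys/raw, lets the caller
// mutate the decoded JSON, and writes it back base64-encoded, mirroring the
// backDate* helpers below.
func rewriteRawEntry(ctx context.Context, client *api.Client, mountUUID, storagePath string, mutate func(map[string]interface{})) error {
	raw := path.Join("sys/raw/logical", mountUUID, storagePath)

	resp, err := client.Logical().ReadWithContext(ctx, raw)
	if err != nil {
		return fmt.Errorf("reading %s: %w", raw, err)
	}
	if resp == nil {
		return fmt.Errorf("no entry at %s", raw)
	}

	var entry map[string]interface{}
	if err := json.Unmarshal([]byte(resp.Data["value"].(string)), &entry); err != nil {
		return err
	}

	mutate(entry)

	updated, err := json.Marshal(entry)
	if err != nil {
		return err
	}

	_, err = client.Logical().WriteWithContext(ctx, raw, map[string]interface{}{
		"value":    base64.StdEncoding.EncodeToString(updated),
		"encoding": "base64",
	})
	return err
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// The mount UUID and storage path here are illustrative placeholders.
	err = rewriteRawEntry(context.Background(), client,
		"00000000-0000-0000-0000-000000000000", "acme/accounts/example-kid",
		func(entry map[string]interface{}) {
			entry["account_created_date"] = "2020-01-01T00:00:00Z"
		})
	if err != nil {
		log.Fatal(err)
	}
}
```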
+// The backDate*Sys helpers below all work through the sys/raw/logical interface off of an API client.
+func backDateAcmeAccountSys(t *testing.T, testContext context.Context, client *api.Client, thumbprintString string, backdateAmount time.Duration, mount string) {
+	rawThumbprintPath := path.Join("sys/raw/logical/", mount, acmeThumbprintPrefix+thumbprintString)
+	thumbprintResp, err := client.Logical().ReadWithContext(testContext, rawThumbprintPath)
+	if err != nil {
+		t.Fatalf("unable to fetch thumbprint response at %v: %v", rawThumbprintPath, err)
+	}
+
+	var thumbprint acmeThumbprint
+	err = jsonutil.DecodeJSON([]byte(thumbprintResp.Data["value"].(string)), &thumbprint)
+	if err != nil {
+		t.Fatalf("unable to decode thumbprint response %v to find account entry: %v", thumbprintResp.Data, err)
+	}
+
+	accountPath := path.Join("sys/raw/logical", mount, acmeAccountPrefix+thumbprint.Kid)
+	accountResp, err := client.Logical().ReadWithContext(testContext, accountPath)
+	if err != nil {
+		t.Fatalf("unable to fetch account entry %v: %v", thumbprint.Kid, err)
+	}
+
+	var account acmeAccount
+	err = jsonutil.DecodeJSON([]byte(accountResp.Data["value"].(string)), &account)
+	if err != nil {
+		t.Fatalf("unable to decode acme account %v: %v", accountResp, err)
+	}
+
+	t.Logf("got account before update: %v", account)
+
+	account.AccountCreatedDate = backDate(account.AccountCreatedDate, backdateAmount)
+	account.MaxCertExpiry = backDate(account.MaxCertExpiry, backdateAmount)
+	account.AccountRevokedDate = backDate(account.AccountRevokedDate, backdateAmount)
+
+	t.Logf("got account after update: %v", account)
+
+	encodeJSON, err := jsonutil.EncodeJSON(account)
+	require.NoError(t, err, "failed encoding backdated account entry")
+	_, err = client.Logical().WriteWithContext(context.Background(), accountPath, map[string]interface{}{
+		"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+		"encoding": "base64",
+	})
+	if err != nil {
+		t.Fatalf("error saving backdated account entry at %v: %v", accountPath, err)
+	}
+
+	ordersPath := path.Join("sys/raw/logical", mount, acmeAccountPrefix, thumbprint.Kid, "/orders/")
+	ordersRaw, err := client.Logical().ListWithContext(context.Background(), ordersPath)
+	require.NoError(t, err, "failed listing orders")
+
+	if ordersRaw == nil {
+		t.Logf("skipping backdating orders as there are none")
+		return
+	}
+
+	require.NotNil(t, ordersRaw, "got no response data")
+	require.NotNil(t, ordersRaw.Data, "got no response data")
+
+	orders := ordersRaw.Data
+
+	for _, orderId := range orders["keys"].([]interface{}) {
+		backDateAcmeOrderSys(t, testContext, client, thumbprint.Kid, orderId.(string), backdateAmount, mount)
+	}
+
+	// No need to change certificate entries here - no time is stored on AcmeCertEntry
+}
+
+func backDateAcmeOrderSys(t *testing.T, testContext context.Context, client *api.Client, accountKid string, orderId string, backdateAmount time.Duration, mount string) {
+	rawOrderPath := path.Join("sys/raw/logical/", mount, acmeAccountPrefix, accountKid, "orders", orderId)
+	orderResp, err := client.Logical().ReadWithContext(testContext, rawOrderPath)
+	if err != nil {
+		t.Fatalf("unable to fetch order entry %v on account %v at %v: %v", orderId, accountKid, rawOrderPath, err)
+	}
+
+	var order *acmeOrder
+	err = jsonutil.DecodeJSON([]byte(orderResp.Data["value"].(string)), &order)
+	if err != nil {
+		t.Fatalf("error decoding order entry %v on account %v, %v produced: %v", orderId, accountKid, orderResp, err)
+	}
+
+	order.Expires = backDate(order.Expires, backdateAmount)
+	order.CertificateExpiry = backDate(order.CertificateExpiry, backdateAmount)
+
+	encodeJSON, err := jsonutil.EncodeJSON(order)
+	require.NoError(t, err, "failed encoding backdated order entry")
+	_, err = client.Logical().WriteWithContext(context.Background(), rawOrderPath, map[string]interface{}{
+		"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+		"encoding": "base64",
+	})
+	if err != nil {
+		t.Fatalf("error saving backdated order entry %v on account %v: %v", orderId, accountKid, err)
+	}
+
+	for _, authId := range order.AuthorizationIds {
+		backDateAcmeAuthorizationSys(t, testContext, client, accountKid, authId, backdateAmount, mount)
+	}
+}
+
+func backDateAcmeAuthorizationSys(t *testing.T, testContext context.Context, client *api.Client, accountKid string, authId string, backdateAmount time.Duration, mount string) {
+	rawAuthPath := path.Join("sys/raw/logical/", mount, acmeAccountPrefix, accountKid, "/authorizations/", authId)
+
+	authResp, err := client.Logical().ReadWithContext(testContext, rawAuthPath)
+	if err != nil {
+		t.Fatalf("unable to fetch authorization %v: %v", rawAuthPath, err)
+	}
+
+	var auth *ACMEAuthorization
+	err = jsonutil.DecodeJSON([]byte(authResp.Data["value"].(string)), &auth)
+	if err != nil {
+		t.Fatalf("error decoding auth %v, auth entry %v produced: %v", rawAuthPath, authResp, err)
+	}
+
+	expiry, err := auth.GetExpires()
+	if err != nil {
+		t.Fatalf("could not get expiry on %v: %v", rawAuthPath, err)
+	}
+	newExpiry := backDate(expiry, backdateAmount)
+	auth.Expires = newExpiry.Format(time.RFC3339)
+
+	encodeJSON, err := jsonutil.EncodeJSON(auth)
+	require.NoError(t, err, "failed encoding backdated authorization entry")
+	_, err = client.Logical().WriteWithContext(context.Background(), rawAuthPath, map[string]interface{}{
+		"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+		"encoding": "base64",
+	})
+	if err != nil {
+		t.Fatalf("error updating authorization date on %v: %v", rawAuthPath, err)
+	}
+}
+
+func backDate(original time.Time, change time.Duration) time.Time {
+	if original.IsZero() {
+		return original
+	}
+
+	zeroTime := time.Time{}
+
+	if original.Before(zeroTime.Add(change)) {
+		return zeroTime
+	}
+
+	return original.Add(-change)
+}
+
+func waitForTidyToFinish(t *testing.T, client *api.Client, mount string) (*api.Secret, time.Time) {
+	return waitForTidyToFinishWithLastRun(t, client, mount, time.Time{})
+}
+
+func waitForTidyToFinishWithLastRun(t *testing.T, client *api.Client, mount string, previousFinishTime time.Time) (*api.Secret, time.Time) {
+	t.Helper()
+
+	var statusResp *api.Secret
+	var currentFinishTime time.Time
+	testhelpers.RetryUntil(t, 30*time.Second, func() error {
+		var err error
+		tidyStatusPath := mount + "/tidy-status"
+		statusResp, err = client.Logical().Read(tidyStatusPath)
+		if err != nil {
+			return fmt.Errorf("failed reading path: %s: %w", tidyStatusPath, err)
+		}
+		if statusResp == nil {
+			return fmt.Errorf("got nil, nil response from: %s", tidyStatusPath)
+		}
+		if state, ok := statusResp.Data["state"]; !ok || state != "Finished" {
+			return fmt.Errorf("tidy has not finished, got state: %v", state)
+		}
+
+		if currentFinishTimeRaw, ok := statusResp.Data["time_finished"]; !ok {
+			return fmt.Errorf("tidy status did not contain a time_finished field")
+		} else {
+			if currentFinishTimeStr, ok := currentFinishTimeRaw.(string); !ok {
+				return fmt.Errorf("tidy status time_finished field was not a string, got %T", currentFinishTimeRaw)
+			} else {
+				currentFinishTime, err = time.Parse(time.RFC3339, currentFinishTimeStr)
+				if err != nil {
+					return fmt.Errorf("failed parsing time_finished value %q: %w", currentFinishTimeStr, err)
+				}
+				if !currentFinishTime.After(previousFinishTime) {
+					return fmt.Errorf("tidy status time_finished %v was not after previous time %v", currentFinishTime, previousFinishTime)
+				}
+			}
+		}
+
+		if errorOccurred, ok := statusResp.Data["error"]; !ok || !(errorOccurred == nil || errorOccurred == "") {
+			return fmt.Errorf("tidy status returned an error: %s", errorOccurred)
+		}
+
+		return nil
+	})
+
+	t.Logf("got tidy status: %v", statusResp.Data)
+	return statusResp, currentFinishTime
+}
diff --git a/builtin/logical/pki/periodic.go b/builtin/logical/pki/periodic.go
index 18032fbef09e..05c37b660eae 100644
--- a/builtin/logical/pki/periodic.go
+++ b/builtin/logical/pki/periodic.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package pki

@@ -10,75 +10,78 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/hashicorp/vault/builtin/logical/pki/revocation"
 	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
 )

 const (
 	minUnifiedTransferDelay = 30 * time.Minute
 )

-type unifiedTransferStatus struct {
+type UnifiedTransferStatus struct {
 	isRunning  atomic.Bool
 	lastRun    time.Time
 	forceRerun atomic.Bool
 }

-func (uts *unifiedTransferStatus) forceRun() {
+func (uts *UnifiedTransferStatus) forceRun() {
 	uts.forceRerun.Store(true)
 }

-func newUnifiedTransferStatus() *unifiedTransferStatus {
-	return &unifiedTransferStatus{}
+func newUnifiedTransferStatus() *UnifiedTransferStatus {
+	return &UnifiedTransferStatus{}
 }

 // runUnifiedTransfer meant to run as a background, this will process all and
 // send all missing local revocation entries to the unified space if the feature
 // is enabled.
func runUnifiedTransfer(sc *storageContext) { - b := sc.Backend - status := b.unifiedTransferStatus + status := sc.GetUnifiedTransferStatus() - isPerfStandby := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) + isPerfStandby := sc.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) - if isPerfStandby || b.System().LocalMount() { + if isPerfStandby || sc.System().LocalMount() { // We only do this on active enterprise nodes, when we aren't a local mount return } - config, err := b.crlBuilder.getConfigWithUpdate(sc) + config, err := sc.CrlBuilder().GetConfigWithUpdate(sc) if err != nil { - b.Logger().Error("failed to retrieve crl config from storage for unified transfer background process", + sc.Logger().Error("failed to retrieve crl config from storage for unified transfer background process", "error", err) return } - if !status.lastRun.IsZero() { - // We have run before, we only run again if we have - // been requested to forceRerun, and we haven't run since our - // minimum delay - if !(status.forceRerun.Load() && time.Since(status.lastRun) < minUnifiedTransferDelay) { - return - } - } - if !config.UnifiedCRL { // Feature is disabled, no need to run return } - clusterId, err := b.System().ClusterID(sc.Context) + clusterId, err := sc.System().ClusterID(sc.Context) if err != nil { - b.Logger().Error("failed to fetch cluster id for unified transfer background process", + sc.Logger().Error("failed to fetch cluster id for unified transfer background process", "error", err) return } if !status.isRunning.CompareAndSwap(false, true) { - b.Logger().Debug("an existing unified transfer process is already running") + sc.Logger().Debug("an existing unified transfer process is already running") return } defer status.isRunning.Store(false) + // Because access to lastRun is not locked, we need to delay this check + // until after we grab the isRunning CAS lock. + if !status.lastRun.IsZero() { + // We have run before, we only run again if we have + // been requested to forceRerun, and we haven't run since our + // minimum delay. + if !(status.forceRerun.Load() && time.Since(status.lastRun) < minUnifiedTransferDelay) { + return + } + } + // Reset our flag before we begin, we do this before we start as // we can't guarantee that we can properly parse/fix the error from an // error that comes in from the revoke API after that. 
This will
@@ -88,9 +91,18 @@ func runUnifiedTransfer(sc *storageContext) {

 	err = doUnifiedTransferMissingLocalSerials(sc, clusterId)
 	if err != nil {
-		b.Logger().Error("an error occurred running unified transfer", "error", err.Error())
+		sc.Logger().Error("an error occurred running unified transfer", "error", err.Error())
 		status.forceRerun.Store(true)
+	} else {
+		if config.EnableDelta {
+			err = doUnifiedTransferMissingDeltaWALSerials(sc, clusterId)
+			if err != nil {
+				sc.Logger().Error("an error occurred running unified transfer", "error", err.Error())
+				status.forceRerun.Store(true)
+			}
+		}
 	}
+
 	status.lastRun = time.Now()
 }
@@ -113,7 +125,7 @@ func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string)
 	errCount := 0
 	for i, serialNum := range localRevokedSerialNums {
 		if i%25 == 0 {
-			config, _ := sc.Backend.crlBuilder.getConfigWithUpdate(sc)
+			config, _ := sc.CrlBuilder().GetConfigWithUpdate(sc)
 			if config != nil && !config.UnifiedCRL {
 				return errors.New("unified crl has been disabled after we started, stopping")
 			}
@@ -122,14 +134,160 @@ func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string)
 		err := readRevocationEntryAndTransfer(sc, serialNum)
 		if err != nil {
 			errCount++
-			sc.Backend.Logger().Debug("Failed transferring local revocation to unified space",
+			sc.Logger().Error("Failed transferring local revocation to unified space",
 				"serial", serialNum, "error", err)
 		}
 	}

 	if errCount > 0 {
-		sc.Backend.Logger().Warn(fmt.Sprintf("Failed transfering %d local serials to unified storage", errCount))
+		sc.Logger().Warn(fmt.Sprintf("Failed transferring %d local serials to unified storage", errCount))
+	}
+
+	return nil
+}
+
+func doUnifiedTransferMissingDeltaWALSerials(sc *storageContext, clusterId string) error {
+	// We need to do a similar thing for Delta WAL entry certificates.
+	// When the delta WAL failed to write for one or more entries,
+	// we'll need to replicate these up to the primary cluster. When it
+	// has performed a new delta WAL build, it will empty storage and
+	// update to a last written WAL entry that exceeds what we've seen
+	// locally.
+	thisUnifiedWALEntryPath := unifiedDeltaWALPath + deltaWALLastRevokedSerialName
+	lastUnifiedWALEntry, err := getLastWALSerial(sc, thisUnifiedWALEntryPath)
+	if err != nil {
+		return fmt.Errorf("failed to fetch last cross-cluster unified revoked delta WAL serial number: %w", err)
+	}
+
+	lastLocalWALEntry, err := getLastWALSerial(sc, localDeltaWALLastRevokedSerial)
+	if err != nil {
+		return fmt.Errorf("failed to fetch last locally revoked delta WAL serial number: %w", err)
+	}
+
+	// We now need to transfer all the entries and then write the last WAL
+	// entry at the end. Start by listing all certificates; any missing
+	// certificates will be copied over and then the WAL entry will be
+	// updated once.
+	//
+	// We do not delete entries either locally or remotely, as either
+	// cluster could've rebuilt delta CRLs with out-of-sync information,
+	// removing some entries (and, we cannot differentiate between these
+	// two cases). On next full CRL rebuild (on either cluster), the state
+	// should get synchronized, and future delta CRLs after this function
+	// returns without issue will see the remaining entries.
+	//
+	// Lastly, we need to ensure we don't accidentally write any unified
+	// delta WAL entries that aren't present in the main cross-cluster
+	// revoked storage location. This would mean the above function failed
+	// to copy them for some reason, despite them presumably appearing
+	// locally.
+	_unifiedWALEntries, err := sc.Storage.List(sc.Context, unifiedDeltaWALPath)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster unified delta WAL storage: %w", err)
+	}
+	unifiedWALEntries := sliceToMapKey(_unifiedWALEntries)
+
+	_unifiedRevokedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster revoked certificates: %w", err)
+	}
+	unifiedRevokedSerials := sliceToMapKey(_unifiedRevokedSerials)
+
+	localWALEntries, err := sc.Storage.List(sc.Context, localDeltaWALPath)
+	if err != nil {
+		return fmt.Errorf("failed to list local delta WAL storage: %w", err)
+	}
+
+	if lastUnifiedWALEntry == lastLocalWALEntry && len(_unifiedWALEntries) == len(localWALEntries) {
+		// Writing the last revoked WAL entry is the last thing that we do.
+		// Because these entries match (across clusters) and we have the same
+		// number of entries, assume we don't have anything to sync and exit
+		// early.
+		//
+		// We need both checks as, in the event of PBPWF failing and then
+		// returning while more revocations are happening, we could have
+		// been scheduled to run, but then skip running (if only the first
+		// condition was checked) because a later revocation succeeded
+		// in writing a unified WAL entry, before we started replicating
+		// the rest back up.
+		//
+		// The downside of this approach is that, if the main cluster
+		// does a full rebuild in the meantime, we could re-sync more
+		// entries back up to the primary cluster that are already
+		// included in the complete CRL. Users can manually rebuild the
+		// full CRL (clearing these duplicate delta CRL entries) if this
+		// affects them.
+		return nil
+	}
+
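The transfer loop below diffs the local WAL listing against the unified view using `sliceToMapKey`-style sets for O(1) membership checks; a minimal illustration of the pattern:

```go
package main

import "fmt"

// toSet turns a listing into a set, the same trick sliceToMapKey performs
// so the loop can test membership without rescanning the slice.
func toSet(items []string) map[string]struct{} {
	set := make(map[string]struct{}, len(items))
	for _, item := range items {
		set[item] = struct{}{}
	}
	return set
}

func main() {
	local := []string{"serial-a", "serial-b", "serial-c"}
	unified := toSet([]string{"serial-a"})

	for _, serial := range local {
		if _, ok := unified[serial]; !ok {
			fmt.Println("needs copying:", serial)
		}
	}
}
```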
+	errCount := 0
+	for index, serial := range localWALEntries {
+		if index%25 == 0 {
+			config, _ := sc.CrlBuilder().GetConfigWithUpdate(sc)
+			if config != nil && (!config.UnifiedCRL || !config.EnableDelta) {
+				return errors.New("unified or delta CRLs have been disabled after we started, stopping")
+			}
+		}
+
+		if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName {
+			// Skip our special serial numbers.
+			continue
+		}
+
+		_, isAlreadyPresent := unifiedWALEntries[serial]
+		if isAlreadyPresent {
+			// Serial exists on both local and unified cluster. We're
+			// presuming we don't need to read and re-write these entries
+			// and that only missing entries need to be updated.
+			continue
+		}
+
+		_, isRevokedCopied := unifiedRevokedSerials[serial]
+		if !isRevokedCopied {
+			// We need to wait here to copy over.
+			errCount += 1
+			sc.Logger().Debug("Delta WAL exists locally, but corresponding cross-cluster full revocation entry is missing; skipping", "serial", serial)
+			continue
+		}
+
+		// All good: read the local entry and write to the remote variant.
+		localPath := localDeltaWALPath + serial
+		unifiedPath := unifiedDeltaWALPath + serial
+
+		entry, err := sc.Storage.Get(sc.Context, localPath)
+		if err != nil || entry == nil {
+			errCount += 1
+			sc.Logger().Error("Failed reading local delta WAL entry to copy to cross-cluster", "serial", serial, "err", err)
+			continue
+		}
+
+		entry.Key = unifiedPath
+		err = sc.Storage.Put(sc.Context, entry)
+		if err != nil {
+			errCount += 1
+			sc.Logger().Error("Failed syncing local delta WAL entry to cross-cluster unified delta WAL location", "serial", serial, "err", err)
+			continue
+		}
+	}
+
+	if errCount > 0 {
+		// See note above about why we don't fail here.
+		sc.Logger().Warn(fmt.Sprintf("Failed transferring %d local delta WAL serials to unified storage", errCount))
+		return nil
+	}
+
+	// Everything worked. Here, we can write over the delta WAL last revoked
+	// value. By using the earlier value, even if new revocations have
+	// occurred, we ensure any further missing entries can be handled in the
+	// next round.
+	lastRevSerial := lastWALInfo{Serial: lastLocalWALEntry}
+	lastWALEntry, err := logical.StorageEntryJSON(thisUnifiedWALEntryPath, lastRevSerial)
+	if err != nil {
+		return fmt.Errorf("unable to create cross-cluster unified last delta CRL WAL entry: %w", err)
+	}
+	if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil {
+		return fmt.Errorf("error saving cross-cluster unified last delta CRL WAL entry: %w", err)
 	}

 	return nil
@@ -137,17 +295,17 @@ func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string)

 func readRevocationEntryAndTransfer(sc *storageContext, serial string) error {
 	hyphenSerial := normalizeSerial(serial)
-	revInfo, err := sc.fetchRevocationInfo(hyphenSerial)
+	revInfo, err := fetchRevocationInfo(sc, hyphenSerial)
 	if err != nil {
 		return fmt.Errorf("failed loading revocation entry for serial: %s: %w", serial, err)
 	}
 	if revInfo == nil {
-		sc.Backend.Logger().Debug("no certificate revocation entry for serial", "serial", serial)
+		sc.Logger().Debug("no certificate revocation entry for serial", "serial", serial)
 		return nil
 	}

 	cert, err := x509.ParseCertificate(revInfo.CertificateBytes)
 	if err != nil {
-		sc.Backend.Logger().Debug("failed parsing certificate stored in revocation entry for serial",
+		sc.Logger().Debug("failed parsing certificate stored in revocation entry for serial",
 			"serial", serial, "error", err)
 		return nil
 	}
@@ -168,12 +326,12 @@ func readRevocationEntryAndTransfer(sc *storageContext, serial string) error {
 		return nil
 	}

-	entry := &unifiedRevocationEntry{
+	entry := &revocation.UnifiedRevocationEntry{
 		SerialNumber:      hyphenSerial,
 		CertExpiration:    cert.NotAfter,
 		RevocationTimeUTC: revocationTime,
 		CertificateIssuer: revInfo.CertificateIssuer,
 	}

-	return writeUnifiedRevocationEntry(sc, entry)
+	return revocation.WriteUnifiedRevocationEntry(sc.GetContext(), sc.GetStorage(), entry)
 }
diff --git a/builtin/logical/pki/pki_backend/common.go b/builtin/logical/pki/pki_backend/common.go
new file mode 100644
index 000000000000..6b7c642ada06
--- /dev/null
+++ b/builtin/logical/pki/pki_backend/common.go
@@ -0,0 +1,105 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki_backend
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/builtin/logical/pki/issuing"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+type SystemViewGetter interface {
+	System() logical.SystemView
+}
+
+type MountInfo interface {
+	BackendUUID() string
+}
+
+type Logger interface {
+	Logger() log.Logger
+}
+
+//go:generate enumer -type=RolePathPolicy -text -json -transform=kebab-case
+type RolePathPolicy int
+
+const (
+	RPPUnknown RolePathPolicy = iota
+	RPPSignVerbatim
+	RPPRole
+)
+
+var (
+	pathPolicyRolePrefix       = "role:"
+	pathPolicyRolePrefixLength = len(pathPolicyRolePrefix)
+)
+
+// GetRoleByPathOrPathPolicy loads an existing role, either by the 'role'
+// parameter within the field data if present, or by the values within the pathPolicy.
+func GetRoleByPathOrPathPolicy(ctx context.Context, s logical.Storage, data *framework.FieldData, pathPolicy string) (*issuing.RoleEntry, error) {
+	var role *issuing.RoleEntry
+
+	// The role name from the path is the highest priority
+	if roleName, ok := getRoleNameFromPath(data); ok {
+		var err error
+		role, err = issuing.GetRole(ctx, s, roleName)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		policyType, policyVal, err := GetPathPolicyType(pathPolicy)
+		if err != nil {
+			return nil, err
+		}
+
+		switch policyType {
+		case RPPRole:
+			role, err = issuing.GetRole(ctx, s, policyVal)
+			if err != nil {
+				return nil, err
+			}
+		case RPPSignVerbatim:
+			role = issuing.SignVerbatimRole()
+		default:
+			return nil, fmt.Errorf("unsupported policy type returned: %s from policy path: %s", policyType, pathPolicy)
+		}
+	}
+
+	return role, nil
+}
+
+func GetPathPolicyType(pathPolicy string) (RolePathPolicy, string, error) {
+	policy := strings.TrimSpace(pathPolicy)
+
+	switch {
+	case policy == "sign-verbatim":
+		return RPPSignVerbatim, "", nil
+	case strings.HasPrefix(policy, pathPolicyRolePrefix):
+		if policy == pathPolicyRolePrefix {
+			return RPPUnknown, "", fmt.Errorf("no role specified by policy %v", pathPolicy)
+		}
+		roleName := pathPolicy[pathPolicyRolePrefixLength:]
+		return RPPRole, roleName, nil
+	default:
+		return RPPUnknown, "", fmt.Errorf("string %v was not a valid default path policy", pathPolicy)
+	}
+}
+
+func getRoleNameFromPath(data *framework.FieldData) (string, bool) {
+	// If our schema doesn't include the parameter, bail
+	if _, ok := data.Schema["role"]; !ok {
+		return "", false
+	}
+
+	if roleName, ok := data.GetOk("role"); ok {
+		return roleName.(string), true
+	}
+
+	return "", false
+}
diff --git a/builtin/logical/pki/pki_backend/crl_builder.go b/builtin/logical/pki/pki_backend/crl_builder.go
new file mode 100644
index 000000000000..58c048094a59
--- /dev/null
+++ b/builtin/logical/pki/pki_backend/crl_builder.go
@@ -0,0 +1,20 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki_backend + +import "time" + +type CrlBuilderType interface { + RebuildIfForced(sc StorageContext) ([]string, error) + Rebuild(sc StorageContext, forceNew bool) ([]string, error) + RebuildDeltaCRLsHoldingLock(sc StorageContext, forceNew bool) ([]string, error) + GetPresentLocalDeltaWALForClearing(sc StorageContext) ([]string, error) + GetPresentUnifiedDeltaWALForClearing(sc StorageContext) ([]string, error) + GetConfigWithUpdate(sc StorageContext) (*CrlConfig, error) + ClearLocalDeltaWAL(sc StorageContext, walSerials []string) error + ClearUnifiedDeltaWAL(sc StorageContext, walSerials []string) error + + SetLastDeltaRebuildCheckTime(t time.Time) + ShouldInvalidate() bool +} diff --git a/builtin/logical/pki/pki_backend/crl_config.go b/builtin/logical/pki/pki_backend/crl_config.go new file mode 100644 index 000000000000..f37fbbbb3321 --- /dev/null +++ b/builtin/logical/pki/pki_backend/crl_config.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki_backend + +const latestCrlConfigVersion = 1 + +// CRLConfig holds basic CRL configuration information +type CrlConfig struct { + Version int `json:"version"` + Expiry string `json:"expiry"` + Disable bool `json:"disable"` + OcspDisable bool `json:"ocsp_disable"` + AutoRebuild bool `json:"auto_rebuild"` + AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` + OcspExpiry string `json:"ocsp_expiry"` + EnableDelta bool `json:"enable_delta"` + DeltaRebuildInterval string `json:"delta_rebuild_interval"` + UseGlobalQueue bool `json:"cross_cluster_revocation"` + UnifiedCRL bool `json:"unified_crl"` + UnifiedCRLOnExistingPaths bool `json:"unified_crl_on_existing_paths"` +} + +// Implicit default values for the config if it does not exist. +var DefaultCrlConfig = CrlConfig{ + Version: latestCrlConfigVersion, + Expiry: "72h", + Disable: false, + OcspDisable: false, + OcspExpiry: "12h", + AutoRebuild: false, + AutoRebuildGracePeriod: "12h", + EnableDelta: false, + DeltaRebuildInterval: "15m", + UseGlobalQueue: false, + UnifiedCRL: false, + UnifiedCRLOnExistingPaths: false, +} diff --git a/builtin/logical/pki/pki_backend/rolepathpolicy_enumer.go b/builtin/logical/pki/pki_backend/rolepathpolicy_enumer.go new file mode 100644 index 000000000000..305e34a6b547 --- /dev/null +++ b/builtin/logical/pki/pki_backend/rolepathpolicy_enumer.go @@ -0,0 +1,80 @@ +// Code generated by "enumer -type=RolePathPolicy -text -json -transform=kebab-case"; DO NOT EDIT. + +package pki_backend + +import ( + "encoding/json" + "fmt" +) + +const _RolePathPolicyName = "RPPUnknownRPPSignVerbatimRPPRole" + +var _RolePathPolicyIndex = [...]uint8{0, 10, 25, 32} + +func (i RolePathPolicy) String() string { + if i < 0 || i >= RolePathPolicy(len(_RolePathPolicyIndex)-1) { + return fmt.Sprintf("RolePathPolicy(%d)", i) + } + return _RolePathPolicyName[_RolePathPolicyIndex[i]:_RolePathPolicyIndex[i+1]] +} + +var _RolePathPolicyValues = []RolePathPolicy{0, 1, 2} + +var _RolePathPolicyNameToValueMap = map[string]RolePathPolicy{ + _RolePathPolicyName[0:10]: 0, + _RolePathPolicyName[10:25]: 1, + _RolePathPolicyName[25:32]: 2, +} + +// RolePathPolicyString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
+func RolePathPolicyString(s string) (RolePathPolicy, error) { + if val, ok := _RolePathPolicyNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to RolePathPolicy values", s) +} + +// RolePathPolicyValues returns all values of the enum +func RolePathPolicyValues() []RolePathPolicy { + return _RolePathPolicyValues +} + +// IsARolePathPolicy returns "true" if the value is listed in the enum definition. "false" otherwise +func (i RolePathPolicy) IsARolePathPolicy() bool { + for _, v := range _RolePathPolicyValues { + if i == v { + return true + } + } + return false +} + +// MarshalJSON implements the json.Marshaler interface for RolePathPolicy +func (i RolePathPolicy) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for RolePathPolicy +func (i *RolePathPolicy) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("RolePathPolicy should be a string, got %s", data) + } + + var err error + *i, err = RolePathPolicyString(s) + return err +} + +// MarshalText implements the encoding.TextMarshaler interface for RolePathPolicy +func (i RolePathPolicy) MarshalText() ([]byte, error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for RolePathPolicy +func (i *RolePathPolicy) UnmarshalText(text []byte) error { + var err error + *i, err = RolePathPolicyString(string(text)) + return err +} diff --git a/builtin/logical/pki/pki_backend/storage_context.go b/builtin/logical/pki/pki_backend/storage_context.go new file mode 100644 index 000000000000..d05b53306221 --- /dev/null +++ b/builtin/logical/pki/pki_backend/storage_context.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki_backend + +import ( + "context" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/sdk/logical" +) + +type StorageContext interface { + GetContext() context.Context + GetStorage() logical.Storage + + UseLegacyBundleCaStorage() bool + GetPkiManagedView() managed_key.PkiManagedKeyView + CrlBuilder() CrlBuilderType + GetCertificateCounter() issuing.CertificateCounter + + Logger() hclog.Logger +} diff --git a/builtin/logical/pki/revocation/revocation_entry.go b/builtin/logical/pki/revocation/revocation_entry.go new file mode 100644 index 000000000000..bdbd3c389cac --- /dev/null +++ b/builtin/logical/pki/revocation/revocation_entry.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package revocation + +import ( + "context" + "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/sdk/logical" +) + +type UnifiedRevocationEntry struct { + SerialNumber string `json:"-"` + CertExpiration time.Time `json:"certificate_expiration_utc"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + CertificateIssuer issuing.IssuerID `json:"issuer_id"` +} + +const ( + UnifiedRevocationReadPathPrefix = "unified-revocation/" + UnifiedRevocationWritePathPrefix = UnifiedRevocationReadPathPrefix + "{{clusterId}}/" +) + +func WriteUnifiedRevocationEntry(ctx context.Context, storage logical.Storage, ure *UnifiedRevocationEntry) error { + json, err := logical.StorageEntryJSON(UnifiedRevocationWritePathPrefix+parsing.NormalizeSerialForStorage(ure.SerialNumber), ure) + if err != nil { + return err + } + + return storage.Put(ctx, json) +} diff --git a/builtin/logical/pki/revocation/revoke.go b/builtin/logical/pki/revocation/revoke.go new file mode 100644 index 000000000000..deb786f7fad6 --- /dev/null +++ b/builtin/logical/pki/revocation/revoke.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package revocation + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + RevokedPath = "revoked/" +) + +type RevokerFactory interface { + GetRevoker(context.Context, logical.Storage) (Revoker, error) +} + +type RevokeCertInfo struct { + RevocationTime time.Time + Warnings []string +} + +type Revoker interface { + RevokeCert(cert *x509.Certificate) (RevokeCertInfo, error) + RevokeCertBySerial(serial string) (RevokeCertInfo, error) +} + +type RevocationInfo struct { + CertificateBytes []byte `json:"certificate_bytes"` + RevocationTime int64 `json:"revocation_time"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + CertificateIssuer issuing.IssuerID `json:"issuer_id"` +} + +func (ri *RevocationInfo) AssociateRevokedCertWithIsssuer(revokedCert *x509.Certificate, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate) bool { + for issuerId, issuerCert := range issuerIDCertMap { + if bytes.Equal(revokedCert.RawIssuer, issuerCert.RawSubject) { + if err := revokedCert.CheckSignatureFrom(issuerCert); err == nil { + // Valid mapping. Add it to the specified entry. + ri.CertificateIssuer = issuerId + return true + } + } + } + + return false +} + +// FetchIssuerMapForRevocationChecking fetches a map of IssuerID->parsed cert for revocation +// usage. Unlike other paths, this needs to handle the legacy bundle +// more gracefully than rejecting it outright. +func FetchIssuerMapForRevocationChecking(sc pki_backend.StorageContext) (map[issuing.IssuerID]*x509.Certificate, error) { + var err error + var issuers []issuing.IssuerID + + if !sc.UseLegacyBundleCaStorage() { + issuers, err = issuing.ListIssuers(sc.GetContext(), sc.GetStorage()) + if err != nil { + return nil, fmt.Errorf("could not fetch issuers list: %w", err) + } + } else { + // Hack: this isn't a real IssuerID, but it works for fetchCAInfo + // since it resolves the reference. 
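A condensed sketch of the association logic used by `AssociateRevokedCertWithIsssuer`: compare raw subject bytes as a cheap pre-filter, then confirm with an actual signature check so two issuers with identical subjects cannot be confused. The map key type here is illustrative:

```go
package main

import (
	"bytes"
	"crypto/x509"
	"fmt"
)

// findIssuer mirrors the association logic above: match on raw subject bytes
// first, then confirm with a real signature check so colliding subjects
// don't mis-attribute the certificate.
func findIssuer(leaf *x509.Certificate, candidates map[string]*x509.Certificate) (string, bool) {
	for id, ca := range candidates {
		if bytes.Equal(leaf.RawIssuer, ca.RawSubject) {
			if err := leaf.CheckSignatureFrom(ca); err == nil {
				return id, true
			}
		}
	}
	return "", false
}

func main() {
	// With no candidates, no issuer can be associated.
	fmt.Println(findIssuer(&x509.Certificate{}, nil))
}
```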
+ issuers = []issuing.IssuerID{issuing.LegacyBundleShimID} + } + + issuerIDCertMap := make(map[issuing.IssuerID]*x509.Certificate, len(issuers)) + for _, issuer := range issuers { + _, bundle, caErr := issuing.FetchCertBundleByIssuerId(sc.GetContext(), sc.GetStorage(), issuer, false) + if caErr != nil { + return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %w", issuer, caErr) + } + + if bundle == nil { + return nil, fmt.Errorf("faulty reference: %v - CA info not found", issuer) + } + + parsedBundle, err := issuing.ParseCABundle(sc.GetContext(), sc.GetPkiManagedView(), bundle) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + if parsedBundle.Certificate == nil { + return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} + } + + issuerIDCertMap[issuer] = parsedBundle.Certificate + } + + return issuerIDCertMap, nil +} diff --git a/builtin/logical/pki/secret_certs.go b/builtin/logical/pki/secret_certs.go index 11ebcd2ac7cb..7f75203b1f46 100644 --- a/builtin/logical/pki/secret_certs.go +++ b/builtin/logical/pki/secret_certs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -8,6 +8,7 @@ import ( "crypto/x509" "fmt" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -49,13 +50,13 @@ func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, _ return nil, fmt.Errorf("could not find serial in internal secret data") } - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() sc := b.makeStorageContext(ctx, req.Storage) serial := serialInt.(string) - certEntry, err := fetchCertBySerial(sc, "certs/", serial) + certEntry, err := fetchCertBySerial(sc, issuing.PathCerts, serial) if err != nil { return nil, err } @@ -77,7 +78,7 @@ func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, _ return nil, nil } - config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + config, err := sc.CrlBuilder().GetConfigWithUpdate(sc) if err != nil { return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) } diff --git a/builtin/logical/pki/storage.go b/builtin/logical/pki/storage.go index 5b231c1a8584..43c4853ba34c 100644 --- a/builtin/logical/pki/storage.go +++ b/builtin/logical/pki/storage.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -8,347 +8,144 @@ import ( "context" "crypto" "crypto/x509" + "errors" "fmt" - "sort" "strings" + "sync" "time" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) +var ErrStorageItemNotFound = errors.New("storage item not found") + const ( - storageKeyConfig = "config/keys" - storageIssuerConfig = "config/issuers" - keyPrefix = "config/key/" - issuerPrefix = "config/issuer/" - storageLocalCRLConfig = "crls/config" - storageUnifiedCRLConfig = "unified-crls/config" + storageKeyConfig = issuing.StorageKeyConfig + storageIssuerConfig = issuing.StorageIssuerConfig + keyPrefix = issuing.KeyPrefix + issuerPrefix = issuing.IssuerPrefix + storageLocalCRLConfig = issuing.StorageLocalCRLConfig + storageUnifiedCRLConfig = issuing.StorageUnifiedCRLConfig legacyMigrationBundleLogKey = "config/legacyMigrationBundleLog" - legacyCertBundlePath = "config/ca_bundle" + legacyCertBundlePath = issuing.LegacyCertBundlePath legacyCertBundleBackupPath = "config/ca_bundle.bak" - legacyCRLPath = "crl" - deltaCRLPath = "delta-crl" - deltaCRLPathSuffix = "-delta" - unifiedCRLPath = "unified-crl" - unifiedDeltaCRLPath = "unified-delta-crl" - unifiedCRLPathPrefix = "unified-" + + legacyCRLPath = issuing.LegacyCRLPath + deltaCRLPath = issuing.DeltaCRLPath + deltaCRLPathSuffix = issuing.DeltaCRLPathSuffix + unifiedCRLPath = issuing.UnifiedCRLPath + unifiedDeltaCRLPath = issuing.UnifiedDeltaCRLPath + unifiedCRLPathPrefix = issuing.UnifiedCRLPathPrefix autoTidyConfigPath = "config/auto-tidy" clusterConfigPath = "config/cluster" - // Used as a quick sanity check for a reference id lookups... 
- uuidLength = 36 + autoTidyLastRunPath = "config/auto-tidy-last-run" maxRolesToScanOnIssuerChange = 100 maxRolesToFindOnIssuerChange = 10 - - latestIssuerVersion = 1 ) -type keyID string - -func (p keyID) String() string { - return string(p) +func ToURLEntries(sc *storageContext, issuer issuing.IssuerID, c *issuing.AiaConfigEntry) (*certutil.URLEntries, error) { + return issuing.ToURLEntries(sc.Context, sc.Storage, issuer, c) } -type issuerID string - -func (p issuerID) String() string { - return string(p) -} - -type crlID string - -func (p crlID) String() string { - return string(p) +type storageContext struct { + Context context.Context + Storage logical.Storage + Backend *backend } -const ( - IssuerRefNotFound = issuerID("not-found") - KeyRefNotFound = keyID("not-found") -) - -type keyEntry struct { - ID keyID `json:"id"` - Name string `json:"name"` - PrivateKeyType certutil.PrivateKeyType `json:"private_key_type"` - PrivateKey string `json:"private_key"` -} +var _ pki_backend.StorageContext = (*storageContext)(nil) -func (e keyEntry) getManagedKeyUUID() (UUIDKey, error) { - if !e.isManagedPrivateKey() { - return "", errutil.InternalError{Err: "getManagedKeyId called on a key id %s (%s) "} +func (b *backend) makeStorageContext(ctx context.Context, s logical.Storage) *storageContext { + return &storageContext{ + Context: ctx, + Storage: s, + Backend: b, } - return extractManagedKeyId([]byte(e.PrivateKey)) } -func (e keyEntry) isManagedPrivateKey() bool { - return e.PrivateKeyType == certutil.ManagedPrivateKey +func (sc *storageContext) WithFreshTimeout(timeout time.Duration) (*storageContext, context.CancelFunc) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + return &storageContext{ + Context: ctx, + Storage: sc.Storage, + Backend: sc.Backend, + }, cancel } -type issuerUsage uint - -const ( - ReadOnlyUsage issuerUsage = iota - IssuanceUsage issuerUsage = 1 << iota - CRLSigningUsage issuerUsage = 1 << iota - OCSPSigningUsage issuerUsage = 1 << iota - - // When adding a new usage in the future, we'll need to create a usage - // mask field on the IssuerEntry and handle migrations to a newer mask, - // inferring a value for the new bits. - AllIssuerUsages = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage | OCSPSigningUsage -) - -var namedIssuerUsages = map[string]issuerUsage{ - "read-only": ReadOnlyUsage, - "issuing-certificates": IssuanceUsage, - "crl-signing": CRLSigningUsage, - "ocsp-signing": OCSPSigningUsage, +func (sc *storageContext) GetContext() context.Context { + return sc.Context } -func (i *issuerUsage) ToggleUsage(usages ...issuerUsage) { - for _, usage := range usages { - *i ^= usage - } +func (sc *storageContext) GetStorage() logical.Storage { + return sc.Storage } -func (i issuerUsage) HasUsage(usage issuerUsage) bool { - return (i & usage) == usage +func (sc *storageContext) Logger() hclog.Logger { + return sc.Backend.Logger() } -func (i issuerUsage) Names() string { - var names []string - var builtUsage issuerUsage - - // Return the known set of usages in a sorted order to not have Terraform state files flipping - // saying values are different when it's the same list in a different order. 
- keys := make([]string, 0, len(namedIssuerUsages)) - for k := range namedIssuerUsages { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, name := range keys { - usage := namedIssuerUsages[name] - if i.HasUsage(usage) { - names = append(names, name) - builtUsage.ToggleUsage(usage) - } - } - - if i != builtUsage { - // Found some unknown usage, we should indicate this in the names. - names = append(names, fmt.Sprintf("unknown:%v", i^builtUsage)) - } - - return strings.Join(names, ",") +func (sc *storageContext) System() logical.SystemView { + return sc.Backend.System() } -func NewIssuerUsageFromNames(names []string) (issuerUsage, error) { - var result issuerUsage - for index, name := range names { - usage, ok := namedIssuerUsages[name] - if !ok { - return ReadOnlyUsage, fmt.Errorf("unknown name for usage at index %v: %v", index, name) - } - - result.ToggleUsage(usage) - } - - return result, nil +func (sc *storageContext) CrlBuilder() pki_backend.CrlBuilderType { + return sc.Backend.CrlBuilder() } -type issuerEntry struct { - ID issuerID `json:"id"` - Name string `json:"name"` - KeyID keyID `json:"key_id"` - Certificate string `json:"certificate"` - CAChain []string `json:"ca_chain"` - ManualChain []issuerID `json:"manual_chain"` - SerialNumber string `json:"serial_number"` - LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior"` - Usage issuerUsage `json:"usage"` - RevocationSigAlg x509.SignatureAlgorithm `json:"revocation_signature_algorithm"` - Revoked bool `json:"revoked"` - RevocationTime int64 `json:"revocation_time"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - AIAURIs *aiaConfigEntry `json:"aia_uris,omitempty"` - LastModified time.Time `json:"last_modified"` - Version uint `json:"version"` +func (sc *storageContext) GetUnifiedTransferStatus() *UnifiedTransferStatus { + return sc.Backend.GetUnifiedTransferStatus() } -type internalCRLConfigEntry struct { - IssuerIDCRLMap map[issuerID]crlID `json:"issuer_id_crl_map"` - CRLNumberMap map[crlID]int64 `json:"crl_number_map"` - LastCompleteNumberMap map[crlID]int64 `json:"last_complete_number_map"` - CRLExpirationMap map[crlID]time.Time `json:"crl_expiration_map"` - LastModified time.Time `json:"last_modified"` - DeltaLastModified time.Time `json:"delta_last_modified"` - UseGlobalQueue bool `json:"cross_cluster_revocation"` +func (sc *storageContext) GetPkiManagedView() managed_key.PkiManagedKeyView { + return sc.Backend } -type keyConfigEntry struct { - DefaultKeyId keyID `json:"default"` +func (sc *storageContext) GetCertificateCounter() issuing.CertificateCounter { + return sc.Backend.GetCertificateCounter() } -type issuerConfigEntry struct { - // This new fetchedDefault field allows us to detect if the default - // issuer was modified, in turn dispatching the timestamp updater - // if necessary. 
- fetchedDefault issuerID `json:"-"` - DefaultIssuerId issuerID `json:"default"` - DefaultFollowsLatestIssuer bool `json:"default_follows_latest_issuer"` +func (sc *storageContext) UseLegacyBundleCaStorage() bool { + return sc.Backend.UseLegacyBundleCaStorage() } -type clusterConfigEntry struct { - Path string `json:"path"` - AIAPath string `json:"aia_path"` +func (sc *storageContext) GetRevokeStorageLock() *sync.RWMutex { + return sc.Backend.GetRevokeStorageLock() } -type aiaConfigEntry struct { - IssuingCertificates []string `json:"issuing_certificates"` - CRLDistributionPoints []string `json:"crl_distribution_points"` - OCSPServers []string `json:"ocsp_servers"` - EnableTemplating bool `json:"enable_templating"` +func (sc *storageContext) GetRole(name string) (*issuing.RoleEntry, error) { + return sc.Backend.GetRole(sc.Context, sc.Storage, name) } -func (c *aiaConfigEntry) toURLEntries(sc *storageContext, issuer issuerID) (*certutil.URLEntries, error) { - if len(c.IssuingCertificates) == 0 && len(c.CRLDistributionPoints) == 0 && len(c.OCSPServers) == 0 { - return &certutil.URLEntries{}, nil - } - - result := certutil.URLEntries{ - IssuingCertificates: c.IssuingCertificates[:], - CRLDistributionPoints: c.CRLDistributionPoints[:], - OCSPServers: c.OCSPServers[:], - } - - if c.EnableTemplating { - cfg, err := sc.getClusterConfig() - if err != nil { - return nil, fmt.Errorf("error fetching cluster-local address config: %w", err) - } - - for name, source := range map[string]*[]string{ - "issuing_certificates": &result.IssuingCertificates, - "crl_distribution_points": &result.CRLDistributionPoints, - "ocsp_servers": &result.OCSPServers, - } { - templated := make([]string, len(*source)) - for index, uri := range *source { - if strings.Contains(uri, "{{cluster_path}}") && len(cfg.Path) == 0 { - return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (path)") - } - if strings.Contains(uri, "{{cluster_aia_path}}") && len(cfg.AIAPath) == 0 { - return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (aia_path)") - } - if strings.Contains(uri, "{{issuer_id}}") && len(issuer) == 0 { - // Elide issuer AIA info as we lack an issuer_id. 
- return nil, fmt.Errorf("unable to template AIA URLs as we lack an issuer_id for this operation") - } - - uri = strings.ReplaceAll(uri, "{{cluster_path}}", cfg.Path) - uri = strings.ReplaceAll(uri, "{{cluster_aia_path}}", cfg.AIAPath) - uri = strings.ReplaceAll(uri, "{{issuer_id}}", issuer.String()) - templated[index] = uri - } - - if uri := validateURLs(templated); uri != "" { - return nil, fmt.Errorf("error validating templated %v; invalid URI: %v", name, uri) - } - - *source = templated - } - } - - return &result, nil -} - -type storageContext struct { - Context context.Context - Storage logical.Storage - Backend *backend +func (sc *storageContext) listKeys() ([]issuing.KeyID, error) { + return issuing.ListKeys(sc.Context, sc.Storage) } -func (b *backend) makeStorageContext(ctx context.Context, s logical.Storage) *storageContext { - return &storageContext{ - Context: ctx, - Storage: s, - Backend: b, - } -} - -func (sc *storageContext) listKeys() ([]keyID, error) { - strList, err := sc.Storage.List(sc.Context, keyPrefix) - if err != nil { - return nil, err - } - - keyIds := make([]keyID, 0, len(strList)) - for _, entry := range strList { - keyIds = append(keyIds, keyID(entry)) - } - - return keyIds, nil -} - -func (sc *storageContext) fetchKeyById(keyId keyID) (*keyEntry, error) { - if len(keyId) == 0 { - return nil, errutil.InternalError{Err: "unable to fetch pki key: empty key identifier"} - } - - entry, err := sc.Storage.Get(sc.Context, keyPrefix+keyId.String()) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: %v", err)} - } - if entry == nil { - return nil, errutil.UserError{Err: fmt.Sprintf("pki key id %s does not exist", keyId.String())} - } - - var key keyEntry - if err := entry.DecodeJSON(&key); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki key with id %s: %v", keyId.String(), err)} - } - - return &key, nil +func (sc *storageContext) fetchKeyById(keyId issuing.KeyID) (*issuing.KeyEntry, error) { + return issuing.FetchKeyById(sc.Context, sc.Storage, keyId) } -func (sc *storageContext) writeKey(key keyEntry) error { - keyId := key.ID - - json, err := logical.StorageEntryJSON(keyPrefix+keyId.String(), key) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) +func (sc *storageContext) writeKey(key issuing.KeyEntry) error { + return issuing.WriteKey(sc.Context, sc.Storage, key) } -func (sc *storageContext) deleteKey(id keyID) (bool, error) { - config, err := sc.getKeysConfig() - if err != nil { - return false, err - } - - wasDefault := false - if config.DefaultKeyId == id { - wasDefault = true - config.DefaultKeyId = keyID("") - if err := sc.setKeysConfig(config); err != nil { - return wasDefault, err - } - } - - return wasDefault, sc.Storage.Delete(sc.Context, keyPrefix+id.String()) +func (sc *storageContext) deleteKey(id issuing.KeyID) (bool, error) { + return issuing.DeleteKey(sc.Context, sc.Storage, id) } -func (sc *storageContext) importKey(keyValue string, keyName string, keyType certutil.PrivateKeyType) (*keyEntry, bool, error) { +func (sc *storageContext) importKey(keyValue string, keyName string, keyType certutil.PrivateKeyType) (*issuing.KeyEntry, bool, error) { // importKey imports the specified PEM-format key (from keyValue) into // the new PKI storage format. 
The first return field is a reference to // the new key; the second is whether or not the key already existed @@ -371,11 +168,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer // Get our public key from the current inbound key, to compare against all the other keys. var pkForImportingKey crypto.PublicKey if keyType == certutil.ManagedPrivateKey { - managedKeyUUID, err := extractManagedKeyId([]byte(keyValue)) - if err != nil { - return nil, false, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key: %v", err)} - } - pkForImportingKey, err = getManagedKeyPublicKey(sc.Context, sc.Backend, managedKeyUUID) + pkForImportingKey, err = managed_key.GetPublicKeyFromKeyBytes(sc.Context, sc.Backend, []byte(keyValue)) if err != nil { return nil, false, err } @@ -416,7 +209,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer } // Haven't found a key, so we've gotta create it and write it into storage. - var result keyEntry + var result issuing.KeyEntry result.ID = genKeyId() result.Name = keyName result.PrivateKey = keyValue @@ -503,263 +296,32 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer return &result, false, nil } -func (i issuerEntry) GetCertificate() (*x509.Certificate, error) { - cert, err := parseCertificateFromBytes([]byte(i.Certificate)) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse certificate from issuer: %s: %v", err.Error(), i.ID)} - } - - return cert, nil +func GetAIAURLs(sc *storageContext, i *issuing.IssuerEntry) (*certutil.URLEntries, error) { + return issuing.GetAIAURLs(sc.Context, sc.Storage, i) } -func (i issuerEntry) EnsureUsage(usage issuerUsage) error { - // We want to spit out a nice error message about missing usages. - if i.Usage.HasUsage(usage) { - return nil - } - - issuerRef := fmt.Sprintf("id:%v", i.ID) - if len(i.Name) > 0 { - issuerRef = fmt.Sprintf("%v / name:%v", issuerRef, i.Name) - } - - // These usages differ at some point in time. We've gotta find the first - // usage that differs and return a logical-sounding error message around - // that difference. - for name, candidate := range namedIssuerUsages { - if usage.HasUsage(candidate) && !i.Usage.HasUsage(candidate) { - return fmt.Errorf("requested usage %v for issuer [%v] but only had usage %v", name, issuerRef, i.Usage.Names()) - } - } - - // Maybe we have an unnamed usage that's requested. - return fmt.Errorf("unknown delta between usages: %v -> %v / for issuer [%v]", usage.Names(), i.Usage.Names(), issuerRef) +func (sc *storageContext) listIssuers() ([]issuing.IssuerID, error) { + return issuing.ListIssuers(sc.Context, sc.Storage) } -func (i issuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error { - // Hack: Go isn't kind enough expose its lovely signatureAlgorithmDetails - // informational struct for our usage. However, we don't want to actually - // fetch the private key and attempt a signature with this algo (as we'll - // mint new, previously unsigned material in the process that could maybe - // be potentially abused if it leaks). - // - // So... - // - // ...we maintain our own mapping of cert.PKI<->sigAlgos. Notably, we - // exclude DSA support as the PKI engine has never supported DSA keys. - if algo == x509.UnknownSignatureAlgorithm { - // Special cased to indicate upgrade and letting Go automatically - // chose the correct value. 
- return nil - } - - cert, err := i.GetCertificate() - if err != nil { - return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %w", err) - } - - switch cert.PublicKeyAlgorithm { - case x509.RSA: - switch algo { - case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, - x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, - x509.SHA512WithRSAPSS: - return nil - } - case x509.ECDSA: - switch algo { - case x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: - return nil - } - case x509.Ed25519: - switch algo { - case x509.PureEd25519: - return nil - } - } - - return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String()) -} - -func (i issuerEntry) GetAIAURLs(sc *storageContext) (*certutil.URLEntries, error) { - // Default to the per-issuer AIA URLs. - entries := i.AIAURIs - - // If none are set (either due to a nil entry or because no URLs have - // been provided), fall back to the global AIA URL config. - if entries == nil || (len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0) { - var err error - - entries, err = getGlobalAIAURLs(sc.Context, sc.Storage) - if err != nil { - return nil, err - } - } - - if entries == nil { - return &certutil.URLEntries{}, nil - } - - return entries.toURLEntries(sc, i.ID) -} - -func (sc *storageContext) listIssuers() ([]issuerID, error) { - strList, err := sc.Storage.List(sc.Context, issuerPrefix) - if err != nil { - return nil, err - } - - issuerIds := make([]issuerID, 0, len(strList)) - for _, entry := range strList { - issuerIds = append(issuerIds, issuerID(entry)) - } - - return issuerIds, nil -} - -func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { - if reference == defaultRef { - // Handle fetching the default key. - config, err := sc.getKeysConfig() - if err != nil { - return keyID("config-error"), err - } - if len(config.DefaultKeyId) == 0 { - return KeyRefNotFound, fmt.Errorf("no default key currently configured") - } - - return config.DefaultKeyId, nil - } - - // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. - if len(reference) == uuidLength { - entry, err := sc.Storage.Get(sc.Context, keyPrefix+reference) - if err != nil { - return keyID("key-read"), err - } - if entry != nil { - return keyID(reference), nil - } - } - - // ... than to pull all keys from storage. - keys, err := sc.listKeys() - if err != nil { - return keyID("list-error"), err - } - for _, keyId := range keys { - key, err := sc.fetchKeyById(keyId) - if err != nil { - return keyID("key-read"), err - } - - if key.Name == reference { - return key.ID, nil - } - } - - // Otherwise, we must not have found the key. - return KeyRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI key for reference: %v", reference)} -} - -// fetchIssuerById returns an issuerEntry based on issuerId, if none found an error is returned. 
-func (sc *storageContext) fetchIssuerById(issuerId issuerID) (*issuerEntry, error) { - if len(issuerId) == 0 { - return nil, errutil.InternalError{Err: "unable to fetch pki issuer: empty issuer identifier"} - } - - entry, err := sc.Storage.Get(sc.Context, issuerPrefix+issuerId.String()) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)} - } - if entry == nil { - return nil, errutil.UserError{Err: fmt.Sprintf("pki issuer id %s does not exist", issuerId.String())} - } - - var issuer issuerEntry - if err := entry.DecodeJSON(&issuer); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki issuer with id %s: %v", issuerId.String(), err)} - } - - return sc.upgradeIssuerIfRequired(&issuer), nil +func (sc *storageContext) resolveKeyReference(reference string) (issuing.KeyID, error) { + return issuing.ResolveKeyReference(sc.Context, sc.Storage, reference) } -func (sc *storageContext) upgradeIssuerIfRequired(issuer *issuerEntry) *issuerEntry { - // *NOTE*: Don't attempt to write out the issuer here as it may cause ErrReadOnly that will direct the - // request all the way up to the primary cluster which would be horrible for local cluster operations such - // as generating a leaf cert or a revoke. - // Also even though we could tell if we are the primary cluster's active node, we can't tell if we have the - // a full rw issuer lock, so it might not be safe to write. - if issuer.Version == latestIssuerVersion { - return issuer - } - - if issuer.Version == 0 { - // Upgrade at this step requires interrogating the certificate itself; - // if this decode fails, it indicates internal problems and the - // request will subsequently fail elsewhere. However, decoding this - // certificate is mildly expensive, so we only do it in the event of - // a Version 0 certificate. - cert, err := issuer.GetCertificate() - if err != nil { - return issuer - } - - hadCRL := issuer.Usage.HasUsage(CRLSigningUsage) - // Remove CRL signing usage if it exists on the issuer but doesn't - // exist in the KU of the x509 certificate. - if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 { - issuer.Usage.ToggleUsage(OCSPSigningUsage) - } - - // Handle our new OCSPSigning usage flag for earlier versions. If we - // had it (prior to removing it in this upgrade), we'll add the OCSP - // flag since EKUs don't matter. - if hadCRL && !issuer.Usage.HasUsage(OCSPSigningUsage) { - issuer.Usage.ToggleUsage(OCSPSigningUsage) - } - } - - issuer.Version = latestIssuerVersion - return issuer +// fetchIssuerById returns an IssuerEntry based on issuerId, if none found an error is returned. 
+func (sc *storageContext) fetchIssuerById(issuerId issuing.IssuerID) (*issuing.IssuerEntry, error) { + return issuing.FetchIssuerById(sc.Context, sc.Storage, issuerId) } -func (sc *storageContext) writeIssuer(issuer *issuerEntry) error { - issuerId := issuer.ID - if issuer.LastModified.IsZero() { - issuer.LastModified = time.Now().UTC() - } - - json, err := logical.StorageEntryJSON(issuerPrefix+issuerId.String(), issuer) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) +func (sc *storageContext) writeIssuer(issuer *issuing.IssuerEntry) error { + return issuing.WriteIssuer(sc.Context, sc.Storage, issuer) } -func (sc *storageContext) deleteIssuer(id issuerID) (bool, error) { - config, err := sc.getIssuersConfig() - if err != nil { - return false, err - } - - wasDefault := false - if config.DefaultIssuerId == id { - wasDefault = true - // Overwrite the fetched default issuer as we're going to remove this - // entry. - config.fetchedDefault = issuerID("") - config.DefaultIssuerId = issuerID("") - if err := sc.setIssuersConfig(config); err != nil { - return wasDefault, err - } - } - - return wasDefault, sc.Storage.Delete(sc.Context, issuerPrefix+id.String()) +func (sc *storageContext) deleteIssuer(id issuing.IssuerID) (bool, error) { + return issuing.DeleteIssuer(sc.Context, sc.Storage, id) } -func (sc *storageContext) importIssuer(certValue string, issuerName string) (*issuerEntry, bool, error) { +func (sc *storageContext) importIssuer(certValue string, issuerName string) (*issuing.IssuerEntry, bool, error) { // importIssuers imports the specified PEM-format certificate (from // certValue) into the new PKI storage format. The first return field is a // reference to the new issuer; the second is whether or not the issuer @@ -837,18 +399,18 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is // Haven't found an issuer, so we've gotta create it and write it into // storage. - var result issuerEntry + var result issuing.IssuerEntry result.ID = genIssuerId() result.Name = issuerName result.Certificate = certValue result.LeafNotAfterBehavior = certutil.ErrNotAfterBehavior - result.Usage.ToggleUsage(AllIssuerUsages) - result.Version = latestIssuerVersion + result.Usage.ToggleUsage(issuing.AllIssuerUsages) + result.Version = issuing.LatestIssuerVersion // If we lack relevant bits for CRL, prohibit it from being set // on the usage side. 
- if (issuerCert.KeyUsage&x509.KeyUsageCRLSign) == 0 && result.Usage.HasUsage(CRLSigningUsage) { - result.Usage.ToggleUsage(CRLSigningUsage) + if (issuerCert.KeyUsage&x509.KeyUsageCRLSign) == 0 && result.Usage.HasUsage(issuing.CRLSigningUsage) { + result.Usage.ToggleUsage(issuing.CRLSigningUsage) } // We shouldn't add CSRs or multiple certificates in this @@ -918,261 +480,54 @@ func areCertificatesEqual(cert1 *x509.Certificate, cert2 *x509.Certificate) bool return bytes.Equal(cert1.Raw, cert2.Raw) } -func (sc *storageContext) _setInternalCRLConfig(mapping *internalCRLConfigEntry, path string) error { - json, err := logical.StorageEntryJSON(path, mapping) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) +func (sc *storageContext) setLocalCRLConfig(mapping *issuing.InternalCRLConfigEntry) error { + return issuing.SetLocalCRLConfig(sc.Context, sc.Storage, mapping) } -func (sc *storageContext) setLocalCRLConfig(mapping *internalCRLConfigEntry) error { - return sc._setInternalCRLConfig(mapping, storageLocalCRLConfig) +func (sc *storageContext) setUnifiedCRLConfig(mapping *issuing.InternalCRLConfigEntry) error { + return issuing.SetUnifiedCRLConfig(sc.Context, sc.Storage, mapping) } -func (sc *storageContext) setUnifiedCRLConfig(mapping *internalCRLConfigEntry) error { - return sc._setInternalCRLConfig(mapping, storageUnifiedCRLConfig) +func (sc *storageContext) getLocalCRLConfig() (*issuing.InternalCRLConfigEntry, error) { + return issuing.GetLocalCRLConfig(sc.Context, sc.Storage) } -func (sc *storageContext) _getInternalCRLConfig(path string) (*internalCRLConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, path) - if err != nil { - return nil, err - } - - mapping := &internalCRLConfigEntry{} - if entry != nil { - if err := entry.DecodeJSON(mapping); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode cluster-local CRL configuration: %v", err)} - } - } - - if len(mapping.IssuerIDCRLMap) == 0 { - mapping.IssuerIDCRLMap = make(map[issuerID]crlID) - } - - if len(mapping.CRLNumberMap) == 0 { - mapping.CRLNumberMap = make(map[crlID]int64) - } - - if len(mapping.LastCompleteNumberMap) == 0 { - mapping.LastCompleteNumberMap = make(map[crlID]int64) - - // Since this might not exist on migration, we want to guess as - // to the last full CRL number was. This was likely the last - // value from CRLNumberMap if it existed, since we're just adding - // the mapping here in this block. - // - // After the next full CRL build, we will have set this value - // correctly, so it doesn't really matter in the long term if - // we're off here. - for id, number := range mapping.CRLNumberMap { - // Decrement by one, since CRLNumberMap is the future number, - // not the last built number. 
- mapping.LastCompleteNumberMap[id] = number - 1 - } - } - - if len(mapping.CRLExpirationMap) == 0 { - mapping.CRLExpirationMap = make(map[crlID]time.Time) - } - - return mapping, nil +func (sc *storageContext) getUnifiedCRLConfig() (*issuing.InternalCRLConfigEntry, error) { + return issuing.GetUnifiedCRLConfig(sc.Context, sc.Storage) } -func (sc *storageContext) getLocalCRLConfig() (*internalCRLConfigEntry, error) { - return sc._getInternalCRLConfig(storageLocalCRLConfig) +func (sc *storageContext) setKeysConfig(config *issuing.KeyConfigEntry) error { + return issuing.SetKeysConfig(sc.Context, sc.Storage, config) } -func (sc *storageContext) getUnifiedCRLConfig() (*internalCRLConfigEntry, error) { - return sc._getInternalCRLConfig(storageUnifiedCRLConfig) +func (sc *storageContext) getKeysConfig() (*issuing.KeyConfigEntry, error) { + return issuing.GetKeysConfig(sc.Context, sc.Storage) } -func (sc *storageContext) setKeysConfig(config *keyConfigEntry) error { - json, err := logical.StorageEntryJSON(storageKeyConfig, config) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) -} - -func (sc *storageContext) getKeysConfig() (*keyConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageKeyConfig) - if err != nil { - return nil, err - } - - keyConfig := &keyConfigEntry{} - if entry != nil { - if err := entry.DecodeJSON(keyConfig); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode key configuration: %v", err)} - } - } - - return keyConfig, nil -} - -func (sc *storageContext) setIssuersConfig(config *issuerConfigEntry) error { - json, err := logical.StorageEntryJSON(storageIssuerConfig, config) - if err != nil { - return err - } - - if err := sc.Storage.Put(sc.Context, json); err != nil { - return err - } - - if err := sc.changeDefaultIssuerTimestamps(config.fetchedDefault, config.DefaultIssuerId); err != nil { - return err - } - - return nil +func (sc *storageContext) setIssuersConfig(config *issuing.IssuerConfigEntry) error { + return issuing.SetIssuersConfig(sc.Context, sc.Storage, config) } -func (sc *storageContext) getIssuersConfig() (*issuerConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageIssuerConfig) - if err != nil { - return nil, err - } - - issuerConfig := &issuerConfigEntry{} - if entry != nil { - if err := entry.DecodeJSON(issuerConfig); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode issuer configuration: %v", err)} - } - } - issuerConfig.fetchedDefault = issuerConfig.DefaultIssuerId - - return issuerConfig, nil +func (sc *storageContext) getIssuersConfig() (*issuing.IssuerConfigEntry, error) { + return issuing.GetIssuersConfig(sc.Context, sc.Storage) } // Lookup within storage the value of reference, assuming the string is a reference to an issuer entry, -// returning the converted issuerID or an error if not found. This method will not properly resolve the +// returning the converted IssuerID or an error if not found. This method will not properly resolve the // special legacyBundleShimID value as we do not want to confuse our special value and a user-provided name of the // same value. -func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, error) { - if reference == defaultRef { - // Handle fetching the default issuer. 
- config, err := sc.getIssuersConfig() - if err != nil { - return issuerID("config-error"), err - } - if len(config.DefaultIssuerId) == 0 { - return IssuerRefNotFound, fmt.Errorf("no default issuer currently configured") - } - - return config.DefaultIssuerId, nil - } - - // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. - if len(reference) == uuidLength { - entry, err := sc.Storage.Get(sc.Context, issuerPrefix+reference) - if err != nil { - return issuerID("issuer-read"), err - } - if entry != nil { - return issuerID(reference), nil - } - } - - // ... than to pull all issuers from storage. - issuers, err := sc.listIssuers() - if err != nil { - return issuerID("list-error"), err - } - - for _, issuerId := range issuers { - issuer, err := sc.fetchIssuerById(issuerId) - if err != nil { - return issuerID("issuer-read"), err - } - - if issuer.Name == reference { - return issuer.ID, nil - } - } - - // Otherwise, we must not have found the issuer. - return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)} -} - -func (sc *storageContext) resolveIssuerCRLPath(reference string, unified bool) (string, error) { - if sc.Backend.useLegacyBundleCaStorage() { - return legacyCRLPath, nil - } - - issuer, err := sc.resolveIssuerReference(reference) - if err != nil { - return legacyCRLPath, err - } - - configPath := storageLocalCRLConfig - if unified { - configPath = storageUnifiedCRLConfig - } - - crlConfig, err := sc._getInternalCRLConfig(configPath) - if err != nil { - return legacyCRLPath, err - } - - if crlId, ok := crlConfig.IssuerIDCRLMap[issuer]; ok && len(crlId) > 0 { - path := fmt.Sprintf("crls/%v", crlId) - if unified { - path = unifiedCRLPathPrefix + path - } - - return path, nil - } - - return legacyCRLPath, fmt.Errorf("unable to find CRL for issuer: id:%v/ref:%v", issuer, reference) +func (sc *storageContext) resolveIssuerReference(reference string) (issuing.IssuerID, error) { + return issuing.ResolveIssuerReference(sc.Context, sc.Storage, reference) } // Builds a certutil.CertBundle from the specified issuer identifier, // optionally loading the key or not. This method supports loading legacy // bundles using the legacyBundleShimID issuerId, and if no entry is found will return an error. -func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) (*issuerEntry, *certutil.CertBundle, error) { - if id == legacyBundleShimID { - // We have not completed the migration, or started a request in legacy mode, so - // attempt to load the bundle from the legacy location - issuer, bundle, err := getLegacyCertBundle(sc.Context, sc.Storage) - if err != nil { - return nil, nil, err - } - if issuer == nil || bundle == nil { - return nil, nil, errutil.UserError{Err: "no legacy cert bundle exists"} - } - - return issuer, bundle, err - } - - issuer, err := sc.fetchIssuerById(id) - if err != nil { - return nil, nil, err - } - - var bundle certutil.CertBundle - bundle.Certificate = issuer.Certificate - bundle.CAChain = issuer.CAChain - bundle.SerialNumber = issuer.SerialNumber - - // Fetch the key if it exists. Sometimes we don't need the key immediately. 
- if loadKey && issuer.KeyID != keyID("") { - key, err := sc.fetchKeyById(issuer.KeyID) - if err != nil { - return nil, nil, err - } - - bundle.PrivateKeyType = key.PrivateKeyType - bundle.PrivateKey = key.PrivateKey - } - - return issuer, &bundle, nil +func (sc *storageContext) fetchCertBundleByIssuerId(id issuing.IssuerID, loadKey bool) (*issuing.IssuerEntry, *certutil.CertBundle, error) { + return issuing.FetchCertBundleByIssuerId(sc.Context, sc.Storage, id, loadKey) } -func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuerEntry, *keyEntry, error) { +func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuing.IssuerEntry, *issuing.KeyEntry, error) { myKey, _, err := sc.importKey(caBundle.PrivateKey, keyName, caBundle.PrivateKeyType) if err != nil { return nil, nil, err @@ -1181,7 +536,7 @@ func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerNam // We may have existing mounts that only contained a key with no certificate yet as a signed CSR // was never setup within the mount. if caBundle.Certificate == "" { - return &issuerEntry{}, myKey, nil + return &issuing.IssuerEntry{}, myKey, nil } myIssuer, _, err := sc.importIssuer(caBundle.Certificate, issuerName) @@ -1198,16 +553,16 @@ func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerNam return myIssuer, myKey, nil } -func genIssuerId() issuerID { - return issuerID(genUuid()) +func genIssuerId() issuing.IssuerID { + return issuing.IssuerID(genUuid()) } -func genKeyId() keyID { - return keyID(genUuid()) +func genKeyId() issuing.KeyID { + return issuing.KeyID(genUuid()) } -func genCRLId() crlID { - return crlID(genUuid()) +func genCRLId() issuing.CrlID { + return issuing.CrlID(genUuid()) } func genUuid() string { @@ -1255,7 +610,7 @@ func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout boo return false, 0, err } if entry != nil { // If nil, someone deleted an entry since we haven't taken a lock here so just continue - var role roleEntry + var role issuing.RoleEntry err = entry.DecodeJSON(&role) if err != nil { return false, inUseBy, err @@ -1276,15 +631,15 @@ func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout boo return false, inUseBy, nil } -func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { +func (sc *storageContext) getRevocationConfig() (*pki_backend.CrlConfig, error) { entry, err := sc.Storage.Get(sc.Context, "config/crl") if err != nil { return nil, err } - var result crlConfig + var result pki_backend.CrlConfig if entry == nil { - result = defaultCrlConfig + result = pki_backend.DefaultCrlConfig return &result, nil } @@ -1294,15 +649,15 @@ func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { if result.Version == 0 { // Automatically update existing configurations. 
- result.OcspDisable = defaultCrlConfig.OcspDisable - result.OcspExpiry = defaultCrlConfig.OcspExpiry - result.AutoRebuild = defaultCrlConfig.AutoRebuild - result.AutoRebuildGracePeriod = defaultCrlConfig.AutoRebuildGracePeriod + result.OcspDisable = pki_backend.DefaultCrlConfig.OcspDisable + result.OcspExpiry = pki_backend.DefaultCrlConfig.OcspExpiry + result.AutoRebuild = pki_backend.DefaultCrlConfig.AutoRebuild + result.AutoRebuildGracePeriod = pki_backend.DefaultCrlConfig.AutoRebuildGracePeriod result.Version = 1 } if result.Version == 1 { if result.DeltaRebuildInterval == "" { - result.DeltaRebuildInterval = defaultCrlConfig.DeltaRebuildInterval + result.DeltaRebuildInterval = pki_backend.DefaultCrlConfig.DeltaRebuildInterval } result.Version = 2 } @@ -1310,12 +665,13 @@ func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { // Depending on client version, it's possible that the expiry is unset. // This sets the default value to prevent issues in downstream code. if result.Expiry == "" { - result.Expiry = defaultCrlConfig.Expiry + result.Expiry = pki_backend.DefaultCrlConfig.Expiry } - if !constants.IsEnterprise && (result.UnifiedCRLOnExistingPaths || result.UnifiedCRL || result.UseGlobalQueue) { + isLocalMount := sc.System().LocalMount() + if (!constants.IsEnterprise || isLocalMount) && (result.UnifiedCRLOnExistingPaths || result.UnifiedCRL || result.UseGlobalQueue) { // An end user must have had Enterprise, enabled the unified config args and then downgraded to OSS. - sc.Backend.Logger().Warn("Not running Vault Enterprise, " + + sc.Logger().Warn("Not running Vault Enterprise or using a local mount, " + "disabling unified_crl, unified_crl_on_existing_paths and cross_cluster_revocation config flags.") result.UnifiedCRLOnExistingPaths = false result.UnifiedCRL = false @@ -1325,6 +681,20 @@ func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { return &result, nil } +func (sc *storageContext) setRevocationConfig(config *pki_backend.CrlConfig) error { + entry, err := logical.StorageEntryJSON("config/crl", config) + if err != nil { + return fmt.Errorf("failed building storage entry JSON: %w", err) + } + + err = sc.Storage.Put(sc.Context, entry) + if err != nil { + return fmt.Errorf("failed writing storage entry: %w", err) + } + + return nil +} + func (sc *storageContext) getAutoTidyConfig() (*tidyConfig, error) { entry, err := sc.Storage.Get(sc.Context, autoTidyConfigPath) if err != nil { @@ -1345,6 +715,14 @@ func (sc *storageContext) getAutoTidyConfig() (*tidyConfig, error) { result.IssuerSafetyBuffer = defaultTidyConfig.IssuerSafetyBuffer } + if result.MinStartupBackoff == 0 { + result.MinStartupBackoff = defaultTidyConfig.MinStartupBackoff + } + + if result.MaxStartupBackoff == 0 { + result.MaxStartupBackoff = defaultTidyConfig.MaxStartupBackoff + } + return &result, nil } @@ -1359,26 +737,8 @@ func (sc *storageContext) writeAutoTidyConfig(config *tidyConfig) error { return err } - sc.Backend.publishCertCountMetrics.Store(config.PublishMetrics) - - // To Potentially Disable Certificate Counting - if config.MaintainCount == false { - certCountWasEnabled := sc.Backend.certCountEnabled.Swap(config.MaintainCount) - if certCountWasEnabled { - sc.Backend.certsCounted.Store(true) - sc.Backend.certCountError = "Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts" - sc.Backend.possibleDoubleCountedSerials = nil // This won't stop a list operation, but will stop an expensive clean-up during initialize - 
sc.Backend.possibleDoubleCountedRevokedSerials = nil // This won't stop a list operation, but will stop an expensive clean-up during initialize - sc.Backend.certCount.Store(0) - sc.Backend.revokedCertCount.Store(0) - } - } else { // To Potentially Enable Certificate Counting - if sc.Backend.certCountEnabled.Load() == false { - // We haven't written "re-enable certificate counts" outside the initialize function - // Any call derived call to do so is likely to time out on ~2 million certs - sc.Backend.certCountError = "Certificate Counting Has Not Been Initialized, re-initialize this mount" - } - } + certCounter := sc.Backend.GetCertificateCounter() + certCounter.ReconfigureWithTidyConfig(config) return nil } @@ -1392,13 +752,13 @@ func (sc *storageContext) listRevokedCerts() ([]string, error) { return list, err } -func (sc *storageContext) getClusterConfig() (*clusterConfigEntry, error) { +func (sc *storageContext) getClusterConfig() (*issuing.ClusterConfigEntry, error) { entry, err := sc.Storage.Get(sc.Context, clusterConfigPath) if err != nil { return nil, err } - var result clusterConfigEntry + var result issuing.ClusterConfigEntry if entry == nil { return &result, nil } @@ -1410,7 +770,7 @@ func (sc *storageContext) getClusterConfig() (*clusterConfigEntry, error) { return &result, nil } -func (sc *storageContext) writeClusterConfig(config *clusterConfigEntry) error { +func (sc *storageContext) writeClusterConfig(config *issuing.ClusterConfigEntry) error { entry, err := logical.StorageEntryJSON(clusterConfigPath, config) if err != nil { return err @@ -1419,9 +779,44 @@ func (sc *storageContext) writeClusterConfig(config *issuing.ClusterConfigEntry) error { return sc.Storage.Put(sc.Context, entry) } -func (sc *storageContext) fetchRevocationInfo(serial string) (*revocationInfo, error) { - var revInfo *revocationInfo - revEntry, err := fetchCertBySerial(sc, revokedPath, serial) +// tidyLastRun tracks the various pieces of information around tidy on a specific cluster +type tidyLastRun struct { + LastRunTime time.Time +} + +func (sc *storageContext) getAutoTidyLastRun() (time.Time, error) { + entry, err := sc.Storage.Get(sc.Context, autoTidyLastRunPath) + if err != nil { + return time.Time{}, fmt.Errorf("failed getting auto tidy last run: %w", err) + } + if entry == nil { + return time.Time{}, nil + } + + var result tidyLastRun + if err = entry.DecodeJSON(&result); err != nil { + return time.Time{}, fmt.Errorf("failed parsing auto tidy last run: %w", err) + } + return result.LastRunTime, nil +} + +func (sc *storageContext) writeAutoTidyLastRun(lastRunTime time.Time) error { + lastRun := tidyLastRun{LastRunTime: lastRunTime} + entry, err := logical.StorageEntryJSON(autoTidyLastRunPath, lastRun) + if err != nil { + return fmt.Errorf("failed generating json for auto tidy last run: %w", err) + } + + if err := sc.Storage.Put(sc.Context, entry); err != nil { + return fmt.Errorf("failed writing auto tidy last run: %w", err) + } + + return nil +} + +func fetchRevocationInfo(sc pki_backend.StorageContext, serial string) (*revocation.RevocationInfo, error) { + var revInfo *revocation.RevocationInfo + revEntry, err := fetchCertBySerial(sc, revocation.RevokedPath, serial) if err != nil { return nil, err } diff --git a/builtin/logical/pki/storage_migrations.go b/builtin/logical/pki/storage_migrations.go index f4b9237266b7..de9b61a91587 100644 --- a/builtin/logical/pki/storage_migrations.go +++ b/builtin/logical/pki/storage_migrations.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -10,6 +10,7 @@ import ( "fmt" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -19,16 +20,16 @@ import ( // and we need to perform it again... const ( latestMigrationVersion = 2 - legacyBundleShimID = issuerID("legacy-entry-shim-id") - legacyBundleShimKeyID = keyID("legacy-entry-shim-key-id") + legacyBundleShimID = issuing.LegacyBundleShimID + legacyBundleShimKeyID = issuing.LegacyBundleShimKeyID ) type legacyBundleMigrationLog struct { - Hash string `json:"hash"` - Created time.Time `json:"created"` - CreatedIssuer issuerID `json:"issuer_id"` - CreatedKey keyID `json:"key_id"` - MigrationVersion int `json:"migrationVersion"` + Hash string `json:"hash"` + Created time.Time `json:"created"` + CreatedIssuer issuing.IssuerID `json:"issuer_id"` + CreatedKey issuing.KeyID `json:"key_id"` + MigrationVersion int `json:"migrationVersion"` } type migrationInfo struct { @@ -84,27 +85,44 @@ func migrateStorage(ctx context.Context, b *backend, s logical.Storage) error { return nil } - var issuerIdentifier issuerID - var keyIdentifier keyID + var issuerIdentifier issuing.IssuerID + var keyIdentifier issuing.KeyID sc := b.makeStorageContext(ctx, s) if migrationInfo.legacyBundle != nil { - // Generate a unique name for the migrated items in case things were to be re-migrated again - // for some weird reason in the future... - migrationName := fmt.Sprintf("current-%d", time.Now().Unix()) - - b.Logger().Info("performing PKI migration to new keys/issuers layout") - anIssuer, aKey, err := sc.writeCaBundle(migrationInfo.legacyBundle, migrationName, migrationName) - if err != nil { - return err + // When the legacy bundle still exists, there are three scenarios we + // need to worry about: + // + // 1. When we have no migration log, we definitely want to migrate. + haveNoLog := migrationInfo.migrationLog == nil + // 2. When we have an (empty) log and the version is zero, we want to + // migrate. + haveOldVersion := !haveNoLog && migrationInfo.migrationLog.MigrationVersion == 0 + // 3. When we have a log and the version is at least 1 (where this + // migration was introduced), we want to run the migration again + // only if the legacy bundle hash has changed. + isCurrentOrBetterVersion := !haveNoLog && migrationInfo.migrationLog.MigrationVersion >= 1 + haveChange := !haveNoLog && migrationInfo.migrationLog.Hash != migrationInfo.legacyBundleHash + haveVersionWithChange := isCurrentOrBetterVersion && haveChange + + if haveNoLog || haveOldVersion || haveVersionWithChange { + // Generate a unique name for the migrated items in case things were to be re-migrated again + // for some weird reason in the future... + migrationName := fmt.Sprintf("current-%d", time.Now().Unix()) + + b.Logger().Info("performing PKI migration to new keys/issuers layout") + anIssuer, aKey, err := sc.writeCaBundle(migrationInfo.legacyBundle, migrationName, migrationName) + if err != nil { + return err + } + b.Logger().Info("Migration generated the following ids and set them as defaults", + "issuer id", anIssuer.ID, "key id", aKey.ID) + issuerIdentifier = anIssuer.ID + keyIdentifier = aKey.ID + + // Since we do not have all the mount information available we must schedule + // the CRL to be rebuilt at a later time.
+ b.CrlBuilder().requestRebuildIfActiveNode(b) } - b.Logger().Info("Migration generated the following ids and set them as defaults", - "issuer id", anIssuer.ID, "key id", aKey.ID) - issuerIdentifier = anIssuer.ID - keyIdentifier = aKey.ID - - // Since we do not have all the mount information available we must schedule - // the CRL to be rebuilt at a later time. - b.crlBuilder.requestRebuildIfActiveNode(b) } if migrationInfo.migrationLog != nil && migrationInfo.migrationLog.MigrationVersion == 1 { @@ -185,33 +203,6 @@ func setLegacyBundleMigrationLog(ctx context.Context, s logical.Storage, lbm *le return s.Put(ctx, json) } -func getLegacyCertBundle(ctx context.Context, s logical.Storage) (*issuerEntry, *certutil.CertBundle, error) { - entry, err := s.Get(ctx, legacyCertBundlePath) - if err != nil { - return nil, nil, err - } - - if entry == nil { - return nil, nil, nil - } - - cb := &certutil.CertBundle{} - err = entry.DecodeJSON(cb) - if err != nil { - return nil, nil, err - } - - // Fake a storage entry with backwards compatibility in mind. - issuer := &issuerEntry{ - ID: legacyBundleShimID, - KeyID: legacyBundleShimKeyID, - Name: "legacy-entry-shim", - Certificate: cb.Certificate, - CAChain: cb.CAChain, - SerialNumber: cb.SerialNumber, - LeafNotAfterBehavior: certutil.ErrNotAfterBehavior, - } - issuer.Usage.ToggleUsage(AllIssuerUsages) - - return issuer, cb, nil +func getLegacyCertBundle(ctx context.Context, s logical.Storage) (*issuing.IssuerEntry, *certutil.CertBundle, error) { + return issuing.GetLegacyCertBundle(ctx, s) } diff --git a/builtin/logical/pki/storage_migrations_test.go b/builtin/logical/pki/storage_migrations_test.go index 754f3993d14b..d5f297874f80 100644 --- a/builtin/logical/pki/storage_migrations_test.go +++ b/builtin/logical/pki/storage_migrations_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/require" @@ -23,7 +24,7 @@ func Test_migrateStorageEmptyStorage(t *testing.T) { // Reset the version the helper above set to 1. b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") request := &logical.InitializationRequest{Storage: s} err := b.initialize(ctx, request) @@ -48,7 +49,7 @@ func Test_migrateStorageEmptyStorage(t *testing.T) { require.Empty(t, logEntry.CreatedIssuer) require.Empty(t, logEntry.CreatedKey) - require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + require.False(t, b.UseLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") // Make sure we can re-run the migration without issues request = &logical.InitializationRequest{Storage: s} @@ -72,7 +73,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") bundle := genCertBundle(t, b, s) // Clear everything except for the key @@ -106,7 +107,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { "Hash value (%s) should not have been empty", logEntry.Hash) require.True(t, startTime.Before(logEntry.Created), "created log entry time (%v) was before our start time(%v)?", logEntry.Created, startTime) - require.Equal(t, logEntry.CreatedIssuer, issuerID("")) + require.Equal(t, logEntry.CreatedIssuer, issuing.IssuerID("")) require.Equal(t, logEntry.CreatedKey, keyIds[0]) keyId := keyIds[0] @@ -126,11 +127,11 @@ func Test_migrateStorageOnlyKey(t *testing.T) { // Make sure we setup the default values keysConfig, err := sc.getKeysConfig() require.NoError(t, err) - require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) + require.Equal(t, &issuing.KeyConfigEntry{DefaultKeyId: keyId}, keysConfig) issuersConfig, err := sc.getIssuersConfig() require.NoError(t, err) - require.Equal(t, issuerID(""), issuersConfig.DefaultIssuerId) + require.Equal(t, issuing.IssuerID(""), issuersConfig.DefaultIssuerId) // Make sure if we attempt to re-run the migration nothing happens... err = migrateStorage(ctx, b, s) @@ -142,7 +143,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { require.Equal(t, logEntry.Created, logEntry2.Created) require.Equal(t, logEntry.Hash, logEntry2.Hash) - require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + require.False(t, b.UseLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") } func Test_migrateStorageSimpleBundle(t *testing.T) { @@ -154,7 +155,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") bundle := genCertBundle(t, b, s) json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) @@ -204,7 +205,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { require.Equal(t, keyId, issuer.KeyID) require.Empty(t, issuer.ManualChain) require.Equal(t, []string{bundle.Certificate + "\n"}, issuer.CAChain) - require.Equal(t, AllIssuerUsages, issuer.Usage) + require.Equal(t, issuing.AllIssuerUsages, issuer.Usage) require.Equal(t, certutil.ErrNotAfterBehavior, issuer.LeafNotAfterBehavior) require.Equal(t, keyId, key.ID) @@ -219,7 +220,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { // Make sure we setup the default values keysConfig, err := sc.getKeysConfig() require.NoError(t, err) - require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) + require.Equal(t, &issuing.KeyConfigEntry{DefaultKeyId: keyId}, keysConfig) issuersConfig, err := sc.getIssuersConfig() require.NoError(t, err) @@ -235,7 +236,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { require.Equal(t, logEntry.Created, logEntry2.Created) require.Equal(t, logEntry.Hash, logEntry2.Hash) - require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + require.False(t, b.UseLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") // Make sure we can re-process a migration from scratch for whatever reason err = s.Delete(ctx, legacyMigrationBundleLogKey) @@ -296,8 +297,8 @@ func TestMigration_OnceChainRebuild(t *testing.T) { // // Afterwards, we mutate these issuers to only point at themselves and // write back out. - var rootIssuerId issuerID - var intIssuerId issuerID + var rootIssuerId issuing.IssuerID + var intIssuerId issuing.IssuerID for _, issuerId := range issuerIds { issuer, err := sc.fetchIssuerById(issuerId) require.NoError(t, err) @@ -368,7 +369,7 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { b, s := CreateBackendWithStorage(t) // Reset the version the helper above set to 1. b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") bundle := genCertBundle(t, b, s) json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) @@ -601,7 +602,7 @@ func TestBackupBundle(t *testing.T) { // Reset the version the helper above set to 1. b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") // Create an empty request and tidy configuration for us. req := &logical.Request{ @@ -777,6 +778,98 @@ func TestBackupBundle(t *testing.T) { require.NotEmpty(t, keyIds) } +func TestDeletedIssuersPostMigration(t *testing.T) { + // We want to simulate the following scenario: + // + // 1.10.x: -> Create a CA. + // 1.11.0: -> Migrate to new issuer layout but version 1. + // -> Delete existing issuers, create new ones. + // (now): -> Migrate to version 2 layout, make sure we don't see + // re-migration. 
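+ // Re-migration here would be a regression: with a version-1 log whose + // legacy bundle hash is unchanged, migrateStorage must not re-import + // (and thereby revive) the deleted issuers and keys.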
+ + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. + b.pkiStorageVersion.Store(0) + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + // Create a legacy CA bundle and write it out. + bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + legacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // Do a migration; this should provision an issuer and key. + initReq := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Hack: reset the version to 1, to simulate a pre-version-2 migration + // log. + info, err := getMigrationInfo(sc.Context, sc.Storage) + require.NoError(t, err, "failed to read migration info") + info.migrationLog.MigrationVersion = 1 + err = setLegacyBundleMigrationLog(sc.Context, sc.Storage, info.migrationLog) + require.NoError(t, err, "failed to write migration info") + + // Now delete all issuers and keys and create some new ones. + for _, issuerId := range issuerIds { + deleted, err := sc.deleteIssuer(issuerId) + require.True(t, deleted, "expected it to be deleted") + require.NoError(t, err, "error removing issuer") + } + for _, keyId := range keyIds { + deleted, err := sc.deleteKey(keyId) + require.True(t, deleted, "expected it to be deleted") + require.NoError(t, err, "error removing key") + } + emptyIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.Empty(t, emptyIssuers) + emptyKeys, err := sc.listKeys() + require.NoError(t, err) + require.Empty(t, emptyKeys) + + // Create a new issuer + key. + bundle = genCertBundle(t, b, s) + _, _, err = sc.writeCaBundle(bundle, "", "") + require.NoError(t, err) + + // List which issuers + keys we currently have. + postDeletionIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, postDeletionIssuers) + postDeletionKeys, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, postDeletionKeys) + + // Now do another migration from 1->2. This should retain the newly + // created issuers+keys, but not revive any deleted ones. 
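+ // The legacy bundle on disk (and hence its hash) is unchanged, so this + // should only bump the migration log version without a fresh import.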
+ err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + postMigrationIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, postMigrationIssuers) + require.Equal(t, postMigrationIssuers, postDeletionIssuers, "regression failed: expected second migration from v1->v2 to not introduce new issuers") + postMigrationKeys, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, postMigrationKeys) + require.Equal(t, postMigrationKeys, postDeletionKeys, "regression failed: expected second migration from v1->v2 to not introduce new keys") +} + // requireFailInMigration validate that we fail the operation with the appropriate error message to the end-user func requireFailInMigration(t *testing.T, b *backend, s logical.Storage, operation logical.Operation, path string) { resp, err := b.HandleRequest(context.Background(), &logical.Request{ diff --git a/builtin/logical/pki/storage_test.go b/builtin/logical/pki/storage_test.go index 625c046d00c8..f51ed6b496d3 100644 --- a/builtin/logical/pki/storage_test.go +++ b/builtin/logical/pki/storage_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -22,27 +23,27 @@ func Test_ConfigsRoundTrip(t *testing.T) { sc := b.makeStorageContext(ctx, s) // Create an empty key, issuer for testing. - key := keyEntry{ID: genKeyId()} + key := issuing.KeyEntry{ID: genKeyId()} err := sc.writeKey(key) require.NoError(t, err) - issuer := &issuerEntry{ID: genIssuerId()} + issuer := &issuing.IssuerEntry{ID: genIssuerId()} err = sc.writeIssuer(issuer) require.NoError(t, err) // Verify we handle nothing stored properly keyConfigEmpty, err := sc.getKeysConfig() require.NoError(t, err) - require.Equal(t, &keyConfigEntry{}, keyConfigEmpty) + require.Equal(t, &issuing.KeyConfigEntry{}, keyConfigEmpty) issuerConfigEmpty, err := sc.getIssuersConfig() require.NoError(t, err) - require.Equal(t, &issuerConfigEntry{}, issuerConfigEmpty) + require.Equal(t, &issuing.IssuerConfigEntry{}, issuerConfigEmpty) // Now attempt to store and reload properly - origKeyConfig := &keyConfigEntry{ + origKeyConfig := &issuing.KeyConfigEntry{ DefaultKeyId: key.ID, } - origIssuerConfig := &issuerConfigEntry{ + origIssuerConfig := &issuing.IssuerConfigEntry{ DefaultIssuerId: issuer.ID, } @@ -98,12 +99,12 @@ func Test_IssuerRoundTrip(t *testing.T) { keys, err := sc.listKeys() require.NoError(t, err) - require.ElementsMatch(t, []keyID{key1.ID, key2.ID}, keys) + require.ElementsMatch(t, []issuing.KeyID{key1.ID, key2.ID}, keys) issuers, err := sc.listIssuers() require.NoError(t, err) - require.ElementsMatch(t, []issuerID{issuer1.ID, issuer2.ID}, issuers) + require.ElementsMatch(t, []issuing.IssuerID{issuer1.ID, issuer2.ID}, issuers) } func Test_KeysIssuerImport(t *testing.T) { @@ -183,7 +184,7 @@ func Test_IssuerUpgrade(t *testing.T) { // Make sure that we add OCSP signing to v0 issuers if CRLSigning is enabled issuer, _ := genIssuerAndKey(t, b, s) issuer.Version = 0 - issuer.Usage.ToggleUsage(OCSPSigningUsage) + issuer.Usage.ToggleUsage(issuing.OCSPSigningUsage) err := sc.writeIssuer(&issuer) require.NoError(t, err, "failed writing out issuer") @@ -192,13 +193,13 @@ func 
Test_IssuerUpgrade(t *testing.T) { require.NoError(t, err, "failed fetching issuer") require.Equal(t, uint(1), newIssuer.Version) - require.True(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) + require.True(t, newIssuer.Usage.HasUsage(issuing.OCSPSigningUsage)) // If CRLSigning is not present on a v0, we should not have OCSP signing after upgrade. issuer, _ = genIssuerAndKey(t, b, s) issuer.Version = 0 - issuer.Usage.ToggleUsage(OCSPSigningUsage) - issuer.Usage.ToggleUsage(CRLSigningUsage) + issuer.Usage.ToggleUsage(issuing.OCSPSigningUsage) + issuer.Usage.ToggleUsage(issuing.CRLSigningUsage) err = sc.writeIssuer(&issuer) require.NoError(t, err, "failed writing out issuer") @@ -207,15 +208,15 @@ func Test_IssuerUpgrade(t *testing.T) { require.NoError(t, err, "failed fetching issuer") require.Equal(t, uint(1), newIssuer.Version) - require.False(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) + require.False(t, newIssuer.Usage.HasUsage(issuing.OCSPSigningUsage)) } -func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, keyEntry) { +func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuing.IssuerEntry, issuing.KeyEntry) { certBundle := genCertBundle(t, b, s) keyId := genKeyId() - pkiKey := keyEntry{ + pkiKey := issuing.KeyEntry{ ID: keyId, PrivateKeyType: certBundle.PrivateKeyType, PrivateKey: strings.TrimSpace(certBundle.PrivateKey) + "\n", @@ -223,14 +224,14 @@ func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, issuerId := genIssuerId() - pkiIssuer := issuerEntry{ + pkiIssuer := issuing.IssuerEntry{ ID: issuerId, KeyID: keyId, Certificate: strings.TrimSpace(certBundle.Certificate) + "\n", CAChain: certBundle.CAChain, SerialNumber: certBundle.SerialNumber, - Usage: AllIssuerUsages, - Version: latestIssuerVersion, + Usage: issuing.AllIssuerUsages, + Version: issuing.LatestIssuerVersion, } return pkiIssuer, pkiKey @@ -241,6 +242,7 @@ func genCertBundle(t *testing.T, b *backend, s logical.Storage) *certutil.CertBu fields := addCACommonFields(map[string]*framework.FieldSchema{}) fields = addCAKeyGenerationFields(fields) fields = addCAIssueFields(fields) + fields = addCACertKeyUsage(fields) apiData := &framework.FieldData{ Schema: fields, Raw: map[string]interface{}{ diff --git a/builtin/logical/pki/storage_unified.go b/builtin/logical/pki/storage_unified.go index 28c656bb8bb6..63f39f1c596a 100644 --- a/builtin/logical/pki/storage_unified.go +++ b/builtin/logical/pki/storage_unified.go @@ -1,29 +1,20 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( "fmt" "strings" - "time" - "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/builtin/logical/pki/revocation" ) const ( - unifiedRevocationReadPathPrefix = "unified-revocation/" - unifiedRevocationWritePathPrefix = unifiedRevocationReadPathPrefix + "{{clusterId}}/" + unifiedRevocationReadPathPrefix = revocation.UnifiedRevocationReadPathPrefix ) -type unifiedRevocationEntry struct { - SerialNumber string `json:"-"` - CertExpiration time.Time `json:"certificate_expiration_utc"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - CertificateIssuer issuerID `json:"issuer_id"` -} - -func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*unifiedRevocationEntry, error) { +func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*revocation.UnifiedRevocationEntry, error) { clusterPaths, err := lookupUnifiedClusterPaths(sc) if err != nil { return nil, err @@ -37,7 +28,7 @@ func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*unifiedRe } if entryRaw != nil { - var revEntry unifiedRevocationEntry + var revEntry revocation.UnifiedRevocationEntry if err := entryRaw.DecodeJSON(&revEntry); err != nil { return nil, fmt.Errorf("failed json decoding of unified entry at path %s: %w", serialPath, err) } @@ -49,15 +40,6 @@ func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*unifiedRe return nil, nil } -func writeUnifiedRevocationEntry(sc *storageContext, ure *unifiedRevocationEntry) error { - json, err := logical.StorageEntryJSON(unifiedRevocationWritePathPrefix+normalizeSerial(ure.SerialNumber), ure) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) -} - // listClusterSpecificUnifiedRevokedCerts returns a list of revoked certificates from a given cluster func listClusterSpecificUnifiedRevokedCerts(sc *storageContext, clusterId string) ([]string, error) { path := unifiedRevocationReadPathPrefix + clusterId + "/" diff --git a/builtin/logical/pki/test_helpers.go b/builtin/logical/pki/test_helpers.go index ef9b46834874..2806a5dcafd2 100644 --- a/builtin/logical/pki/test_helpers.go +++ b/builtin/logical/pki/test_helpers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki @@ -16,6 +16,7 @@ import ( "io" "math" "math/big" + http2 "net/http" "strings" "testing" "time" @@ -24,6 +25,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" ) // Setup helpers @@ -57,6 +59,19 @@ func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { require.NoError(t, err, "failed mounting pki endpoint") } +func mountCertEndpoint(t testing.TB, client *api.Client, path string) { + t.Helper() + + err := client.Sys().EnableAuthWithOptions(path, &api.MountInput{ + Type: "cert", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err, "failed mounting cert endpoint") +} + // Signing helpers func requireSignedBy(t *testing.T, cert *x509.Certificate, signingCert *x509.Certificate) { t.Helper() @@ -66,6 +81,21 @@ func requireSignedBy(t *testing.T, cert *x509.Certificate, signingCert *x509.Cer } } +func requireSignedByAtPath(t *testing.T, client *api.Client, leaf *x509.Certificate, path string) { + t.Helper() + + resp, err := client.Logical().Read(path) + require.NoError(t, err, "got unexpected error fetching parent certificate") + require.NotNil(t, resp, "missing response when fetching parent certificate") + require.NotNil(t, resp.Data, "missing data from parent certificate response") + require.NotNil(t, resp.Data["certificate"], "missing certificate field on parent read response") + + parentCert := resp.Data["certificate"].(string) + parent := parseCert(t, parentCert) + + requireSignedBy(t, leaf, parent) +} + // Certificate helper func parseCert(t *testing.T, pemCert string) *x509.Certificate { t.Helper() @@ -211,6 +241,10 @@ func CBReq(b *backend, s logical.Storage, operation logical.Operation, path stri return resp, nil } +func CBHeader(b *backend, s logical.Storage, path string) (*logical.Response, error) { + return CBReq(b, s, logical.HeaderOperation, path, make(map[string]interface{})) +} + func CBRead(b *backend, s logical.Storage, path string) (*logical.Response, error) { return CBReq(b, s, logical.ReadOperation, path, make(map[string]interface{})) } @@ -374,3 +408,68 @@ func summarizeCrl(t *testing.T, crl pkix.TBSCertificateList) string { "Revoked Serial Count: %d\n"+ "Revoked Serials: %v", version, crl.ThisUpdate, crl.NextUpdate, len(serials), serials) } + +// OCSP helpers +func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { + t.Helper() + + opts := &ocsp.RequestOptions{Hash: requestHash} + ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) + require.NoError(t, err, "Failed generating OCSP request") + return ocspRequestDer +} + +func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { + t.Helper() + + err := ocspResp.CheckSignatureFrom(issuer) + require.NoError(t, err, "Failed signature verification of ocsp response: %v", err) +} + +func performOcspPost(t *testing.T, cert *x509.Certificate, issuerCert *x509.Certificate, client *api.Client, ocspPath string) *ocsp.Response { + t.Helper() + + baseClient := client.WithNamespace("") + + ocspReq := generateRequest(t, crypto.SHA256, cert, issuerCert) + ocspPostReq := baseClient.NewRequest(http2.MethodPost, ocspPath) + ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") + ocspPostReq.BodyBytes = ocspReq + rawResp, err :=
baseClient.RawRequest(ocspPostReq) + require.NoError(t, err, "failed sending unified-ocsp post request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader := rawResp.Body + respDer, err := io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) + require.NoError(t, err, "parsing ocsp get response") + return ocspResp +} + +func requireCertMissingFromStorage(t *testing.T, client *api.Client, cert *x509.Certificate) { + serial := serialFromCert(cert) + requireSerialMissingFromStorage(t, client, serial) +} + +func requireSerialMissingFromStorage(t *testing.T, client *api.Client, serial string) { + resp, err := client.Logical().ReadWithContext(context.Background(), "pki/cert/"+serial) + require.NoErrorf(t, err, "failed reading certificate with serial %s", serial) + require.Nilf(t, resp, "expected a nil response looking up serial %s got: %v", serial, resp) +} + +func requireCertInStorage(t *testing.T, client *api.Client, cert *x509.Certificate) { + serial := serialFromCert(cert) + requireSerialInStorage(t, client, serial) +} + +func requireSerialInStorage(t *testing.T, client *api.Client, serial string) { + resp, err := client.Logical().ReadWithContext(context.Background(), "pki/cert/"+serial) + require.NoErrorf(t, err, "failed reading certificate with serial %s", serial) + require.NotNilf(t, resp, "reading certificate returned a nil response for serial: %s", serial) + require.NotNilf(t, resp.Data, "reading certificate returned a nil data response for serial: %s", serial) + require.NotEmpty(t, resp.Data["certificate"], "certificate field was empty for serial: %s", serial) +} diff --git a/builtin/logical/pki/tidystatusstate_enumer.go b/builtin/logical/pki/tidystatusstate_enumer.go new file mode 100644 index 000000000000..11db8e64c429 --- /dev/null +++ b/builtin/logical/pki/tidystatusstate_enumer.go @@ -0,0 +1,53 @@ +// Code generated by "enumer -type=tidyStatusState -trimprefix=tidyStatus"; DO NOT EDIT. + +package pki + +import ( + "fmt" +) + +const _tidyStatusStateName = "InactiveStartedFinishedErrorCancellingCancelled" + +var _tidyStatusStateIndex = [...]uint8{0, 8, 15, 23, 28, 38, 47} + +func (i tidyStatusState) String() string { + if i < 0 || i >= tidyStatusState(len(_tidyStatusStateIndex)-1) { + return fmt.Sprintf("tidyStatusState(%d)", i) + } + return _tidyStatusStateName[_tidyStatusStateIndex[i]:_tidyStatusStateIndex[i+1]] +} + +var _tidyStatusStateValues = []tidyStatusState{0, 1, 2, 3, 4, 5} + +var _tidyStatusStateNameToValueMap = map[string]tidyStatusState{ + _tidyStatusStateName[0:8]: 0, + _tidyStatusStateName[8:15]: 1, + _tidyStatusStateName[15:23]: 2, + _tidyStatusStateName[23:28]: 3, + _tidyStatusStateName[28:38]: 4, + _tidyStatusStateName[38:47]: 5, +} + +// tidyStatusStateString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func tidyStatusStateString(s string) (tidyStatusState, error) { + if val, ok := _tidyStatusStateNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to tidyStatusState values", s) +} + +// tidyStatusStateValues returns all values of the enum +func tidyStatusStateValues() []tidyStatusState { + return _tidyStatusStateValues +} + +// IsAtidyStatusState returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i tidyStatusState) IsAtidyStatusState() bool { + for _, v := range _tidyStatusStateValues { + if i == v { + return true + } + } + return false +} diff --git a/builtin/logical/pki/util.go b/builtin/logical/pki/util.go index d90e055e6cbc..76f7bfcefcd3 100644 --- a/builtin/logical/pki/util.go +++ b/builtin/logical/pki/util.go @@ -1,10 +1,9 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pki import ( - "crypto" "crypto/x509" "fmt" "math/big" @@ -14,9 +13,10 @@ import ( "sync" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" "github.com/hashicorp/vault/sdk/framework" - - "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -24,7 +24,7 @@ import ( const ( managedKeyNameArg = "managed_key_name" managedKeyIdArg = "managed_key_id" - defaultRef = "default" + defaultRef = issuing.DefaultRef // Constants for If-Modified-Since operation headerIfModifiedSince = "If-Modified-Since" @@ -39,19 +39,19 @@ var ( ) func serialFromCert(cert *x509.Certificate) string { - return serialFromBigInt(cert.SerialNumber) + return parsing.SerialFromCert(cert) } func serialFromBigInt(serial *big.Int) string { - return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) + return parsing.SerialFromBigInt(serial) } func normalizeSerialFromBigInt(serial *big.Int) string { - return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), "-")) + return parsing.NormalizeSerialForStorageFromBigInt(serial) } func normalizeSerial(serial string) string { - return strings.ReplaceAll(strings.ToLower(serial), ":", "-") + return parsing.NormalizeSerialForStorage(serial) } func denormalizeSerial(serial string) string { @@ -92,26 +92,6 @@ type managedKeyId interface { String() string } -type ( - UUIDKey string - NameKey string -) - -func (u UUIDKey) String() string { - return string(u) -} - -func (n NameKey) String() string { - return string(n) -} - -type managedKeyInfo struct { - publicKey crypto.PublicKey - keyType certutil.PrivateKeyType - name NameKey - uuid UUIDKey -} - // getManagedKeyId returns a NameKey or a UUIDKey, whichever was specified in the // request API data. 
func getManagedKeyId(data *framework.FieldData) (managedKeyId, error) { @@ -120,9 +100,9 @@ func getManagedKeyId(data *framework.FieldData) (managedKeyId, error) { return nil, err } - var keyId managedKeyId = NameKey(name) + var keyId managedKeyId = managed_key.NameKey(name) if len(UUID) > 0 { - keyId = UUIDKey(UUID) + keyId = managed_key.UUIDKey(UUID) } return keyId, nil @@ -188,7 +168,7 @@ func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error return issuerName, errIssuerNameInUse } - if err != nil && issuerId != IssuerRefNotFound { + if err != nil && issuerId != issuing.IssuerRefNotFound { return issuerName, errutil.InternalError{Err: err.Error()} } } @@ -213,14 +193,14 @@ func getKeyName(sc *storageContext, data *framework.FieldData) (string, error) { return "", errKeyNameInUse } - if err != nil && keyId != KeyRefNotFound { + if err != nil && keyId != issuing.KeyRefNotFound { return "", errutil.InternalError{Err: err.Error()} } } return keyName, nil } -func getIssuerRef(data *framework.FieldData) string { +func GetIssuerRef(data *framework.FieldData) string { return extractRef(data, issuerRefParam) } @@ -272,21 +252,22 @@ func parseIfNotModifiedSince(req *logical.Request) (time.Time, error) { return headerTimeValue, nil } +//go:generate enumer -type=ifModifiedReqType -trimprefix=ifModified type ifModifiedReqType int const ( - ifModifiedUnknown ifModifiedReqType = iota - ifModifiedCA = iota - ifModifiedCRL = iota - ifModifiedDeltaCRL = iota - ifModifiedUnifiedCRL = iota - ifModifiedUnifiedDeltaCRL = iota + ifModifiedUnknown ifModifiedReqType = iota + ifModifiedCA + ifModifiedCRL + ifModifiedDeltaCRL + ifModifiedUnifiedCRL + ifModifiedUnifiedDeltaCRL ) type IfModifiedSinceHelper struct { req *logical.Request reqType ifModifiedReqType - issuerRef issuerID + issuerRef issuing.IssuerID } func sendNotModifiedResponseIfNecessary(helper *IfModifiedSinceHelper, sc *storageContext, resp *logical.Response) (bool, error) { @@ -326,7 +307,7 @@ func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModified switch helper.reqType { case ifModifiedCRL, ifModifiedDeltaCRL: - if sc.Backend.crlBuilder.invalidate.Load() { + if sc.CrlBuilder().ShouldInvalidate() { // When we see the CRL is invalidated, respond with false // regardless of what the local CRL state says. We've likely // renamed some issuers or are about to rebuild a new CRL.... @@ -346,7 +327,7 @@ func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModified lastModified = crlConfig.DeltaLastModified } case ifModifiedUnifiedCRL, ifModifiedUnifiedDeltaCRL: - if sc.Backend.crlBuilder.invalidate.Load() { + if sc.CrlBuilder().ShouldInvalidate() { // When we see the CRL is invalidated, respond with false // regardless of what the local CRL state says. We've likely // renamed some issuers or are about to rebuild a new CRL.... diff --git a/builtin/logical/pkiext/nginx_test.go b/builtin/logical/pkiext/nginx_test.go index cc2ca5fbf8ef..70defe42e875 100644 --- a/builtin/logical/pkiext/nginx_test.go +++ b/builtin/logical/pkiext/nginx_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pkiext @@ -17,11 +17,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/helper/testhelpers/docker" - "github.com/hashicorp/go-uuid" - + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/stretchr/testify/require" ) @@ -41,7 +39,7 @@ const ( func buildNginxContainer(t *testing.T, root string, crl string, chain string, private string) (func(), string, int, string, string, int) { containerfile := ` -FROM nginx:latest +FROM nginx:1.27.1 RUN mkdir /www /etc/nginx/ssl && rm /etc/nginx/conf.d/*.conf @@ -232,7 +230,7 @@ func CheckWithClients(t *testing.T, network string, address string, url string, // Start our service with a random name to not conflict with other // threads. ctx := context.Background() - ctr, _, _, err := cwRunner.Start(ctx, true, false) + result, err := cwRunner.Start(ctx, true, false) if err != nil { t.Fatalf("Could not start golang container for wget/curl checks: %s", err) } @@ -258,14 +256,14 @@ func CheckWithClients(t *testing.T, network string, address string, url string, wgetCmd = []string{"wget", "--verbose", "--ca-certificate=/root.pem", "--certificate=/client-cert.pem", "--private-key=/client-privkey.pem", url} curlCmd = []string{"curl", "--verbose", "--cacert", "/root.pem", "--cert", "/client-cert.pem", "--key", "/client-privkey.pem", url} } - if err := cwRunner.CopyTo(ctr.ID, "/", certCtx); err != nil { + if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { t.Fatalf("Could not copy certificate and key into container: %v", err) } for _, cmd := range [][]string{hostPrimeCmd, wgetCmd, curlCmd} { t.Logf("Running client connection command: %v", cmd) - stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, ctr.ID, cmd) + stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) if err != nil { t.Fatalf("Could not run command (%v) in container: %v", cmd, err) } @@ -295,7 +293,7 @@ func CheckDeltaCRL(t *testing.T, network string, address string, url string, roo // Start our service with a random name to not conflict with other // threads. 
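The mechanical change repeated through this file is the sdk docker helper's new `Start` signature: the old four-value return (`ctr, _, _, err`) collapses into a single result value whose container ID is reached via `result.Container.ID`. A minimal sketch of the new call shape, with a placeholder image; the `RunOptions` fields and the `Start`/`Stop` signatures are taken from the call sites in this diff:

```go
package main

import (
	"context"
	"log"

	hDocker "github.com/hashicorp/vault/sdk/helper/docker"
)

func main() {
	ctx := context.Background()
	runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{
		ImageRepo:  "docker.mirror.hashicorp.services/library/busybox", // placeholder image
		ImageTag:   "latest",
		Entrypoint: []string{"sleep", "45"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Start now returns one result struct; the container ID the old
	// four-value form returned directly now hangs off result.Container.
	result, err := runner.Start(ctx, true, false)
	if err != nil {
		log.Fatal(err)
	}
	defer runner.Stop(ctx, result.Container.ID)
}
```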
ctx := context.Background() - ctr, _, _, err := cwRunner.Start(ctx, true, false) + result, err := cwRunner.Start(ctx, true, false) if err != nil { t.Fatalf("Could not start golang container for wget2 delta CRL checks: %s", err) } @@ -313,14 +311,14 @@ func CheckDeltaCRL(t *testing.T, network string, address string, url string, roo certCtx := docker.NewBuildContext() certCtx["root.pem"] = docker.PathContentsFromString(rootCert) certCtx["crls.pem"] = docker.PathContentsFromString(crls) - if err := cwRunner.CopyTo(ctr.ID, "/", certCtx); err != nil { + if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { t.Fatalf("Could not copy certificate and key into container: %v", err) } for index, cmd := range [][]string{hostPrimeCmd, wgetCmd} { t.Logf("Running client connection command: %v", cmd) - stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, ctr.ID, cmd) + stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) if err != nil { t.Fatalf("Could not run command (%v) in container: %v", cmd, err) } diff --git a/builtin/logical/pkiext/pkiext_binary/acme_test.go b/builtin/logical/pkiext/pkiext_binary/acme_test.go new file mode 100644 index 000000000000..f4a7be0c1d83 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/acme_test.go @@ -0,0 +1,1116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pkiext_binary + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + _ "embed" + "encoding/hex" + "errors" + "fmt" + "html/template" + "net" + "net/http" + "path" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/logical/pkiext" + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/certutil" + hDocker "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/acme" +) + +//go:embed testdata/caddy_http.json +var caddyConfigTemplateHTTP string + +//go:embed testdata/caddy_http_eab.json +var caddyConfigTemplateHTTPEAB string + +//go:embed testdata/caddy_tls_alpn.json +var caddyConfigTemplateTLSALPN string + +// Test_ACME will start a Vault cluster using the docker based binary, and execute +// a bunch of sub-tests against that cluster. It is up to each sub-test to run/configure +// a new pki mount within the cluster to not interfere with each other. +func Test_ACME(t *testing.T) { + cluster := NewVaultPkiClusterWithDNS(t) + defer cluster.Cleanup() + + tc := map[string]func(t *testing.T, cluster *VaultPkiCluster){ + "caddy http": SubtestACMECaddy(caddyConfigTemplateHTTP, false), + "caddy http eab": SubtestACMECaddy(caddyConfigTemplateHTTPEAB, true), + "caddy tls-alpn": SubtestACMECaddy(caddyConfigTemplateTLSALPN, false), + "certbot": SubtestACMECertbot, + "certbot eab": SubtestACMECertbotEab, + "acme ip sans": SubtestACMEIPAndDNS, + "acme wildcard": SubtestACMEWildcardDNS, + "acme prevents ica": SubtestACMEPreventsICADNS, + } + + // Wrap the tests within an outer group, so that we run all tests + // in parallel, but still wait for all tests to finish before completing + // and running the cleanup of the Vault cluster. 
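The comment above describes a standard Go testing idiom worth spelling out: an inner `t.Run` does not return until all of its parallel subtests have finished, so the parent can safely combine `Parallel()` subtests with deferred cleanup. A distilled sketch of the pattern as used in the group below (names and bodies are placeholders; this belongs in a `_test.go` file):

```go
package example

import "testing"

func TestGroupPattern(t *testing.T) {
	cleanup := func() { /* e.g. tear down a shared cluster */ }
	defer cleanup()

	cases := map[string]func(t *testing.T){
		"a": func(t *testing.T) {},
		"b": func(t *testing.T) {},
	}

	t.Run("group", func(gt *testing.T) {
		for name := range cases {
			tc := cases[name] // capture before the next iteration clobbers it (pre-Go 1.22)
			gt.Run(name, func(st *testing.T) {
				st.Parallel() // subtests race each other...
				tc(st)
			})
		}
	})
	// ...but the outer t.Run only returns once every parallel subtest is
	// done, so the deferred cleanup runs strictly afterwards.
}
```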
+ t.Run("group", func(gt *testing.T) { + for testName := range tc { + // Trap the function to be embedded later in the run so it + // doesn't get clobbered on the next for iteration + testFunc := tc[testName] + + gt.Run(testName, func(st *testing.T) { + st.Parallel() + testFunc(st, cluster) + }) + } + }) + + // Do not run these tests in parallel. + t.Run("step down", func(gt *testing.T) { SubtestACMEStepDownNode(gt, cluster) }) +} + +// caddyConfig contains information used to render a Caddy configuration file from a template. +type caddyConfig struct { + Hostname string + Directory string + CACert string + EABID string + EABKey string +} + +// SubtestACMECaddy returns an ACME test for Caddy using the provided template. +func SubtestACMECaddy(configTemplate string, enableEAB bool) func(*testing.T, *VaultPkiCluster) { + return func(t *testing.T, cluster *VaultPkiCluster) { + ctx := context.Background() + logger := corehelpers.NewTestLogger(t) + + // Roll a random run ID for mount and hostname uniqueness. + runID, err := uuid.GenerateUUID() + require.NoError(t, err, "failed to generate a unique ID for test run") + runID = strings.Split(runID, "-")[0] + + // Create the PKI mount with ACME enabled + pki, err := cluster.CreateAcmeMount(runID) + require.NoError(t, err, "failed to set up ACME mount") + + // Conditionally enable EAB and retrieve the key. + var eabID, eabKey string + if enableEAB { + err = pki.UpdateAcmeConfig(true, map[string]interface{}{ + "eab_policy": "new-account-required", + }) + require.NoError(t, err, "failed to configure EAB policy in PKI mount") + + eabID, eabKey, err = pki.GetEabKey("acme/") + require.NoError(t, err, "failed to retrieve EAB key from PKI mount") + } + + directory := fmt.Sprintf("https://%s:8200/v1/%s/acme/directory", pki.GetActiveContainerIP(), runID) + vaultNetwork := pki.GetContainerNetworkName() + logger.Trace("dir", "dir", directory) + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + sleepTimer := "45" + + // Kick off Caddy container. + logger.Trace("creating on network", "network", vaultNetwork) + caddyRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/library/caddy", + ImageTag: "2.6.4", + ContainerName: fmt.Sprintf("caddy_test_%s", runID), + NetworkName: vaultNetwork, + Ports: []string{"80/tcp", "443/tcp", "443/udp"}, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating caddy service runner") + + caddyResult, err := caddyRunner.Start(ctx, true, false) + require.NoError(t, err, "could not start Caddy container") + require.NotNil(t, caddyResult, "could not start Caddy container") + + defer caddyRunner.Stop(ctx, caddyResult.Container.ID) + + networks, err := caddyRunner.GetNetworkAndAddresses(caddyResult.Container.ID) + require.NoError(t, err, "could not read caddy container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := fmt.Sprintf("%s.dadgarcorp.com", runID) + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Render the Caddy configuration from the specified template. 
+ tmpl, err := template.New("config").Parse(configTemplate) + require.NoError(t, err, "failed to parse Caddy config template") + var b strings.Builder + err = tmpl.Execute( + &b, + caddyConfig{ + Hostname: hostname, + Directory: directory, + CACert: "/tmp/vault_ca_cert.crt", + EABID: eabID, + EABKey: eabKey, + }, + ) + require.NoError(t, err, "failed to render Caddy config template") + + // Push the Caddy config and the cluster listener's CA certificate over to the docker container. + cpCtx := hDocker.NewBuildContext() + cpCtx["caddy_config.json"] = hDocker.PathContentsFromString(b.String()) + cpCtx["vault_ca_cert.crt"] = hDocker.PathContentsFromString(string(cluster.GetListenerCACertPEM())) + err = caddyRunner.CopyTo(caddyResult.Container.ID, "/tmp/", cpCtx) + require.NoError(t, err, "failed to copy Caddy config and Vault listener CA certificate to container") + + // Start the Caddy server. + caddyCmd := []string{ + "caddy", + "start", + "--config", "/tmp/caddy_config.json", + } + stdout, stderr, retcode, err := caddyRunner.RunCmdWithOutput(ctx, caddyResult.Container.ID, caddyCmd) + logger.Trace("Caddy Start Command", "cmd", caddyCmd, "stdout", string(stdout), "stderr", string(stderr)) + require.NoError(t, err, "got error running Caddy start command") + require.Equal(t, 0, retcode, "expected zero retcode Caddy start command result") + + // Start a cURL container. + curlRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/curlimages/curl", + ImageTag: "8.4.0", + ContainerName: fmt.Sprintf("curl_test_%s", runID), + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating cURL service runner") + + curlResult, err := curlRunner.Start(ctx, true, false) + require.NoError(t, err, "could not start cURL container") + require.NotNil(t, curlResult, "could not start cURL container") + + // Retrieve the PKI mount CA cert and copy it over to the cURL container. + mountCACert, err := pki.GetCACertPEM() + require.NoError(t, err, "failed to retrieve PKI mount CA certificate") + + mountCACertCtx := hDocker.NewBuildContext() + mountCACertCtx["ca_cert.crt"] = hDocker.PathContentsFromString(mountCACert) + err = curlRunner.CopyTo(curlResult.Container.ID, "/tmp/", mountCACertCtx) + require.NoError(t, err, "failed to copy PKI mount CA certificate to cURL container") + + // Use cURL to hit the Caddy server and validate that a certificate was retrieved successfully. 
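The cURL invocation below relies on `--resolve` to pin the hostname to the container IP without touching DNS, while still validating the certificate against a specific CA. For reference, a hedged Go equivalent of the same trick; the hostname, IP, and CA path are placeholders:

```go
package main

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"net"
	"net/http"
	"os"
)

// newPinnedClient mimics `curl --cacert ca.crt --resolve host:443:ip https://host/`:
// trust one CA and route the hostname to a fixed IP, leaving normal TLS
// hostname verification in place.
func newPinnedClient(caPEM []byte, hostname, ip string) *http.Client {
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	dialer := &net.Dialer{}
	return &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{RootCAs: pool},
		DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			if addr == hostname+":443" {
				addr = ip + ":443" // the --resolve substitution
			}
			return dialer.DialContext(ctx, network, addr)
		},
	}}
}

func main() {
	caPEM, err := os.ReadFile("/tmp/ca_cert.crt") // placeholder path
	if err != nil {
		panic(err)
	}
	client := newPinnedClient(caPEM, "example.dadgarcorp.com", "172.17.0.3")
	if resp, err := client.Get("https://example.dadgarcorp.com/"); err == nil {
		resp.Body.Close()
	}
}
```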
+ curlCmd := []string{ + "curl", + "-L", + "--cacert", "/tmp/ca_cert.crt", + "--resolve", hostname + ":443:" + ipAddr, + "https://" + hostname + "/", + } + stdout, stderr, retcode, err = curlRunner.RunCmdWithOutput(ctx, curlResult.Container.ID, curlCmd) + logger.Trace("cURL Command", "cmd", curlCmd, "stdout", string(stdout), "stderr", string(stderr)) + require.NoError(t, err, "got error running cURL command") + require.Equal(t, 0, retcode, "expected zero retcode cURL command result") + } +} + +func SubtestACMECertbot(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki") + require.NoError(t, err, "failed setting up acme mount") + + directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/pki/acme/directory" + vaultNetwork := pki.GetContainerNetworkName() + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + // Default to 45 second timeout, but bump to 120 when running locally or if nightly regression + // flag is provided. + sleepTimer := "45" + if testhelpers.IsLocalOrRegressionTests() { + sleepTimer = "120" + } + + logger.Trace("creating on network", "network", vaultNetwork) + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot", + ImageTag: "latest", + ContainerName: "vault_pki_certbot_test", + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + defer runner.Stop(context.Background(), result.Container.ID) + + networks, err := runner.GetNetworkAndAddresses(result.Container.ID) + require.NoError(t, err, "could not read container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := "certbot-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Sinkhole a domain that's invalid just in case it's registered in the future. + cluster.Dns.AddDomain("armoncorp.com") + cluster.Dns.AddRecord("armoncorp.com", "A", "127.0.0.1") + + certbotCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + } + logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} + + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) + logger.Trace("Certbot Issue Command", "cmd", certbotCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.Equal(t, 0, retcode, "expected zero retcode issue command result") + + // N.B. We're using the `certonly` subcommand here because it seems as though the `renew` command + // attempts to install the cert for you. 
This ends up hanging and getting killed by docker, but is + // also not desired behavior. The certbot docs suggest using `certonly` to renew as seen here: + // https://eff-certbot.readthedocs.io/en/stable/using.html#renewing-certificates + certbotRenewCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + "--cert-name", hostname, + "--force-renewal", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) + logger.Trace("Certbot Renew Command", "cmd", certbotRenewCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running renew command") + require.Equal(t, 0, retcode, "expected zero retcode renew command result") + + certbotRevokeCmd := []string{ + "certbot", + "revoke", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--non-interactive", + "--no-delete-after-revoke", + "--cert-name", hostname, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running revoke command") + require.Equal(t, 0, retcode, "expected zero retcode revoke command result") + + // Revoking twice should fail. + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Double Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode == 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + + require.NoError(t, err, "got error running double revoke command") + require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result") + + // Attempt to issue against a domain that doesn't match the challenge. + // N.B. This test only runs locally or when the nightly regression env var is provided to CI. 
+ if testhelpers.IsLocalOrRegressionTests() { + certbotInvalidIssueCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", "armoncorp.com", + "--issuance-timeout", "10", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotInvalidIssueCmd) + logger.Trace("Certbot Invalid Issue Command", "cmd", certbotInvalidIssueCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.NotEqual(t, 0, retcode, "expected non-zero retcode issue command result") + } + + // Attempt to close out our ACME account + certbotUnregisterCmd := []string{ + "certbot", + "unregister", + "--no-verify-ssl", + "--non-interactive", + "--server", directory, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd) + logger.Trace("Certbot Unregister Command", "cmd", certbotUnregisterCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running unregister command") + require.Equal(t, 0, retcode, "expected zero retcode unregister command result") + + // Attempting to close out our ACME account twice should fail + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd) + logger.Trace("Certbot Double Unregister Command", "cmd", certbotUnregisterCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running double unregister command") + require.Equal(t, 1, retcode, "expected retcode 1 for double unregister command result") +} + +func SubtestACMECertbotEab(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + mountName := "pki-certbot-eab" + pki, err := cluster.CreateAcmeMount(mountName) + require.NoError(t, err, "failed setting up acme mount") + + err = pki.UpdateAcmeConfig(true, map[string]interface{}{ + "eab_policy": "new-account-required", + }) + require.NoError(t, err) + + eabId, base64EabKey, err := pki.GetEabKey("acme/") + require.NoError(t, err, "failed fetching eab key") + + directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/" + mountName + "/acme/directory" + vaultNetwork := pki.GetContainerNetworkName() + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + logger.Trace("creating on network", "network", vaultNetwork) + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot", + ImageTag: "latest", + ContainerName: "vault_pki_certbot_eab_test", + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", "45"}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service
runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + defer runner.Stop(context.Background(), result.Container.ID) + + networks, err := runner.GetNetworkAndAddresses(result.Container.ID) + require.NoError(t, err, "could not read container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := "certbot-eab-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + certbotCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--eab-kid", eabId, + "--eab-hmac-key='" + base64EabKey + "'", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + } + logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} + + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) + logger.Trace("Certbot Issue Command", "cmd", certbotCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.Equal(t, 0, retcode, "expected zero retcode issue command result") + + certbotRenewCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + "--cert-name", hostname, + "--force-renewal", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) + logger.Trace("Certbot Renew Command", "cmd", certbotRenewCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running renew command") + require.Equal(t, 0, retcode, "expected zero retcode renew command result") + + certbotRevokeCmd := []string{ + "certbot", + "revoke", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--non-interactive", + "--no-delete-after-revoke", + "--cert-name", hostname, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running revoke command") + require.Equal(t, 0, retcode, "expected zero retcode revoke command result") + + // Revoking twice should fail. 
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Double Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode == 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + + require.NoError(t, err, "got error running double revoke command") + require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result") +} + +func SubtestACMEIPAndDNS(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki-ip-dns-sans") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + // Set up an nginx container that we can have respond to the queries for IPs + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/nginx", + ImageTag: "latest", + ContainerName: "vault_pki_ipsans_test", + NetworkName: pki.GetContainerNetworkName(), + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + nginxContainerId := result.Container.ID + defer runner.Stop(context.Background(), nginxContainerId) + networks, err := runner.GetNetworkAndAddresses(nginxContainerId) + require.NoError(t, err, "could not read nginx container's network addresses") + + challengeFolder := "/usr/share/nginx/html/.well-known/acme-challenge/" + createChallengeFolderCmd := []string{ + "sh", "-c", + "mkdir -p '" + challengeFolder + "'", + } + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, nginxContainerId, createChallengeFolderCmd) + require.NoError(t, err, "failed to create folder in nginx container") + logger.Trace("Create challenge folder command", "cmd", createChallengeFolderCmd, "stdout", string(stdout), "stderr", string(stderr)) + require.Equal(t, 0, retcode, "expected zero retcode from mkdir in nginx container") + + ipAddr := networks[pki.GetContainerNetworkName()] + hostname := "go-lang-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Perform an ACME lifecycle with an order that contains both an IP and a DNS name identifier + err = pki.UpdateRole("ip-dns-sans", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "dadgarcorp.com", + "allow_subdomains": true, + "allow_wildcard_certificates": false, + }) + require.NoError(t, err, "failed creating role ip-dns-sans") + + directoryUrl := basePath + "/roles/ip-dns-sans/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "ip", Value: ipAddr}, + {Type: "dns", Value: hostname}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: hostname}, + DNSNames: []string{hostname}, + IPAddresses: []net.IP{net.ParseIP(ipAddr)}, + } + + provisioningFunc := func(acmeClient *acme.Client, auths
[]*acme.Authorization) []*acme.Challenge { + // For each http-01 challenge, generate the file to place underneath the nginx challenge folder + acmeCtx := hDocker.NewBuildContext() + var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "http-01" { + challengeBody, err := acmeClient.HTTP01ChallengeResponse(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + challengePath := acmeClient.HTTP01ChallengePath(challenge.Token) + + challengeFile := path.Base(challengePath) + + acmeCtx[challengeFile] = hDocker.PathContentsFromString(challengeBody) + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + + // Copy all challenges into the nginx container + err = runner.CopyTo(nginxContainerId, challengeFolder, acmeCtx) + require.NoError(t, err, "failed copying challenges to container") + + return challengesToAccept + } + + acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + + require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert") + require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String()) + require.Equal(t, []string{hostname}, acmeCert.DNSNames) + require.Equal(t, hostname, acmeCert.Subject.CommonName) + + // Perform an ACME lifecycle with an order that contains just an IP identifier + err = pki.UpdateRole("ip-sans", map[string]interface{}{ + "key_type": "any", + "use_csr_common_name": false, + "require_cn": false, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role ip-sans") + + directoryUrl = basePath + "/roles/ip-sans/acme/directory" + acmeOrderIdentifiers = []acme.AuthzID{ + {Type: "ip", Value: ipAddr}, + } + cr = &x509.CertificateRequest{ + IPAddresses: []net.IP{net.ParseIP(ipAddr)}, + } + + acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + + require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert") + require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String()) + require.Empty(t, acmeCert.DNSNames, "acme cert dns name field should have been empty") + require.Equal(t, "", acmeCert.Subject.CommonName) +} + +type acmeGoValidatorProvisionerFunc func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge + +func doAcmeValidationWithGoLibrary(t *testing.T, directoryUrl string, acmeOrderIdentifiers []acme.AuthzID, cr *x509.CertificateRequest, provisioningFunc acmeGoValidatorProvisionerFunc, expectedFailure string) *x509.Certificate { + logger := corehelpers.NewTestLogger(t) + + // Since we are contacting Vault through the host ip/port, the certificate will not validate properly + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + httpClient := &http.Client{Transport: tr} + + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa account key") + logger.Trace("Using the following url for the ACME directory", "url", directoryUrl) + acmeClient := &acme.Client{ + Key: accountKey, + HTTPClient: httpClient, + DirectoryURL: directoryUrl, + } + + testCtx, cancelFunc :=
context.WithTimeout(context.Background(), 2*time.Minute) + defer cancelFunc() + + // Create new account + _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, + func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an ACME order + order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers) + require.NoError(t, err, "failed creating ACME order") + + var auths []*acme.Authorization + for _, authUrl := range order.AuthzURLs { + authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + auths = append(auths, authorization) + } + + // Handle the validation using the external validation mechanism. + challengesToAccept := provisioningFunc(acmeClient, auths) + require.NotEmpty(t, challengesToAccept, "provisioning function failed to return any challenges to accept") + + // Tell the ACME server that they can now validate those challenges. + for _, challenge := range challengesToAccept { + _, err = acmeClient.Accept(testCtx, challenge) + require.NoError(t, err, "failed to accept challenge: %v", challenge) + } + + // Wait for the order/challenges to be validated. + _, err = acmeClient.WaitOrder(testCtx, order.URI) + require.NoError(t, err, "failed waiting for order to be ready") + + // Create/sign the CSR and ask ACME server to sign it returning us the final certificate + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating csr key") + csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey) + require.NoError(t, err, "failed generating csr") + + logger.Trace("[TEST-LOG] Created CSR", "csr", hex.EncodeToString(csr)) + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false) + if err != nil { + if expectedFailure != "" { + require.Contains(t, err.Error(), expectedFailure, "got an unexpected failure not matching expected value") + return nil + } + + require.NoError(t, err, "failed to get a certificate back from ACME") + } else if expectedFailure != "" { + t.Fatalf("expected failure containing: %s got none", expectedFailure) + } + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + + return acmeCert +} + +func SubtestACMEWildcardDNS(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki-dns-wildcards") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + hostname := "go-lang-wildcard-client.dadgarcorp.com" + wildcard := "*." + hostname + + // Do validation without a role first. + directoryUrl := basePath + "/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + {Type: "dns", Value: wildcard}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: wildcard}, + DNSNames: []string{hostname, wildcard}, + } + + provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { + // For each dns-01 challenge, place the record in the associated DNS resolver.
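An aside on what `DNS01ChallengeRecord` (used below) produces: per RFC 8555, the TXT value published at `_acme-challenge.<identifier>` is the base64url-encoded SHA-256 of the key authorization string (`<token>.<base64url JWK thumbprint>`). A standalone sketch of that derivation, with a made-up key authorization:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// dns01TXT computes the RFC 8555 dns-01 record value from a key
// authorization string ("<token>.<base64url JWK thumbprint>").
func dns01TXT(keyAuthorization string) string {
	sum := sha256.Sum256([]byte(keyAuthorization))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}

func main() {
	// Hypothetical key authorization, for illustration only.
	keyAuth := "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA.nP1qzpXGymHBrUEepNY9HCsQk7K8KhOypzEt62jcerQ"
	fmt.Println(dns01TXT(keyAuth))
}
```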
+ var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + return challengesToAccept + } + + acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + require.Contains(t, acmeCert.DNSNames, hostname) + require.Contains(t, acmeCert.DNSNames, wildcard) + require.Equal(t, wildcard, acmeCert.Subject.CommonName) + pki.RemoveDNSRecordsForDomain(hostname) + + // Redo validation with a role this time. + err = pki.UpdateRole("wildcard", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "go-lang-wildcard-client.dadgarcorp.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "allow_wildcard_certificates": true, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role wildcard") + directoryUrl = basePath + "/roles/wildcard/acme/directory" + + acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + require.Contains(t, acmeCert.DNSNames, hostname) + require.Contains(t, acmeCert.DNSNames, wildcard) + require.Equal(t, wildcard, acmeCert.Subject.CommonName) + pki.RemoveDNSRecordsForDomain(hostname) +} + +func SubtestACMEPreventsICADNS(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki-dns-ica") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + hostname := "go-lang-intermediate-ca-cert.dadgarcorp.com" + + // Do validation without a role first. + directoryUrl := basePath + "/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: hostname}, + DNSNames: []string{hostname}, + ExtraExtensions: []pkix.Extension{ + // Basic Constraint with IsCA asserted to true. + { + Id: certutil.ExtensionBasicConstraintsOID, + Critical: true, + Value: []byte{0x30, 0x03, 0x01, 0x01, 0xFF}, + }, + }, + } + + provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { + // For each dns-01 challenge, place the record in the associated DNS resolver. 
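An aside on the raw extension bytes in the CSR above: `30 03 01 01 ff` is DER for `SEQUENCE { BOOLEAN TRUE }`, i.e. a Basic Constraints value with cA asserted and no path length constraint. A quick round-trip with encoding/asn1, using the same struct shape crypto/x509 uses internally (the `default:-1` tag makes Marshal omit the path length field):

```go
package main

import (
	"encoding/asn1"
	"fmt"
)

// basicConstraints mirrors the ASN.1 structure behind the raw bytes in the
// test above: cA flag plus an optional pathLenConstraint.
type basicConstraints struct {
	IsCA       bool `asn1:"optional"`
	MaxPathLen int  `asn1:"optional,default:-1"`
}

func main() {
	// MaxPathLen of -1 matches the default, so it is omitted on encode.
	der, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", der) // 30 03 01 01 ff
}
```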
+ var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + return challengesToAccept + } + + doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") + pki.RemoveDNSRecordsForDomain(hostname) + + // Redo validation with a role this time. + err = pki.UpdateRole("ica", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "go-lang-intermediate-ca-cert.dadgarcorp.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "allow_wildcard_certificates": true, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role ica") + directoryUrl = basePath + "/roles/ica/acme/directory" + + doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") + pki.RemoveDNSRecordsForDomain(hostname) +} + +// SubtestACMEStepDownNode verifies that we can properly run an ACME session through a +// secondary node, and midway through the challenge verification process, seal the +// active node and make sure we can complete the ACME session on the new active node. +func SubtestACMEStepDownNode(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("stepdown-test") + require.NoError(t, err) + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. We also grab the non-active node here on purpose to verify + // ACME related APIs are properly forwarded across standby hosts.
+	nonActiveNodes := pki.GetNonActiveNodes()
+	require.GreaterOrEqual(t, len(nonActiveNodes), 1, "Need at least one non-active node")
+
+	nonActiveNode := nonActiveNodes[0]
+
+	basePath := fmt.Sprintf("https://%s/v1/%s", nonActiveNode.HostPort, pki.mount)
+	err = pki.UpdateClusterConfig(map[string]interface{}{
+		"path": basePath,
+	})
+	require.NoError(t, err, "failed updating cluster config")
+
+	hostname := "go-lang-stepdown-client.dadgarcorp.com"
+
+	acmeOrderIdentifiers := []acme.AuthzID{
+		{Type: "dns", Value: hostname},
+	}
+	cr := &x509.CertificateRequest{
+		DNSNames: []string{hostname},
+	}
+
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa account key")
+
+	acmeClient := &acme.Client{
+		Key: accountKey,
+		HTTPClient: &http.Client{Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+		}},
+		DirectoryURL: basePath + "/acme/directory",
+	}
+
+	testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute)
+	defer cancelFunc()
+
+	// Create new account
+	_, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}},
+		func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+
+	// Create an ACME order
+	order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers)
+	require.NoError(t, err, "failed creating ACME order")
+
+	require.Len(t, order.AuthzURLs, 1, "expected a single authz url")
+	authUrl := order.AuthzURLs[0]
+
+	authorization, err := acmeClient.GetAuthorization(testCtx, authUrl)
+	require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl)
+
+	dnsTxtRecordsToAdd := map[string]string{}
+
+	var challengesToAccept []*acme.Challenge
+	for _, challenge := range authorization.Challenges {
+		if challenge.Status != acme.StatusPending {
+			logger.Trace("ignoring challenge not in status pending", "challenge", challenge)
+			continue
+		}
+
+		if challenge.Type == "dns-01" {
+			challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token)
+			require.NoError(t, err, "failed generating challenge response")
+
+			// Collect the challenges for us to add the DNS records after step-down
+			dnsTxtRecordsToAdd["_acme-challenge."+authorization.Identifier.Value] = challengeBody
+			challengesToAccept = append(challengesToAccept, challenge)
+		}
+	}
+
+	// Tell the ACME server that it can now validate those challenges; this will cause
+	// challenge verification failures on the current active node, as the DNS records
+	// do not exist yet.
+	for _, challenge := range challengesToAccept {
+		_, err = acmeClient.Accept(testCtx, challenge)
+		require.NoError(t, err, "failed to accept challenge: %v", challenge)
+	}
+
+	// Now wait until we start seeing the challenge engine fail the lookups.
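+	// A non-nil challenge.Error tells us Vault's verification engine has attempted
+	// the DNS lookup and failed, i.e. validation is genuinely in flight before we
+	// seal the active node.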
+ testhelpers.RetryUntil(t, 10*time.Second, func() error { + myAuth, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + + for _, challenge := range myAuth.Challenges { + if challenge.Error != nil { + // The engine failed on one of the challenges, we are done waiting + return nil + } + } + + return fmt.Errorf("no challenges for auth %v contained any errors", myAuth.Identifier) + }) + + // Seal the active node now and wait for the next node to appear + previousActiveNode := pki.GetActiveClusterNode() + logger.Trace("Stepping down node", "node_id", previousActiveNode.NodeID) + + haStatus, _ := previousActiveNode.APIClient().Sys().HAStatus() + logger.Trace("HA Status", "node", previousActiveNode.NodeID, "ha_status", haStatus) + + testhelpers.RetryUntil(t, 2*time.Minute, func() error { + state, err := previousActiveNode.APIClient().Sys().RaftAutopilotState() + if err != nil { + return err + } + + logger.Trace("Raft AutoPilotState", "node", previousActiveNode.NodeID, "state", state) + if !state.Healthy { + return fmt.Errorf("raft auto pilot state is not healthy") + } + + // Make sure that we have at least one node that can take over prior to sealing the current active node. + if state.FailureTolerance < 1 { + msg := fmt.Sprintf("there is no fault tolerance within raft state yet: %d", state.FailureTolerance) + logger.Trace(msg) + return errors.New(msg) + } + + return nil + }) + + logger.Trace("Sealing active node") + err = previousActiveNode.APIClient().Sys().Seal() + require.NoError(t, err, "failed stepping down node") + + // Add our DNS records now + logger.Trace("Adding DNS records") + for dnsHost, dnsValue := range dnsTxtRecordsToAdd { + err = pki.AddDNSRecord(dnsHost, "TXT", dnsValue) + require.NoError(t, err, "failed adding DNS record: %s:%s", dnsHost, dnsValue) + } + + // Wait for our new active node to come up + testhelpers.RetryUntil(t, 2*time.Minute, func() error { + newNode := pki.GetActiveClusterNode() + if newNode.NodeID == previousActiveNode.NodeID { + return fmt.Errorf("existing node is still the leader after stepdown: %s", newNode.NodeID) + } + + logger.Trace("New active node", "node_id", newNode.NodeID) + return nil + }) + + // Wait for the order/challenges to be validated. 
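+	// WaitOrder polls the order URL until it reaches a final state (ready, valid,
+	// or invalid); with the TXT records now published, the new active node should
+	// pick up and finish the validations its predecessor left in flight.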
+	_, err = acmeClient.WaitOrder(testCtx, order.URI)
+	if err != nil {
+		// We failed waiting for the order to become ready; let's print out the current
+		// challenge statuses to help debugging before failing the test.
+		myAuth, authErr := acmeClient.GetAuthorization(testCtx, authUrl)
+		require.NoError(t, authErr, "failed to lookup authorization at url: %s and wait order failed with: %v", authUrl, err)
+
+		logger.Trace("Authorization Status", "status", myAuth.Status)
+		for _, challenge := range myAuth.Challenges {
+			// Dump each challenge's state so we can see which one the engine failed on.
+			logger.Trace("challenge", "type", challenge.Type, "status", challenge.Status, "error", challenge.Error)
+		}
+
+		require.NoError(t, err, "failed waiting for order to be ready")
+	}
+
+	// Create/sign the CSR and ask the ACME server to sign it, returning us the final certificate
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating csr key")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false)
+	require.NoError(t, err, "failed to get a certificate back from ACME")
+
+	_, err = x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert bytes")
+}
+
+func getDockerLog(logger hclog.Logger) (func(s string), *pkiext.LogConsumerWriter, *pkiext.LogConsumerWriter) {
+	logConsumer := func(s string) {
+		logger.Trace(s)
+	}
+
+	logStdout := &pkiext.LogConsumerWriter{logConsumer}
+	logStderr := &pkiext.LogConsumerWriter{logConsumer}
+	return logConsumer, logStdout, logStderr
+}
diff --git a/builtin/logical/pkiext/pkiext_binary/pki_cluster.go b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go
new file mode 100644
index 000000000000..4462f5103879
--- /dev/null
+++ b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go
@@ -0,0 +1,316 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pkiext_binary
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/pki/dnstest"
+	dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/helper/testcluster"
+	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
+)
+
+type VaultPkiCluster struct {
+	cluster *docker.DockerCluster
+	Dns     *dnstest.TestServer
+}
+
+func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster {
+	binary := os.Getenv("VAULT_BINARY")
+	if binary == "" {
+		t.Skip("only running docker test when $VAULT_BINARY present")
+	}
+
+	opts := &docker.DockerClusterOptions{
+		ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault",
+		// We're replacing the binary anyway, so we're not too particular about
+		// the docker image version tag.
+ ImageTag: "latest", + VaultBinary: binary, + ClusterOptions: testcluster.ClusterOptions{ + VaultNodeConfig: &testcluster.VaultNodeConfig{ + LogLevel: "TRACE", + }, + NumCores: 3, + }, + } + + cluster := docker.NewTestDockerCluster(t, opts) + + return &VaultPkiCluster{cluster: cluster} +} + +func NewVaultPkiClusterWithDNS(t *testing.T) *VaultPkiCluster { + cluster := NewVaultPkiCluster(t) + dns := dnstest.SetupResolverOnNetwork(t, "dadgarcorp.com", cluster.GetContainerNetworkName()) + cluster.Dns = dns + return cluster +} + +func (vpc *VaultPkiCluster) Cleanup() { + vpc.cluster.Cleanup() + if vpc.Dns != nil { + vpc.Dns.Cleanup() + } +} + +func (vpc *VaultPkiCluster) GetActiveClusterNode() *docker.DockerClusterNode { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + node, err := testcluster.WaitForActiveNode(ctx, vpc.cluster) + if err != nil { + panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err)) + } + + return vpc.cluster.ClusterNodes[node] +} + +func (vpc *VaultPkiCluster) GetNonActiveNodes() []*docker.DockerClusterNode { + nodes := []*docker.DockerClusterNode{} + for _, node := range vpc.cluster.ClusterNodes { + leader, err := node.APIClient().Sys().Leader() + if err != nil { + continue + } + + if !leader.IsSelf { + nodes = append(nodes, node) + } + } + + return nodes +} + +func (vpc *VaultPkiCluster) GetActiveContainerHostPort() string { + return vpc.GetActiveClusterNode().HostPort +} + +func (vpc *VaultPkiCluster) GetContainerNetworkName() string { + return vpc.cluster.ClusterNodes[0].ContainerNetworkName +} + +func (vpc *VaultPkiCluster) GetActiveContainerIP() string { + return vpc.GetActiveClusterNode().ContainerIPAddress +} + +func (vpc *VaultPkiCluster) GetActiveContainerID() string { + return vpc.GetActiveClusterNode().Container.ID +} + +func (vpc *VaultPkiCluster) GetActiveNode() *api.Client { + return vpc.GetActiveClusterNode().APIClient() +} + +// GetListenerCACertPEM returns the Vault cluster's PEM-encoded CA certificate. 
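+// Tests can add it to a TLS root pool when dialing the cluster's API listener,
+// rather than relying on InsecureSkipVerify.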
+func (vpc *VaultPkiCluster) GetListenerCACertPEM() []byte { + return vpc.cluster.CACertPEM +} + +func (vpc *VaultPkiCluster) AddHostname(hostname, ip string) error { + if vpc.Dns != nil { + vpc.Dns.AddRecord(hostname, "A", ip) + vpc.Dns.PushConfig() + return nil + } else { + return vpc.AddNameToHostFiles(hostname, ip) + } +} + +func (vpc *VaultPkiCluster) AddNameToHostFiles(hostname, ip string) error { + updateHostsCmd := []string{ + "sh", "-c", + "echo '" + ip + " " + hostname + "' >> /etc/hosts", + } + for _, node := range vpc.cluster.ClusterNodes { + containerID := node.Container.ID + _, _, retcode, err := dockhelper.RunCmdWithOutput(vpc.cluster.DockerAPI, context.Background(), containerID, updateHostsCmd) + if err != nil { + return fmt.Errorf("failed updating container %s host file: %w", containerID, err) + } + + if retcode != 0 { + return fmt.Errorf("expected zero retcode from updating vault host file in container %s got: %d", containerID, retcode) + } + } + + return nil +} + +func (vpc *VaultPkiCluster) AddDNSRecord(hostname, recordType, ip string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to provision custom records") + } + + vpc.Dns.AddRecord(hostname, recordType, ip) + vpc.Dns.PushConfig() + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecord(domain string, record string, value string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove specific record") + } + + vpc.Dns.RemoveRecord(domain, record, value) + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecordsOfTypeForDomain(domain string, record string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records of type") + } + + vpc.Dns.RemoveRecordsOfTypeForDomain(domain, record) + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecordsForDomain(domain string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove records for domain") + } + + vpc.Dns.RemoveRecordsForDomain(domain) + return nil +} + +func (vpc *VaultPkiCluster) RemoveAllDNSRecords() error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records") + } + + vpc.Dns.RemoveAllRecords() + return nil +} + +func (vpc *VaultPkiCluster) CreateMount(name string) (*VaultPkiMount, error) { + err := vpc.GetActiveNode().Sys().Mount(name, &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + AllowedResponseHeaders: []string{ + "Last-Modified", "Replay-Nonce", + "Link", "Location", + }, + }, + }) + if err != nil { + return nil, err + } + + return &VaultPkiMount{ + vpc, + name, + }, nil +} + +func (vpc *VaultPkiCluster) CreateAcmeMount(mountName string) (*VaultPkiMount, error) { + pki, err := vpc.CreateMount(mountName) + if err != nil { + return nil, fmt.Errorf("failed creating mount %s: %w", mountName, err) + } + + err = pki.UpdateClusterConfig(nil) + if err != nil { + return nil, fmt.Errorf("failed updating cluster config: %w", err) + } + + cfg := map[string]interface{}{ + "eab_policy": "not-required", + } + if vpc.Dns != nil { + cfg["dns_resolver"] = vpc.Dns.GetRemoteAddr() + } + + err = pki.UpdateAcmeConfig(true, cfg) + if err != nil { + return nil, fmt.Errorf("failed updating acme config: %w", err) + } + + // Setup root+intermediate CA hierarchy within 
this mount. + resp, err := pki.GenerateRootInternal(map[string]interface{}{ + "common_name": "Root X1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "key_bits": 256, + "use_pss": false, + "issuer_name": "root", + }) + if err != nil { + return nil, fmt.Errorf("failed generating root internal: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed generating root internal: nil or empty response but no error") + } + + resp, err = pki.GenerateIntermediateInternal(map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "key_bits": 256, + "use_pss": false, + }) + if err != nil { + return nil, fmt.Errorf("failed generating int csr: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed generating int csr: nil or empty response but no error") + } + + resp, err = pki.SignIntermediary("default", resp.Data["csr"], map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "csr": resp.Data["csr"], + }) + if err != nil { + return nil, fmt.Errorf("failed signing int csr: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed signing int csr: nil or empty response but no error") + } + intCert := resp.Data["certificate"].(string) + + resp, err = pki.ImportBundle(intCert, nil) + if err != nil { + return nil, fmt.Errorf("failed importing signed cert: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed importing signed cert: nil or empty response but no error") + } + + err = pki.UpdateDefaultIssuer(resp.Data["imported_issuers"].([]interface{})[0].(string), nil) + if err != nil { + return nil, fmt.Errorf("failed to set intermediate as default: %w", err) + } + + err = pki.UpdateIssuer("default", map[string]interface{}{ + "leaf_not_after_behavior": "truncate", + }) + if err != nil { + return nil, fmt.Errorf("failed to update intermediate ttl behavior: %w", err) + } + + err = pki.UpdateIssuer("root", map[string]interface{}{ + "leaf_not_after_behavior": "truncate", + }) + if err != nil { + return nil, fmt.Errorf("failed to update root ttl behavior: %w", err) + } + + return pki, nil +} diff --git a/builtin/logical/pkiext/pkiext_binary/pki_mount.go b/builtin/logical/pkiext/pkiext_binary/pki_mount.go new file mode 100644 index 000000000000..15ce16b2a3c4 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/pki_mount.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pkiext_binary + +import ( + "context" + "encoding/base64" + "fmt" + "path" + + "github.com/hashicorp/vault/api" +) + +type VaultPkiMount struct { + *VaultPkiCluster + mount string +} + +func (vpm *VaultPkiMount) UpdateClusterConfig(config map[string]interface{}) error { + defaultPath := "https://" + vpm.cluster.ClusterNodes[0].ContainerIPAddress + ":8200/v1/" + vpm.mount + defaults := map[string]interface{}{ + "path": defaultPath, + "aia_path": defaultPath, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/cluster", mergeWithDefaults(config, defaults)) + return err +} + +func (vpm *VaultPkiMount) UpdateClusterConfigLocalAddr() (string, error) { + basePath := fmt.Sprintf("https://%s/v1/%s", vpm.GetActiveContainerHostPort(), vpm.mount) + return basePath, vpm.UpdateClusterConfig(map[string]interface{}{ + "path": basePath, + }) +} + +func (vpm *VaultPkiMount) UpdateAcmeConfig(enable bool, config map[string]interface{}) error { + defaults := map[string]interface{}{ + "enabled": enable, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/acme", mergeWithDefaults(config, defaults)) + return err +} + +func (vpm *VaultPkiMount) GenerateRootInternal(props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "common_name": "root-test.com", + "key_type": "ec", + "issuer_name": "root", + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/root/generate/internal", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) GenerateIntermediateInternal(props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "common_name": "intermediary-test.com", + "key_type": "ec", + "issuer_name": "intermediary", + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/intermediate/generate/internal", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) SignIntermediary(signingIssuer string, csr interface{}, props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "csr": csr, + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/issuer/"+signingIssuer+"/sign-intermediate", + mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) ImportBundle(pemBundle interface{}, props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "pem_bundle": pemBundle, + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/issuers/import/bundle", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) UpdateDefaultIssuer(issuerId string, props map[string]interface{}) error { + defaults := map[string]interface{}{ + "default": issuerId, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/issuers", mergeWithDefaults(props, defaults)) + + return err +} + +func (vpm *VaultPkiMount) UpdateIssuer(issuerRef string, props map[string]interface{}) error { + defaults := map[string]interface{}{} + + _, err := vpm.GetActiveNode().Logical().JSONMergePatch(context.Background(), + vpm.mount+"/issuer/"+issuerRef, mergeWithDefaults(props, defaults)) + + return err +} + +func (vpm *VaultPkiMount) UpdateRole(roleName string, config map[string]interface{}) error { + defaults := 
map[string]interface{}{} + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/roles/"+roleName, mergeWithDefaults(config, defaults)) + + return err +} + +func (vpm *VaultPkiMount) GetEabKey(acmeDirectory string) (string, string, error) { + eabPath := path.Join(vpm.mount, acmeDirectory, "/new-eab") + resp, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), eabPath, map[string]interface{}{}) + if err != nil { + return "", "", fmt.Errorf("failed fetching eab from %s: %w", eabPath, err) + } + eabId := resp.Data["id"].(string) + base64EabKey := resp.Data["key"].(string) + // just make sure we get something valid back from the server, we still want to pass back the base64 version + // to the caller... + _, err = base64.RawURLEncoding.DecodeString(base64EabKey) + if err != nil { + return "", "", fmt.Errorf("failed decoding key response field: %s: %w", base64EabKey, err) + } + return eabId, base64EabKey, nil +} + +// GetCACertPEM retrieves the PKI mount's PEM-encoded CA certificate. +func (vpm *VaultPkiMount) GetCACertPEM() (string, error) { + caCertPath := path.Join(vpm.mount, "/cert/ca") + resp, err := vpm.GetActiveNode().Logical().ReadWithContext(context.Background(), caCertPath) + if err != nil { + return "", err + } + return resp.Data["certificate"].(string), nil +} + +func mergeWithDefaults(config map[string]interface{}, defaults map[string]interface{}) map[string]interface{} { + myConfig := config + if myConfig == nil { + myConfig = map[string]interface{}{} + } + for key, value := range defaults { + if origVal, exists := config[key]; !exists { + myConfig[key] = value + } else { + myConfig[key] = origVal + } + } + + return myConfig +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json new file mode 100644 index 000000000000..272ecd102575 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json @@ -0,0 +1,66 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "challenges": { + "tls-alpn": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json new file mode 100644 index 000000000000..61cab8894958 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json @@ -0,0 +1,70 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "external_account": { + "key_id": "{{.EABID}}", 
+ "mac_key": "{{.EABKey}}" + }, + "challenges": { + "tls-alpn": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json new file mode 100644 index 000000000000..0bc0ea9112e8 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json @@ -0,0 +1,66 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "challenges": { + "http": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/test_helpers.go b/builtin/logical/pkiext/test_helpers.go index 9931b1613ba3..0950923e3140 100644 --- a/builtin/logical/pkiext/test_helpers.go +++ b/builtin/logical/pkiext/test_helpers.go @@ -1,9 +1,11 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pkiext import ( + "bufio" + "bytes" "crypto" "crypto/x509" "encoding/pem" @@ -12,7 +14,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" ) @@ -66,3 +67,19 @@ func parseKey(t *testing.T, pemKey string) crypto.Signer { require.NoError(t, err) return key } + +type LogConsumerWriter struct { + Consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future. + scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.Consumer(scanner.Text()) + } + return len(p), nil +} diff --git a/builtin/logical/pkiext/zlint_test.go b/builtin/logical/pkiext/zlint_test.go index 7c7902b01fff..38f39e0a7112 100644 --- a/builtin/logical/pkiext/zlint_test.go +++ b/builtin/logical/pkiext/zlint_test.go @@ -1,17 +1,19 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pkiext import ( "context" "encoding/json" + "fmt" "sync" "testing" + "time" "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/helper/testhelpers/docker" - + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/stretchr/testify/require" ) @@ -24,7 +26,7 @@ func buildZLintContainer(t *testing.T) { containerfile := ` FROM docker.mirror.hashicorp.services/library/golang:latest -RUN go install github.com/zmap/zlint/v3/cmd/zlint@latest +RUN go install github.com/zmap/zlint/v3/cmd/zlint@v3.6.2 ` bCtx := docker.NewBuildContext() @@ -51,15 +53,21 @@ RUN go install github.com/zmap/zlint/v3/cmd/zlint@latest } ctx := context.Background() - output, err := zRunner.BuildImage(ctx, containerfile, bCtx, - docker.BuildRemove(true), docker.BuildForceRemove(true), - docker.BuildPullParent(true), - docker.BuildTags([]string{imageName + ":" + imageTag})) - if err != nil { - t.Fatalf("Could not build new image: %v", err) - } - t.Logf("Image build output: %v", string(output)) + // Sometimes we see timeouts and issues pulling the zlint code from GitHub + testhelpers.RetryUntil(t, 30*time.Second, func() error { + output, err := zRunner.BuildImage(ctx, containerfile, bCtx, + docker.BuildRemove(true), + docker.BuildForceRemove(true), + docker.BuildPullParent(true), + docker.BuildTags([]string{imageName + ":" + imageTag})) + if err != nil { + return fmt.Errorf("could not build new image with zlint: %w", err) + } + + t.Logf("Image build output: %v", string(output)) + return nil + }) } func RunZLintContainer(t *testing.T, certificate string) []byte { @@ -67,11 +75,12 @@ func RunZLintContainer(t *testing.T, certificate string) []byte { buildZLintContainer(t) }) + ctx := context.Background() // We don't actually care about the address, we just want to start the // container so we can run commands in it. We'd ideally like to skip this // step and only build a new image, but the zlint output would be // intermingled with container build stages, so its not that useful. - ctr, _, _, err := zRunner.Start(context.Background(), true, false) + result, err := zRunner.Start(ctx, true, false) if err != nil { t.Fatalf("Could not start golang container for zlint: %s", err) } @@ -79,13 +88,13 @@ func RunZLintContainer(t *testing.T, certificate string) []byte { // Copy the cert into the newly running container. certCtx := docker.NewBuildContext() certCtx["cert.pem"] = docker.PathContentsFromBytes([]byte(certificate)) - if err := zRunner.CopyTo(ctr.ID, "/go/", certCtx); err != nil { + if err := zRunner.CopyTo(result.Container.ID, "/go/", certCtx); err != nil { t.Fatalf("Could not copy certificate into container: %v", err) } // Run the zlint command and save the output. cmd := []string{"/go/bin/zlint", "/go/cert.pem"} - stdout, stderr, retcode, err := zRunner.RunCmdWithOutput(context.Background(), ctr.ID, cmd) + stdout, stderr, retcode, err := zRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) if err != nil { t.Fatalf("Could not run command in container: %v", err) } @@ -100,7 +109,7 @@ func RunZLintContainer(t *testing.T, certificate string) []byte { } // Clean up after ourselves. 
- if err := zRunner.Stop(context.Background(), ctr.ID); err != nil { + if err := zRunner.Stop(context.Background(), result.Container.ID); err != nil { t.Fatalf("failed to stop container: %v", err) } diff --git a/builtin/logical/rabbitmq/backend.go b/builtin/logical/rabbitmq/backend.go index 548a5540275d..e6a093d54264 100644 --- a/builtin/logical/rabbitmq/backend.go +++ b/builtin/logical/rabbitmq/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq @@ -14,6 +14,8 @@ import ( rabbithole "github.com/michaelklishin/rabbit-hole/v2" ) +const operationPrefixRabbitMQ = "rabbit-mq" + // Factory creates and configures the backend func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go index ef052cdbd98f..21510b2f9881 100644 --- a/builtin/logical/rabbitmq/backend_test.go +++ b/builtin/logical/rabbitmq/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq @@ -11,8 +11,8 @@ import ( "testing" "github.com/hashicorp/go-secure-stdlib/base62" - "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" rabbithole "github.com/michaelklishin/rabbit-hole/v2" diff --git a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go index 2cb62daca51a..942db13dc141 100644 --- a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go +++ b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/rabbitmq/passwords.go b/builtin/logical/rabbitmq/passwords.go index 8ba08a0afa4b..ee6b9d02fcac 100644 --- a/builtin/logical/rabbitmq/passwords.go +++ b/builtin/logical/rabbitmq/passwords.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq diff --git a/builtin/logical/rabbitmq/path_config_connection.go b/builtin/logical/rabbitmq/path_config_connection.go index dd735ac45366..d586ffc035a7 100644 --- a/builtin/logical/rabbitmq/path_config_connection.go +++ b/builtin/logical/rabbitmq/path_config_connection.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq @@ -20,6 +20,13 @@ const ( func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationVerb: "configure", + OperationSuffix: "connection", + }, + Fields: map[string]*framework.FieldSchema{ "connection_uri": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/path_config_connection_test.go b/builtin/logical/rabbitmq/path_config_connection_test.go index 55e6b2cd042c..8e7de881c07a 100644 --- a/builtin/logical/rabbitmq/path_config_connection_test.go +++ b/builtin/logical/rabbitmq/path_config_connection_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq diff --git a/builtin/logical/rabbitmq/path_config_lease.go b/builtin/logical/rabbitmq/path_config_lease.go index f885d68b7115..cf82a2024cc1 100644 --- a/builtin/logical/rabbitmq/path_config_lease.go +++ b/builtin/logical/rabbitmq/path_config_lease.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq @@ -15,6 +15,11 @@ import ( func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + }, + Fields: map[string]*framework.FieldSchema{ "ttl": { Type: framework.TypeDurationSecond, @@ -28,9 +33,21 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseUpdate, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/rabbitmq/path_config_lease_test.go b/builtin/logical/rabbitmq/path_config_lease_test.go index 9e565c56f69b..542a5d284e79 100644 --- a/builtin/logical/rabbitmq/path_config_lease_test.go +++ b/builtin/logical/rabbitmq/path_config_lease_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq diff --git a/builtin/logical/rabbitmq/path_role_create.go b/builtin/logical/rabbitmq/path_role_create.go index 3f3b3ba7cbac..c06e82442812 100644 --- a/builtin/logical/rabbitmq/path_role_create.go +++ b/builtin/logical/rabbitmq/path_role_create.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq import ( "context" "fmt" - "io/ioutil" + "io" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/template" @@ -21,6 +21,13 @@ const ( func pathCreds(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationVerb: "request", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -106,7 +113,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr } }() if !isIn200s(resp.StatusCode) { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return nil, fmt.Errorf("error creating user %s - %d: %s", username, resp.StatusCode, body) } @@ -121,7 +128,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr b.Logger().Error(fmt.Sprintf("deleting %s due to permissions being in an unknown state, but failed: %s", username, err)) } if !isIn200s(resp.StatusCode) { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) b.Logger().Error(fmt.Sprintf("deleting %s due to permissions being in an unknown state, but error deleting: %d: %s", username, resp.StatusCode, body)) } }() @@ -144,7 +151,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr } }() if !isIn200s(resp.StatusCode) { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return fmt.Errorf("error updating vhost permissions for %s - %d: %s", vhost, resp.StatusCode, body) } return nil @@ -173,7 +180,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr } }() if !isIn200s(resp.StatusCode) { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return fmt.Errorf("error updating vhost permissions for %s - %d: %s", vhost, resp.StatusCode, body) } return nil diff --git a/builtin/logical/rabbitmq/path_role_create_test.go b/builtin/logical/rabbitmq/path_role_create_test.go index ecb974687251..0f2591caf7dc 100644 --- a/builtin/logical/rabbitmq/path_role_create_test.go +++ b/builtin/logical/rabbitmq/path_role_create_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq diff --git a/builtin/logical/rabbitmq/path_roles.go b/builtin/logical/rabbitmq/path_roles.go index c236e33d22de..9164d5780b68 100644 --- a/builtin/logical/rabbitmq/path_roles.go +++ b/builtin/logical/rabbitmq/path_roles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq @@ -16,6 +16,10 @@ import ( func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationSuffix: "roles", + }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -27,6 +31,10 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationSuffix: "role", + }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/secret_creds.go b/builtin/logical/rabbitmq/secret_creds.go index 2d0cce30858b..eaaf2afd0a62 100644 --- a/builtin/logical/rabbitmq/secret_creds.go +++ b/builtin/logical/rabbitmq/secret_creds.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package rabbitmq diff --git a/builtin/logical/ssh/backend.go b/builtin/logical/ssh/backend.go index 53105e70f739..f750e79fac56 100644 --- a/builtin/logical/ssh/backend.go +++ b/builtin/logical/ssh/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -13,6 +13,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixSSH = "ssh" + type backend struct { *framework.Backend view logical.Storage diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 075ecd056c53..80f3300e4a90 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -16,18 +16,16 @@ import ( "time" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/hashicorp/vault/sdk/logical" - "golang.org/x/crypto/ssh" - "github.com/hashicorp/vault/builtin/credential/userpass" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/require" + "golang.org/x/crypto/ssh" ) const ( @@ -134,6 +132,8 @@ SjOQL/GkH1nkRcDS9++aAAAAAmNhAQID dockerImageTagSupportsNoRSA1 = "8.4_p1-r3-ls48" ) +var ctx = context.Background() + func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) { if tag == "" { tag = dockerImageTagSupportsNoRSA1 @@ -948,12 +948,63 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== return nil }, }, + testIssueCert("testcarole", "ec", testUserName, sshAddress, expectError), + testIssueCert("testcarole", "ed25519", testUserName, sshAddress, expectError), + testIssueCert("testcarole", "rsa", testUserName, sshAddress, expectError), }, } logicaltest.Test(t, testCase) } +func testIssueCert(role string, keyType string, testUserName string, sshAddress string, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "issue/" + role, + ErrorOk: expectError, + Data: map[string]interface{}{ + "key_type": keyType, + "valid_principals": testUserName, + }, + + Check: func(resp *logical.Response) error { + // Tolerate nil response if an error was expected + if expectError && resp == nil { + return nil + } + + signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) + if signedKey == "" { + return errors.New("no signed key in response") + } + + privKey, err := ssh.ParsePrivateKey([]byte(resp.Data["private_key"].(string))) + if err != nil { + return fmt.Errorf("error parsing private key: %v", err) + } + + parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey)) + if err != nil { + return fmt.Errorf("error parsing signed key: %v", err) + } + certSigner, err := ssh.NewCertSigner(parsedKey.(*ssh.Certificate), privKey) + if err != nil { + return err + } + + err = testSSH(testUserName, sshAddress, ssh.PublicKeys(certSigner), "date") + if expectError && err == nil { + return fmt.Errorf("expected error but got none") + } + if !expectError && err != nil { + return err + } + + return nil + }, + } +} + func TestSSHBackend_CAUpgradeAlgorithmSigner(t *testing.T) { cleanup, sshAddress := prepareTestContainer(t, dockerImageTagSupportsRSA1, testCAPublicKey) defer cleanup() @@ -1247,6 +1298,80 @@ func TestBackend_OptionsOverrideDefaults(t *testing.T) { logicaltest.Test(t, testCase) } +func TestBackend_EmptyPrincipals(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + createRoleStep("no_user_principals", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_user_key_lengths": 
map[string]interface{}{ + "rsa": 2048, + }, + "allowed_users": "no_principals", + }), + { + Operation: logical.UpdateOperation, + Path: "sign/no_user_principals", + Data: map[string]interface{}{ + "public_key": testCAPublicKey, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "empty valid principals not allowed by role" { + return errors.New("expected empty valid principals not allowed by role") + } + return nil + }, + }, + createRoleStep("no_host_principals", map[string]interface{}{ + "key_type": "ca", + "allow_host_certificates": true, + "allowed_domains": "*", + }), + { + Operation: logical.UpdateOperation, + Path: "sign/no_host_principals", + Data: map[string]interface{}{ + "cert_type": "host", + "public_key": testCAPublicKeyEd25519, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "empty valid principals not allowed by role" { + return errors.New("expected empty valid principals not allowed by role") + } + return nil + }, + }, + { + Operation: logical.UpdateOperation, + Path: "sign/no_host_principals", + Data: map[string]interface{}{ + "cert_type": "host", + "public_key": testCAPublicKeyEd25519, + "valid_principals": "example.com", + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != nil { + return errors.New("expected no error") + } + return nil + }, + }, + }, + } + logicaltest.Test(t, testCase) +} + func TestBackend_AllowedUserKeyLengths(t *testing.T) { config := logical.TestBackendConfig() @@ -1264,6 +1389,7 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { "allowed_user_key_lengths": map[string]interface{}{ "rsa": 4096, }, + "allowed_users": "guest", }), { Operation: logical.UpdateOperation, @@ -1285,13 +1411,15 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { "allowed_user_key_lengths": map[string]interface{}{ "rsa": 2048, }, + "allowed_users": "guest", }), // Pass with 2048 key { Operation: logical.UpdateOperation, Path: "sign/stdkey", Data: map[string]interface{}{ - "public_key": testCAPublicKey, + "public_key": testCAPublicKey, + "valid_principals": "guest", }, }, // Fail with 4096 key @@ -1299,7 +1427,8 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { Operation: logical.UpdateOperation, Path: "sign/stdkey", Data: map[string]interface{}{ - "public_key": publicKey4096, + "public_key": publicKey4096, + "valid_principals": "guest", }, ErrorOk: true, Check: func(resp *logical.Response) error { @@ -1315,13 +1444,15 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { "allowed_user_key_lengths": map[string]interface{}{ "rsa": []int{2048, 4096}, }, + "allowed_users": "guest", }), // Pass with 2048-bit key { Operation: logical.UpdateOperation, Path: "sign/multikey", Data: map[string]interface{}{ - "public_key": testCAPublicKey, + "public_key": testCAPublicKey, + "valid_principals": "guest", }, }, // Pass with 4096-bit key @@ -1329,7 +1460,8 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { Operation: logical.UpdateOperation, Path: "sign/multikey", Data: map[string]interface{}{ - "public_key": publicKey4096, + "public_key": publicKey4096, + "valid_principals": "guest", }, }, // Fail with 3072-bit key @@ -1352,7 +1484,8 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { Operation: logical.UpdateOperation, Path: "sign/multikey", Data: map[string]interface{}{ - "public_key": publicKeyECDSA256, + "public_key": publicKeyECDSA256, + "valid_principals": "guest", }, ErrorOk: true, Check: func(resp *logical.Response) 
error { @@ -1369,13 +1502,15 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { "ec": []int{256}, "ecdsa-sha2-nistp521": 0, }, + "allowed_users": "guest", }), // Pass with ECDSA P-256 { Operation: logical.UpdateOperation, Path: "sign/ectypes", Data: map[string]interface{}{ - "public_key": publicKeyECDSA256, + "public_key": publicKeyECDSA256, + "valid_principals": "guest", }, }, // Pass with ECDSA P-521 @@ -1383,7 +1518,8 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { Operation: logical.UpdateOperation, Path: "sign/ectypes", Data: map[string]interface{}{ - "public_key": publicKeyECDSA521, + "public_key": publicKeyECDSA521, + "valid_principals": "guest", }, }, // Fail with RSA key @@ -1391,7 +1527,8 @@ func TestBackend_AllowedUserKeyLengths(t *testing.T) { Operation: logical.UpdateOperation, Path: "sign/ectypes", Data: map[string]interface{}{ - "public_key": publicKey3072, + "public_key": publicKey3072, + "valid_principals": "guest", }, ErrorOk: true, Check: func(resp *logical.Response) error { @@ -1845,6 +1982,7 @@ func TestSSHBackend_IssueSign(t *testing.T) { "ecdsa-sha2-nistp521": 0, "ed25519": 0, }, + "allow_empty_principals": true, }), // Key_type not in allowed_user_key_types_lengths issueSSHKeyPairStep("testing", "ec", 256, true, "provided key_type value not in allowed_user_key_types"), @@ -2465,3 +2603,328 @@ func TestBackend_CleanupDynamicHostKeys(t *testing.T) { require.NotNil(t, resp.Data["message"]) require.Contains(t, resp.Data["message"], "0 of 0") } + +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation") +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. 
+ client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. + client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. 
+ resp, err = client.Logical().ReadWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow writing, but nothing else. + client.SetToken(token) + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. + resp, err = client.Logical().ReadWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + if resp != nil || err != nil { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly +) + +var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "ssh": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount SSH. + err := client.Sys().MountWithContext(ctx, "ssh", &api.MountInput{ + Type: "ssh", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Setup basic configuration. 
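+	// Each write below provisions one of the paths exercised by the checker map:
+	// a CA keypair, a CA-signing role plus an issued certificate, and an OTP role
+	// with a generated credential.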
+ _, err = client.Logical().WriteWithContext(ctx, "ssh/config/ca", map[string]interface{}{ + "generate_signing_key": true, + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-ca", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_users": "toor", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "ssh/issue/test-ca", map[string]interface{}{ + "valid_principals": "toor", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-otp", map[string]interface{}{ + "key_type": "otp", + "default_user": "toor", + "cidr_list": "127.0.0.0/24", + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "ssh/creds/test-otp", map[string]interface{}{ + "username": "toor", + "ip": "127.0.0.1", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + // key := resp.Data["key"].(string) + + paths := map[string]pathAuthChecker{ + "config/ca": shouldBeAuthed, + "config/zeroaddress": shouldBeAuthed, + "creds/test-otp": shouldBeAuthed, + "issue/test-ca": shouldBeAuthed, + "lookup": shouldBeAuthed, + "public_key": shouldBeUnauthedReadList, + "roles/test-ca": shouldBeAuthed, + "roles/test-otp": shouldBeAuthed, + "roles/": shouldBeAuthed, + "sign/test-ca": shouldBeAuthed, + "tidy/dynamic-keys": shouldBeAuthed, + "verify": shouldBeUnauthedWriteOnly, + } + for path, checkerType := range paths { + checker := pathAuthChckerMap[checkerType] + checker(t, client, "ssh/"+path, token) + } + + client.SetToken(token) + openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") + if err != nil { + t.Fatalf("failed to get openapi data: %v", err) + } + + if len(openAPIResp.Data["paths"].(map[string]interface{})) == 0 { + t.Fatalf("expected to get response from OpenAPI; got empty path list") + } + + validatedPath := false + for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { + if !strings.HasPrefix(openapi_path, "/ssh/") { + t.Logf("Skipping path: %v", openapi_path) + continue + } + + t.Logf("Validating path: %v", openapi_path) + validatedPath = true + + // Substitute values in from our testing map. + raw_path := openapi_path[5:] + if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "roles/") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca") + } + if strings.Contains(raw_path, "{role}") && (strings.Contains(raw_path, "sign/") || strings.Contains(raw_path, "issue/")) { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca") + } + if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "creds") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test-otp") + } + + handler, present := paths[raw_path] + if !present { + t.Fatalf("OpenAPI reports SSH mount contains %v -> %v but was not tested to be authed or not authed.", + openapi_path, raw_path) + } + + openapi_data := raw_data.(map[string]interface{}) + hasList := false + rawGetData, hasGet := openapi_data["get"] + if hasGet { + getData := rawGetData.(map[string]interface{}) + getParams, paramsPresent := getData["parameters"].(map[string]interface{}) + if getParams != nil && paramsPresent { + if _, hasList = getParams["list"]; hasList { + // LIST is exclusive from GET on the same endpoint usually. 
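+					// Vault's OpenAPI output models LIST as a GET carrying a "list"
+					// query parameter, so treat such a GET as LIST-only here.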
+				hasGet = false
+			}
+		}
+		_, hasPost := openapi_data["post"]
+		_, hasDelete := openapi_data["delete"]
+
+		if handler == shouldBeUnauthedReadList {
+			if hasPost || hasDelete {
+				t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities")
+			}
+		} else if handler == shouldBeUnauthedWriteOnly {
+			if hasGet || hasList {
+				t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities")
+			}
+		}
+	}
+
+	if !validatedPath {
+		t.Fatalf("Expected to have validated at least one path.")
+	}
+}
diff --git a/builtin/logical/ssh/cmd/ssh/main.go b/builtin/logical/ssh/cmd/ssh/main.go
index a9cf8b269696..4a2163d99b68 100644
--- a/builtin/logical/ssh/cmd/ssh/main.go
+++ b/builtin/logical/ssh/cmd/ssh/main.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package main
diff --git a/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go
index 08d6ee5f41c0..7d028def309a 100644
--- a/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go
+++ b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package ssh
 
@@ -16,6 +16,11 @@ const keysStoragePrefix = "keys/"
 func pathCleanupKeys(b *backend) *framework.Path {
 	return &framework.Path{
 		Pattern: "tidy/dynamic-keys",
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixSSH,
+			OperationVerb:   "tidy",
+			OperationSuffix: "dynamic-host-keys",
+		},
 		Callbacks: map[logical.Operation]framework.OperationFunc{
 			logical.DeleteOperation: b.handleCleanupKeys,
 		},
diff --git a/builtin/logical/ssh/path_config_ca.go b/builtin/logical/ssh/path_config_ca.go
index 973d60f8dc15..6d003c0ae5c0 100644
--- a/builtin/logical/ssh/path_config_ca.go
+++ b/builtin/logical/ssh/path_config_ca.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -20,9 +20,8 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" - "golang.org/x/crypto/ssh" - "github.com/mikesmitty/edkey" + "golang.org/x/crypto/ssh" ) const ( @@ -41,6 +40,11 @@ type keyStorageEntry struct { func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + }, + Fields: map[string]*framework.FieldSchema{ "private_key": { Type: framework.TypeString, @@ -67,10 +71,26 @@ func pathConfigCA(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigCAUpdate, - logical.DeleteOperation: b.pathConfigCADelete, - logical.ReadOperation: b.pathConfigCARead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigCAUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "ca", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigCADelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "ca-configuration", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigCARead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "ca-configuration", + }, + }, }, HelpSynopsis: `Set the SSH private key used for signing certificates.`, diff --git a/builtin/logical/ssh/path_config_ca_test.go b/builtin/logical/ssh/path_config_ca_test.go index 4c33fc80892d..269af3631e5d 100644 --- a/builtin/logical/ssh/path_config_ca_test.go +++ b/builtin/logical/ssh/path_config_ca_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -259,6 +259,7 @@ func TestSSH_ConfigCAKeyTypes(t *testing.T) { "key_type": "ca", "ttl": "30s", "not_before_duration": "2h", + "allow_empty_principals": true, } roleReq := &logical.Request{ Operation: logical.UpdateOperation, diff --git a/builtin/logical/ssh/path_config_zeroaddress.go b/builtin/logical/ssh/path_config_zeroaddress.go index fda59fec4824..a84d03d7c49a 100644 --- a/builtin/logical/ssh/path_config_zeroaddress.go +++ b/builtin/logical/ssh/path_config_zeroaddress.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package ssh
 
@@ -8,7 +8,6 @@ import (
 	"fmt"
 
 	"github.com/hashicorp/go-secure-stdlib/strutil"
-
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/logical"
 )
@@ -21,6 +20,11 @@ type zeroAddressRoles struct {
 func pathConfigZeroAddress(b *backend) *framework.Path {
 	return &framework.Path{
 		Pattern: "config/zeroaddress",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixSSH,
+		},
+
 		Fields: map[string]*framework.FieldSchema{
 			"roles": {
 				Type: framework.TypeCommaStringSlice,
@@ -29,10 +33,27 @@ func pathConfigZeroAddress(b *backend) *framework.Path {
 previously registered under these roles will be ignored.`,
 			},
 		},
-		Callbacks: map[logical.Operation]framework.OperationFunc{
-			logical.UpdateOperation: b.pathConfigZeroAddressWrite,
-			logical.ReadOperation:   b.pathConfigZeroAddressRead,
-			logical.DeleteOperation: b.pathConfigZeroAddressDelete,
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathConfigZeroAddressWrite,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationVerb:   "configure",
+					OperationSuffix: "zero-address",
+				},
+			},
+			logical.ReadOperation: &framework.PathOperation{
+				Callback: b.pathConfigZeroAddressRead,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationSuffix: "zero-address-configuration",
+				},
+			},
+			logical.DeleteOperation: &framework.PathOperation{
+				Callback: b.pathConfigZeroAddressDelete,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationSuffix: "zero-address-configuration",
+				},
+			},
 		},
 		HelpSynopsis:    pathConfigZeroAddressSyn,
 		HelpDescription: pathConfigZeroAddressDesc,
diff --git a/builtin/logical/ssh/path_creds_create.go b/builtin/logical/ssh/path_creds_create.go
index 2a0698145d33..781ce056f9dc 100644
--- a/builtin/logical/ssh/path_creds_create.go
+++ b/builtin/logical/ssh/path_creds_create.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package ssh
 
@@ -23,6 +23,13 @@ type sshOTP struct {
 func pathCredsCreate(b *backend) *framework.Path {
 	return &framework.Path{
 		Pattern: "creds/" + framework.GenericNameWithAtRegex("role"),
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixSSH,
+			OperationVerb:   "generate",
+			OperationSuffix: "credentials",
+		},
+
 		Fields: map[string]*framework.FieldSchema{
 			"role": {
 				Type:        framework.TypeString,
@@ -37,9 +44,11 @@ func pathCredsCreate(b *backend) *framework.Path {
 				Description: "[Required] IP of the remote host",
 			},
 		},
+
 		Callbacks: map[logical.Operation]framework.OperationFunc{
 			logical.UpdateOperation: b.pathCredsCreateWrite,
 		},
+
 		HelpSynopsis:    pathCredsCreateHelpSyn,
 		HelpDescription: pathCredsCreateHelpDesc,
 	}
diff --git a/builtin/logical/ssh/path_fetch.go b/builtin/logical/ssh/path_fetch.go
index 57763529886d..da5935bbd44c 100644
--- a/builtin/logical/ssh/path_fetch.go
+++ b/builtin/logical/ssh/path_fetch.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -14,6 +14,11 @@ func pathFetchPublicKey(b *backend) *framework.Path { return &framework.Path{ Pattern: `public_key`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "public-key", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathFetchPublicKey, }, diff --git a/builtin/logical/ssh/path_issue.go b/builtin/logical/ssh/path_issue.go index b6cb6b7c3ead..54741e30ab21 100644 --- a/builtin/logical/ssh/path_issue.go +++ b/builtin/logical/ssh/path_issue.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -22,6 +22,12 @@ func pathIssue(b *backend) *framework.Path { return &framework.Path{ Pattern: "issue/" + framework.GenericNameWithAtRegex("role"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "issue", + OperationSuffix: "certificate", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssue, @@ -52,7 +58,7 @@ be later than the role max TTL.`, }, "valid_principals": { Type: framework.TypeString, - Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`, + Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for. Must be non-empty unless allow_empty_principals=true (not recommended) or a value for DefaultUser has been set in the role`, }, "cert_type": { Type: framework.TypeString, diff --git a/builtin/logical/ssh/path_issue_sign.go b/builtin/logical/ssh/path_issue_sign.go index c4e68e4721f0..60c6d44f7ace 100644 --- a/builtin/logical/ssh/path_issue_sign.go +++ b/builtin/logical/ssh/path_issue_sign.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -189,10 +189,19 @@ func (b *backend) calculateValidPrincipals(data *framework.FieldData, req *logic allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false) } + if len(parsedPrincipals) == 0 && defaultPrincipal != "" { + // defaultPrincipal will either be the defaultUser or a rendered defaultUserTemplate + parsedPrincipals = []string{defaultPrincipal} + } + switch { case len(parsedPrincipals) == 0: - // There is nothing to process - return nil, nil + if role.AllowEmptyPrincipals { + // There is nothing to process + return nil, nil + } else { + return nil, fmt.Errorf("empty valid principals not allowed by role") + } case len(allowedPrincipals) == 0: // User has requested principals to be set, but role is not configured // with any principals diff --git a/builtin/logical/ssh/path_lookup.go b/builtin/logical/ssh/path_lookup.go index 119daf807f4c..f3a6b58ecc3e 100644 --- a/builtin/logical/ssh/path_lookup.go +++ b/builtin/logical/ssh/path_lookup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -15,15 +15,24 @@ import ( func pathLookup(b *backend) *framework.Path { return &framework.Path{ Pattern: "lookup", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "list", + OperationSuffix: "roles-by-ip", + }, + Fields: map[string]*framework.FieldSchema{ "ip": { Type: framework.TypeString, Description: "[Required] IP address of remote host", }, }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathLookupWrite, }, + HelpSynopsis: pathLookupSyn, HelpDescription: pathLookupDesc, } diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 2a6f2ae95d43..28fa9a92486d 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -66,12 +66,18 @@ type sshRole struct { AlgorithmSigner string `mapstructure:"algorithm_signer" json:"algorithm_signer"` Version int `mapstructure:"role_version" json:"role_version"` NotBeforeDuration time.Duration `mapstructure:"not_before_duration" json:"not_before_duration"` + AllowEmptyPrincipals bool `mapstructure:"allow_empty_principals" json:"allow_empty_principals"` } func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -84,6 +90,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -352,6 +364,11 @@ func pathRoles(b *backend) *framework.Path { Value: 30, }, }, + "allow_empty_principals": { + Type: framework.TypeBool, + Description: `Whether to allow issuing certificates with no valid principals (meaning any valid principal). Exists for backwards compatibility only, the default of false is highly recommended.`, + Default: false, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -488,6 +505,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser, signer string, data *f AlgorithmSigner: signer, Version: roleEntryVersion, NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, + AllowEmptyPrincipals: data.Get("allow_empty_principals").(bool), } if !role.AllowUserCertificates && !role.AllowHostCertificates { diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 13a35879770d..20bd2259a34d 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -15,6 +15,12 @@ func pathSign(b *backend) *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameWithAtRegex("role"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "sign", + OperationSuffix: "certificate", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathSign, }, diff --git a/builtin/logical/ssh/path_verify.go b/builtin/logical/ssh/path_verify.go index 7efd5fec487c..323fecd02791 100644 --- a/builtin/logical/ssh/path_verify.go +++ b/builtin/logical/ssh/path_verify.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh @@ -14,6 +14,11 @@ import ( func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: "verify", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "verify", + OperationSuffix: "otp", + }, Fields: map[string]*framework.FieldSchema{ "otp": { Type: framework.TypeString, diff --git a/builtin/logical/ssh/secret_otp.go b/builtin/logical/ssh/secret_otp.go index a70cf601cfe5..522c60e2dfdb 100644 --- a/builtin/logical/ssh/secret_otp.go +++ b/builtin/logical/ssh/secret_otp.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go index b88675067907..89980ada0132 100644 --- a/builtin/logical/ssh/util.go +++ b/builtin/logical/ssh/util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ssh diff --git a/builtin/logical/totp/backend.go b/builtin/logical/totp/backend.go index d3bbaf529551..08cbe385a01d 100644 --- a/builtin/logical/totp/backend.go +++ b/builtin/logical/totp/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package totp @@ -13,6 +13,8 @@ import ( cache "github.com/patrickmn/go-cache" ) +const operationPrefixTOTP = "totp" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/logical/totp/backend_test.go b/builtin/logical/totp/backend_test.go index 12600427f27f..1d3ba4d4f9ca 100644 --- a/builtin/logical/totp/backend_test.go +++ b/builtin/logical/totp/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package totp diff --git a/builtin/logical/totp/cmd/totp/main.go b/builtin/logical/totp/cmd/totp/main.go index 9a2a49bd2755..c051e133a4c6 100644 --- a/builtin/logical/totp/cmd/totp/main.go +++ b/builtin/logical/totp/cmd/totp/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/totp/path_code.go b/builtin/logical/totp/path_code.go index e1ad3dc04b80..7e7278c10f87 100644 --- a/builtin/logical/totp/path_code.go +++ b/builtin/logical/totp/path_code.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package totp @@ -17,6 +17,12 @@ import ( func pathCode(b *backend) *framework.Path { return &framework.Path{ Pattern: "code/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "code", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -28,9 +34,19 @@ func pathCode(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathReadCode, - logical.UpdateOperation: b.pathValidateCode, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathReadCode, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "generate", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathValidateCode, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "validate", + }, + }, }, HelpSynopsis: pathCodeHelpSyn, diff --git a/builtin/logical/totp/path_keys.go b/builtin/logical/totp/path_keys.go index 86d9438a4946..049ddddad3e5 100644 --- a/builtin/logical/totp/path_keys.go +++ b/builtin/logical/totp/path_keys.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package totp @@ -24,6 +24,11 @@ func pathListKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "keys", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKeyList, }, @@ -36,6 +41,12 @@ func pathListKeys(b *backend) *framework.Path { func pathKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -111,10 +122,25 @@ func pathKeys(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathKeyRead, - logical.UpdateOperation: b.pathKeyCreate, - logical.DeleteOperation: b.pathKeyDelete, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathKeyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathKeyCreate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "create", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathKeyDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + }, + }, }, HelpSynopsis: pathKeyHelpSyn, diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index faad5bb939db..40f6819181fb 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" + "errors" "fmt" "io" "strconv" @@ -13,14 +14,21 @@ import ( "time" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" ) -// Minimum cache size for transit backend -const minCacheSize = 10 +const ( + operationPrefixTransit = "transit" + + // Minimum cache size for transit backend + minCacheSize = 10 +) + +var ErrCmacEntOnly = errors.New("CMAC operations are only available in enterprise versions of Vault") func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b, err := Backend(ctx, conf) @@ -53,6 +61,7 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) b.pathImportVersion(), b.pathKeys(), b.pathListKeys(), + b.pathBYOKExportKeys(), b.pathExportKeys(), b.pathKeysConfig(), b.pathEncrypt(), @@ -68,12 +77,16 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) b.pathTrim(), b.pathCacheConfig(), b.pathConfigKeys(), + b.pathCreateCsr(), + b.pathImportCertChain(), }, - Secrets: []*framework.Secret{}, - Invalidate: b.invalidate, - BackendType: logical.TypeLogical, - PeriodicFunc: b.periodicFunc, + Secrets: []*framework.Secret{}, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, + PeriodicFunc: b.periodicFunc, + InitializeFunc: b.initialize, + Clean: b.cleanup, } b.backendUUID = conf.BackendUUID @@ -100,11 +113,15 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) return nil, err } + b.setupEnt() + return &b, nil } type backend struct { *framework.Backend + entBackend + lm *keysutil.LockManager // Lock to make changes to any of the backend's cache configuration. configMutex sync.RWMutex @@ -161,6 +178,11 @@ func (b *backend) GetPolicy(ctx context.Context, polReq keysutil.PolicyRequest, if err != nil { return p, false, err } + + if p != nil && p.Type.CMACSupported() && !constants.IsEnterprise { + return nil, false, ErrCmacEntOnly + } + return p, true, nil } @@ -178,6 +200,8 @@ func (b *backend) invalidate(ctx context.Context, key string) { defer b.configMutex.Unlock() b.cacheSizeChanged = true } + + b.invalidateEnt(ctx, key) } // periodicFunc is a central collection of functions that run on an interval. @@ -196,7 +220,11 @@ func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error b.autoRotateOnce = sync.Once{} } - return err + if err != nil { + return err + } + + return b.periodicFuncEnt(ctx, req) } // autoRotateKeys retrieves all transit keys and rotates those which have an @@ -241,6 +269,7 @@ func (b *backend) autoRotateKeys(ctx context.Context, req *logical.Request) erro continue } + // rotateIfRequired properly acquires/releases the lock on p err = b.rotateIfRequired(ctx, req, key, p) if err != nil { errs = multierror.Append(errs, err) @@ -268,6 +297,11 @@ func (b *backend) rotateIfRequired(ctx context.Context, req *logical.Request, ke return nil } + // We can't auto-rotate managed keys + if p.Type == keysutil.KeyType_MANAGED_KEY { + return nil + } + // Retrieve the latest version of the policy and determine if it is time to rotate. 
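 	// The version is due once its CreationTime plus the policy's
 	// AutoRotatePeriod is in the past, which is exactly what the
 	// time.Now().After(...) check below evaluates.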
latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] if time.Now().After(latestKey.CreationTime.Add(p.AutoRotatePeriod)) { @@ -279,3 +313,11 @@ func (b *backend) rotateIfRequired(ctx context.Context, req *logical.Request, ke } return nil } + +func (b *backend) initialize(ctx context.Context, request *logical.InitializationRequest) error { + return b.initializeEnt(ctx, request) +} + +func (b *backend) cleanup(ctx context.Context) { + b.cleanupEnt(ctx) +} diff --git a/builtin/logical/transit/backend_ce.go b/builtin/logical/transit/backend_ce.go new file mode 100644 index 000000000000..4c88fc30a35a --- /dev/null +++ b/builtin/logical/transit/backend_ce.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package transit + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" +) + +type entBackend struct{} + +func (b *backend) initializeEnt(_ context.Context, _ *logical.InitializationRequest) error { + return nil +} + +func (b *backend) invalidateEnt(_ context.Context, _ string) {} + +func (b *backend) periodicFuncEnt(_ context.Context, _ *logical.Request) error { return nil } + +func (b *backend) cleanupEnt(_ context.Context) {} + +func (b *backend) setupEnt() {} diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index 44d9014560be..528ccf68218c 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -1,11 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" "crypto" + "crypto/ed25519" cryptoRand "crypto/rand" "crypto/x509" "encoding/base64" @@ -22,6 +23,7 @@ import ( "testing" "time" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/pki" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" @@ -31,10 +33,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - - uuid "github.com/hashicorp/go-uuid" "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/require" ) @@ -1071,9 +1070,7 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated request") p, err := keysutil.LoadPolicy(context.Background(), storage, path.Join("policy", "testkey")) if err != nil { @@ -1154,10 +1151,12 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // Now test encrypting the same value twice req.Data = map[string]interface{}{ - "plaintext": "emlwIHphcA==", // "zip zap" - "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" + "plaintext": "emlwIHphcA==", // "zip zap" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } + if ver == 0 { + req.Data["nonce"] = "b25ldHdvdGhyZWVl" // "onetwothreee" + } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) @@ -1188,11 +1187,10 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // For sanity, also check a different nonce value... 
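 	// An explicit nonce is only honored in the ver == 0 case below; newer
 	// convergent key versions derive the nonce internally, so the test
 	// varies the context instead to obtain a distinct ciphertext.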
req.Data = map[string]interface{}{ - "plaintext": "emlwIHphcA==", // "zip zap" - "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour" + "plaintext": "emlwIHphcA==", // "zip zap" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } - if ver < 2 { + if ver == 0 { req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" } else { req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S" @@ -1231,10 +1229,12 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // ...and a different context value req.Data = map[string]interface{}{ - "plaintext": "emlwIHphcA==", // "zip zap" - "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour" + "plaintext": "emlwIHphcA==", // "zip zap" "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT", } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) @@ -1346,9 +1346,11 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // Finally, check operations on empty values // First, check without setting a plaintext at all req.Data = map[string]interface{}{ - "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } resp, err = b.HandleRequest(context.Background(), req) if err == nil { t.Fatal("expected error, got nil") @@ -1363,9 +1365,11 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // Now set plaintext to empty req.Data = map[string]interface{}{ "plaintext": "", - "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) @@ -1559,9 +1563,7 @@ func TestBadInput(t *testing.T) { if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated request") req.Path = "decrypt/test" req.Data = map[string]interface{}{ @@ -1650,9 +1652,7 @@ func TestTransit_AutoRotateKeys(t *testing.T) { if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated request") // Write a key with an auto rotate value one day in the future req = &logical.Request{ @@ -1667,9 +1667,7 @@ func TestTransit_AutoRotateKeys(t *testing.T) { if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated request") // Run the rotation check and ensure none of the keys have rotated b.checkAutoRotateAfter = time.Now() @@ -2022,3 +2020,284 @@ func TestTransitPKICSR(t *testing.T) { t.Logf("root: %v", rootCertPEM) t.Logf("leaf: %v", leafCertPEM) } + +func TestTransit_ReadPublicKeyImported(t *testing.T) { + testTransit_ReadPublicKeyImported(t, "rsa-2048") + testTransit_ReadPublicKeyImported(t, "ecdsa-p256") + testTransit_ReadPublicKeyImported(t, "ed25519") +} + +func testTransit_ReadPublicKeyImported(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + 
publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp) + } + + // Read key + readReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/" + keyID, + Storage: s, + } + + readResp, err := b.HandleRequest(context.Background(), readReq) + if err != nil || (readResp != nil && readResp.IsError()) { + t.Fatalf("failed to read key. err: %s\nresp: %#v", err, readResp) + } +} + +func TestTransit_SignWithImportedPublicKey(t *testing.T) { + testTransit_SignWithImportedPublicKey(t, "rsa-2048") + testTransit_SignWithImportedPublicKey(t, "ecdsa-p256") + testTransit_SignWithImportedPublicKey(t, "ed25519") +} + +func testTransit_SignWithImportedPublicKey(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key. 
err: %s\nresp: %#v", err, importResp)
+	}
+
+	// Sign text
+	signReq := &logical.Request{
+		Path:      "sign/" + keyID,
+		Operation: logical.UpdateOperation,
+		Storage:   s,
+		Data: map[string]interface{}{
+			"plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)),
+		},
+	}
+
+	_, err = b.HandleRequest(context.Background(), signReq)
+	if err == nil {
+		t.Fatalf("expected error, should have failed to sign input")
+	}
+}
+
+func TestTransit_VerifyWithImportedPublicKey(t *testing.T) {
+	generateKeys(t)
+	keyType := "rsa-2048"
+	b, s := createBackendWithStorage(t)
+	keyID, err := uuid.GenerateUUID()
+	if err != nil {
+		t.Fatalf("failed to generate key ID: %s", err)
+	}
+
+	// Get key
+	privateKey := getKey(t, keyType)
+	publicKeyBytes, err := getPublicKey(privateKey, keyType)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Retrieve public wrapping key
+	wrappingKey, err := b.getWrappingKey(context.Background(), s)
+	if err != nil || wrappingKey == nil {
+		t.Fatalf("failed to retrieve public wrapping key: %s", err)
+	}
+
+	privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey
+	pubWrappingKey := &privWrappingKey.PublicKey
+
+	// generate ciphertext
+	importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256")
+
+	// Import private key
+	importReq := &logical.Request{
+		Storage:   s,
+		Operation: logical.UpdateOperation,
+		Path:      fmt.Sprintf("keys/%s/import", keyID),
+		Data: map[string]interface{}{
+			"ciphertext": importBlob,
+			"type":       keyType,
+		},
+	}
+	importResp, err := b.HandleRequest(context.Background(), importReq)
+	if err != nil || (importResp != nil && importResp.IsError()) {
+		t.Fatalf("failed to import key. err: %s\nresp: %#v", err, importResp)
+	}
+
+	// Sign text
+	signReq := &logical.Request{
+		Storage:   s,
+		Path:      "sign/" + keyID,
+		Operation: logical.UpdateOperation,
+		Data: map[string]interface{}{
+			"plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)),
+		},
+	}
+
+	signResp, err := b.HandleRequest(context.Background(), signReq)
+	if err != nil || (signResp != nil && signResp.IsError()) {
+		t.Fatalf("failed to sign plaintext. err: %s\nresp: %#v", err, signResp)
+	}
+
+	// Get signature
+	signature := signResp.Data["signature"].(string)
+
+	// Import new key as public key
+	importPubReq := &logical.Request{
+		Storage:   s,
+		Operation: logical.UpdateOperation,
+		Path:      fmt.Sprintf("keys/%s/import", "public-key-rsa"),
+		Data: map[string]interface{}{
+			"public_key": publicKeyBytes,
+			"type":       keyType,
+		},
+	}
+	importPubResp, err := b.HandleRequest(context.Background(), importPubReq)
+	if err != nil || (importPubResp != nil && importPubResp.IsError()) {
+		t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importPubResp)
+	}
+
+	// Verify signed text
+	verifyReq := &logical.Request{
+		Path:      "verify/public-key-rsa",
+		Operation: logical.UpdateOperation,
+		Storage:   s,
+		Data: map[string]interface{}{
+			"input":     base64.StdEncoding.EncodeToString([]byte(testPlaintext)),
+			"signature": signature,
+		},
+	}
+
+	verifyResp, err := b.HandleRequest(context.Background(), verifyReq)
+	if err != nil || (verifyResp != nil && verifyResp.IsError()) {
+		t.Fatalf("failed to verify signed data. err: %s\nresp: %#v", err, verifyResp)
+	}
+}
+
+func TestTransit_ExportPublicKeyImported(t *testing.T) {
+	testTransit_ExportPublicKeyImported(t, "rsa-2048")
+	testTransit_ExportPublicKeyImported(t, "ecdsa-p256")
+	testTransit_ExportPublicKeyImported(t, "ed25519")
+}
+
+func testTransit_ExportPublicKeyImported(t *testing.T, keyType string) {
+	generateKeys(t)
+	b, s := createBackendWithStorage(t)
+	keyID, err := uuid.GenerateUUID()
+	if err != nil {
+		t.Fatalf("failed to generate key ID: %s", err)
+	}
+
+	// Get key
+	privateKey := getKey(t, keyType)
+	publicKeyBytes, err := getPublicKey(privateKey, keyType)
+	if err != nil {
+		t.Fatalf("failed to extract the public key: %s", err)
+	}
+
+	t.Logf("generated key: %v", string(publicKeyBytes))
+
+	// Import key
+	importReq := &logical.Request{
+		Storage:   s,
+		Operation: logical.UpdateOperation,
+		Path:      fmt.Sprintf("keys/%s/import", keyID),
+		Data: map[string]interface{}{
+			"public_key": publicKeyBytes,
+			"type":       keyType,
+			"exportable": true,
+		},
+	}
+	importResp, err := b.HandleRequest(context.Background(), importReq)
+	if err != nil || (importResp != nil && importResp.IsError()) {
+		t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp)
+	}
+
+	t.Logf("importing key: %v", importResp)
+
+	// Export key
+	exportReq := &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      fmt.Sprintf("export/public-key/%s/latest", keyID),
+		Storage:   s,
+	}
+
+	exportResp, err := b.HandleRequest(context.Background(), exportReq)
+	if err != nil || (exportResp != nil && exportResp.IsError()) {
+		t.Fatalf("failed to export key. err: %v\nresp: %#v", err, exportResp)
+	}
+
+	t.Logf("exporting key: %v", exportResp)
+
+	responseKeys, exist := exportResp.Data["keys"]
+	if !exist {
+		t.Fatal("expected response data to hold a 'keys' field")
+	}
+
+	exportedKeyBytes := responseKeys.(map[string]string)["1"]
+
+	if keyType != "ed25519" {
+		exportedKeyBlock, _ := pem.Decode([]byte(exportedKeyBytes))
+		publicKeyBlock, _ := pem.Decode(publicKeyBytes)
+
+		if !reflect.DeepEqual(publicKeyBlock.Bytes, exportedKeyBlock.Bytes) {
+			t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKeyBlock.Bytes, publicKeyBlock.Bytes)
+		}
+	} else {
+		exportedKey, err := base64.StdEncoding.DecodeString(exportedKeyBytes)
+		if err != nil {
+			t.Fatalf("error decoding exported key bytes (%v) to base64 for key type %v: %v", exportedKeyBytes, keyType, err)
+		}
+
+		publicKeyBlock, _ := pem.Decode(publicKeyBytes)
+		publicKeyParsed, err := x509.ParsePKIXPublicKey(publicKeyBlock.Bytes)
+		if err != nil {
+			t.Fatalf("error decoding source key bytes (%v) from PKIX marshaling for key type %v: %v", publicKeyBlock.Bytes, keyType, err)
+		}
+
+		if !reflect.DeepEqual([]byte(publicKeyParsed.(ed25519.PublicKey)), exportedKey) {
+			t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKey, publicKeyParsed)
+		}
+	}
+}
diff --git a/builtin/logical/transit/cmd/transit/main.go b/builtin/logical/transit/cmd/transit/main.go
index 7e2ae8777bdc..701f7f00e763 100644
--- a/builtin/logical/transit/cmd/transit/main.go
+++ b/builtin/logical/transit/cmd/transit/main.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/builtin/logical/transit/key_utils.go b/builtin/logical/transit/key_utils.go new file mode 100644 index 000000000000..05dfe8b0b812 --- /dev/null +++ b/builtin/logical/transit/key_utils.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package transit + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) getReadLockedPolicy(ctx context.Context, s logical.Storage, name string) (*keysutil.Policy, error) { + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: s, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return nil, fmt.Errorf("%w: key %s not found", logical.ErrInvalidRequest, name) + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + return p, nil +} + +// runWithReadLockedPolicy runs a function passing in the policy specified by keyName that has been +// locked in a read only fashion without the ability to upsert the policy +func (b *backend) runWithReadLockedPolicy(ctx context.Context, s logical.Storage, keyName string, f func(p *keysutil.Policy) (*logical.Response, error)) (*logical.Response, error) { + p, err := b.getReadLockedPolicy(ctx, s, keyName) + if err != nil { + if errors.Is(err, logical.ErrInvalidRequest) { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + return nil, err + } + defer p.Unlock() + return f(p) +} + +// validateKeyVersion verifies that the passed in key version is valid for our +// current key policy, returning correct version to use within the policy. +func validateKeyVersion(p *keysutil.Policy, ver int) (int, error) { + switch { + case ver < 0: + return 0, fmt.Errorf("cannot use negative key version %d", ver) + case ver == 0: + // Allowed, will use latest; set explicitly here to ensure the string + // is generated properly + ver = p.LatestVersion + case ver == p.LatestVersion: + // Allowed + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return 0, fmt.Errorf("cannot use key version %d: version is too old (disallowed by policy) for key %s", ver, p.Name) + } + return ver, nil +} diff --git a/builtin/logical/transit/managed_key_util.go b/builtin/logical/transit/managed_key_util.go index c4dc1e9fe0d3..ccc1c324f2ec 100644 --- a/builtin/logical/transit/managed_key_util.go +++ b/builtin/logical/transit/managed_key_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise diff --git a/builtin/logical/transit/path_backup.go b/builtin/logical/transit/path_backup.go index f750a337b513..019a28408e67 100644 --- a/builtin/logical/transit/path_backup.go +++ b/builtin/logical/transit/path_backup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -13,6 +13,13 @@ import ( func (b *backend) pathBackup() *framework.Path { return &framework.Path{ Pattern: "backup/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "back-up", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_backup_test.go b/builtin/logical/transit/path_backup_test.go index ed9a7477d6d3..05d7a10f600b 100644 --- a/builtin/logical/transit/path_backup_test.go +++ b/builtin/logical/transit/path_backup_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -39,6 +39,7 @@ func TestTransit_BackupRestore(t *testing.T) { testBackupRestore(t, "rsa-2048", "hmac-verify") testBackupRestore(t, "rsa-3072", "hmac-verify") testBackupRestore(t, "rsa-4096", "hmac-verify") + testBackupRestore(t, "hmac", "hmac-verify") } func testBackupRestore(t *testing.T, keyType, feature string) { @@ -57,6 +58,9 @@ func testBackupRestore(t *testing.T, keyType, feature string) { "exportable": true, }, } + if keyType == "hmac" { + keyReq.Data["key_size"] = 32 + } resp, err = b.HandleRequest(context.Background(), keyReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("resp: %#v\nerr: %v", resp, err) diff --git a/builtin/logical/transit/path_byok.go b/builtin/logical/transit/path_byok.go new file mode 100644 index 000000000000..c419dc6dbf1e --- /dev/null +++ b/builtin/logical/transit/path_byok.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package transit + +import ( + "context" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathBYOKExportKeys() *framework.Path { + return &framework.Path{ + Pattern: "byok-export/" + framework.GenericNameRegex("destination") + "/" + framework.GenericNameRegex("source") + framework.OptionalParamRegex("version"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "byok", + OperationSuffix: "key|key-version", + }, + + Fields: map[string]*framework.FieldSchema{ + "destination": { + Type: framework.TypeString, + Description: "Destination key to export to; usually the public wrapping key of another Transit instance.", + }, + "source": { + Type: framework.TypeString, + Description: "Source key to export; could be any present key within Transit.", + }, + "version": { + Type: framework.TypeString, + Description: "Optional version of the key to export, else all key versions are exported.", + }, + "hash": { + Type: framework.TypeString, + Description: "Hash function to use for inner OAEP encryption. 
Defaults to SHA256.", + Default: "SHA256", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathPolicyBYOKExportRead, + }, + + HelpSynopsis: pathBYOKExportHelpSyn, + HelpDescription: pathBYOKExportHelpDesc, + } +} + +func (b *backend) pathPolicyBYOKExportRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + dst := d.Get("destination").(string) + src := d.Get("source").(string) + version := d.Get("version").(string) + hash := d.Get("hash").(string) + + dstP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: dst, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if dstP == nil { + return nil, fmt.Errorf("no such destination key to export to") + } + if !b.System().CachingDisabled() { + dstP.Lock(false) + } + defer dstP.Unlock() + + srcP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: src, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if srcP == nil { + return nil, fmt.Errorf("no such source key for export") + } + if !b.System().CachingDisabled() { + srcP.Lock(false) + } + defer srcP.Unlock() + + if !srcP.Exportable { + return logical.ErrorResponse("key is not exportable"), nil + } + + if srcP.Type.CMACSupported() && !constants.IsEnterprise { + return logical.ErrorResponse(ErrCmacEntOnly.Error()), logical.ErrInvalidRequest + } + + retKeys := map[string]string{} + switch version { + case "": + for k, v := range srcP.Keys { + exportKey, err := getBYOKExportKey(dstP, srcP, &v, hash) + if err != nil { + return nil, err + } + retKeys[k] = exportKey + } + + default: + var versionValue int + if version == "latest" { + versionValue = srcP.LatestVersion + } else { + version = strings.TrimPrefix(version, "v") + versionValue, err = strconv.Atoi(version) + if err != nil { + return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest + } + } + + if versionValue < srcP.MinDecryptionVersion { + return logical.ErrorResponse("version for export is below minimum decryption version"), logical.ErrInvalidRequest + } + key, ok := srcP.Keys[strconv.Itoa(versionValue)] + if !ok { + return logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest + } + + exportKey, err := getBYOKExportKey(dstP, srcP, &key, hash) + if err != nil { + return nil, err + } + + retKeys[strconv.Itoa(versionValue)] = exportKey + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "name": srcP.Name, + "type": srcP.Type.String(), + "keys": retKeys, + }, + } + + return resp, nil +} + +func getBYOKExportKey(dstP *keysutil.Policy, srcP *keysutil.Policy, key *keysutil.KeyEntry, hash string) (string, error) { + if dstP == nil || srcP == nil { + return "", errors.New("nil policy provided") + } + + var targetKey interface{} + switch srcP.Type { + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305, keysutil.KeyType_HMAC, keysutil.KeyType_AES128_CMAC, keysutil.KeyType_AES256_CMAC: + targetKey = key.Key + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + targetKey = key.RSAKey + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: + var curve elliptic.Curve + switch srcP.Type { + case keysutil.KeyType_ECDSA_P384: + curve = elliptic.P384() + case keysutil.KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + pubKey := 
ecdsa.PublicKey{ + Curve: curve, + X: key.EC_X, + Y: key.EC_Y, + } + targetKey = &ecdsa.PrivateKey{ + PublicKey: pubKey, + D: key.EC_D, + } + case keysutil.KeyType_ED25519: + targetKey = ed25519.PrivateKey(key.Key) + default: + return "", fmt.Errorf("unable to export to unknown key type: %v", srcP.Type) + } + + hasher, err := parseHashFn(hash) + if err != nil { + return "", err + } + + return dstP.WrapKey(0, targetKey, srcP.Type, hasher) +} + +const pathBYOKExportHelpSyn = `Securely export named encryption or signing key` + +const pathBYOKExportHelpDesc = ` +This path is used to export the named keys that are configured as +exportable. + +Unlike the regular /export/:name[/:version] paths, this path uses +the same encryption specification /import, allowing secure migration +of keys between clusters to enable workloads to communicate between +them. + +Presently this only works for RSA destination keys. +` diff --git a/builtin/logical/transit/path_byok_test.go b/builtin/logical/transit/path_byok_test.go new file mode 100644 index 000000000000..44dbafa18be8 --- /dev/null +++ b/builtin/logical/transit/path_byok_test.go @@ -0,0 +1,229 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_BYOKExportImport(t *testing.T) { + // Test encryption/decryption after a restore for supported keys + testBYOKExportImport(t, "aes128-gcm96", "encrypt-decrypt") + testBYOKExportImport(t, "aes256-gcm96", "encrypt-decrypt") + testBYOKExportImport(t, "chacha20-poly1305", "encrypt-decrypt") + testBYOKExportImport(t, "rsa-2048", "encrypt-decrypt") + testBYOKExportImport(t, "rsa-3072", "encrypt-decrypt") + testBYOKExportImport(t, "rsa-4096", "encrypt-decrypt") + + // Test signing/verification after a restore for supported keys + testBYOKExportImport(t, "ecdsa-p256", "sign-verify") + testBYOKExportImport(t, "ecdsa-p384", "sign-verify") + testBYOKExportImport(t, "ecdsa-p521", "sign-verify") + testBYOKExportImport(t, "ed25519", "sign-verify") + testBYOKExportImport(t, "rsa-2048", "sign-verify") + testBYOKExportImport(t, "rsa-3072", "sign-verify") + testBYOKExportImport(t, "rsa-4096", "sign-verify") + + // Test HMAC sign/verify after a restore for supported keys. + testBYOKExportImport(t, "hmac", "hmac-verify") +} + +func testBYOKExportImport(t *testing.T, keyType, feature string) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create a key + keyReq := &logical.Request{ + Path: "keys/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "type": keyType, + "exportable": true, + }, + } + if keyType == "hmac" { + keyReq.Data["key_size"] = 32 + } + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Read the wrapping key. + wrapKeyReq := &logical.Request{ + Path: "wrapping_key", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), wrapKeyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Import the wrapping key. 
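+	// Importing the backend's own wrapping key under a second name lets one
+	// test instance stand in for both clusters: byok-export below wraps
+	// against the imported "wrapper" public key, and keys/test/import can
+	// then unwrap with the matching private wrapping key.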
+ wrapKeyImportReq := &logical.Request{ + Path: "keys/wrapper/import", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "public_key": resp.Data["public_key"], + "type": "rsa-4096", + }, + } + resp, err = b.HandleRequest(context.Background(), wrapKeyImportReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Export the key + backupReq := &logical.Request{ + Path: "byok-export/wrapper/test-source", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), backupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + keys := resp.Data["keys"].(map[string]string) + + // Import the key to a new name. + restoreReq := &logical.Request{ + Path: "keys/test/import", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": keys["1"], + "type": keyType, + }, + } + resp, err = b.HandleRequest(context.Background(), restoreReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + plaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + // Perform encryption, signing or hmac-ing based on the set 'feature' + var encryptReq, signReq, hmacReq *logical.Request + var ciphertext, signature, hmac string + switch feature { + case "encrypt-decrypt": + encryptReq = &logical.Request{ + Path: "encrypt/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "plaintext": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + ciphertext = resp.Data["ciphertext"].(string) + + case "sign-verify": + signReq = &logical.Request{ + Path: "sign/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + signature = resp.Data["signature"].(string) + + case "hmac-verify": + hmacReq = &logical.Request{ + Path: "hmac/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), hmacReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + hmac = resp.Data["hmac"].(string) + } + + // validationFunc verifies the ciphertext, signature or hmac based on the + // set 'feature' + validationFunc := func(keyName string) { + var decryptReq *logical.Request + var verifyReq *logical.Request + switch feature { + case "encrypt-decrypt": + decryptReq = &logical.Request{ + Path: "decrypt/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": ciphertext, + }, + } + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + if resp.Data["plaintext"].(string) != plaintextB64 { + t.Fatalf("bad: plaintext; expected: %q, actual: %q", plaintextB64, resp.Data["plaintext"].(string)) + } + case "sign-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + 
Storage: s, + Data: map[string]interface{}{ + "signature": signature, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: signature verification failed for key type %q", keyType) + } + + case "hmac-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "hmac": hmac, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: HMAC verification failed for key type %q", keyType) + } + } + } + + // Ensure that the restored key is functional + validationFunc("test") + + // Ensure the original key is functional + validationFunc("test-source") +} diff --git a/builtin/logical/transit/path_cache_config.go b/builtin/logical/transit/path_cache_config.go index 0208f14c5989..941f98250ad2 100644 --- a/builtin/logical/transit/path_cache_config.go +++ b/builtin/logical/transit/path_cache_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -14,6 +14,11 @@ import ( func (b *backend) pathCacheConfig() *framework.Path { return &framework.Path{ Pattern: "cache-config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + }, + Fields: map[string]*framework.FieldSchema{ "size": { Type: framework.TypeInt, @@ -27,16 +32,18 @@ func (b *backend) pathCacheConfig() *framework.Path { logical.ReadOperation: &framework.PathOperation{ Callback: b.pathCacheConfigRead, Summary: "Returns the size of the active cache", + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "cache-configuration", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCacheConfigWrite, Summary: "Configures a new cache of the specified size", - }, - - logical.CreateOperation: &framework.PathOperation{ - Callback: b.pathCacheConfigWrite, - Summary: "Configures a new cache of the specified size", + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "cache", + }, }, }, @@ -68,7 +75,11 @@ func (b *backend) pathCacheConfigWrite(ctx context.Context, req *logical.Request return nil, err } - return nil, nil + return &logical.Response{ + Data: map[string]interface{}{ + "size": cacheSize, + }, + }, nil } type configCache struct { diff --git a/builtin/logical/transit/path_cache_config_test.go b/builtin/logical/transit/path_cache_config_test.go index f5c8316d8789..0141f6a32def 100644 --- a/builtin/logical/transit/path_cache_config_test.go +++ b/builtin/logical/transit/path_cache_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_certificates.go b/builtin/logical/transit/path_certificates.go new file mode 100644 index 000000000000..aa4aeb55f6cf --- /dev/null +++ b/builtin/logical/transit/path_certificates.go @@ -0,0 +1,291 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package transit
+
+import (
+ "context"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/helper/errutil"
+ "github.com/hashicorp/vault/sdk/helper/keysutil"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *backend) pathCreateCsr() *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/" + framework.GenericNameRegex("name") + "/csr",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixTransit,
+ OperationVerb: "generate-csr-for-key",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Required: true,
+ Description: "Name of the key",
+ },
+ "version": {
+ Type: framework.TypeInt,
+ Required: false,
+ Description: "Optional version of key, 'latest' if not set",
+ },
+ "csr": {
+ Type: framework.TypeString,
+ Required: false,
+ Description: `PEM encoded CSR template. The information attributes
+will be used as a basis for the CSR with the key in transit. If not set, an empty CSR is returned.`,
+ },
+ },
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathCreateCsrWrite,
+ },
+ },
+ HelpSynopsis: pathCreateCsrHelpSyn,
+ HelpDescription: pathCreateCsrHelpDesc,
+ }
+}
+
+func (b *backend) pathImportCertChain() *framework.Path {
+ return &framework.Path{
+ // NOTE: `set-certificate` or `set_certificate`? Paths seem to use different
+ // case, such as `transit/wrapping_key` and `transit/cache-config`.
+ Pattern: "keys/" + framework.GenericNameRegex("name") + "/set-certificate",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixTransit,
+ OperationVerb: "set-certificate-for-key",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Required: true,
+ Description: "Name of the key",
+ },
+ "version": {
+ Type: framework.TypeInt,
+ Required: false,
+ Description: "Optional version of key, 'latest' if not set",
+ },
+ "certificate_chain": {
+ Type: framework.TypeString,
+ Required: true,
+ Description: `PEM encoded certificate chain. It should be composed
+of one or more concatenated PEM blocks and ordered starting from the end-entity certificate.`,
+ },
+ },
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathImportCertChainWrite,
+ },
+ },
+ HelpSynopsis: pathImportCertChainHelpSyn,
+ HelpDescription: pathImportCertChainHelpDesc,
+ }
+}
+
+func (b *backend) pathCreateCsrWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
+ Storage: req.Storage,
+ Name: name,
+ }, b.GetRandomReader())
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse(fmt.Sprintf("key with provided name '%s' not found", name)), logical.ErrInvalidRequest
+ }
+ if !b.System().CachingDisabled() {
+ p.Lock(false) // NOTE: No lock on "read" operations?
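+ // (Lock(false) takes a shared, read-only lock on the policy; CSR creation
+ // does not mutate the key, so an exclusive lock is not needed here.)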
+ }
+ defer p.Unlock()
+
+ // Check if transit key supports signing
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse(fmt.Sprintf("key type '%s' does not support signing", p.Type)), logical.ErrInvalidRequest
+ }
+
+ // Check whether key derivation is enabled
+ if p.Derived {
+ return logical.ErrorResponse("operation not supported on keys with derivation enabled"), logical.ErrInvalidRequest
+ }
+
+ // Transit key version
+ signingKeyVersion := p.LatestVersion
+ // NOTE: BYOK endpoints seem to remove "v" prefix from version,
+ // are versions like that also supported?
+ if version, ok := d.GetOk("version"); ok {
+ signingKeyVersion = version.(int)
+ }
+
+ // Read and parse CSR template
+ pemCsrTemplate := d.Get("csr").(string)
+ csrTemplate, err := parseCsr(pemCsrTemplate)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ pemCsr, err := p.CreateCsr(signingKeyVersion, csrTemplate)
+ if err != nil {
+ prefixedErr := fmt.Errorf("could not create the CSR: %w", err)
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(prefixedErr.Error()), logical.ErrInvalidRequest
+ default:
+ return nil, prefixedErr
+ }
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": p.Name,
+ "type": p.Type.String(),
+ "csr": string(pemCsr),
+ },
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathImportCertChainWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
+ Storage: req.Storage,
+ Name: name,
+ }, b.GetRandomReader())
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse(fmt.Sprintf("key with provided name '%s' not found", name)), logical.ErrInvalidRequest
+ }
+ if !b.System().CachingDisabled() {
+ p.Lock(true) // NOTE: Lock exclusively as we might write to the policy
+ }
+ defer p.Unlock()
+
+ // Check if transit key supports signing
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse(fmt.Sprintf("key type '%s' does not support signing", p.Type)), logical.ErrInvalidRequest
+ }
+
+ // Check whether key derivation is enabled
+ if p.Derived {
+ return logical.ErrorResponse("operation not supported on keys with derivation enabled"), logical.ErrInvalidRequest
+ }
+
+ // Transit key version
+ keyVersion := p.LatestVersion
+ if version, ok := d.GetOk("version"); ok {
+ keyVersion = version.(int)
+ }
+
+ // Get certificate chain
+ pemCertChain := d.Get("certificate_chain").(string)
+ certChain, err := parseCertificateChain(pemCertChain)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ err = p.ValidateAndPersistCertificateChain(ctx, keyVersion, certChain, req.Storage)
+ if err != nil {
+ prefixedErr := fmt.Errorf("failed to persist certificate chain: %w", err)
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(prefixedErr.Error()), logical.ErrInvalidRequest
+ default:
+ return nil, prefixedErr
+ }
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": p.Name,
+ "type": p.Type.String(),
+ "certificate-chain": pemCertChain,
+ },
+ }
+
+ return resp, nil
+}
+
+func parseCsr(csrStr string) (*x509.CertificateRequest, error) {
+ if csrStr == "" {
+ return &x509.CertificateRequest{}, nil
+ }
+
+ block, _ := pem.Decode([]byte(csrStr))
+ if block == nil {
+ return nil, errors.New("could not decode PEM certificate request")
+ }
+
+ csr, err := x509.ParseCertificateRequest(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return csr, nil
+}
+
+func parseCertificateChain(certChainString string) ([]*x509.Certificate, error) {
+ var certificates []*x509.Certificate
+
+ var pemCertBlocks []*pem.Block
+ pemBytes := []byte(strings.TrimSpace(certChainString))
+ for len(pemBytes) > 0 {
+ var pemCertBlock *pem.Block
+ pemCertBlock, pemBytes = pem.Decode(pemBytes)
+ if pemCertBlock == nil {
+ return nil, errors.New("could not decode PEM block in certificate chain")
+ }
+
+ switch pemCertBlock.Type {
+ case "CERTIFICATE", "X509 CERTIFICATE":
+ pemCertBlocks = append(pemCertBlocks, pemCertBlock)
+ default:
+ // Ignore any other entries
+ }
+ }
+
+ if len(pemCertBlocks) == 0 {
+ return nil, errors.New("provided certificate chain did not contain any valid PEM certificate")
+ }
+
+ for _, certBlock := range pemCertBlocks {
+ cert, err := x509.ParseCertificate(certBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate in certificate chain: %w", err)
+ }
+
+ certificates = append(certificates, cert)
+ }
+
+ return certificates, nil
+}
+
+const pathCreateCsrHelpSyn = `Create a CSR from a key in transit`
+
+const pathCreateCsrHelpDesc = `This path is used to create a CSR from a key in
+transit. If a CSR template is provided, its significant information, except key
+related data, is included in the CSR; otherwise an empty CSR is returned.
+`
+
+const pathImportCertChainHelpSyn = `Imports an externally-signed certificate
+chain into an existing key version`
+
+const pathImportCertChainHelpDesc = `This path is used to import an externally-
+signed certificate chain into a key in transit. The leaf certificate key has to
+match the selected key in transit.
+`
diff --git a/builtin/logical/transit/path_certificates_test.go b/builtin/logical/transit/path_certificates_test.go
new file mode 100644
index 000000000000..9a6305e7a048
--- /dev/null
+++ b/builtin/logical/transit/path_certificates_test.go
@@ -0,0 +1,379 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package transit
+
+import (
+ "context"
+ cryptoRand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/pki"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/hashicorp/vault/vault"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTransit_Certs_CreateCsr(t *testing.T) {
+ // NOTE: Use an existing CSR or generate one here?
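+ // (The fixed PEM template below keeps the test deterministic; a programmatic
+ // alternative would be building one with x509.CreateCertificateRequest, as
+ // testTransit_ImportCertChain does further down.)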
+ templateCsr := ` +-----BEGIN CERTIFICATE REQUEST----- +MIICRTCCAS0CAQAwADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM49 +McW7u3ILuAJfSFLUtGOMGBytHmMFcjTiX+5JcajFj0Uszb+HQ7eIsJJNXhVc/7fg +Z01DZvcCqb9ChEWE3xi4GEkPMXay7p7G1ooSLnQp6Z0lL5CuIFfMVOTvjfhTwRaJ +l9v2mMlm80BeiAUBqeoyGVrIh5fKASxaE0jrhjAxhGzqrXdDnL8A4na6ArprV4iS +aEAziODd2WmplSKgUwEaFdeG1t1bJf3o5ZQRCnKNtQcAk8UmgtvFEO8ohGMln/Fj +O7u7s6iRhOGf1g1NCAP5pGqxNx3bjz5f/CUcTSIGAReEomg41QTIhD9muCTL8qnm +6lS87wkGTv7qbeIGB7sCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQAfjE+jNqIk +4V1tL3g5XPjxr2+QcwddPf8opmbAzgt0+TiIHcDGBAxsXyi7sC9E5AFfFp7W07Zv +r5+v4i529K9q0BgGtHFswoEnhd4dC8Ye53HtSoEtXkBpZMDrtbS7eZa9WccT6zNx +4taTkpptZVrmvPj+jLLFkpKJJ3d+Gbrp6hiORPadT+igLKkqvTeocnhOdAtt427M +RXTVgN14pV3tqO+5MXzNw5tGNPcwWARWwPH9eCRxLwLUuxE4Qu73pUeEFjDEfGkN +iBnlTsTXBOMqSGryEkmRaZslWDvblvYeObYw+uc3kCbJ7jRy9soVwkbb5FueF/yC +O1aQIm23HrrG +-----END CERTIFICATE REQUEST----- +` + + testTransit_CreateCsr(t, "rsa-2048", templateCsr) + testTransit_CreateCsr(t, "rsa-3072", templateCsr) + testTransit_CreateCsr(t, "rsa-4096", templateCsr) + testTransit_CreateCsr(t, "ecdsa-p256", templateCsr) + testTransit_CreateCsr(t, "ecdsa-p384", templateCsr) + testTransit_CreateCsr(t, "ecdsa-p521", templateCsr) + testTransit_CreateCsr(t, "ed25519", templateCsr) + testTransit_CreateCsr(t, "aes256-gcm96", templateCsr) +} + +func testTransit_CreateCsr(t *testing.T, keyType, pemTemplateCsr string) { + var resp *logical.Response + var err error + b, s := createBackendWithStorage(t) + + // Create the policy + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/test-key", + Storage: s, + Data: map[string]interface{}{ + "type": keyType, + }, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + csrSignReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/test-key/csr", + Storage: s, + Data: map[string]interface{}{ + "csr": pemTemplateCsr, + }, + } + + resp, err = b.HandleRequest(context.Background(), csrSignReq) + + switch keyType { + case "rsa-2048", "rsa-3072", "rsa-4096", "ecdsa-p256", "ecdsa-p384", "ecdsa-p521", "ed25519": + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to sign CSR, err:%v resp:%#v", err, resp) + } + + signedCsrBytes, ok := resp.Data["csr"] + if !ok { + t.Fatal("expected response data to hold a 'csr' field") + } + + signedCsr, err := parseCsr(signedCsrBytes.(string)) + if err != nil { + t.Fatalf("failed to parse returned csr, err:%v", err) + } + + templateCsr, err := parseCsr(pemTemplateCsr) + if err != nil { + t.Fatalf("failed to parse returned template csr, err:%v", err) + } + + // NOTE: Check other fields? 
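+ // (Other template fields that could be compared the same way include
+ // DNSNames, EmailAddresses, IPAddresses, and URIs.)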
+ if !reflect.DeepEqual(signedCsr.Subject, templateCsr.Subject) {
+ t.Fatalf("subjects should have matched; expected %#v, got %#v", templateCsr.Subject, signedCsr.Subject)
+ }
+
+ default:
+ if err == nil || (resp != nil && !resp.IsError()) {
+ t.Fatalf("should have failed to sign CSR, provided key type does not support signing")
+ }
+ }
+}
+
+func TestTransit_Certs_ImportCertChain(t *testing.T) {
+ // Create Cluster
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": Factory,
+ "pki": pki.Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ cores := cluster.Cores
+ vault.TestWaitActive(t, cores[0].Core)
+ client := cores[0].Client
+
+ // Mount transit backend
+ err := client.Sys().Mount("transit", &api.MountInput{
+ Type: "transit",
+ })
+ require.NoError(t, err)
+
+ // Mount PKI backend
+ err = client.Sys().Mount("pki", &api.MountInput{
+ Type: "pki",
+ })
+ require.NoError(t, err)
+
+ testTransit_ImportCertChain(t, client, "rsa-2048")
+ testTransit_ImportCertChain(t, client, "rsa-3072")
+ testTransit_ImportCertChain(t, client, "rsa-4096")
+ testTransit_ImportCertChain(t, client, "ecdsa-p256")
+ testTransit_ImportCertChain(t, client, "ecdsa-p384")
+ testTransit_ImportCertChain(t, client, "ecdsa-p521")
+ testTransit_ImportCertChain(t, client, "ed25519")
+}
+
+func testTransit_ImportCertChain(t *testing.T, apiClient *api.Client, keyType string) {
+ keyName := fmt.Sprintf("%s", keyType)
+ issuerName := fmt.Sprintf("%s-issuer", keyType)
+
+ // Create transit key
+ _, err := apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{
+ "type": keyType,
+ })
+ require.NoError(t, err)
+
+ // Set up a new CSR
+ privKey, err := rsa.GenerateKey(cryptoRand.Reader, 3072)
+ require.NoError(t, err)
+
+ var csrTemplate x509.CertificateRequest
+ csrTemplate.Subject.CommonName = "example.com"
+ reqCsrBytes, err := x509.CreateCertificateRequest(cryptoRand.Reader, &csrTemplate, privKey)
+ require.NoError(t, err)
+
+ pemTemplateCsr := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: reqCsrBytes,
+ })
+ t.Logf("csr: %v", string(pemTemplateCsr))
+
+ // Create CSR from template CSR fields and key in transit
+ resp, err := apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/csr", keyName), map[string]interface{}{
+ "csr": string(pemTemplateCsr),
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ pemCsr := resp.Data["csr"].(string)
+
+ // Generate PKI root
+ resp, err = apiClient.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+ "issuer_name": issuerName,
+ "common_name": "PKI Root X1",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ rootCertPEM := resp.Data["certificate"].(string)
+ pemBlock, _ := pem.Decode([]byte(rootCertPEM))
+ require.NotNil(t, pemBlock)
+
+ rootCert, err := x509.ParseCertificate(pemBlock.Bytes)
+ require.NoError(t, err)
+
+ // Create role to be used in the certificate issuing
+ resp, err = apiClient.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{
+ "issuer_ref": issuerName,
+ "allowed_domains": "example.com",
+ "allow_bare_domains": true,
+ "basic_constraints_valid_for_non_ca": true,
+ "key_type": "any",
+ })
+ require.NoError(t, err)
+
+ // Sign the CSR
+ resp, err = apiClient.Logical().Write("pki/sign/example-dot-com", map[string]interface{}{
+ "issuer_ref": issuerName,
+ "csr": pemCsr,
+ "ttl": "10m",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ leafCertPEM := resp.Data["certificate"].(string)
+ pemBlock, _ = pem.Decode([]byte(leafCertPEM))
+ require.NotNil(t, pemBlock)
+
+ leafCert, err := x509.ParseCertificate(pemBlock.Bytes)
+ require.NoError(t, err)
+
+ require.NoError(t, leafCert.CheckSignatureFrom(rootCert))
+ t.Logf("root: %v", rootCertPEM)
+ t.Logf("leaf: %v", leafCertPEM)
+
+ certificateChain := strings.Join([]string{leafCertPEM, rootCertPEM}, "\n")
+ // Import certificate chain to transit key version
+ resp, err = apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/set-certificate", keyName), map[string]interface{}{
+ "certificate_chain": certificateChain,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ resp, err = apiClient.Logical().Read(fmt.Sprintf("transit/keys/%s", keyName))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ keys, ok := resp.Data["keys"].(map[string]interface{})
+ if !ok {
+ t.Fatalf("could not cast Keys value")
+ }
+ keyData, ok := keys["1"].(map[string]interface{})
+ if !ok {
+ t.Fatalf("could not cast key version 1 from keys")
+ }
+ _, present := keyData["certificate_chain"]
+ if !present {
+ t.Fatalf("certificate chain not present in key version 1")
+ }
+}
+
+func TestTransit_Certs_ImportInvalidCertChain(t *testing.T) {
+ // Create Cluster
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": Factory,
+ "pki": pki.Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ cores := cluster.Cores
+ vault.TestWaitActive(t, cores[0].Core)
+ client := cores[0].Client
+
+ // Mount transit backend
+ err := client.Sys().Mount("transit", &api.MountInput{
+ Type: "transit",
+ })
+ require.NoError(t, err)
+
+ // Mount PKI backend
+ err = client.Sys().Mount("pki", &api.MountInput{
+ Type: "pki",
+ })
+ require.NoError(t, err)
+
+ testTransit_ImportInvalidCertChain(t, client, "rsa-2048")
+ testTransit_ImportInvalidCertChain(t, client, "rsa-3072")
+ testTransit_ImportInvalidCertChain(t, client, "rsa-4096")
+ testTransit_ImportInvalidCertChain(t, client, "ecdsa-p256")
+ testTransit_ImportInvalidCertChain(t, client, "ecdsa-p384")
+ testTransit_ImportInvalidCertChain(t, client, "ecdsa-p521")
+ testTransit_ImportInvalidCertChain(t, client, "ed25519")
+}
+
+func testTransit_ImportInvalidCertChain(t *testing.T, apiClient *api.Client, keyType string) {
+ keyName := fmt.Sprintf("%s", keyType)
+ issuerName := fmt.Sprintf("%s-issuer", keyType)
+
+ // Create transit key
+ _, err := apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{
+ "type": keyType,
+ })
+ require.NoError(t, err)
+
+ // Generate PKI root
+ resp, err := apiClient.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+ "issuer_name": issuerName,
+ "common_name": "PKI Root X1",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ rootCertPEM := resp.Data["certificate"].(string)
+ pemBlock, _ := pem.Decode([]byte(rootCertPEM))
+ require.NotNil(t, pemBlock)
+
+ rootCert, err := x509.ParseCertificate(pemBlock.Bytes)
+ require.NoError(t, err)
+
+ // Map the transit key type to a matching PKI role key_type/key_bits.
+ pkiKeyType := "rsa"
+ pkiKeyBits := "0"
+ if strings.HasPrefix(keyType, "rsa") {
+ pkiKeyBits = keyType[4:]
+ } else if strings.HasPrefix(keyType, "ecdsa") {
+ pkiKeyType = "ec"
+ pkiKeyBits = keyType[7:]
+ } else if keyType == "ed25519" {
+ pkiKeyType = "ed25519"
+ pkiKeyBits = "0"
+ }
+
+ // Create role to be used in the certificate issuing
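+ // (The role mirrors the transit key's algorithm so the CA could in principle
+ // issue a matching leaf; the test then deliberately issues from a fresh
+ // PKI-generated key instead of a transit CSR, so the import below must fail.)
+ 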
resp, err = apiClient.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "allowed_domains": "example.com", + "allow_bare_domains": true, + "basic_constraints_valid_for_non_ca": true, + "key_type": pkiKeyType, + "key_bits": pkiKeyBits, + }) + require.NoError(t, err) + + // XXX -- Note subtle error: we issue a certificate with a new key, + // not using a CSR from Transit. + resp, err = apiClient.Logical().Write("pki/issue/example-dot-com", map[string]interface{}{ + "common_name": "example.com", + "issuer_ref": issuerName, + "ttl": "10m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + leafCertPEM := resp.Data["certificate"].(string) + pemBlock, _ = pem.Decode([]byte(leafCertPEM)) + require.NotNil(t, pemBlock) + + leafCert, err := x509.ParseCertificate(pemBlock.Bytes) + require.NoError(t, err) + + require.NoError(t, leafCert.CheckSignatureFrom(rootCert)) + t.Logf("root: %v", rootCertPEM) + t.Logf("leaf: %v", leafCertPEM) + + certificateChain := strings.Join([]string{leafCertPEM, rootCertPEM}, "\n") + + // Import certificate chain to transit key version + resp, err = apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/set-certificate", keyName), map[string]interface{}{ + "certificate_chain": certificateChain, + }) + require.Error(t, err) +} diff --git a/builtin/logical/transit/path_cmac_ce.go b/builtin/logical/transit/path_cmac_ce.go new file mode 100644 index 000000000000..6bd388fbdd54 --- /dev/null +++ b/builtin/logical/transit/path_cmac_ce.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package transit + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathCMACVerify(_ context.Context, _ *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + return logical.ErrorResponse(ErrCmacEntOnly.Error()), nil +} diff --git a/builtin/logical/transit/path_config_keys.go b/builtin/logical/transit/path_config_keys.go index cc8c78d26ed3..45c38ac49f83 100644 --- a/builtin/logical/transit/path_config_keys.go +++ b/builtin/logical/transit/path_config_keys.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -24,6 +24,11 @@ var defaultKeysConfig = keysConfig{ func (b *backend) pathConfigKeys() *framework.Path { return &framework.Path{ Pattern: "config/keys", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + }, + Fields: map[string]*framework.FieldSchema{ "disable_upsert": { Type: framework.TypeBool, @@ -32,9 +37,20 @@ keys on the encrypt endpoint.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigKeysWrite, - logical.ReadOperation: b.pathConfigKeysRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigKeysWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "keys", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigKeysRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "keys-configuration", + }, + }, }, HelpSynopsis: pathConfigKeysHelpSyn, diff --git a/builtin/logical/transit/path_config_keys_test.go b/builtin/logical/transit/path_config_keys_test.go index dde7c58a0a94..d5aa12b9cfdf 100644 --- a/builtin/logical/transit/path_config_keys_test.go +++ b/builtin/logical/transit/path_config_keys_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_datakey.go b/builtin/logical/transit/path_datakey.go index 283432fc4e57..53aff54690bb 100644 --- a/builtin/logical/transit/path_datakey.go +++ b/builtin/logical/transit/path_datakey.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -7,6 +7,7 @@ import ( "context" "crypto/rand" "encoding/base64" + "errors" "fmt" "github.com/hashicorp/vault/helper/constants" @@ -19,6 +20,13 @@ import ( func (b *backend) pathDatakey() *framework.Path { return &framework.Path{ Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "data-key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -134,7 +142,23 @@ func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d return nil, err } - ciphertext, err := p.Encrypt(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey)) + var managedKeyFactory ManagedKeyFactory + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errors.New("unsupported system view") + } + + managedKeyFactory = ManagedKeyFactory{ + managedKeyParams: keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + }, + } + } + + ciphertext, err := p.EncryptWithFactory(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey), nil, managedKeyFactory) if err != nil { switch err.(type) { case errutil.UserError: @@ -163,6 +187,10 @@ func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d }, } + if len(nonce) > 0 && !nonceAllowed(p) { + return nil, ErrNonceNotAllowed + } + if constants.IsFIPS() && shouldWarnAboutNonceUsage(p, nonce) { resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.") } diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go index b199bf195d53..1daf74daf5d1 100644 --- a/builtin/logical/transit/path_decrypt.go +++ b/builtin/logical/transit/path_decrypt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -32,6 +32,12 @@ type DecryptBatchResponseItem struct { func (b *backend) pathDecrypt() *framework.Path { return &framework.Path{ Pattern: "decrypt/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "decrypt", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -178,6 +184,7 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() successesInBatch := false for i, item := range batchInputItems { @@ -237,8 +244,6 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d } } else { if batchResponseItems[0].Error != "" { - p.Unlock() - if internalErrorInBatch { return nil, errutil.InternalError{Err: batchResponseItems[0].Error} } @@ -250,8 +255,6 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d } } - p.Unlock() - return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) } diff --git a/builtin/logical/transit/path_decrypt_bench_test.go b/builtin/logical/transit/path_decrypt_bench_test.go index c4dc72837ab6..d0816fdb6444 100644 --- a/builtin/logical/transit/path_decrypt_bench_test.go +++ b/builtin/logical/transit/path_decrypt_bench_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_decrypt_test.go b/builtin/logical/transit/path_decrypt_test.go index e69402c7d514..a61d85ddc2aa 100644 --- a/builtin/logical/transit/path_decrypt_test.go +++ b/builtin/logical/transit/path_decrypt_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go index 8755e36793af..38c618f9b363 100644 --- a/builtin/logical/transit/path_encrypt.go +++ b/builtin/logical/transit/path_encrypt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -13,7 +13,6 @@ import ( "reflect" "github.com/hashicorp/vault/helper/constants" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/keysutil" @@ -89,6 +88,12 @@ func (m ManagedKeyFactory) GetManagedKeyParameters() keysutil.ManagedKeyParamete func (b *backend) pathEncrypt() *framework.Path { return &framework.Path{ Pattern: "encrypt/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "encrypt", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -454,6 +459,7 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() // Process batch request items. 
If encryption of any request // item fails, respectively mark the error in the response @@ -462,6 +468,13 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d successesInBatch := false for i, item := range batchInputItems { if batchResponseItems[i].Error != "" { + userErrorInBatch = true + continue + } + + if item.Nonce != "" && !nonceAllowed(p) { + userErrorInBatch = true + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() continue } @@ -534,8 +547,6 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d } } else { if batchResponseItems[0].Error != "" { - p.Unlock() - if internalErrorInBatch { return nil, errutil.InternalError{Err: batchResponseItems[0].Error} } @@ -557,11 +568,28 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d resp.AddWarning("Attempted creation of the key during the encrypt operation, but it was created beforehand") } - p.Unlock() - return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) } +func nonceAllowed(p *keysutil.Policy) bool { + var supportedKeyType bool + switch p.Type { + case keysutil.KeyType_MANAGED_KEY: + return true + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: + supportedKeyType = true + default: + supportedKeyType = false + } + + if supportedKeyType && p.ConvergentEncryption && p.ConvergentVersion == 1 { + // We only use the user supplied nonce for v1 convergent encryption keys + return true + } + + return false +} + // Depending on the errors in the batch, different status codes should be returned. User errors // will return a 400 and precede internal errors which return a 500. The reasoning behind this is // that user errors are non-retryable without making changes to the request, and should be surfaced diff --git a/builtin/logical/transit/path_encrypt_bench_test.go b/builtin/logical/transit/path_encrypt_bench_test.go index 8aef39dd4b72..a57c90fa7d63 100644 --- a/builtin/logical/transit/path_encrypt_bench_test.go +++ b/builtin/logical/transit/path_encrypt_bench_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go index 131517fda861..e46d32a5f06f 100644 --- a/builtin/logical/transit/path_encrypt_test.go +++ b/builtin/logical/transit/path_encrypt_test.go @@ -1,17 +1,19 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" "encoding/json" + "fmt" + "net/http" "reflect" "strings" "testing" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/keysutil" - "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -652,13 +654,26 @@ func TestTransit_BatchEncryptionCase12(t *testing.T) { } // Case13: Incorrect input for nonce when we aren't in convergent encryption should fail the operation -func TestTransit_BatchEncryptionCase13(t *testing.T) { +func TestTransit_EncryptionCase13(t *testing.T) { var err error b, s := createBackendWithStorage(t) + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + batchInput := []interface{}{ - map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "YmFkbm9uY2U="}, + map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"}, } batchData := map[string]interface{}{ @@ -670,10 +685,71 @@ func TestTransit_BatchEncryptionCase13(t *testing.T) { Storage: s, Data: batchData, } - _, err = b.HandleRequest(context.Background(), batchReq) + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil { + t.Fatal(err) + } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } +} + +// Case14: Incorrect input for nonce when we are in convergent version 3 should fail +func TestTransit_EncryptionCase14(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + cReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/my-key", + Storage: s, + Data: map[string]interface{}{ + "convergent_encryption": "true", + "derived": "true", + }, + } + resp, err := b.HandleRequest(context.Background(), cReq) + if err != nil { + t.Fatal(err) + } + + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "context": "SGVsbG8sIFdvcmxkCg==", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + + batchInput := []interface{}{ + data, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) if err != nil { t.Fatal(err) } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } } // Test that the fast path function decodeBatchRequestItems behave like mapstructure.Decode() to decode []BatchRequestItem. 
@@ -944,3 +1020,48 @@ func TestShouldWarnAboutNonceUsage(t *testing.T) { } } } + +func TestTransit_EncryptWithRSAPublicKey(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyType := "rsa-2048" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: fmt.Sprintf("encrypt/%s", keyID), + Storage: s, + Data: map[string]interface{}{ + "plaintext": "bXkgc2VjcmV0IGRhdGE=", + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } +} diff --git a/builtin/logical/transit/path_export.go b/builtin/logical/transit/path_export.go index 5f1089db1dd7..30735c2c749a 100644 --- a/builtin/logical/transit/path_export.go +++ b/builtin/logical/transit/path_export.go @@ -1,13 +1,13 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" + "crypto" "crypto/ecdsa" "crypto/elliptic" - "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/pem" @@ -16,24 +16,35 @@ import ( "strconv" "strings" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" ) const ( - exportTypeEncryptionKey = "encryption-key" - exportTypeSigningKey = "signing-key" - exportTypeHMACKey = "hmac-key" + exportTypeEncryptionKey = "encryption-key" + exportTypeSigningKey = "signing-key" + exportTypeHMACKey = "hmac-key" + exportTypePublicKey = "public-key" + exportTypeCertificateChain = "certificate-chain" + exportTypeCMACKey = "cmac-key" ) func (b *backend) pathExportKeys() *framework.Path { return &framework.Path{ Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "export", + OperationSuffix: "key|key-version", + }, + Fields: map[string]*framework.FieldSchema{ "type": { Type: framework.TypeString, - Description: "Type of key to export (encryption-key, signing-key, hmac-key)", + Description: "Type of key to export (encryption-key, signing-key, hmac-key, public-key, cmac-key)", }, "name": { Type: framework.TypeString, @@ -63,6 +74,12 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request case exportTypeEncryptionKey: case exportTypeSigningKey: case exportTypeHMACKey: + case exportTypePublicKey: + case exportTypeCertificateChain: + case exportTypeCMACKey: + if !constants.IsEnterprise { + return logical.ErrorResponse(ErrCmacEntOnly.Error()), logical.ErrInvalidRequest + } default: return logical.ErrorResponse(fmt.Sprintf("invalid export type: %s", exportType)), logical.ErrInvalidRequest } @@ -82,8 +99,8 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request } defer p.Unlock() - if !p.Exportable { - 
return logical.ErrorResponse("key is not exportable"), nil + if !p.Exportable && exportType != exportTypePublicKey && exportType != exportTypeCertificateChain { + return logical.ErrorResponse("private key material is not exportable"), nil } switch exportType { @@ -95,6 +112,10 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request if !p.Type.SigningSupported() { return logical.ErrorResponse("signing not supported for the key"), logical.ErrInvalidRequest } + case exportTypeCertificateChain: + if !p.Type.SigningSupported() { + return logical.ErrorResponse("certificate chain not supported for keys that do not support signing"), logical.ErrInvalidRequest + } } retKeys := map[string]string{} @@ -154,7 +175,11 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st switch exportType { case exportTypeHMACKey: - return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.HMACKey)), nil + src := key.HMACKey + if policy.Type == keysutil.KeyType_HMAC { + src = key.Key + } + return strings.TrimSpace(base64.StdEncoding.EncodeToString(src)), nil case exportTypeEncryptionKey: switch policy.Type { @@ -162,7 +187,11 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - return encodeRSAPrivateKey(key.RSAKey), nil + rsaKey, err := encodeRSAPrivateKey(key) + if err != nil { + return "", err + } + return rsaKey, nil } case exportTypeSigningKey: @@ -184,26 +213,127 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st return ecKey, nil case keysutil.KeyType_ED25519: + if len(key.Key) == 0 { + return "", nil + } + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - return encodeRSAPrivateKey(key.RSAKey), nil + rsaKey, err := encodeRSAPrivateKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + case exportTypePublicKey: + switch policy.Type { + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: + var curve elliptic.Curve + switch policy.Type { + case keysutil.KeyType_ECDSA_P384: + curve = elliptic.P384() + case keysutil.KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + ecKey, err := keyEntryToECPublicKey(key, curve) + if err != nil { + return "", err + } + return ecKey, nil + + case keysutil.KeyType_ED25519: + return strings.TrimSpace(key.FormattedPublicKey), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + rsaKey, err := encodeRSAPublicKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + case exportTypeCertificateChain: + if key.CertificateChain == nil { + return "", errors.New("selected key version does not have a certificate chain imported") + } + + var pemCerts []string + for _, derCertBytes := range key.CertificateChain { + pemCert := strings.TrimSpace(string(pem.EncodeToMemory( + &pem.Block{ + Type: "CERTIFICATE", + Bytes: derCertBytes, + }))) + pemCerts = append(pemCerts, pemCert) + } + certChain := strings.Join(pemCerts, "\n") + + return certChain, nil + case exportTypeCMACKey: + switch policy.Type { + case keysutil.KeyType_AES128_CMAC, keysutil.KeyType_AES256_CMAC: + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil } } - return "", 
fmt.Errorf("unknown key type %v", policy.Type) + return "", fmt.Errorf("unknown key type %v for export type %v", policy.Type, exportType) } -func encodeRSAPrivateKey(key *rsa.PrivateKey) string { +func encodeRSAPrivateKey(key *keysutil.KeyEntry) (string, error) { + if key == nil { + return "", errors.New("nil KeyEntry provided") + } + + if key.IsPrivateKeyMissing() { + return "", nil + } + // When encoding PKCS1, the PEM header should be `RSA PRIVATE KEY`. When Go // has PKCS8 encoding support, we may want to change this. - derBytes := x509.MarshalPKCS1PrivateKey(key) + blockType := "RSA PRIVATE KEY" + derBytes := x509.MarshalPKCS1PrivateKey(key.RSAKey) + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + pemBytes := pem.EncodeToMemory(&pemBlock) + return string(pemBytes), nil +} + +func encodeRSAPublicKey(key *keysutil.KeyEntry) (string, error) { + if key == nil { + return "", errors.New("nil KeyEntry provided") + } + + var publicKey crypto.PublicKey + publicKey = key.RSAPublicKey + if key.RSAKey != nil { + // Prefer the private key if it exists + publicKey = key.RSAKey.Public() + } + + if publicKey == nil { + return "", errors.New("requested to encode an RSA public key with no RSA key present") + } + + // Encode the RSA public key in PEM format to return over the API + derBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return "", fmt.Errorf("error marshaling RSA public key: %w", err) + } pemBlock := &pem.Block{ - Type: "RSA PRIVATE KEY", + Type: "PUBLIC KEY", Bytes: derBytes, } pemBytes := pem.EncodeToMemory(pemBlock) - return string(pemBytes) + if pemBytes == nil || len(pemBytes) == 0 { + return "", fmt.Errorf("failed to PEM-encode RSA public key") + } + + return string(pemBytes), nil } func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { @@ -211,27 +341,57 @@ func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, return "", errors.New("nil KeyEntry provided") } + if k.IsPrivateKeyMissing() { + return "", nil + } + + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + } + + blockType := "EC PRIVATE KEY" privKey := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: k.EC_X, - Y: k.EC_Y, - }, - D: k.EC_D, + PublicKey: pubKey, + D: k.EC_D, } - ecder, err := x509.MarshalECPrivateKey(privKey) + derBytes, err := x509.MarshalECPrivateKey(privKey) if err != nil { return "", err } - if ecder == nil { - return "", errors.New("no data returned when marshalling to private key") + + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, } - block := pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: ecder, + return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil +} + +func keyEntryToECPublicKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { + if k == nil { + return "", errors.New("nil KeyEntry provided") } - return strings.TrimSpace(string(pem.EncodeToMemory(&block))), nil + + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + } + + blockType := "PUBLIC KEY" + derBytes, err := x509.MarshalPKIXPublicKey(&pubKey) + if err != nil { + return "", err + } + + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil } const pathExportHelpSyn = `Export named encryption or signing key` diff --git a/builtin/logical/transit/path_export_test.go b/builtin/logical/transit/path_export_test.go index df9cd8e55853..b91ef47fb420 100644 --- 
a/builtin/logical/transit/path_export_test.go +++ b/builtin/logical/transit/path_export_test.go @@ -1,26 +1,80 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" + cryptoRand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" "fmt" "reflect" "strconv" + "strings" "testing" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" ) +func TestTransit_Export_Unknown_ExportType(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + keyType := "ed25519" + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "exportable": true, + "type": keyType, + }, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed creating key %s: %v", keyType, err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/bad-export-type/foo", + } + rsp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("did not error on bad export type got: %v", rsp) + } + if rsp == nil || !rsp.IsError() { + t.Fatalf("response did not contain an error on bad export type got: %v", rsp) + } + if !strings.Contains(rsp.Error().Error(), "invalid export type") { + t.Fatalf("failed with unexpected error: %v", err) + } +} + func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) { + t.Parallel() + verifyExportsCorrectVersion(t, "encryption-key", "aes128-gcm96") verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96") verifyExportsCorrectVersion(t, "encryption-key", "chacha20-poly1305") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-2048") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-3072") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-4096") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p384") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p521") verifyExportsCorrectVersion(t, "signing-key", "ed25519") + verifyExportsCorrectVersion(t, "signing-key", "rsa-2048") + verifyExportsCorrectVersion(t, "signing-key", "rsa-3072") + verifyExportsCorrectVersion(t, "signing-key", "rsa-4096") verifyExportsCorrectVersion(t, "hmac-key", "aes128-gcm96") verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96") verifyExportsCorrectVersion(t, "hmac-key", "chacha20-poly1305") @@ -28,6 +82,14 @@ func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) { verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p384") verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p521") verifyExportsCorrectVersion(t, "hmac-key", "ed25519") + verifyExportsCorrectVersion(t, "hmac-key", "hmac") + verifyExportsCorrectVersion(t, "public-key", "rsa-2048") + verifyExportsCorrectVersion(t, "public-key", "rsa-3072") + verifyExportsCorrectVersion(t, "public-key", "rsa-4096") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p384") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p521") + verifyExportsCorrectVersion(t, "public-key", "ed25519") } func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { @@ -43,6 +105,9 @@ func verifyExportsCorrectVersion(t 
*testing.T, exportType, keyType string) { "exportable": true, "type": keyType, } + if keyType == "hmac" { + req.Data["key_size"] = 32 + } _, err := b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) @@ -121,6 +186,8 @@ func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { } func TestTransit_Export_ValidVersionsOnly(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) // First create a key, v1 @@ -221,6 +288,8 @@ func TestTransit_Export_ValidVersionsOnly(t *testing.T) { } func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -251,6 +320,8 @@ func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) { } func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -279,6 +350,8 @@ func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) } func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) { + t.Parallel() + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p256") testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p384") testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p521") @@ -309,11 +382,55 @@ func testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testi } _, err = b.HandleRequest(context.Background(), req) if err == nil { - t.Fatal("Key does not support encryption but was exported without error.") + t.Fatalf("Key %s does not support encryption but was exported without error.", keyType) + } +} + +func TestTransit_Export_PublicKeyDoesNotSupportEncryption_ReturnsError(t *testing.T) { + t.Parallel() + + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "chacha20-poly1305") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "aes128-gcm96") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "aes256-gcm96") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "hmac") +} + +func testTransit_Export_PublicKeyNotSupported_ReturnsError(t *testing.T, keyType string) { + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "type": keyType, + }, + } + if keyType == "hmac" { + req.Data["key_size"] = 32 + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed creating key %s: %v", keyType, err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/public-key/foo", + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("Key %s does not support public key exporting but was exported without error.", keyType) + } + if !strings.Contains(err.Error(), fmt.Sprintf("unknown key type %s for export type public-key", keyType)) { + t.Fatalf("unexpected error value for key type: %s: %v", keyType, err) } } func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -329,6 +446,8 @@ func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) { } func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := 
&logical.Request{ @@ -377,3 +496,135 @@ func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) { t.Fatal("Encryption key data matched hmac key data") } } + +func TestTransit_Export_CertificateChain(t *testing.T) { + t.Parallel() + + generateKeys(t) + + // Create Cluster + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": Factory, + "pki": pki.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + // Mount transit backend + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + require.NoError(t, err) + + // Mount PKI backend + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }) + require.NoError(t, err) + + testTransit_exportCertificateChain(t, client, "rsa-2048") + testTransit_exportCertificateChain(t, client, "rsa-3072") + testTransit_exportCertificateChain(t, client, "rsa-4096") + testTransit_exportCertificateChain(t, client, "ecdsa-p256") + testTransit_exportCertificateChain(t, client, "ecdsa-p384") + testTransit_exportCertificateChain(t, client, "ecdsa-p521") + testTransit_exportCertificateChain(t, client, "ed25519") +} + +func testTransit_exportCertificateChain(t *testing.T, apiClient *api.Client, keyType string) { + keyName := fmt.Sprintf("%s", keyType) + issuerName := fmt.Sprintf("%s-issuer", keyType) + + // Get key to be imported + privKey := getKey(t, keyType) + privKeyBytes, err := x509.MarshalPKCS8PrivateKey(privKey) + require.NoError(t, err, fmt.Sprintf("failed to marshal private key: %s", err)) + + // Create CSR + var csrTemplate x509.CertificateRequest + csrTemplate.Subject.CommonName = "example.com" + csrBytes, err := x509.CreateCertificateRequest(cryptoRand.Reader, &csrTemplate, privKey) + require.NoError(t, err, fmt.Sprintf("failed to create CSR: %s", err)) + + pemCsr := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrBytes, + })) + + // Generate PKI root + _, err = apiClient.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "issuer_name": issuerName, + "common_name": "PKI Root X1", + }) + require.NoError(t, err) + + // Create role to be used in the certificate issuing + _, err = apiClient.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "allowed_domains": "example.com", + "allow_bare_domains": true, + "basic_constraints_valid_for_non_ca": true, + "key_type": "any", + }) + require.NoError(t, err) + + // Sign the CSR + resp, err := apiClient.Logical().Write("pki/sign/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "csr": pemCsr, + "ttl": "10m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + leafCertPEM := resp.Data["certificate"].(string) + + // Get wrapping key + resp, err = apiClient.Logical().Read("transit/wrapping_key") + require.NoError(t, err) + require.NotNil(t, resp) + + pubWrappingKeyString := strings.TrimSpace(resp.Data["public_key"].(string)) + wrappingKeyPemBlock, _ := pem.Decode([]byte(pubWrappingKeyString)) + + pubWrappingKey, err := x509.ParsePKIXPublicKey(wrappingKeyPemBlock.Bytes) + require.NoError(t, err, "failed to parse wrapping key") + + blob := wrapTargetPKCS8ForImport(t, pubWrappingKey.(*rsa.PublicKey), privKeyBytes, "SHA256") + + // Import key + _, err = 
apiClient.Logical().Write(fmt.Sprintf("/transit/keys/%s/import", keyName), map[string]interface{}{ + "ciphertext": blob, + "type": keyType, + }) + require.NoError(t, err) + + // Import cert chain + _, err = apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/set-certificate", keyName), map[string]interface{}{ + "certificate_chain": leafCertPEM, + }) + require.NoError(t, err) + + // Export cert chain + resp, err = apiClient.Logical().Read(fmt.Sprintf("transit/export/certificate-chain/%s", keyName)) + require.NoError(t, err) + require.NotNil(t, resp) + + exportedKeys := resp.Data["keys"].(map[string]interface{}) + exportedCertChainPEM := exportedKeys["1"].(string) + + if exportedCertChainPEM != leafCertPEM { + t.Fatalf("expected exported cert chain to match with imported value") + } +} diff --git a/builtin/logical/transit/path_hash.go b/builtin/logical/transit/path_hash.go index b5f1d7de569e..250822d0b9d5 100644 --- a/builtin/logical/transit/path_hash.go +++ b/builtin/logical/transit/path_hash.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -12,15 +12,21 @@ import ( "fmt" "hash" - "golang.org/x/crypto/sha3" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/sha3" ) func (b *backend) pathHash() *framework.Path { return &framework.Path{ Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "hash", + OperationSuffix: "|with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "input": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_hash_test.go b/builtin/logical/transit/path_hash_test.go index 084012dd4baf..9ded6721a8d5 100644 --- a/builtin/logical/transit/path_hash_test.go +++ b/builtin/logical/transit/path_hash_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go index a9a5797ffc3d..f71c9516ea5f 100644 --- a/builtin/logical/transit/path_hmac.go +++ b/builtin/logical/transit/path_hmac.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -48,6 +48,13 @@ type batchResponseHMACItem struct { func (b *backend) pathHMAC() *framework.Path { return &framework.Path{ Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "hmac|hmac-with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -129,6 +136,7 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() switch { case ver == 0: @@ -138,23 +146,19 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr case ver == p.LatestVersion: // Allowed case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: - p.Unlock() return logical.ErrorResponse("cannot generate HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest } key, err := p.HMACKey(ver) if err != nil { - p.Unlock() return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } if key == nil && p.Type != keysutil.KeyType_MANAGED_KEY { - p.Unlock() return nil, fmt.Errorf("HMAC key value could not be computed") } hashAlgorithm, ok := keysutil.HashTypeMap[algorithm] if !ok { - p.Unlock() return logical.ErrorResponse("unsupported algorithm %q", hashAlgorithm), nil } @@ -165,18 +169,15 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr if batchInputRaw != nil { err = mapstructure.Decode(batchInputRaw, &batchInputItems) if err != nil { - p.Unlock() return nil, fmt.Errorf("failed to parse batch input: %w", err) } if len(batchInputItems) == 0 { - p.Unlock() return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest } } else { valueRaw, ok := d.GetOk("input") if !ok { - p.Unlock() return logical.ErrorResponse("missing input for HMAC"), logical.ErrInvalidRequest } @@ -226,8 +227,6 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr response[i].HMAC = retStr } - p.Unlock() - // Generate the response resp := &logical.Response{} if batchInputRaw != nil { @@ -258,7 +257,19 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f name := d.Get("name").(string) algorithm := d.Get("urlalgorithm").(string) if algorithm == "" { - algorithm = d.Get("algorithm").(string) + hashAlgorithmRaw, hasHashAlgorithm := d.GetOk("hash_algorithm") + algorithmRaw, hasAlgorithm := d.GetOk("algorithm") + + // As `algorithm` is deprecated, make sure we only read it if + // `hash_algorithm` is not present. 
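+	// For example (illustrative payloads, not part of the original change):
+	//   {"hash_algorithm": "sha2-512", "algorithm": "sha2-256"} -> "sha2-512"
+	//   {"algorithm": "sha2-256"}                               -> "sha2-256"
+	//   {}                                                      -> schema default of "hash_algorithm"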
+	switch {
+	case hasHashAlgorithm:
+		algorithm = hashAlgorithmRaw.(string)
+	case hasAlgorithm:
+		algorithm = algorithmRaw.(string)
+	default:
+		algorithm = d.Get("hash_algorithm").(string)
+	}
 	}

 	// Get the policy
@@ -275,10 +286,10 @@
 	if !b.System().CachingDisabled() {
 		p.Lock(false)
 	}
+	defer p.Unlock()

 	hashAlgorithm, ok := keysutil.HashTypeMap[algorithm]
 	if !ok {
-		p.Unlock()
 		return logical.ErrorResponse("unsupported algorithm %q", hashAlgorithm), nil
 	}
@@ -289,12 +300,10 @@
 	if batchInputRaw != nil {
 		err := mapstructure.Decode(batchInputRaw, &batchInputItems)
 		if err != nil {
-			p.Unlock()
 			return nil, fmt.Errorf("failed to parse batch input: %w", err)
 		}

 		if len(batchInputItems) == 0 {
-			p.Unlock()
 			return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
 		}
 	} else {
@@ -391,8 +400,6 @@
 		response[i].Valid = hmac.Equal(retBytes, verBytes)
 	}

-	p.Unlock()
-
 	// Generate the response
 	resp := &logical.Response{}
 	if batchInputRaw != nil {
diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go
index af98dd2ca081..3f21106c4cc9 100644
--- a/builtin/logical/transit/path_hmac_test.go
+++ b/builtin/logical/transit/path_hmac_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package transit

@@ -94,17 +94,40 @@ func TestTransit_HMAC(t *testing.T) {
 	}

 	// Now verify
+	verify := func() {
+		t.Helper()
+
+		resp, err = b.HandleRequest(context.Background(), req)
+		if err != nil {
+			t.Fatalf("%v: %v", err, resp)
+		}
+		if resp == nil {
+			t.Fatal("expected non-nil response")
+		}
+		if errStr, ok := resp.Data["error"]; ok {
+			t.Fatalf("error validating hmac: %s", errStr)
+		}
+		if resp.Data["valid"].(bool) == false {
+			t.Fatalf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)
+		}
+	}
 	req.Path = strings.ReplaceAll(req.Path, "hmac", "verify")
 	req.Data["hmac"] = value.(string)
-	resp, err = b.HandleRequest(context.Background(), req)
-	if err != nil {
-		t.Fatalf("%v: %v", err, resp)
-	}
-	if resp == nil {
-		t.Fatal("expected non-nil response")
-	}
-	if resp.Data["valid"].(bool) == false {
-		panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp))
+	verify()
+
+	// If `algorithm` parameter is used, try with `hash_algorithm` as well
+	if algorithm, ok := req.Data["algorithm"]; ok {
+		// Note that `hash_algorithm` takes precedence over `algorithm`, since the
+		// latter is deprecated.
+		req.Data["hash_algorithm"] = algorithm
+		req.Data["algorithm"] = "xxx"
+		defer func() {
+			// Restore the req fields, since req is re-used by the tests below
+			delete(req.Data, "hash_algorithm")
+			req.Data["algorithm"] = algorithm
+		}()
+
+		verify()
 	}
 }
diff --git a/builtin/logical/transit/path_import.go b/builtin/logical/transit/path_import.go
index 1bbe75637a66..a38167e98d57 100644
--- a/builtin/logical/transit/path_import.go
+++ b/builtin/logical/transit/path_import.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -17,10 +17,11 @@ import ( "strings" "time" - "github.com/google/tink/go/kwp/subtle" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/tink-crypto/tink-go/v2/kwp/subtle" ) const EncryptedKeyBytes = 512 @@ -28,6 +29,13 @@ const EncryptedKeyBytes = 512 func (b *backend) pathImport() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/import", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "import", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -38,7 +46,7 @@ func (b *backend) pathImport() *framework.Path { Default: "aes256-gcm96", Description: `The type of key being imported. Currently, "aes128-gcm96" (symmetric), "aes256-gcm96" (symmetric), "ecdsa-p256" (asymmetric), "ecdsa-p384" (asymmetric), "ecdsa-p521" (asymmetric), "ed25519" (asymmetric), "rsa-2048" (asymmetric), "rsa-3072" -(asymmetric), "rsa-4096" (asymmetric) are supported. Defaults to "aes256-gcm96". +(asymmetric), "rsa-4096" (asymmetric), "hmac", "aes128-cmac", "aes256-cmac" are supported. Defaults to "aes256-gcm96". `, }, "hash_function": { @@ -52,6 +60,10 @@ ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, }, + "public_key": { + Type: framework.TypeString, + Description: `The plaintext PEM public key to be imported. If "ciphertext" is set, this field is ignored.`, + }, "allow_rotation": { Type: framework.TypeBool, Description: "True if the imported key may be rotated within Vault; false otherwise.", @@ -104,6 +116,13 @@ key.`, func (b *backend) pathImportVersion() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/import_version", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "import", + OperationSuffix: "key-version", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -114,12 +133,21 @@ func (b *backend) pathImportVersion() *framework.Path { Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, }, + "public_key": { + Type: framework.TypeString, + Description: `The plaintext public key to be imported. If "ciphertext" is set, this field is ignored.`, + }, "hash_function": { Type: framework.TypeString, Default: "SHA256", Description: `The hash function used as a random oracle in the OAEP wrapping of the user-generated, ephemeral AES key. 
Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", or "SHA512"`,
 		},
+		"version": {
+			Type: framework.TypeInt,
+			Description: `Key version to be updated. If left empty, a new version will be created unless
+a private key is specified and the 'Latest' key is missing a private key.`,
+		},
 	},
 	Callbacks: map[logical.Operation]framework.OperationFunc{
 		logical.UpdateOperation: b.pathImportVersionWrite,
@@ -133,11 +161,9 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d *
 	name := d.Get("name").(string)
 	derived := d.Get("derived").(bool)
 	keyType := d.Get("type").(string)
-	hashFnStr := d.Get("hash_function").(string)
 	exportable := d.Get("exportable").(bool)
 	allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool)
 	autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int))
-	ciphertextString := d.Get("ciphertext").(string)
 	allowRotation := d.Get("allow_rotation").(bool)

 	// Ensure the caller didn't supply "convergent_encryption" as a field, since it's not supported on import.
@@ -149,6 +175,12 @@
 		return nil, errors.New("allow_rotation must be set to true if auto-rotation is enabled")
 	}

+	// Ensure that at least one `key` field has been set
+	isCiphertextSet, err := checkKeyFieldsSet(d)
+	if err != nil {
+		return nil, err
+	}
+
 	polReq := keysutil.PolicyRequest{
 		Storage: req.Storage,
 		Name:    name,
@@ -157,6 +189,7 @@
 		AllowPlaintextBackup:     allowPlaintextBackup,
 		AutoRotatePeriod:         autoRotatePeriod,
 		AllowImportedKeyRotation: allowRotation,
+		IsPrivateKey:             isCiphertextSet,
 	}

 	switch strings.ToLower(keyType) {
@@ -182,13 +215,16 @@
 		polReq.KeyType = keysutil.KeyType_RSA4096
 	case "hmac":
 		polReq.KeyType = keysutil.KeyType_HMAC
+	case "aes128-cmac":
+		polReq.KeyType = keysutil.KeyType_AES128_CMAC
+	case "aes256-cmac":
+		polReq.KeyType = keysutil.KeyType_AES256_CMAC
 	default:
 		return logical.ErrorResponse(fmt.Sprintf("unknown key type: %v", keyType)), logical.ErrInvalidRequest
 	}

-	hashFn, err := parseHashFn(hashFnStr)
-	if err != nil {
-		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+	if polReq.KeyType.CMACSupported() && !constants.IsEnterprise {
+		return logical.ErrorResponse(ErrCmacEntOnly.Error()), logical.ErrInvalidRequest
 	}

 	p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader())
@@ -203,14 +239,9 @@
 		return nil, errors.New("the import path cannot be used with an existing key; use import-version to rotate an existing imported key")
 	}

-	ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString)
+	key, resp, err := b.extractKeyFromFields(ctx, req, d, polReq.KeyType, isCiphertextSet)
 	if err != nil {
-		return nil, err
-	}
-
-	key, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn)
-	if err != nil {
-		return nil, err
+		return resp, err
 	}

 	err = b.lm.ImportPolicy(ctx, polReq, key, b.GetRandomReader())
@@ -223,20 +254,18 @@
 func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
 	name := d.Get("name").(string)
-	hashFnStr := d.Get("hash_function").(string)
-	ciphertextString := d.Get("ciphertext").(string)
-
-	polReq := 
keysutil.PolicyRequest{ - Storage: req.Storage, - Name: name, - Upsert: false, - } - hashFn, err := parseHashFn(hashFnStr) + isCiphertextSet, err := checkKeyFieldsSet(d) if err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + return nil, err } + polReq := keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + Upsert: false, + IsPrivateKey: isCiphertextSet, + } p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -256,15 +285,24 @@ func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Reque } defer p.Unlock() - ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) + key, resp, err := b.extractKeyFromFields(ctx, req, d, p.Type, isCiphertextSet) if err != nil { - return nil, err + return resp, err } - importKey, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) - if err != nil { - return nil, err + + // Get param version if set else import a new version. + if version, ok := d.GetOk("version"); ok { + versionToUpdate := version.(int) + + // Check if given version can be updated given input + err = p.KeyVersionCanBeUpdated(versionToUpdate, isCiphertextSet) + if err == nil { + err = p.ImportPrivateKeyForVersion(ctx, req.Storage, versionToUpdate, key) + } + } else { + err = p.ImportPublicOrPrivate(ctx, req.Storage, key, isCiphertextSet, b.GetRandomReader()) } - err = p.Import(ctx, req.Storage, importKey, b.GetRandomReader()) + if err != nil { return nil, err } @@ -322,6 +360,36 @@ func (b *backend) decryptImportedKey(ctx context.Context, storage logical.Storag return importKey, nil } +func (b *backend) extractKeyFromFields(ctx context.Context, req *logical.Request, d *framework.FieldData, keyType keysutil.KeyType, isPrivateKey bool) ([]byte, *logical.Response, error) { + var key []byte + if isPrivateKey { + hashFnStr := d.Get("hash_function").(string) + hashFn, err := parseHashFn(hashFnStr) + if err != nil { + return key, logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + ciphertextString := d.Get("ciphertext").(string) + ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) + if err != nil { + return key, nil, err + } + + key, err = b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) + if err != nil { + return key, nil, err + } + } else { + publicKeyString := d.Get("public_key").(string) + if !keyType.ImportPublicKeySupported() { + return key, nil, errors.New("provided type does not support public_key import") + } + key = []byte(publicKeyString) + } + + return key, nil, nil +} + func parseHashFn(hashFn string) (hash.Hash, error) { switch strings.ToUpper(hashFn) { case "SHA1": @@ -339,6 +407,29 @@ func parseHashFn(hashFn string) (hash.Hash, error) { } } +// checkKeyFieldsSet: Checks which key fields are set. 
If both are set, an error is returned
+func checkKeyFieldsSet(d *framework.FieldData) (bool, error) {
+	ciphertextSet := isFieldSet("ciphertext", d)
+	publicKeySet := isFieldSet("public_key", d)
+
+	if ciphertextSet && publicKeySet {
+		return false, errors.New("only one of the following fields, ciphertext and public_key, can be set")
+	} else if ciphertextSet {
+		return true, nil
+	} else {
+		return false, nil
+	}
+}
+
+func isFieldSet(fieldName string, d *framework.FieldData) bool {
+	_, fieldSet := d.Raw[fieldName]
+	if !fieldSet {
+		return false
+	}
+
+	return true
+}
+
 const (
 	pathImportWriteSyn  = "Imports an externally-generated key into a new transit key"
 	pathImportWriteDesc = "This path is used to import an externally-generated " +
diff --git a/builtin/logical/transit/path_import_test.go b/builtin/logical/transit/path_import_test.go
index 67b7a9ce4fd1..d26f5ff95f64 100644
--- a/builtin/logical/transit/path_import_test.go
+++ b/builtin/logical/transit/path_import_test.go
@@ -1,10 +1,11 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package transit

 import (
 	"context"
+	"crypto"
 	"crypto/ecdsa"
 	"crypto/ed25519"
 	"crypto/elliptic"
@@ -12,14 +13,15 @@ import (
 	"crypto/rsa"
 	"crypto/x509"
 	"encoding/base64"
+	"encoding/pem"
 	"fmt"
 	"strconv"
 	"sync"
 	"testing"

-	"github.com/google/tink/go/kwp/subtle"
 	uuid "github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/tink-crypto/tink-go/v2/kwp/subtle"
 )

 var keyTypes = []string{
@@ -427,6 +429,70 @@ func TestTransit_Import(t *testing.T) {
 		}
 	},
 	)
+
+	t.Run(
+		"import public key ed25519",
+		func(t *testing.T) {
+			keyType := "ed25519"
+			keyID, err := uuid.GenerateUUID()
+			if err != nil {
+				t.Fatalf("failed to generate key ID: %s", err)
+			}
+
+			// Get keys
+			privateKey := getKey(t, keyType)
+			publicKeyBytes, err := getPublicKey(privateKey, keyType)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// Import key
+			req := &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import", keyID),
+				Data: map[string]interface{}{
+					"public_key": publicKeyBytes,
+					"type":       keyType,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to import ed25519 key: %v", err)
+			}
+		})
+
+	t.Run(
+		"import public key ecdsa",
+		func(t *testing.T) {
+			keyType := "ecdsa-p256"
+			keyID, err := uuid.GenerateUUID()
+			if err != nil {
+				t.Fatalf("failed to generate key ID: %s", err)
+			}
+
+			// Get keys
+			privateKey := getKey(t, keyType)
+			publicKeyBytes, err := getPublicKey(privateKey, keyType)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// Import key
+			req := &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import", keyID),
+				Data: map[string]interface{}{
+					"public_key": publicKeyBytes,
+					"type":       keyType,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to import public key: %s", err)
+			}
+		})
 }

 func TestTransit_ImportVersion(t *testing.T) {
@@ -573,6 +639,313 @@ func TestTransit_ImportVersion(t *testing.T) {
 		}
 	},
 	)
+
+	t.Run(
+		"import rsa public key and update version with private counterpart",
+		func(t *testing.T) {
+			keyType := "rsa-2048"
+			keyID, err := uuid.GenerateUUID()
+			if err != nil {
+				t.Fatalf("failed to generate key ID: %s", err)
+			}
+
+			// Get keys
+			privateKey := getKey(t, keyType)
+			importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256")
+			publicKeyBytes, err := 
getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import RSA public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // Update version - import RSA private key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + }, + ) +} + +func TestTransit_ImportVersionWithPublicKeys(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + // Import a public key then import private should give us one key + t.Run( + "import rsa public key and update version with private counterpart", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // Update version - import EC private key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // We should have one key on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 1 { + t.Fatalf("expected 1 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) + + // Import a private and then public should give us two keys + t.Run( + "import ec private key and then its public counterpart", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // 
Import EC private key
+			req := &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import", keyID),
+				Data: map[string]interface{}{
+					"ciphertext": importBlob,
+					"type":       keyType,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to update key: %s", err)
+			}
+
+			// Update version - Import EC public key
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import_version", keyID),
+				Data: map[string]interface{}{
+					"public_key": publicKeyBytes,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to import public key: %s", err)
+			}
+
+			// We should have two keys on export
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.ReadOperation,
+				Path:      fmt.Sprintf("export/public-key/%s", keyID),
+			}
+			resp, err := b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to export key: %s", err)
+			}
+
+			if len(resp.Data["keys"].(map[string]string)) != 2 {
+				t.Fatalf("expected 2 keys but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp)
+			}
+		},
+	)

+	// Importing one public key and then another should allow us to insert two private keys.
+	t.Run(
+		"import two public keys and two private keys in reverse order",
+		func(t *testing.T) {
+			keyType := "ecdsa-p256"
+			keyID, err := uuid.GenerateUUID()
+			if err != nil {
+				t.Fatalf("failed to generate key ID: %s", err)
+			}
+
+			// Get keys
+			privateKey1 := getKey(t, keyType)
+			importBlob1 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey1, keyType, "SHA256")
+			publicKeyBytes1, err := getPublicKey(privateKey1, keyType)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			privateKey2, err := generateKey(keyType)
+			if err != nil {
+				t.Fatal(err)
+			}
+			importBlob2 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey2, keyType, "SHA256")
+			publicKeyBytes2, err := getPublicKey(privateKey2, keyType)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// Import EC public key
+			req := &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import", keyID),
+				Data: map[string]interface{}{
+					"public_key": publicKeyBytes1,
+					"type":       keyType,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to update key: %s", err)
+			}
+
+			// Update version - Import second EC public key
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import_version", keyID),
+				Data: map[string]interface{}{
+					"public_key": publicKeyBytes2,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to import public key: %s", err)
+			}
+
+			// We should have two keys on export
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.ReadOperation,
+				Path:      fmt.Sprintf("export/public-key/%s", keyID),
+			}
+			resp, err := b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to export key: %s", err)
+			}
+
+			if len(resp.Data["keys"].(map[string]string)) != 2 {
+				t.Fatalf("expected 2 keys but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp)
+			}
+
+			// Import second private key first, with no options. 
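+			// (Per the "version" field description in path_import.go: when a
+			// private key is supplied without "version", import_version updates
+			// the 'Latest' version that is missing its private key, which here
+			// is version 2, matching importBlob2.)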
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import_version", keyID),
+				Data: map[string]interface{}{
+					"ciphertext": importBlob2,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to import private key: %s", err)
+			}
+
+			// Import first private key second, with a version
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.UpdateOperation,
+				Path:      fmt.Sprintf("keys/%s/import_version", keyID),
+				Data: map[string]interface{}{
+					"ciphertext": importBlob1,
+					"version":    1,
+				},
+			}
+			_, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to import private key: %s", err)
+			}
+
+			// We should still have two keys on export
+			req = &logical.Request{
+				Storage:   s,
+				Operation: logical.ReadOperation,
+				Path:      fmt.Sprintf("export/public-key/%s", keyID),
+			}
+			resp, err = b.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatalf("failed to export key: %s", err)
+			}
+
+			if len(resp.Data["keys"].(map[string]string)) != 2 {
+				t.Fatalf("expected 2 keys but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp)
+			}
+		},
+	)
 }

 func wrapTargetKeyForImport(t *testing.T, wrappingKey *rsa.PublicKey, targetKey interface{}, targetKeyType string, hashFnName string) string {
@@ -663,3 +1036,40 @@ func generateKey(keyType string) (interface{}, error) {
 		return nil, fmt.Errorf("failed to generate unsupported key type: %s", keyType)
 	}
 }
+
+func getPublicKey(privateKey crypto.PrivateKey, keyType string) ([]byte, error) {
+	var publicKey crypto.PublicKey
+	var publicKeyBytes []byte
+	switch keyType {
+	case "rsa-2048", "rsa-3072", "rsa-4096":
+		publicKey = privateKey.(*rsa.PrivateKey).Public()
+	case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521":
+		publicKey = privateKey.(*ecdsa.PrivateKey).Public()
+	case "ed25519":
+		publicKey = privateKey.(ed25519.PrivateKey).Public()
+	default:
+		return publicKeyBytes, fmt.Errorf("failed to get public key from %s key", keyType)
+	}
+
+	publicKeyBytes, err := publicKeyToBytes(publicKey)
+	if err != nil {
+		return publicKeyBytes, err
+	}
+
+	return publicKeyBytes, nil
+}
+
+func publicKeyToBytes(publicKey crypto.PublicKey) ([]byte, error) {
+	var publicKeyBytesPem []byte
+	publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
+	if err != nil {
+		return publicKeyBytesPem, fmt.Errorf("failed to marshal public key: %s", err)
+	}
+
+	pemBlock := &pem.Block{
+		Type:  "PUBLIC KEY",
+		Bytes: publicKeyBytes,
+	}
+
+	return pem.EncodeToMemory(pemBlock), nil
+}
diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go
index 295634dc4603..ded75f57d874 100644
--- a/builtin/logical/transit/path_keys.go
+++ b/builtin/logical/transit/path_keys.go
@@ -1,30 +1,35 @@
 // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" "crypto/elliptic" - "crypto/x509" "encoding/base64" "encoding/pem" "fmt" "strconv" + "strings" "time" - "golang.org/x/crypto/ed25519" - "github.com/fatih/structs" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ed25519" ) func (b *backend) pathListKeys() *framework.Path { return &framework.Path{ Pattern: "keys/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "keys", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKeysList, }, @@ -37,6 +42,12 @@ func (b *backend) pathListKeys() *framework.Path { func (b *backend) pathKeys() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -121,10 +132,25 @@ key.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathPolicyWrite, - logical.DeleteOperation: b.pathPolicyDelete, - logical.ReadOperation: b.pathPolicyRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathPolicyWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "create", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathPolicyDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathPolicyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + }, + }, }, HelpSynopsis: pathPolicyHelpSyn, @@ -197,6 +223,10 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.KeyType = keysutil.KeyType_HMAC case "managed_key": polReq.KeyType = keysutil.KeyType_MANAGED_KEY + case "aes128-cmac": + polReq.KeyType = keysutil.KeyType_AES128_CMAC + case "aes256-cmac": + polReq.KeyType = keysutil.KeyType_AES256_CMAC default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -219,6 +249,10 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.ManagedKeyUUID = keyId } + if polReq.KeyType.CMACSupported() && !constants.IsEnterprise { + return logical.ErrorResponse(ErrCmacEntOnly.Error()), logical.ErrInvalidRequest + } + p, upserted, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -230,19 +264,22 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * p.Unlock() } - resp := &logical.Response{} + resp, err := b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } if !upserted { resp.AddWarning(fmt.Sprintf("key %s already existed", name)) } - - return nil, nil + return resp, nil } // Built-in helper type for returning asymmetric keys type asymKey struct { - Name string `json:"name" structs:"name" mapstructure:"name"` - PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"` - CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` + 
Name string `json:"name" structs:"name" mapstructure:"name"` + PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"` + CertificateChain string `json:"certificate_chain" structs:"certificate_chain" mapstructure:"certificate_chain"` + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` } func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -263,6 +300,19 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f } defer p.Unlock() + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + + return b.formatKeyPolicy(p, context) +} + +func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical.Response, error) { // Return the response resp := &logical.Response{ Data: map[string]interface{}{ @@ -319,15 +369,6 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f } } - contextRaw := d.Get("context").(string) - var context []byte - if len(contextRaw) != 0 { - context, err = base64.StdEncoding.DecodeString(contextRaw) - if err != nil { - return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest - } - } - switch p.Type { case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: retKeys := map[string]int64{} @@ -346,6 +387,18 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f if key.CreationTime.IsZero() { key.CreationTime = time.Unix(v.DeprecatedCreationTime, 0) } + if v.CertificateChain != nil { + var pemCerts []string + for _, derCertBytes := range v.CertificateChain { + pemCert := strings.TrimSpace(string(pem.EncodeToMemory( + &pem.Block{ + Type: "CERTIFICATE", + Bytes: derCertBytes, + }))) + pemCerts = append(pemCerts, pemCert) + } + key.CertificateChain = strings.Join(pemCerts, "\n") + } switch p.Type { case keysutil.KeyType_ECDSA_P256: @@ -382,21 +435,11 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f key.Name = "rsa-4096" } - // Encode the RSA public key in PEM format to return over the - // API - derBytes, err := x509.MarshalPKIXPublicKey(v.RSAKey.Public()) + pubKey, err := encodeRSAPublicKey(&v) if err != nil { - return nil, fmt.Errorf("error marshaling RSA public key: %w", err) - } - pemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: derBytes, - } - pemBytes := pem.EncodeToMemory(pemBlock) - if pemBytes == nil || len(pemBytes) == 0 { - return nil, fmt.Errorf("failed to PEM-encode RSA public key") + return nil, err } - key.PublicKey = string(pemBytes) + key.PublicKey = pubKey } retKeys[k] = structs.New(key).Map() diff --git a/builtin/logical/transit/path_keys_config.go b/builtin/logical/transit/path_keys_config.go index 957934a16619..ed91d236dc92 100644 --- a/builtin/logical/transit/path_keys_config.go +++ b/builtin/logical/transit/path_keys_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -16,6 +16,13 @@ import ( func (b *backend) pathKeysConfig() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "configure", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -90,6 +97,8 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, } defer p.Unlock() + var warning string + originalMinDecryptionVersion := p.MinDecryptionVersion originalMinEncryptionVersion := p.MinEncryptionVersion originalDeletionAllowed := p.DeletionAllowed @@ -106,8 +115,6 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, } }() - resp = &logical.Response{} - persistNeeded := false minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version") @@ -120,7 +127,7 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, if minDecryptionVersion == 0 { minDecryptionVersion = 1 - resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1") + warning = "since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1" } if minDecryptionVersion != p.MinDecryptionVersion { @@ -211,10 +218,21 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, p.AutoRotatePeriod = autoRotatePeriod persistNeeded = true } + + if p.Type == keysutil.KeyType_MANAGED_KEY && autoRotatePeriod != 0 { + return logical.ErrorResponse("Auto rotation can not be set for managed keys"), nil + } } if !persistNeeded { - return nil, nil + resp, err := b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + return resp, nil } switch { @@ -224,11 +242,18 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, return logical.ErrorResponse("min decryption version should not be less then min available version"), nil } - if len(resp.Warnings) == 0 { - return nil, p.Persist(ctx, req.Storage) + if err := p.Persist(ctx, req.Storage); err != nil { + return nil, err } - return resp, p.Persist(ctx, req.Storage) + resp, err = b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + return resp, nil } const pathKeysConfigHelpSyn = `Configure a named encryption key` diff --git a/builtin/logical/transit/path_keys_config_test.go b/builtin/logical/transit/path_keys_config_test.go index 335607c3b0e1..98bcbe448539 100644 --- a/builtin/logical/transit/path_keys_config_test.go +++ b/builtin/logical/transit/path_keys_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_keys_test.go b/builtin/logical/transit/path_keys_test.go index 4b3303988eed..6e9bf4f37402 100644 --- a/builtin/logical/transit/path_keys_test.go +++ b/builtin/logical/transit/path_keys_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit_test @@ -10,11 +10,11 @@ import ( "testing" "time" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/builtin/audit/file" "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/constants" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" @@ -26,7 +26,7 @@ func TestTransit_Issue_2958(t *testing.T) { "transit": transit.Factory, }, AuditBackends: map[string]audit.Factory{ - "file": file.Factory, + "file": audit.NewFileBackend, }, } @@ -196,3 +196,117 @@ func TestTransit_CreateKeyWithAutorotation(t *testing.T) { }) } } + +// TestTransit_CreateKey validates transit key creation functionality +func TestTransit_CreateKey(t *testing.T) { + testCases := map[string]struct { + creationParams map[string]interface{} + shouldError bool + entOnly bool + }{ + "AES-128": { + creationParams: map[string]interface{}{"type": "aes128-gcm96"}, + }, + "AES-256": { + creationParams: map[string]interface{}{"type": "aes256-gcm96"}, + }, + "CHACHA20": { + creationParams: map[string]interface{}{"type": "chacha20-poly1305"}, + }, + "ECDSA-P256": { + creationParams: map[string]interface{}{"type": "ecdsa-p256"}, + }, + "ECDSA-P384": { + creationParams: map[string]interface{}{"type": "ecdsa-p384"}, + }, + "ECDSA-P521": { + creationParams: map[string]interface{}{"type": "ecdsa-p521"}, + }, + "RSA_2048": { + creationParams: map[string]interface{}{"type": "rsa-2048"}, + }, + "RSA_3072": { + creationParams: map[string]interface{}{"type": "rsa-3072"}, + }, + "RSA_4096": { + creationParams: map[string]interface{}{"type": "rsa-4096"}, + }, + "HMAC": { + creationParams: map[string]interface{}{"type": "hmac", "key_size": 128}, + }, + "AES-128 CMAC": { + creationParams: map[string]interface{}{"type": "aes128-cmac"}, + entOnly: true, + }, + "AES-256 CMAC": { + creationParams: map[string]interface{}{"type": "aes256-cmac"}, + entOnly: true, + }, + "bad key type": { + creationParams: map[string]interface{}{"type": "fake-key-type"}, + shouldError: true, + }, + } + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + if err != nil { + t.Fatal(err) + } + + for name, tt := range testCases { + t.Run(name, func(t *testing.T) { + keyName, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("error generating key name: %s", err) + } + + resp, err := client.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), tt.creationParams) + if err != nil { + if !constants.IsEnterprise && tt.entOnly { + // key type is only available on ent and we aren't on ENT + return + } + + if !tt.shouldError { + t.Fatalf("unexpected error creating key: %s", err) + } + } + + if err == nil { + if !constants.IsEnterprise && tt.entOnly { + t.Fatal("key type should be enterprise only but did not fail creation on CE") + } + + if tt.shouldError { + t.Fatal("expected error but got nil") + } + } + + if err == nil { + keyType, ok := resp.Data["type"] + if !ok { + 
t.Fatal("missing key type in response") + } + + if keyType != tt.creationParams["type"] { + t.Fatalf("incorrect key type: expected %s, got %s", tt.creationParams["type"], keyType) + } + } + }) + } +} diff --git a/builtin/logical/transit/path_random.go b/builtin/logical/transit/path_random.go index c4dde5f9a38f..6e057a975fd8 100644 --- a/builtin/logical/transit/path_random.go +++ b/builtin/logical/transit/path_random.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -14,6 +14,13 @@ import ( func (b *backend) pathRandom() *framework.Path { return &framework.Path{ Pattern: "random(/" + framework.GenericNameRegex("source") + ")?" + framework.OptionalParamRegex("urlbytes"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "random|random-with-source|random-with-bytes|random-with-source-and-bytes", + }, + Fields: map[string]*framework.FieldSchema{ "urlbytes": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go index 35782ec3eadc..a58820b987b3 100644 --- a/builtin/logical/transit/path_random_test.go +++ b/builtin/logical/transit/path_random_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_restore.go b/builtin/logical/transit/path_restore.go index bbaa7ce1c8ba..1b3b34599909 100644 --- a/builtin/logical/transit/path_restore.go +++ b/builtin/logical/transit/path_restore.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -15,6 +15,13 @@ import ( func (b *backend) pathRestore() *framework.Path { return &framework.Path{ Pattern: "restore" + framework.OptionalParamRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "restore", + OperationSuffix: "key|and-rename-key", + }, + Fields: map[string]*framework.FieldSchema{ "backup": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_restore_test.go b/builtin/logical/transit/path_restore_test.go index 3dcc552d98b8..1cd0dcd61eb4 100644 --- a/builtin/logical/transit/path_restore_test.go +++ b/builtin/logical/transit/path_restore_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_rewrap.go b/builtin/logical/transit/path_rewrap.go index dedf0a9d345d..49b69c7255e1 100644 --- a/builtin/logical/transit/path_rewrap.go +++ b/builtin/logical/transit/path_rewrap.go @@ -1,11 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit import ( "context" "encoding/base64" + "errors" "fmt" "github.com/hashicorp/vault/helper/constants" @@ -16,9 +17,17 @@ import ( "github.com/mitchellh/mapstructure" ) +var ErrNonceNotAllowed = errors.New("provided nonce not allowed for this key") + func (b *backend) pathRewrap() *framework.Path { return &framework.Path{ Pattern: "rewrap/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "rewrap", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -139,6 +148,7 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() warnAboutNonceUsage := false for i, item := range batchInputItems { @@ -146,6 +156,11 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * continue } + if item.Nonce != "" && !nonceAllowed(p) { + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() + continue + } + plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext) if err != nil { switch err.(type) { @@ -153,7 +168,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * batchResponseItems[i].Error = err.Error() continue default: - p.Unlock() return nil, err } } @@ -169,16 +183,13 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * batchResponseItems[i].Error = err.Error() continue case errutil.InternalError: - p.Unlock() return nil, err default: - p.Unlock() return nil, err } } if ciphertext == "" { - p.Unlock() return nil, fmt.Errorf("empty ciphertext returned for input item %d", i) } @@ -202,7 +213,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * } } else { if batchResponseItems[0].Error != "" { - p.Unlock() return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest } resp.Data = map[string]interface{}{ @@ -215,7 +225,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.") } - p.Unlock() return resp, nil } diff --git a/builtin/logical/transit/path_rewrap_test.go b/builtin/logical/transit/path_rewrap_test.go index 097626c1c28a..55f28874656e 100644 --- a/builtin/logical/transit/path_rewrap_test.go +++ b/builtin/logical/transit/path_rewrap_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_rotate.go b/builtin/logical/transit/path_rotate.go index a693a5abb326..c024aede4bc0 100644 --- a/builtin/logical/transit/path_rotate.go +++ b/builtin/logical/transit/path_rotate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -14,6 +14,13 @@ import ( func (b *backend) pathRotate() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "rotate", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -57,6 +64,7 @@ func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d * if !b.System().CachingDisabled() { p.Lock(true) } + defer p.Unlock() if p.Type == keysutil.KeyType_MANAGED_KEY { var keyId string @@ -71,8 +79,11 @@ func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d * err = p.Rotate(ctx, req.Storage, b.GetRandomReader()) } - p.Unlock() - return nil, err + if err != nil { + return nil, err + } + + return b.formatKeyPolicy(p, nil) } const pathRotateHelpSyn = `Rotate named encryption key` diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go index 3623007e11ef..2043c8724e99 100644 --- a/builtin/logical/transit/path_sign_verify.go +++ b/builtin/logical/transit/path_sign_verify.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -12,6 +12,7 @@ import ( "strconv" "strings" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/keysutil" @@ -29,12 +30,12 @@ type batchResponseSignItem struct { // request item Signature string `json:"signature,omitempty" mapstructure:"signature"` - // The key version to be used for encryption + // The key version to be used for signing KeyVersion int `json:"key_version" mapstructure:"key_version"` PublicKey []byte `json:"publickey,omitempty" mapstructure:"publickey"` - // Error, if set represents a failure encountered while encrypting a + // Error, if set represents a failure encountered while signing a // corresponding batch request item Error string `json:"error,omitempty" mapstructure:"error"` @@ -51,14 +52,14 @@ type batchResponseSignItem struct { // BatchRequestVerifyItem represents a request item for batch processing. // A map type allows us to distinguish between empty and missing values. 
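+// (With the switch to interface{} values below, non-string JSON scalars in
+// batch_input survive decoding; pathVerifyWrite coerces them back to strings
+// with parseutil.ParseString.)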
-type batchRequestVerifyItem map[string]string +type batchRequestVerifyItem map[string]interface{} // BatchResponseVerifyItem represents a response item for batch processing type batchResponseVerifyItem struct { // Valid indicates whether signature matches the signature derived from the input string Valid bool `json:"valid" mapstructure:"valid"` - // Error, if set represents a failure encountered while encrypting a + // Error, if set represents a failure encountered while verifying a // corresponding batch request item Error string `json:"error,omitempty" mapstructure:"error"` @@ -78,6 +79,13 @@ const defaultHashAlgorithm = "sha2-256" func (b *backend) pathSign() *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "sign", + OperationSuffix: "|with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -180,6 +188,13 @@ preserve the order of the batch input`, func (b *backend) pathVerify() *framework.Path { return &framework.Path{ Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "verify", + OperationSuffix: "|with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -202,6 +217,11 @@ derivation is enabled; currently only available with ed25519 keys.`, Description: "The HMAC, including vault header/key version", }, + "cmac": { + Type: framework.TypeString, + Description: "The CMAC, including vault header/key version", + }, + "input": { Type: framework.TypeString, Description: "The base64-encoded input data to verify", @@ -212,6 +232,11 @@ derivation is enabled; currently only available with ed25519 keys.`, Description: `Hash algorithm to use (POST URL parameter)`, }, + "mac_length": { + Type: framework.TypeInt, + Description: `MAC length to use (POST body parameter). Valid values are:`, + }, + "hash_algorithm": { Type: framework.TypeString, Default: defaultHashAlgorithm, @@ -245,7 +270,7 @@ none on signing path.`, "signature_algorithm": { Type: framework.TypeString, - Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. + Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, }, @@ -265,7 +290,7 @@ Options are 'auto' (the default used by Golang, causing the salt to be as large "batch_input": { Type: framework.TypeSlice, Description: `Specifies a list of items for processing. When this parameter is set, -any supplied 'input', 'hmac' or 'signature' parameters will be ignored. Responses are returned in the +any supplied 'input', 'hmac', 'cmac' or 'signature' parameters will be ignored. Responses are returned in the 'batch_results' array component of the 'data' element of the response. 
Any batch output will preserve the order of the batch input`, }, @@ -339,10 +364,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") { - return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest - } - // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ Storage: req.Storage, @@ -352,28 +373,33 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr return nil, err } if p == nil { - return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("signing key not found"), logical.ErrInvalidRequest } if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() if !p.Type.SigningSupported() { - p.Unlock() return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest } + // Allow managed keys to specify no hash algo without additional conditions. + if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY { + if !prehashed || sigAlgorithm != "pkcs1v15" { + return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest + } + } + batchInputRaw := d.Raw["batch_input"] var batchInputItems []batchRequestSignItem if batchInputRaw != nil { err = mapstructure.Decode(batchInputRaw, &batchInputItems) if err != nil { - p.Unlock() return nil, fmt.Errorf("failed to parse batch input: %w", err) } if len(batchInputItems) == 0 { - p.Unlock() return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest } } else { @@ -386,7 +412,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } response := make([]batchResponseSignItem, len(batchInputItems)) - for i, item := range batchInputItems { rawInput, ok := item["input"] @@ -405,8 +430,10 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr if p.Type.HashSignatureInput() && !prehashed { hf := keysutil.HashFuncMap[hashAlgorithm]() - hf.Write(input) - input = hf.Sum(nil) + if hf != nil { + hf.Write(input) + input = hf.Sum(nil) + } } contextRaw := item["context"] @@ -472,7 +499,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } else { if response[0].Error != "" || response[0].err != nil { - p.Unlock() if response[0].Error != "" { return logical.ErrorResponse(response[0].Error), response[0].err } @@ -490,7 +516,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } - p.Unlock() return resp, nil } @@ -520,6 +545,9 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * if hmac, ok := d.GetOk("hmac"); ok { batchInputItems[0]["hmac"] = hmac.(string) } + if cmac, ok := d.GetOk("cmac"); ok { + batchInputItems[0]["cmac"] = cmac.(string) + } batchInputItems[0]["context"] = d.Get("context").(string) } @@ -528,26 +556,30 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * // If one batch_input item is 'hmac', they all must be 'hmac'. 
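+	// (The added 'cmac' handling below follows the same all-or-nothing rule.)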
sigFound := false hmacFound := false + cmacFound := false missing := false for _, v := range batchInputItems { if _, ok := v["signature"]; ok { sigFound = true } else if _, ok := v["hmac"]; ok { hmacFound = true + } else if _, ok := v["cmac"]; ok { + cmacFound = true } else { missing = true } } + optionsSet := numBooleansTrue(sigFound, hmacFound, cmacFound) switch { - case batchInputRaw == nil && sigFound && hmacFound: - return logical.ErrorResponse("provide one of 'signature' or 'hmac'"), logical.ErrInvalidRequest + case batchInputRaw == nil && optionsSet > 1: + return logical.ErrorResponse("provide one of 'signature', 'hmac' or 'cmac'"), logical.ErrInvalidRequest - case batchInputRaw == nil && !sigFound && !hmacFound: - return logical.ErrorResponse("neither a 'signature' nor an 'hmac' were given to verify"), logical.ErrInvalidRequest + case batchInputRaw == nil && optionsSet == 0: + return logical.ErrorResponse("none of 'signature', 'hmac' or 'cmac' was given to verify"), logical.ErrInvalidRequest - case sigFound && hmacFound: - return logical.ErrorResponse("elements of batch_input must all provide 'signature' or all provide 'hmac'"), logical.ErrInvalidRequest + case optionsSet > 1: + return logical.ErrorResponse("elements of batch_input must all provide either 'signature', 'hmac' or 'cmac'"), logical.ErrInvalidRequest case missing && sigFound: return logical.ErrorResponse("some elements of batch_input are missing 'signature'"), logical.ErrInvalidRequest @@ -555,11 +587,17 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * case missing && hmacFound: return logical.ErrorResponse("some elements of batch_input are missing 'hmac'"), logical.ErrInvalidRequest - case missing: - return logical.ErrorResponse("no batch_input elements have 'signature' or 'hmac'"), logical.ErrInvalidRequest + case missing && cmacFound: + return logical.ErrorResponse("some elements of batch_input are missing 'cmac'"), logical.ErrInvalidRequest + + case optionsSet == 0: + return logical.ErrorResponse("no batch_input elements have 'signature', 'hmac' or 'cmac'"), logical.ErrInvalidRequest case hmacFound: return b.pathHMACVerify(ctx, req, d) + + case cmacFound: + return b.pathCMACVerify(ctx, req, d) } name := d.Get("name").(string) @@ -592,10 +630,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") { - return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest - } - // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ Storage: req.Storage, @@ -605,49 +639,74 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return nil, err } if p == nil { - return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("signature verification key not found"), logical.ErrInvalidRequest } if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() if !p.Type.SigningSupported() { - p.Unlock() return logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest }
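The dispatch above requires exactly one of `signature`, `hmac` or `cmac` (counted with the new `numBooleansTrue` helper) before routing to the matching verifier. A hedged sketch of the signature branch from the client side, reusing the illustrative mount and key from the previous example:

```go
// verifySignature sketches a call to the verify path touched above. Exactly
// one of "signature", "hmac" or "cmac" may be supplied per request; the
// mount path and key name are illustrative assumptions.
func verifySignature(client *vault.Client, input, signature string) (bool, error) {
	secret, err := client.Logical().Write("transit/verify/my-key", map[string]interface{}{
		"input":     input,     // base64-encoded data
		"signature": signature, // e.g. "vault:v1:..." from a prior sign response
	})
	if err != nil {
		return false, err
	}
	valid, _ := secret.Data["valid"].(bool)
	return valid, nil
}
```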
+ // Allow managed keys to specify no hash algo without additional conditions. + if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY { + if !prehashed || sigAlgorithm != "pkcs1v15" { + return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest + } + } + response := make([]batchResponseVerifyItem, len(batchInputItems)) for i, item := range batchInputItems { - rawInput, ok := item["input"] if !ok { response[i].Error = "missing input" response[i].err = logical.ErrInvalidRequest continue } + strInput, err := parseutil.ParseString(rawInput) + if err != nil { + response[i].Error = fmt.Sprintf("unable to parse input as string: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } - input, err := base64.StdEncoding.DecodeString(rawInput) + input, err := base64.StdEncoding.DecodeString(strInput) if err != nil { response[i].Error = fmt.Sprintf("unable to decode input as base64: %s", err) response[i].err = logical.ErrInvalidRequest continue } - sig, ok := item["signature"] + sigRaw, ok := item["signature"].(string) if !ok { response[i].Error = "missing signature" response[i].err = logical.ErrInvalidRequest continue } + sig, err := parseutil.ParseString(sigRaw) + if err != nil { + response[i].Error = fmt.Sprintf("failed to parse signature as a string: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } if p.Type.HashSignatureInput() && !prehashed { hf := keysutil.HashFuncMap[hashAlgorithm]() - hf.Write(input) - input = hf.Sum(nil) + if hf != nil { + hf.Write(input) + input = hf.Sum(nil) + } } - contextRaw := item["context"] + contextRaw, err := parseutil.ParseString(item["context"]) + if err != nil { + response[i].Error = fmt.Sprintf("failed to parse context as a string: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } var context []byte if len(contextRaw) != 0 { context, err = base64.StdEncoding.DecodeString(contextRaw) @@ -701,14 +760,15 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * if batchInputRaw != nil { // Copy the references for i := range batchInputItems { - response[i].Reference = batchInputItems[i]["reference"] + if ref, err := parseutil.ParseString(batchInputItems[i]["reference"]); err == nil { + response[i].Reference = ref + } } resp.Data = map[string]interface{}{ "batch_results": response, } } else { if response[0].Error != "" || response[0].err != nil { - p.Unlock() if response[0].Error != "" { return logical.ErrorResponse(response[0].Error), response[0].err } @@ -719,10 +779,56 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return resp, nil } +func numBooleansTrue(bools ...bool) int { + numSet := 0 + for _, value := range bools { + if value { + numSet++ + } + } + return numSet +} + +func decodeTransitSignature(sig string) ([]byte, int, error) { + if !strings.HasPrefix(sig, "vault:v") { + return nil, 0, fmt.Errorf("prefix is not vault:v") + } + + splitVerification := strings.SplitN(strings.TrimPrefix(sig, "vault:v"), ":", 2) + if len(splitVerification) != 2 { + return nil, 0, fmt.Errorf("wrong number of fields delimited by ':', got %d, expected 2", len(splitVerification)) + } + + ver, err := strconv.Atoi(splitVerification[0]) + if err != nil { + return nil, 0, fmt.Errorf("key version number %s could not be decoded", splitVerification[0]) + } + + if ver < 1 { + return nil, 0, fmt.Errorf("key versions less than 1 are invalid, got: %d", ver) + } + + if len(strings.TrimSpace(splitVerification[1])) == 0 { +
return nil, 0, fmt.Errorf("missing base64 verification string from vault signature") + } + + verBytes, err := base64.StdEncoding.DecodeString(splitVerification[1]) + if err != nil { + return nil, 0, fmt.Errorf("unable to decode verification string as base64: %s", err) + } + + return verBytes, ver, nil +} + +func encodeTransitSignature(value []byte, keyVersion int) string { + retStr := base64.StdEncoding.EncodeToString(value) + retStr = fmt.Sprintf("vault:v%d:%s", keyVersion, retStr) + return retStr +} + const pathSignHelpSyn = `Generate a signature for input data using the named key` const pathSignHelpDesc = ` diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go index 63aef9c80b6f..1421f71c34dd 100644 --- a/builtin/logical/transit/path_sign_verify_test.go +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -12,12 +12,10 @@ import ( "testing" "github.com/hashicorp/vault/helper/constants" - - "golang.org/x/crypto/ed25519" - "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" + "golang.org/x/crypto/ed25519" ) // The outcome of processing a request includes diff --git a/builtin/logical/transit/path_trim.go b/builtin/logical/transit/path_trim.go index 6c464e7d0080..3f0a4df0ad24 100644 --- a/builtin/logical/transit/path_trim.go +++ b/builtin/logical/transit/path_trim.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -14,6 +14,13 @@ import ( func (b *backend) pathTrim() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/trim", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "trim", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -93,7 +100,7 @@ func (b *backend) pathTrimUpdate() framework.OperationFunc { return nil, err } - return nil, nil + return b.formatKeyPolicy(p, nil) } } diff --git a/builtin/logical/transit/path_trim_test.go b/builtin/logical/transit/path_trim_test.go index b63d644cba8c..448d0fba34b0 100644 --- a/builtin/logical/transit/path_trim_test.go +++ b/builtin/logical/transit/path_trim_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/path_wrapping_key.go b/builtin/logical/transit/path_wrapping_key.go index 6ccf90f2c191..42ccb888a245 100644 --- a/builtin/logical/transit/path_wrapping_key.go +++ b/builtin/logical/transit/path_wrapping_key.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
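The `encodeTransitSignature`/`decodeTransitSignature` helpers added earlier in this file wrap raw bytes in the `vault:v<version>:<base64>` envelope. A standalone round-trip sketch of that format (a local re-implementation for illustration only; the real helpers live in the transit package):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}

	// Encode: the same shape encodeTransitSignature produces.
	sig := fmt.Sprintf("vault:v%d:%s", 2, base64.StdEncoding.EncodeToString(raw))
	fmt.Println(sig) // vault:v2:3q2+7w==

	// Decode: mirrors decodeTransitSignature's parsing.
	parts := strings.SplitN(strings.TrimPrefix(sig, "vault:v"), ":", 2)
	ver, _ := strconv.Atoi(parts[0])
	verBytes, _ := base64.StdEncoding.DecodeString(parts[1])
	fmt.Println(ver, verBytes) // 2 [222 173 190 239]
}
```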
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit @@ -20,6 +20,10 @@ const WrappingKeyName = "wrapping-key" func (b *backend) pathWrappingKey() *framework.Path { return &framework.Path{ Pattern: "wrapping_key", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "wrapping-key", + }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathWrappingKeyRead, }, diff --git a/builtin/logical/transit/path_wrapping_key_test.go b/builtin/logical/transit/path_wrapping_key_test.go index 468c3f4de7ee..9ed58e45c284 100644 --- a/builtin/logical/transit/path_wrapping_key_test.go +++ b/builtin/logical/transit/path_wrapping_key_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/logical/transit/stepwise_test.go b/builtin/logical/transit/stepwise_test.go index 2b40cea21322..77cf093b991a 100644 --- a/builtin/logical/transit/stepwise_test.go +++ b/builtin/logical/transit/stepwise_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package transit diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go index 0ca6a627fc09..4ab5c593df68 100644 --- a/builtin/plugin/backend.go +++ b/builtin/plugin/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package plugin @@ -86,23 +86,15 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, runningVersion = versioner.PluginVersion().Version } - external := false - if externaler, ok := raw.(logical.Externaler); ok { - external = externaler.IsExternal() - } - // Cleanup meta plugin backend raw.Cleanup(ctx) // Initialize b.Backend with placeholder backend since plugin // backends will need to be lazy loaded. - b.Backend = &placeholderBackend{ - Backend: framework.Backend{ - PathsSpecial: paths, - BackendType: btype, - RunningVersion: runningVersion, - }, - external: external, + b.Backend = &framework.Backend{ + PathsSpecial: paths, + BackendType: btype, + RunningVersion: runningVersion, } b.config = conf @@ -110,23 +102,6 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, return &b, nil } -// placeholderBackend is used a placeholder before a backend is lazy-loaded. -// It is mostly used to mark that the backend is an external backend. 
-type placeholderBackend struct { - framework.Backend - - external bool -} - -func (p *placeholderBackend) IsExternal() bool { - return p.external -} - -var ( - _ logical.Externaler = (*placeholderBackend)(nil) - _ logical.PluginVersioner = (*placeholderBackend)(nil) -) - // PluginBackend is a thin wrapper around plugin.BackendPluginClient type PluginBackend struct { Backend logical.Backend @@ -326,14 +301,4 @@ func (b *PluginBackend) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -func (b *PluginBackend) IsExternal() bool { - if externaler, ok := b.Backend.(logical.Externaler); ok { - return externaler.IsExternal() - } - return false -} - -var ( - _ logical.PluginVersioner = (*PluginBackend)(nil) - _ logical.Externaler = (*PluginBackend)(nil) -) +var _ logical.PluginVersioner = (*PluginBackend)(nil) diff --git a/builtin/plugin/backend_lazyLoad_test.go b/builtin/plugin/backend_lazyLoad_test.go index b2f6303ba6ac..3e52cc14ac4d 100644 --- a/builtin/plugin/backend_lazyLoad_test.go +++ b/builtin/plugin/backend_lazyLoad_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package plugin @@ -8,12 +8,10 @@ import ( "errors" "testing" - "github.com/hashicorp/vault/sdk/helper/logging" - - "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin" ) diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go index 28dd1e348331..713444061286 100644 --- a/builtin/plugin/backend_test.go +++ b/builtin/plugin/backend_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package plugin_test @@ -12,6 +12,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -115,7 +116,11 @@ func TestBackend_PluginMain_Multiplexed(t *testing.T) { } func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + t.Helper() + pluginDir := corehelpers.MakeTestPluginDir(t) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + PluginDirectory: pluginDir, + }, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) cluster.Start() @@ -135,9 +140,8 @@ func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) }, } - os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - - vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, []string{}, "") + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, + []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}) return config, func() { cluster.Cleanup() diff --git a/builtin/plugin/mock_plugin_test.go b/builtin/plugin/mock_plugin_test.go index 9279c828f1da..6c189a846a89 100644 --- a/builtin/plugin/mock_plugin_test.go +++ b/builtin/plugin/mock_plugin_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package plugin diff --git a/builtin/plugin/v5/backend.go b/builtin/plugin/v5/backend.go index eac311b4ad60..38433dff71bb 100644 --- a/builtin/plugin/v5/backend.go +++ b/builtin/plugin/v5/backend.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package plugin @@ -80,7 +80,6 @@ func (b *backend) reloadBackend(ctx context.Context, storage logical.Storage) er err = b.Backend.Initialize(ctx, &logical.InitializationRequest{ Storage: storage, }) - if err != nil { return err } diff --git a/changelog/10624.txt b/changelog/10624.txt new file mode 100644 index 000000000000..fdb473b80d09 --- /dev/null +++ b/changelog/10624.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/azure: Updated metadata endpoint to `GetMSIEndpoint`, which supports more than just the metadata service. +``` diff --git a/changelog/10961.txt b/changelog/10961.txt new file mode 100644 index 000000000000..5387a53d38ae --- /dev/null +++ b/changelog/10961.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: Added support for signed GET requests for authenticating to Vault using the AWS IAM method. +``` diff --git a/changelog/11084.txt b/changelog/11084.txt new file mode 100644 index 000000000000..444c93da9955 --- /dev/null +++ b/changelog/11084.txt @@ -0,0 +1,3 @@ +```release-note:improvement +serviceregistration: Added support for Consul ServiceMeta tags from the config file via the new `service_meta` config field. +``` diff --git a/changelog/12666.txt b/changelog/12666.txt new file mode 100644 index 000000000000..a6e008773a36 --- /dev/null +++ b/changelog/12666.txt @@ -0,0 +1,4 @@ + +```release-note:improvement +storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable +``` diff --git a/changelog/12684.txt b/changelog/12684.txt new file mode 100644 index 000000000000..7abf3d43d29a --- /dev/null +++ b/changelog/12684.txt @@ -0,0 +1,3 @@ +```release-note:bug +Doc: Expanded the description of "What is Vault?" to align with CSA team's content. +``` diff --git a/changelog/14723.txt b/changelog/14723.txt new file mode 100644 index 000000000000..04953430d866 --- /dev/null +++ b/changelog/14723.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core/identity: allow identity backend to be tuned using standard secrets backend tuning parameters. +``` + diff --git a/changelog/14998.txt b/changelog/14998.txt new file mode 100644 index 000000000000..64615f2e1742 --- /dev/null +++ b/changelog/14998.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update mount backend form to use selectable cards +``` diff --git a/changelog/16484.txt b/changelog/16484.txt new file mode 100644 index 000000000000..055f214dabdc --- /dev/null +++ b/changelog/16484.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/appid, auth/cert, auth/github, auth/ldap, auth/okta, auth/radius, auth/userpass: fixed an issue with policy name normalization that would prevent a token associated with a policy containing an uppercase character from being renewed.
+``` diff --git a/changelog/16974.txt b/changelog/16974.txt new file mode 100644 index 000000000000..202670ea4364 --- /dev/null +++ b/changelog/16974.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add a `--dev-no-kv` flag to prevent auto mounting a key-value secret backend when running a dev server +``` diff --git a/changelog/17007.txt b/changelog/17007.txt new file mode 100644 index 000000000000..6c2a2801b24c --- /dev/null +++ b/changelog/17007.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: fixes an edge case bug where "identity_policies" is nil and causes a CLI vault login error +``` diff --git a/changelog/17076.txt b/changelog/17076.txt new file mode 100644 index 000000000000..93e7c1eacbe3 --- /dev/null +++ b/changelog/17076.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core/cli: Warn when VAULT_ADDR and -address are not set with CLI requests. +``` + diff --git a/changelog/17272.txt b/changelog/17272.txt new file mode 100644 index 000000000000..9d580d80e311 --- /dev/null +++ b/changelog/17272.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Adds support for TLS certificate authentication through a reverse proxy that terminates the SSL connection +``` diff --git a/changelog/17598.txt b/changelog/17598.txt new file mode 100644 index 000000000000..8171255f8e95 --- /dev/null +++ b/changelog/17598.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/config: reload service registration configuration on SIGHUP +``` diff --git a/changelog/17934.txt b/changelog/17934.txt new file mode 100644 index 000000000000..7f087a915a28 --- /dev/null +++ b/changelog/17934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add support to import public keys into the transit engine and allow encryption and verification of signed data +``` diff --git a/changelog/18492.txt b/changelog/18492.txt new file mode 100644 index 000000000000..6b0b3b50771a --- /dev/null +++ b/changelog/18492.txt @@ -0,0 +1,3 @@ +```release-note:improvement +framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI +``` diff --git a/changelog/18513.txt b/changelog/18513.txt new file mode 100644 index 000000000000..6b3ca2fe486f --- /dev/null +++ b/changelog/18513.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: the latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app.
+``` \ No newline at end of file diff --git a/changelog/18556.txt b/changelog/18556.txt new file mode 100644 index 000000000000..a48dacde5ba2 --- /dev/null +++ b/changelog/18556.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters +``` diff --git a/changelog/18571.txt b/changelog/18571.txt new file mode 100644 index 000000000000..dd811d9fd441 --- /dev/null +++ b/changelog/18571.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token, sys: Fix path-help being unavailable for some list-only endpoints +``` diff --git a/changelog/18624.txt b/changelog/18624.txt new file mode 100644 index 000000000000..91209bb46d9e --- /dev/null +++ b/changelog/18624.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/rotate endpoints +``` diff --git a/changelog/18625.txt b/changelog/18625.txt new file mode 100644 index 000000000000..526d6b63e6f6 --- /dev/null +++ b/changelog/18625.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/seal endpoints +``` \ No newline at end of file diff --git a/changelog/18626.txt b/changelog/18626.txt new file mode 100644 index 000000000000..6bb2ba0f4d89 --- /dev/null +++ b/changelog/18626.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/tool endpoints +``` \ No newline at end of file diff --git a/changelog/18627.txt b/changelog/18627.txt new file mode 100644 index 000000000000..e2a4dfb5f2e6 --- /dev/null +++ b/changelog/18627.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/wrapping endpoints +``` \ No newline at end of file diff --git a/changelog/18628.txt b/changelog/18628.txt new file mode 100644 index 000000000000..0722856c93b9 --- /dev/null +++ b/changelog/18628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req +``` \ No newline at end of file diff --git a/changelog/18809.txt b/changelog/18809.txt new file mode 100644 index 000000000000..a1ec06f5799d --- /dev/null +++ b/changelog/18809.txt @@ -0,0 +1,3 @@ +```release-note:bug +activity (enterprise): Fix misattribution of entities to no or child namespace auth methods +``` diff --git a/changelog/18870.txt b/changelog/18870.txt new file mode 100644 index 000000000000..1b694895fec6 --- /dev/null +++ b/changelog/18870.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: provide a more descriptive error message when calling enterprise feature paths in open-source +``` \ No newline at end of file diff --git a/changelog/19002.txt b/changelog/19002.txt new file mode 100644 index 000000000000..d1a1ff5371ab --- /dev/null +++ b/changelog/19002.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs.
+``` \ No newline at end of file diff --git a/changelog/19032.txt b/changelog/19032.txt new file mode 100644 index 000000000000..a474c22ce6b7 --- /dev/null +++ b/changelog/19032.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Add max_page_size configurable to LDAP configuration +``` diff --git a/changelog/19064.txt b/changelog/19064.txt new file mode 100644 index 000000000000..cf31525e25ee --- /dev/null +++ b/changelog/19064.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/cli: Improved example 'help' pages for vault read / write docs. +``` diff --git a/changelog/19247.txt b/changelog/19247.txt new file mode 100644 index 000000000000..f51e8479c97f --- /dev/null +++ b/changelog/19247.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. +``` diff --git a/changelog/19296.txt b/changelog/19296.txt new file mode 100644 index 000000000000..1ef62a0cde2e --- /dev/null +++ b/changelog/19296.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. +``` \ No newline at end of file diff --git a/changelog/19319.txt b/changelog/19319.txt new file mode 100644 index 000000000000..4702344afb08 --- /dev/null +++ b/changelog/19319.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Improve operationId/request/response naming strategy +``` diff --git a/changelog/19365.txt b/changelog/19365.txt new file mode 100644 index 000000000000..774c750f4951 --- /dev/null +++ b/changelog/19365.txt @@ -0,0 +1,7 @@ +```release-note:improvement +auth/aws: Support request cancellation with AWS requests +``` + +```release-note:improvement +secrets/aws: Support request cancellation with AWS requests +``` diff --git a/changelog/19460.txt b/changelog/19460.txt new file mode 100644 index 000000000000..6334c7fdc5d2 --- /dev/null +++ b/changelog/19460.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: use URLSearchParams interface to capture namespace param from SSOs (ex.
ADFS) with decoded state param in callback url +``` diff --git a/changelog/19472.txt b/changelog/19472.txt new file mode 100644 index 000000000000..db9ec7276550 --- /dev/null +++ b/changelog/19472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +autopilot: Update version to v0.2.0 to add better support for respecting min quorum +``` diff --git a/changelog/19495.txt b/changelog/19495.txt new file mode 100644 index 000000000000..dac2ca00dfb8 --- /dev/null +++ b/changelog/19495.txt @@ -0,0 +1,3 @@ +```release-note:bug +shamir: change mul and div implementations to be constant-time +``` \ No newline at end of file diff --git a/changelog/19520.txt b/changelog/19520.txt new file mode 100644 index 000000000000..726be2c13a60 --- /dev/null +++ b/changelog/19520.txt @@ -0,0 +1,3 @@ +```release-note:improvement +http: Support responding to HEAD operation from plugins +``` diff --git a/changelog/19541.txt b/changelog/19541.txt new file mode 100644 index 000000000000..9bdecc35832d --- /dev/null +++ b/changelog/19541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted +``` diff --git a/changelog/19616.txt b/changelog/19616.txt new file mode 100644 index 000000000000..3afcc608d19a --- /dev/null +++ b/changelog/19616.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords on Vault before sending them to PostgreSQL +``` \ No newline at end of file diff --git a/changelog/19625.txt b/changelog/19625.txt deleted file mode 100644 index b0cb558e3cd1..000000000000 --- a/changelog/19625.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:feature -core (enterprise): Add background worker for automatic reporting of billing -information. -``` diff --git a/changelog/19676.txt b/changelog/19676.txt new file mode 100644 index 000000000000..090dc801b2df --- /dev/null +++ b/changelog/19676.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. +``` diff --git a/changelog/19703.txt b/changelog/19703.txt new file mode 100644 index 000000000000..6bf8e5c18989 --- /dev/null +++ b/changelog/19703.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue navigating back a level using the breadcrumb from secret metadata view +``` \ No newline at end of file diff --git a/changelog/19721.txt b/changelog/19721.txt new file mode 100644 index 000000000000..9818a0facfe2 --- /dev/null +++ b/changelog/19721.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. +``` \ No newline at end of file diff --git a/changelog/19776.txt b/changelog/19776.txt new file mode 100644 index 000000000000..786cfd321673 --- /dev/null +++ b/changelog/19776.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. 
+``` diff --git a/changelog/19791.txt b/changelog/19791.txt new file mode 100644 index 000000000000..26722cde3133 --- /dev/null +++ b/changelog/19791.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add allowed_managed_keys field to secret engine mount options +``` diff --git a/changelog/19798.txt b/changelog/19798.txt new file mode 100644 index 000000000000..4bae8b637897 --- /dev/null +++ b/changelog/19798.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/terraform: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19799.txt b/changelog/19799.txt new file mode 100644 index 000000000000..aee76ca689aa --- /dev/null +++ b/changelog/19799.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bad link to namespace when namespace name includes `.` +``` \ No newline at end of file diff --git a/changelog/19811.txt b/changelog/19811.txt new file mode 100644 index 000000000000..49af10ccebcf --- /dev/null +++ b/changelog/19811.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/kv: Undelete now properly handles KV-V2 mount paths that are more than one layer deep. +``` diff --git a/changelog/19814.txt b/changelog/19814.txt new file mode 100644 index 000000000000..687527efca8a --- /dev/null +++ b/changelog/19814.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging +``` \ No newline at end of file diff --git a/changelog/19829.txt b/changelog/19829.txt new file mode 100644 index 000000000000..e8472b2717ed --- /dev/null +++ b/changelog/19829.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ad: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19846.txt b/changelog/19846.txt new file mode 100644 index 000000000000..269b11797b9e --- /dev/null +++ b/changelog/19846.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/alicloud: upgrades dependencies +``` diff --git a/changelog/19861.txt b/changelog/19861.txt new file mode 100644 index 000000000000..ee5bc703e9cb --- /dev/null +++ b/changelog/19861.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/mongodbatlas: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19862.txt b/changelog/19862.txt new file mode 100644 index 000000000000..c1ce6d8bb710 --- /dev/null +++ b/changelog/19862.txt @@ -0,0 +1,3 @@ +```release-note:improvement +build: Prefer GOBIN when set over GOPATH/bin when building the binary +``` diff --git a/changelog/19875.txt b/changelog/19875.txt new file mode 100644 index 000000000000..1167e39b3ee7 --- /dev/null +++ b/changelog/19875.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/random: Fix race condition in string generator helper +``` diff --git a/changelog/19878.txt b/changelog/19878.txt new file mode 100644 index 000000000000..4135434b7923 --- /dev/null +++ b/changelog/19878.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Remove the Bulma CSS framework. 
+``` \ No newline at end of file diff --git a/changelog/19891.txt b/changelog/19891.txt new file mode 100644 index 000000000000..b030151e858b --- /dev/null +++ b/changelog/19891.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core (enterprise): add configuration for license reporting +``` \ No newline at end of file diff --git a/changelog/19901.txt b/changelog/19901.txt new file mode 100644 index 000000000000..8e0bbbddb5ec --- /dev/null +++ b/changelog/19901.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updates UI JavaScript dependencies +``` \ No newline at end of file diff --git a/changelog/19913.txt b/changelog/19913.txt new file mode 100644 index 000000000000..eccdec6533ad --- /dev/null +++ b/changelog/19913.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds whitespace warning to secrets engine and auth method path inputs +``` \ No newline at end of file diff --git a/changelog/19954.txt b/changelog/19954.txt new file mode 100644 index 000000000000..e0ff45f87d22 --- /dev/null +++ b/changelog/19954.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/mongodb: upgrade mongo driver to 1.11 +``` diff --git a/changelog/19993.txt b/changelog/19993.txt new file mode 100644 index 000000000000..90650863ab88 --- /dev/null +++ b/changelog/19993.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/openldap: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/20019.txt b/changelog/20019.txt new file mode 100644 index 000000000000..0483d1763fae --- /dev/null +++ b/changelog/20019.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: add an endpoint to write test activity log data, guarded by a build flag +``` \ No newline at end of file diff --git a/changelog/20034.txt b/changelog/20034.txt new file mode 100644 index 000000000000..c1050795bdc4 --- /dev/null +++ b/changelog/20034.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. +``` diff --git a/changelog/20044.txt b/changelog/20044.txt new file mode 100644 index 000000000000..014e61b46743 --- /dev/null +++ b/changelog/20044.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. +``` diff --git a/changelog/20057.txt b/changelog/20057.txt new file mode 100644 index 000000000000..585a07d91b3a --- /dev/null +++ b/changelog/20057.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. +``` diff --git a/changelog/20058.txt b/changelog/20058.txt new file mode 100644 index 000000000000..e43a1f4adf93 --- /dev/null +++ b/changelog/20058.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures.
+``` diff --git a/changelog/20064.txt b/changelog/20064.txt new file mode 100644 index 000000000000..c539119f713d --- /dev/null +++ b/changelog/20064.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes browser console formatting for help command output +``` \ No newline at end of file diff --git a/changelog/20070.txt b/changelog/20070.txt new file mode 100644 index 000000000000..34e6e5540d69 --- /dev/null +++ b/changelog/20070.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes remaining doc links to include /vault in path +``` \ No newline at end of file diff --git a/changelog/20073.txt b/changelog/20073.txt new file mode 100644 index 000000000000..10c21a58ba52 --- /dev/null +++ b/changelog/20073.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: refactor the activity log's generation of precomputed queries +``` \ No newline at end of file diff --git a/changelog/20078.txt b/changelog/20078.txt new file mode 100644 index 000000000000..8749354b315d --- /dev/null +++ b/changelog/20078.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: error when attempting to update retention configuration below the minimum +``` \ No newline at end of file diff --git a/changelog/20086.txt b/changelog/20086.txt new file mode 100644 index 000000000000..9511c97b66e3 --- /dev/null +++ b/changelog/20086.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. +``` diff --git a/changelog/20109.txt b/changelog/20109.txt new file mode 100644 index 000000000000..8c7cb3b32de1 --- /dev/null +++ b/changelog/20109.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/wrapping: Add example of how to unwrap without authentication in Vault +``` diff --git a/changelog/20125.txt b/changelog/20125.txt new file mode 100644 index 000000000000..07dd8201dba8 --- /dev/null +++ b/changelog/20125.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: updates clients configuration edit form state based on census reporting configuration +``` \ No newline at end of file diff --git a/changelog/20144.txt b/changelog/20144.txt new file mode 100644 index 000000000000..ef8b9a01810c --- /dev/null +++ b/changelog/20144.txt @@ -0,0 +1,4 @@ +```release-note:improvement +sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. +``` diff --git a/changelog/20150.txt b/changelog/20150.txt new file mode 100644 index 000000000000..0ea8259f9e66 --- /dev/null +++ b/changelog/20150.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. +``` diff --git a/changelog/20154.txt b/changelog/20154.txt new file mode 100644 index 000000000000..7bda3624fba1 --- /dev/null +++ b/changelog/20154.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Include OCSP parameters in read CA certificate role response. +```
diff --git a/changelog/20163.txt b/changelog/20163.txt new file mode 100644 index 000000000000..0b845fbae0db --- /dev/null +++ b/changelog/20163.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation +``` diff --git a/changelog/20181.txt b/changelog/20181.txt new file mode 100644 index 000000000000..121c869e4aaf --- /dev/null +++ b/changelog/20181.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/helper/ocsp: Work around a bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. +``` diff --git a/changelog/20201.txt b/changelog/20201.txt new file mode 100644 index 000000000000..d50c9bcb9da8 --- /dev/null +++ b/changelog/20201.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. +``` diff --git a/changelog/20216.txt b/changelog/20216.txt new file mode 100644 index 000000000000..59ee78c889e3 --- /dev/null +++ b/changelog/20216.txt @@ -0,0 +1,3 @@ +```release-note:bug +website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. +``` diff --git a/changelog/20220.txt b/changelog/20220.txt new file mode 100644 index 000000000000..1cf72aa81ceb --- /dev/null +++ b/changelog/20220.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it +``` diff --git a/changelog/20224.txt b/changelog/20224.txt new file mode 100644 index 000000000000..7ec5bf612177 --- /dev/null +++ b/changelog/20224.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. +``` diff --git a/changelog/20234.txt b/changelog/20234.txt new file mode 100644 index 000000000000..1f20bdc5a920 --- /dev/null +++ b/changelog/20234.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Return better OCSP validation errors during login to the caller. +``` diff --git a/changelog/20235.txt b/changelog/20235.txt new file mode 100644 index 000000000000..d1b9f8a6e923 --- /dev/null +++ b/changelog/20235.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: remove use of htmlSafe except when first sanitized +``` diff --git a/changelog/20243.txt b/changelog/20243.txt new file mode 100644 index 000000000000..8d5b04420b97 --- /dev/null +++ b/changelog/20243.txt @@ -0,0 +1,4 @@ +```release-note:improvement +cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. +``` diff --git a/changelog/20247.txt b/changelog/20247.txt new file mode 100644 index 000000000000..91f2f0d23fcd --- /dev/null +++ b/changelog/20247.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add new docker-based cluster testing framework to the sdk. +``` diff --git a/changelog/20253.txt b/changelog/20253.txt new file mode 100644 index 000000000000..19edae1bc4f2 --- /dev/null +++ b/changelog/20253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation.
+``` diff --git a/changelog/20257.txt b/changelog/20257.txt new file mode 100644 index 000000000000..c2dba4579126 --- /dev/null +++ b/changelog/20257.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows +``` diff --git a/changelog/20261.txt b/changelog/20261.txt new file mode 100644 index 000000000000..5f4eb977cce1 --- /dev/null +++ b/changelog/20261.txt @@ -0,0 +1,3 @@ +```release-note:improvement +physical/etcd: Upgrade etcd3 client to v3.5.7 +``` \ No newline at end of file diff --git a/changelog/20263.txt b/changelog/20263.txt new file mode 100644 index 000000000000..8556fe8865b3 --- /dev/null +++ b/changelog/20263.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix OIDC provider logo showing when domain doesn't match +``` diff --git a/changelog/20265.txt b/changelog/20265.txt new file mode 100644 index 000000000000..8e27875f627f --- /dev/null +++ b/changelog/20265.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. +``` \ No newline at end of file diff --git a/changelog/20276.txt b/changelog/20276.txt new file mode 100644 index 000000000000..71f288ab9a0d --- /dev/null +++ b/changelog/20276.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Include CA serial number, key UUID on issuers list endpoint. +``` diff --git a/changelog/20285.txt b/changelog/20285.txt new file mode 100644 index 000000000000..2bc2241dfe0b --- /dev/null +++ b/changelog/20285.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" +``` diff --git a/changelog/20294.txt b/changelog/20294.txt new file mode 100644 index 000000000000..92f7c291892b --- /dev/null +++ b/changelog/20294.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20341.txt b/changelog/20341.txt new file mode 100644 index 000000000000..652e5735ea7b --- /dev/null +++ b/changelog/20341.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix patching of leaf_not_after_behavior on issuers. +``` diff --git a/changelog/20354.txt b/changelog/20354.txt new file mode 100644 index 000000000000..abdacb7dac45 --- /dev/null +++ b/changelog/20354.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. +``` diff --git a/changelog/20368.txt b/changelog/20368.txt new file mode 100644 index 000000000000..bca5957d1d29 --- /dev/null +++ b/changelog/20368.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Allow updates of only the custom-metadata for entity alias.
+``` \ No newline at end of file diff --git a/changelog/20375.txt b/changelog/20375.txt new file mode 100644 index 000000000000..92caf1e57642 --- /dev/null +++ b/changelog/20375.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent panic on login after a namespace that had an MFA enforcement is deleted +``` \ No newline at end of file diff --git a/changelog/20411.txt b/changelog/20411.txt new file mode 100644 index 000000000000..093509040c02 --- /dev/null +++ b/changelog/20411.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add a `mount_point` field to audit requests and response entries +``` diff --git a/changelog/20418.txt b/changelog/20418.txt new file mode 100644 index 000000000000..596b7e461d23 --- /dev/null +++ b/changelog/20418.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: fixes panic in Vault server command when running in recovery mode +``` \ No newline at end of file diff --git a/changelog/20425.txt b/changelog/20425.txt new file mode 100644 index 000000000000..20869fc19f70 --- /dev/null +++ b/changelog/20425.txt @@ -0,0 +1,3 @@ +```release-note:feature +**MongoDB Atlas Database Secrets**: Adds support for client certificate credentials +``` diff --git a/changelog/20430.txt b/changelog/20430.txt new file mode 100644 index 000000000000..5ac95f104cdb --- /dev/null +++ b/changelog/20430.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix secret render when path includes %. Resolves #11616. +``` diff --git a/changelog/20431.txt b/changelog/20431.txt new file mode 100644 index 000000000000..a0083d879ecd --- /dev/null +++ b/changelog/20431.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add download button for each secret value in KV v2 +``` diff --git a/changelog/20441.txt b/changelog/20441.txt new file mode 100644 index 000000000000..628784883f8c --- /dev/null +++ b/changelog/20441.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow determining existing issuers and keys on import. +``` diff --git a/changelog/20442.txt b/changelog/20442.txt new file mode 100644 index 000000000000..09636b69b060 --- /dev/null +++ b/changelog/20442.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. +``` diff --git a/changelog/20453.txt b/changelog/20453.txt new file mode 100644 index 000000000000..e605791bc6b5 --- /dev/null +++ b/changelog/20453.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Set default value for `max_page_size` properly +``` diff --git a/changelog/20464.txt b/changelog/20464.txt new file mode 100644 index 000000000000..6b58153fccf6 --- /dev/null +++ b/changelog/20464.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path +``` diff --git a/changelog/20477.txt b/changelog/20477.txt new file mode 100644 index 000000000000..e95305a70bec --- /dev/null +++ b/changelog/20477.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: CLI should take days as a unit of time for ttl-like flags +``` diff --git a/changelog/20481.txt b/changelog/20481.txt new file mode 100644 index 000000000000..c6f27116311b --- /dev/null +++ b/changelog/20481.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add filtering by engine type and engine name to the Secret Engine list view.
+``` diff --git a/changelog/20488.txt b/changelog/20488.txt new file mode 100644 index 000000000000..5ea0f78b3928 --- /dev/null +++ b/changelog/20488.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Improve addPrefixToKVPath helper +``` diff --git a/changelog/20502.txt b/changelog/20502.txt new file mode 100644 index 000000000000..153309ab84ce --- /dev/null +++ b/changelog/20502.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: disable printing flag warning messages for the ssh command +``` diff --git a/changelog/20519.txt b/changelog/20519.txt new file mode 100644 index 000000000000..92f7c291892b --- /dev/null +++ b/changelog/20519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20530.txt b/changelog/20530.txt new file mode 100644 index 000000000000..6f6d04bf17e8 --- /dev/null +++ b/changelog/20530.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. +``` diff --git a/changelog/20536.txt b/changelog/20536.txt new file mode 100644 index 000000000000..62aa93605c38 --- /dev/null +++ b/changelog/20536.txt @@ -0,0 +1,3 @@ +```release-note:feature +**AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. +``` diff --git a/changelog/20548.txt b/changelog/20548.txt new file mode 100644 index 000000000000..fed5d2b4506e --- /dev/null +++ b/changelog/20548.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. +``` diff --git a/changelog/20559.txt b/changelog/20559.txt new file mode 100644 index 000000000000..2ff6422db0db --- /dev/null +++ b/changelog/20559.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation.
+``` diff --git a/changelog/20569.txt b/changelog/20569.txt new file mode 100644 index 000000000000..e10a4643ea7f --- /dev/null +++ b/changelog/20569.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add logic to validate env_template entries in configuration +``` diff --git a/changelog/20590.txt b/changelog/20590.txt new file mode 100644 index 000000000000..c1c7c9e2b526 --- /dev/null +++ b/changelog/20590.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata +``` diff --git a/changelog/20595.txt b/changelog/20595.txt new file mode 100644 index 000000000000..982f41498f13 --- /dev/null +++ b/changelog/20595.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add possibility to decode a generated encoded root token via the REST API +``` diff --git a/changelog/20603.txt b/changelog/20603.txt new file mode 100644 index 000000000000..c3e7e2bbe7db --- /dev/null +++ b/changelog/20603.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue creating mfa login enforcement from method enforcements tab +``` \ No newline at end of file diff --git a/changelog/20609.txt b/changelog/20609.txt new file mode 100644 index 000000000000..fe92833da52d --- /dev/null +++ b/changelog/20609.txt @@ -0,0 +1,4 @@ +```release-note:improvement +command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. +``` \ No newline at end of file diff --git a/changelog/20626.txt b/changelog/20626.txt new file mode 100644 index 000000000000..2a13cee1735d --- /dev/null +++ b/changelog/20626.txt @@ -0,0 +1,4 @@ +```release-note:improvement +activitylog: EntityRecord protobufs now contain a ClientType field for +distinguishing client sources. +``` diff --git a/changelog/20628.txt b/changelog/20628.txt new file mode 100644 index 000000000000..978814601a30 --- /dev/null +++ b/changelog/20628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: initial implementation of a process runner for injecting secrets as environment variables via Vault Agent +``` \ No newline at end of file diff --git a/changelog/20629.txt b/changelog/20629.txt new file mode 100644 index 000000000000..f5692f7691e0 --- /dev/null +++ b/changelog/20629.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys.
+``` \ No newline at end of file diff --git a/changelog/20636.txt b/changelog/20636.txt new file mode 100644 index 000000000000..6e20fcdbdfa0 --- /dev/null +++ b/changelog/20636.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Properly handle nil identity_policies in secret data +``` \ No newline at end of file diff --git a/changelog/20642.txt b/changelog/20642.txt new file mode 100644 index 000000000000..8b8bc40a112b --- /dev/null +++ b/changelog/20642.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: add subject key identifier to read key response +``` diff --git a/changelog/20643.txt b/changelog/20643.txt new file mode 100644 index 000000000000..340ec5b547ff --- /dev/null +++ b/changelog/20643.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: report intermediate error messages during request forwarding +``` diff --git a/changelog/20652.txt b/changelog/20652.txt new file mode 100644 index 000000000000..c41e750c0472 --- /dev/null +++ b/changelog/20652.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Respond to writes with updated key policy, cache configuration. +``` diff --git a/changelog/20654.txt b/changelog/20654.txt new file mode 100644 index 000000000000..91e567477b5b --- /dev/null +++ b/changelog/20654.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Warning when issuing leaf certificates from CSRs with basic constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. +``` diff --git a/changelog/20664.txt b/changelog/20664.txt new file mode 100644 index 000000000000..6f2b4abe61ae --- /dev/null +++ b/changelog/20664.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. +``` diff --git a/changelog/20668.txt b/changelog/20668.txt new file mode 100644 index 000000000000..f3f840c47d1d --- /dev/null +++ b/changelog/20668.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. +``` diff --git a/changelog/20680.txt b/changelog/20680.txt new file mode 100644 index 000000000000..ff80ac466092 --- /dev/null +++ b/changelog/20680.txt @@ -0,0 +1,6 @@ +```release-note:improvement +core (enterprise): support reloading configuration for automated reporting via SIGHUP +``` +```release-note:improvement +core (enterprise): license updates trigger a reload of reporting and the activity log +``` \ No newline at end of file diff --git a/changelog/20694.txt b/changelog/20694.txt new file mode 100644 index 000000000000..07f790a666dd --- /dev/null +++ b/changelog/20694.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period +``` diff --git a/changelog/20697.txt b/changelog/20697.txt new file mode 100644 index 000000000000..be80443714da --- /dev/null +++ b/changelog/20697.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update detail views that render ttl durations to display full unit instead of letter (i.e.
'days' instead of 'd') +``` diff --git a/changelog/20701.txt b/changelog/20701.txt new file mode 100644 index 000000000000..24942d5d066c --- /dev/null +++ b/changelog/20701.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix race during runUnifiedTransfer when deciding to skip re-running a test within a short window. +``` diff --git a/changelog/20725.txt b/changelog/20725.txt new file mode 100644 index 000000000000..04399cca8f63 --- /dev/null +++ b/changelog/20725.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20731.txt b/changelog/20731.txt new file mode 100644 index 000000000000..1896c199add9 --- /dev/null +++ b/changelog/20731.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes auto_rotate_period ttl input for transit keys +``` diff --git a/changelog/20736.txt b/changelog/20736.txt new file mode 100644 index 000000000000..1c4c3d4d256e --- /dev/null +++ b/changelog/20736.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and versions across clusters. +``` diff --git a/changelog/20741.txt b/changelog/20741.txt new file mode 100644 index 000000000000..8034e456e0c6 --- /dev/null +++ b/changelog/20741.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add integration tests for agent running in process supervisor mode +``` diff --git a/changelog/20742.txt b/changelog/20742.txt new file mode 100644 index 000000000000..d91237e1d391 --- /dev/null +++ b/changelog/20742.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 +``` diff --git a/changelog/20745.txt b/changelog/20745.txt new file mode 100644 index 000000000000..57a4391ba22d --- /dev/null +++ b/changelog/20745.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/centrify: Updated plugin from v0.14.0 to v0.15.1 +``` diff --git a/changelog/20747.txt b/changelog/20747.txt new file mode 100644 index 000000000000..4c600d203fb3 --- /dev/null +++ b/changelog/20747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add filtering by auth type and auth name to the Authentication Method list view. +``` diff --git a/changelog/20750.txt b/changelog/20750.txt new file mode 100644 index 000000000000..75a3e1da364e --- /dev/null +++ b/changelog/20750.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 +``` \ No newline at end of file diff --git a/changelog/20751.txt b/changelog/20751.txt new file mode 100644 index 000000000000..9b78b3dfe5a2 --- /dev/null +++ b/changelog/20751.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 +``` diff --git a/changelog/20752.txt b/changelog/20752.txt new file mode 100644 index 000000000000..667bc37f37b3 --- /dev/null +++ b/changelog/20752.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients.
+``` diff --git a/changelog/20758.txt b/changelog/20758.txt new file mode 100644 index 000000000000..7eed0b075191 --- /dev/null +++ b/changelog/20758.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 +``` \ No newline at end of file diff --git a/changelog/20763.txt b/changelog/20763.txt new file mode 100644 index 000000000000..311dcb0a62f4 --- /dev/null +++ b/changelog/20763.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/redis: Upgrade plugin dependencies +``` diff --git a/changelog/20764.txt b/changelog/20764.txt new file mode 100644 index 000000000000..adc14e07f152 --- /dev/null +++ b/changelog/20764.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Updated plugin from v0.9.0 to v0.9.2 +``` diff --git a/changelog/20767.txt b/changelog/20767.txt new file mode 100644 index 000000000000..b6d853a63903 --- /dev/null +++ b/changelog/20767.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/elasticsearch: Upgrade plugin dependencies +``` diff --git a/changelog/20771.txt b/changelog/20771.txt new file mode 100644 index 000000000000..5cc1ee2d472c --- /dev/null +++ b/changelog/20771.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/kerberos: Enable plugin multiplexing +auth/kerberos: Upgrade plugin dependencies +``` diff --git a/changelog/20777.txt b/changelog/20777.txt new file mode 100644 index 000000000000..ec3c9e42b58b --- /dev/null +++ b/changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Updated plugin from v0.15.0 to v0.16.0 +``` \ No newline at end of file diff --git a/changelog/20783.txt b/changelog/20783.txt new file mode 100644 index 000000000000..372d36cb7b1e --- /dev/null +++ b/changelog/20783.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. +``` \ No newline at end of file diff --git a/changelog/20784.txt b/changelog/20784.txt new file mode 100644 index 000000000000..b24a857a2002 --- /dev/null +++ b/changelog/20784.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/gcpkms: Enable plugin multiplexing +secrets/gcpkms: Upgrade plugin dependencies +``` diff --git a/changelog/20787.txt b/changelog/20787.txt new file mode 100644 index 000000000000..a69b90d7de82 --- /dev/null +++ b/changelog/20787.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 +``` \ No newline at end of file diff --git a/changelog/20790.txt b/changelog/20790.txt new file mode 100644 index 000000000000..1e185e3fc317 --- /dev/null +++ b/changelog/20790.txt @@ -0,0 +1,3 @@ +```release-note:feature +**UI LDAP secrets engine**: Add LDAP secrets engine to the UI. 
+``` diff --git a/changelog/20799.txt b/changelog/20799.txt new file mode 100644 index 000000000000..2e17ff921d7b --- /dev/null +++ b/changelog/20799.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20802.txt b/changelog/20802.txt new file mode 100644 index 000000000000..de8e1b90dc06 --- /dev/null +++ b/changelog/20802.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.5.0 +``` +```release-note:change +auth/kubernetes: Update plugin to v0.16.0 +``` diff --git a/changelog/20807.txt b/changelog/20807.txt new file mode 100644 index 000000000000..3a3c1f4cdad3 --- /dev/null +++ b/changelog/20807.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 +``` \ No newline at end of file diff --git a/changelog/20816.txt b/changelog/20816.txt new file mode 100644 index 000000000000..aae4b59c48dc --- /dev/null +++ b/changelog/20816.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Updated plugin from v0.13.0 to v0.15.0 +``` diff --git a/changelog/20818.txt b/changelog/20818.txt new file mode 100644 index 000000000000..885ee92ce8aa --- /dev/null +++ b/changelog/20818.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20825.txt b/changelog/20825.txt new file mode 100644 index 000000000000..da993696b048 --- /dev/null +++ b/changelog/20825.txt @@ -0,0 +1,3 @@ +```release-note:change +storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. +``` \ No newline at end of file diff --git a/changelog/20826.txt b/changelog/20826.txt new file mode 100644 index 000000000000..8a693d9fc94c --- /dev/null +++ b/changelog/20826.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. +``` \ No newline at end of file diff --git a/changelog/20834.txt b/changelog/20834.txt new file mode 100644 index 000000000000..f17f1d326b58 --- /dev/null +++ b/changelog/20834.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. +``` \ No newline at end of file diff --git a/changelog/20841.txt b/changelog/20841.txt new file mode 100644 index 000000000000..26a8d6316312 --- /dev/null +++ b/changelog/20841.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Fix generated types for duration strings +``` diff --git a/changelog/20864.txt b/changelog/20864.txt new file mode 100644 index 000000000000..7193c6b81fb9 --- /dev/null +++ b/changelog/20864.txt @@ -0,0 +1,5 @@ +```release-note:bug +secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. 
+``` diff --git a/changelog/20879.txt b/changelog/20879.txt new file mode 100644 index 000000000000..12bb1e4a4448 --- /dev/null +++ b/changelog/20879.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs +``` diff --git a/changelog/20881.txt b/changelog/20881.txt new file mode 100644 index 000000000000..fd3e6d5fa44f --- /dev/null +++ b/changelog/20881.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec +``` diff --git a/changelog/20882.txt b/changelog/20882.txt new file mode 100644 index 000000000000..3694468641da --- /dev/null +++ b/changelog/20882.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 +``` +```release-note:feature +**MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication +``` \ No newline at end of file diff --git a/changelog/20891.txt b/changelog/20891.txt new file mode 100644 index 000000000000..3057ec56f40d --- /dev/null +++ b/changelog/20891.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/consul: Improve error message when ACL bootstrapping fails. +``` + diff --git a/changelog/20897.txt b/changelog/20897.txt new file mode 100644 index 000000000000..01be5ac718ca --- /dev/null +++ b/changelog/20897.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue unsealing cluster for seal types other than shamir +``` \ No newline at end of file diff --git a/changelog/20907.txt b/changelog/20907.txt new file mode 100644 index 000000000000..3f13a659de2b --- /dev/null +++ b/changelog/20907.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes key_bits and signature_bits reverting to default values when editing a pki role +``` \ No newline at end of file diff --git a/changelog/20933.txt b/changelog/20933.txt new file mode 100644 index 000000000000..580475e2b5d5 --- /dev/null +++ b/changelog/20933.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: remove unnecessary *BarrierView field from backendEntry struct +``` \ No newline at end of file diff --git a/changelog/20934.txt b/changelog/20934.txt new file mode 100644 index 000000000000..72c22574d615 --- /dev/null +++ b/changelog/20934.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix bug with 'cache' stanza validation +``` diff --git a/changelog/20943.txt b/changelog/20943.txt new file mode 100644 index 000000000000..7cf186d18420 --- /dev/null +++ b/changelog/20943.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. +``` diff --git a/changelog/20964.txt b/changelog/20964.txt new file mode 100644 index 000000000000..8bd9563c1623 --- /dev/null +++ b/changelog/20964.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fixes creation of duplicate groups with the same name but unique IDs. +``` \ No newline at end of file diff --git a/changelog/20965.txt b/changelog/20965.txt new file mode 100644 index 000000000000..43c1d97cc803 --- /dev/null +++ b/changelog/20965.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Remove caseSensitivityKey to prevent errors while loading groups, which could result in missing groups in memDB when duplicates are found. 
+``` \ No newline at end of file diff --git a/changelog/20966.txt b/changelog/20966.txt new file mode 100644 index 000000000000..f9a3b8b26f29 --- /dev/null +++ b/changelog/20966.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 +``` \ No newline at end of file diff --git a/changelog/20981.txt b/changelog/20981.txt new file mode 100644 index 000000000000..26a5304c5d3d --- /dev/null +++ b/changelog/20981.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Limit ACME-issued certificates' NotAfter TTL to a maximum of 90 days +``` diff --git a/changelog/20986.txt b/changelog/20986.txt new file mode 100644 index 000000000000..c0615f9a3933 --- /dev/null +++ b/changelog/20986.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. +``` \ No newline at end of file diff --git a/changelog/20995.txt b/changelog/20995.txt new file mode 100644 index 000000000000..76653d4d5433 --- /dev/null +++ b/changelog/20995.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add a new sys API method for replication status +``` diff --git a/changelog/21010.txt b/changelog/21010.txt new file mode 100644 index 000000000000..bcd218794df9 --- /dev/null +++ b/changelog/21010.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. +``` \ No newline at end of file diff --git a/changelog/21057.txt b/changelog/21057.txt new file mode 100644 index 000000000000..7ca81cd37632 --- /dev/null +++ b/changelog/21057.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Dashboard UI**: Dashboard is now available in the UI as the new landing page. +``` \ No newline at end of file diff --git a/changelog/21081.txt b/changelog/21081.txt new file mode 100644 index 000000000000..ecf1713eb67a --- /dev/null +++ b/changelog/21081.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add support to create CSRs from keys in the transit engine and import/export x509 certificates +``` diff --git a/changelog/21100.txt b/changelog/21100.txt new file mode 100644 index 000000000000..50024c9c2d3a --- /dev/null +++ b/changelog/21100.txt @@ -0,0 +1,4 @@ +```release-note:bug +replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. +``` diff --git a/changelog/21110.txt b/changelog/21110.txt new file mode 100644 index 000000000000..2471fac770de --- /dev/null +++ b/changelog/21110.txt @@ -0,0 +1,4 @@ +```release-note:bug +core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. 
+``` \ No newline at end of file diff --git a/changelog/21159.txt b/changelog/21159.txt new file mode 100644 index 000000000000..25c1e53d90d5 --- /dev/null +++ b/changelog/21159.txt @@ -0,0 +1,4 @@ +```release-note:improvement +storage/dynamodb: Speed up list and delete of large directories by only requesting keys from DynamoDB +``` + diff --git a/changelog/21165.txt b/changelog/21165.txt new file mode 100644 index 000000000000..dd6b6d05de0d --- /dev/null +++ b/changelog/21165.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft/autopilot: Add dr-token flag for raft autopilot cli commands +``` diff --git a/changelog/21209.txt b/changelog/21209.txt new file mode 100644 index 000000000000..31ddf413c070 --- /dev/null +++ b/changelog/21209.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id. +``` diff --git a/changelog/21215.txt b/changelog/21215.txt new file mode 100644 index 000000000000..ec4a63af9ebd --- /dev/null +++ b/changelog/21215.txt @@ -0,0 +1,4 @@ +```release-note:change +core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. +``` \ No newline at end of file diff --git a/changelog/21223.txt b/changelog/21223.txt new file mode 100644 index 000000000000..96605f0a4a3f --- /dev/null +++ b/changelog/21223.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. +``` diff --git a/changelog/21249.txt b/changelog/21249.txt new file mode 100644 index 000000000000..a088677ad8af --- /dev/null +++ b/changelog/21249.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic in sealed nodes using raft storage trying to emit raft metrics +``` diff --git a/changelog/21260.txt b/changelog/21260.txt new file mode 100644 index 000000000000..b291ec7b4bd5 --- /dev/null +++ b/changelog/21260.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. +``` diff --git a/changelog/21282.txt b/changelog/21282.txt new file mode 100644 index 000000000000..03f22e4856b9 --- /dev/null +++ b/changelog/21282.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/ldap: Normalize HTTP response codes when invalid credentials are provided +``` diff --git a/changelog/21297.txt b/changelog/21297.txt new file mode 100644 index 000000000000..9f98fd3e0d48 --- /dev/null +++ b/changelog/21297.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. +``` diff --git a/changelog/21316.txt b/changelog/21316.txt new file mode 100644 index 000000000000..5573c7e4d319 --- /dev/null +++ b/changelog/21316.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. 
+``` diff --git a/changelog/21342.txt b/changelog/21342.txt new file mode 100644 index 000000000000..c1d8cd018bb3 --- /dev/null +++ b/changelog/21342.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Don't exit just because we think there's a potential deadlock. +``` diff --git a/changelog/21357.txt b/changelog/21357.txt new file mode 100644 index 000000000000..3b3bffddfc29 --- /dev/null +++ b/changelog/21357.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed issue with some durations not being properly parsed to include days. +``` \ No newline at end of file diff --git a/changelog/21375.txt b/changelog/21375.txt new file mode 100644 index 000000000000..fc427b0cd3a6 --- /dev/null +++ b/changelog/21375.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component +``` \ No newline at end of file diff --git a/changelog/21424.txt b/changelog/21424.txt new file mode 100644 index 000000000000..229e97e4d3fc --- /dev/null +++ b/changelog/21424.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: add support for cloning a Client's tls.Config. +``` diff --git a/changelog/21449.txt b/changelog/21449.txt new file mode 100644 index 000000000000..7711909a4c84 --- /dev/null +++ b/changelog/21449.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix response schema for PKI Issue requests +``` diff --git a/changelog/21458.txt b/changelog/21458.txt new file mode 100644 index 000000000000..352b8a04b1e0 --- /dev/null +++ b/changelog/21458.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix schema definitions for PKI EAB APIs +``` diff --git a/changelog/21460.txt b/changelog/21460.txt new file mode 100644 index 000000000000..79cd7bc2293b --- /dev/null +++ b/changelog/21460.txt @@ -0,0 +1,3 @@ +```release-note:feature +**raft-wal**: Add experimental support for raft-wal, a new backend engine for integrated storage. +``` diff --git a/changelog/21466.txt b/changelog/21466.txt new file mode 100644 index 000000000000..94d0af99a77c --- /dev/null +++ b/changelog/21466.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix "generate-config" command documentation URL +``` diff --git a/changelog/21470.txt b/changelog/21470.txt new file mode 100644 index 000000000000..9f047a9d6758 --- /dev/null +++ b/changelog/21470.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. +``` diff --git a/changelog/21495.txt b/changelog/21495.txt new file mode 100644 index 000000000000..645c947c61a6 --- /dev/null +++ b/changelog/21495.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Updating operator step-down docs to include info about possible failed requests during leader step down. +``` \ No newline at end of file diff --git a/changelog/21503.txt b/changelog/21503.txt new file mode 100644 index 000000000000..a61b22ba8a7d --- /dev/null +++ b/changelog/21503.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Surface DOMException error when browser settings prevent localStorage. 
+``` diff --git a/changelog/21520.txt b/changelog/21520.txt new file mode 100644 index 000000000000..38ab73523ba4 --- /dev/null +++ b/changelog/21520.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Replace inline confirm alert inside a popup-menu dropdown with confirm alert modal +``` diff --git a/changelog/21531.txt b/changelog/21531.txt new file mode 100644 index 000000000000..dff421a83a97 --- /dev/null +++ b/changelog/21531.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes styling of private key input when configuring an SSH key +``` \ No newline at end of file diff --git a/changelog/21546.txt b/changelog/21546.txt new file mode 100644 index 000000000000..8eaf53ed3929 --- /dev/null +++ b/changelog/21546.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. +``` diff --git a/changelog/21562.txt b/changelog/21562.txt new file mode 100644 index 000000000000..c41d727da36b --- /dev/null +++ b/changelog/21562.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces +``` \ No newline at end of file diff --git a/changelog/21563.txt b/changelog/21563.txt new file mode 100644 index 000000000000..7426ed24fa56 --- /dev/null +++ b/changelog/21563.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Better mount points for kv-v1 and kv-v2 in openapi.json +``` diff --git a/changelog/21578.txt b/changelog/21578.txt new file mode 100644 index 000000000000..30d4fac8cc08 --- /dev/null +++ b/changelog/21578.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add API Explorer link to Sidebar, under Tools. +``` diff --git a/changelog/21582.txt b/changelog/21582.txt new file mode 100644 index 000000000000..6a9d9a4276c1 --- /dev/null +++ b/changelog/21582.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes login screen display issue with Safari browser +``` \ No newline at end of file diff --git a/changelog/21583.txt b/changelog/21583.txt new file mode 100644 index 000000000000..f73feb96b7e2 --- /dev/null +++ b/changelog/21583.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add example modal to policy form +``` \ No newline at end of file diff --git a/changelog/21623.txt b/changelog/21623.txt new file mode 100644 index 000000000000..7fc272d13b5d --- /dev/null +++ b/changelog/21623.txt @@ -0,0 +1,3 @@ +```release-note:improvement +eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) +``` \ No newline at end of file diff --git a/changelog/21628.txt b/changelog/21628.txt new file mode 100644 index 000000000000..888108b27667 --- /dev/null +++ b/changelog/21628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add core audit events experiment +``` \ No newline at end of file diff --git a/changelog/21631.txt b/changelog/21631.txt new file mode 100644 index 000000000000..ffdb4bba4673 --- /dev/null +++ b/changelog/21631.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
+``` \ No newline at end of file diff --git a/changelog/21635.txt b/changelog/21635.txt new file mode 100644 index 000000000000..6d19e8da9688 --- /dev/null +++ b/changelog/21635.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Adds missing values to details view after generating PKI certificate +``` \ No newline at end of file diff --git a/changelog/21640.txt b/changelog/21640.txt new file mode 100644 index 000000000000..458561ae1aae --- /dev/null +++ b/changelog/21640.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix race when updating a mount's route entry tainted status and incoming requests +``` diff --git a/changelog/21641.txt b/changelog/21641.txt new file mode 100644 index 000000000000..e615445a3e6f --- /dev/null +++ b/changelog/21641.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth: added support for LDAP auto-auth +``` diff --git a/changelog/21642.txt b/changelog/21642.txt new file mode 100644 index 000000000000..84af5b694f10 --- /dev/null +++ b/changelog/21642.txt @@ -0,0 +1,3 @@ +```release-note:bug +serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary +``` \ No newline at end of file diff --git a/changelog/21681.txt b/changelog/21681.txt new file mode 100644 index 000000000000..8d684423a440 --- /dev/null +++ b/changelog/21681.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. +``` diff --git a/changelog/21702.txt b/changelog/21702.txt new file mode 100644 index 000000000000..5475a486df0e --- /dev/null +++ b/changelog/21702.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. +``` diff --git a/changelog/21723.txt b/changelog/21723.txt new file mode 100644 index 000000000000..cefe5e1c5ad3 --- /dev/null +++ b/changelog/21723.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: List operations are now given first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path +``` diff --git a/changelog/21739.txt b/changelog/21739.txt new file mode 100644 index 000000000000..7b559d97cd4c --- /dev/null +++ b/changelog/21739.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. +``` diff --git a/changelog/21742.txt b/changelog/21742.txt new file mode 100644 index 000000000000..713ce3c885d4 --- /dev/null +++ b/changelog/21742.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. 
+``` diff --git a/changelog/21743.txt b/changelog/21743.txt new file mode 100644 index 000000000000..1bb8279543ba --- /dev/null +++ b/changelog/21743.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/framework: Adds replication state helper for backends to check for read-only storage +``` diff --git a/changelog/21760.txt b/changelog/21760.txt new file mode 100644 index 000000000000..2285cda4464b --- /dev/null +++ b/changelog/21760.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions +``` diff --git a/changelog/21767.txt b/changelog/21767.txt new file mode 100644 index 000000000000..2092442e462f --- /dev/null +++ b/changelog/21767.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed secrets, leases, and policies filter dropping focus after a single character +``` diff --git a/changelog/21771.txt b/changelog/21771.txt new file mode 100644 index 000000000000..55252dcb3666 --- /dev/null +++ b/changelog/21771.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix styling for username input when editing a user +``` \ No newline at end of file diff --git a/changelog/21772.txt b/changelog/21772.txt new file mode 100644 index 000000000000..2ebdbf39565c --- /dev/null +++ b/changelog/21772.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths +``` diff --git a/changelog/21800.txt b/changelog/21800.txt new file mode 100644 index 000000000000..bfe8f6721d9c --- /dev/null +++ b/changelog/21800.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. +``` \ No newline at end of file diff --git a/changelog/21830.txt b/changelog/21830.txt new file mode 100644 index 000000000000..6e1972d447be --- /dev/null +++ b/changelog/21830.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values +``` \ No newline at end of file diff --git a/changelog/21854.txt b/changelog/21854.txt new file mode 100644 index 000000000000..2ab5acde88e9 --- /dev/null +++ b/changelog/21854.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: use Go stdlib functionalities instead of explicit byte/string conversions +``` diff --git a/changelog/21870.txt b/changelog/21870.txt new file mode 100644 index 000000000000..3cb9856ffca9 --- /dev/null +++ b/changelog/21870.txt @@ -0,0 +1,6 @@ +```release-note:bug +secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. +``` +```release-note:bug +secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. 
+``` diff --git a/changelog/21871.txt b/changelog/21871.txt new file mode 100644 index 000000000000..8333603efc53 --- /dev/null +++ b/changelog/21871.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update unseal and DR operation token flow components +``` \ No newline at end of file diff --git a/changelog/21925.txt b/changelog/21925.txt new file mode 100644 index 000000000000..ca89ff75a76e --- /dev/null +++ b/changelog/21925.txt @@ -0,0 +1,3 @@ +```release-note:improvement +kmip (enterprise): Add namespace lock and unlock support +``` diff --git a/changelog/21926.txt b/changelog/21926.txt new file mode 100644 index 000000000000..a6020204b043 --- /dev/null +++ b/changelog/21926.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes problem displaying certificates issued with unsupported signature algorithms (e.g. ed25519) +``` \ No newline at end of file diff --git a/changelog/21934.txt b/changelog/21934.txt new file mode 100644 index 000000000000..d4ce0b08a630 --- /dev/null +++ b/changelog/21934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Fix response definitions for list operations +``` diff --git a/changelog/21942.txt b/changelog/21942.txt new file mode 100644 index 000000000000..4e2828efb43c --- /dev/null +++ b/changelog/21942.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Fix generation of correct fields in some rarer cases +``` diff --git a/changelog/21951.txt b/changelog/21951.txt new file mode 100644 index 000000000000..d53c0f18ea45 --- /dev/null +++ b/changelog/21951.txt @@ -0,0 +1,4 @@ +```release-note:bug +awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. +``` diff --git a/changelog/21960.txt b/changelog/21960.txt new file mode 100644 index 000000000000..cab19fab96f3 --- /dev/null +++ b/changelog/21960.txt @@ -0,0 +1,3 @@ +```release-note:improvement +aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional STS endpoints based on the Authorization header when using IAM-based authentication. +``` diff --git a/changelog/21968.txt b/changelog/21968.txt new file mode 100644 index 000000000000..3ba650d585c4 --- /dev/null +++ b/changelog/21968.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix styling for viewing certificate in kubernetes configuration +``` \ No newline at end of file diff --git a/changelog/21993.txt b/changelog/21993.txt new file mode 100644 index 000000000000..856cfc9662e0 --- /dev/null +++ b/changelog/21993.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Allow vault CLI HTTP headers to be specified using the JSON-encoded VAULT_HEADERS environment variable +``` \ No newline at end of file diff --git a/changelog/22039.txt b/changelog/22039.txt new file mode 100644 index 000000000000..09c3e6ad8039 --- /dev/null +++ b/changelog/22039.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Display minus icon for empty MaskedInput value. Show MaskedInput for KV secrets without values +``` \ No newline at end of file diff --git a/changelog/22040.txt b/changelog/22040.txt new file mode 100644 index 000000000000..e96a428b95af --- /dev/null +++ b/changelog/22040.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. 
+``` diff --git a/changelog/22122.txt b/changelog/22122.txt new file mode 100644 index 000000000000..a7e723090caf --- /dev/null +++ b/changelog/22122.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: upgrade Ember to 4.12 +``` diff --git a/changelog/22126.txt b/changelog/22126.txt new file mode 100644 index 000000000000..e6633ec3a050 --- /dev/null +++ b/changelog/22126.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: allowed_domains are now compared in a case-insensitive manner if they use glob patterns +``` \ No newline at end of file diff --git a/changelog/22137.txt b/changelog/22137.txt new file mode 100644 index 000000000000..6f5a3bee945a --- /dev/null +++ b/changelog/22137.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondaries, as they do not have an expiration manager. +``` \ No newline at end of file diff --git a/changelog/22153.txt b/changelog/22153.txt new file mode 100644 index 000000000000..4c51718dc783 --- /dev/null +++ b/changelog/22153.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: correct doctype for index.html +``` diff --git a/changelog/22160.txt b/changelog/22160.txt new file mode 100644 index 000000000000..19f590bfc539 --- /dev/null +++ b/changelog/22160.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: display CertificateCard instead of MaskedInput for certificates in PKI +``` \ No newline at end of file diff --git a/changelog/22185.txt b/changelog/22185.txt new file mode 100644 index 000000000000..1fae58d96ede --- /dev/null +++ b/changelog/22185.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/ldap: introduce cap/ldap.Client for LDAP authentication +auth/ldap: deprecates `connection_timeout` in favor of `request_timeout` for timeouts +sdk/ldaputil: deprecates Client in favor of cap/ldap.Client +``` diff --git a/changelog/22191.txt b/changelog/22191.txt new file mode 100644 index 000000000000..9fa7c85d5910 --- /dev/null +++ b/changelog/22191.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki +``` \ No newline at end of file diff --git a/changelog/22233.txt b/changelog/22233.txt new file mode 100644 index 000000000000..f6b1a5c33a70 --- /dev/null +++ b/changelog/22233.txt @@ -0,0 +1,3 @@ +```release-note:improvement +docs: Clarify when an entity is created +``` diff --git a/changelog/22235.txt b/changelog/22235.txt new file mode 100644 index 000000000000..3d62e70cb11b --- /dev/null +++ b/changelog/22235.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Log rollback manager failures during unmount and remount to prevent replication failures on secondary clusters. +``` diff --git a/changelog/22249.txt b/changelog/22249.txt new file mode 100644 index 000000000000..d470b9743ff5 --- /dev/null +++ b/changelog/22249.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap +``` \ No newline at end of file diff --git a/changelog/22253.txt b/changelog/22253.txt new file mode 100644 index 000000000000..c3a9ab039c4e --- /dev/null +++ b/changelog/22253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Improves error logging for static role rotations by including the database and role names. 
+``` \ No newline at end of file diff --git a/changelog/22264.txt b/changelog/22264.txt new file mode 100644 index 000000000000..5ee53785d3a6 --- /dev/null +++ b/changelog/22264.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). +``` diff --git a/changelog/22277.txt b/changelog/22277.txt new file mode 100644 index 000000000000..0d0dbf2dcf1d --- /dev/null +++ b/changelog/22277.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.0 +``` diff --git a/changelog/22304.txt b/changelog/22304.txt new file mode 100644 index 000000000000..eeec038ae9ed --- /dev/null +++ b/changelog/22304.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy +``` \ No newline at end of file diff --git a/changelog/22322.txt b/changelog/22322.txt new file mode 100644 index 000000000000..8df620c385dc --- /dev/null +++ b/changelog/22322.txt @@ -0,0 +1,4 @@ +```release-note:bug +agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. +``` + diff --git a/changelog/22330.txt b/changelog/22330.txt new file mode 100644 index 000000000000..427fe398a707 --- /dev/null +++ b/changelog/22330.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. +``` \ No newline at end of file diff --git a/changelog/22333.txt b/changelog/22333.txt new file mode 100644 index 000000000000..67debb7421f8 --- /dev/null +++ b/changelog/22333.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System copy button component making copy buttons accessible +``` \ No newline at end of file diff --git a/changelog/22355.txt b/changelog/22355.txt new file mode 100644 index 000000000000..d748796c1d92 --- /dev/null +++ b/changelog/22355.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix bug where background thread to update locked user entries runs on DR secondaries. +``` \ No newline at end of file diff --git a/changelog/22362.txt b/changelog/22362.txt new file mode 100644 index 000000000000..0de5440efe39 --- /dev/null +++ b/changelog/22362.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix readonly errors that could occur while loading mounts/auths during unseal +``` diff --git a/changelog/22363.txt b/changelog/22363.txt new file mode 100644 index 000000000000..faa5a24462a0 --- /dev/null +++ b/changelog/22363.txt @@ -0,0 +1,3 @@ +```release-note:bug +license: Add autoloaded license path to the cache exempt list. This is to ensure license changes on the active node are observed on the perfStandby node. +``` diff --git a/changelog/22374.txt b/changelog/22374.txt new file mode 100644 index 000000000000..2f744c5c3386 --- /dev/null +++ b/changelog/22374.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. 
+``` diff --git a/changelog/22390.txt b/changelog/22390.txt new file mode 100644 index 000000000000..449a8a2d2278 --- /dev/null +++ b/changelog/22390.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes text readability issue in revoke token confirmation dialog +``` \ No newline at end of file diff --git a/changelog/22394.txt b/changelog/22394.txt new file mode 100644 index 000000000000..4f5a2b9c89ff --- /dev/null +++ b/changelog/22394.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults +``` \ No newline at end of file diff --git a/changelog/22396.txt b/changelog/22396.txt new file mode 100644 index 000000000000..d05cbb7acaa4 --- /dev/null +++ b/changelog/22396.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Fix link formatting in Vault lambda extension docs +``` diff --git a/changelog/22400.txt b/changelog/22400.txt new file mode 100644 index 000000000000..54e8cd299c9e --- /dev/null +++ b/changelog/22400.txt @@ -0,0 +1,3 @@ +```release-note:change +telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback` metrics by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes Vault to emit the metrics with mount points in their names. +``` diff --git a/changelog/22410.txt b/changelog/22410.txt new file mode 100644 index 000000000000..25fcc3335f5d --- /dev/null +++ b/changelog/22410.txt @@ -0,0 +1,3 @@ +```release-note:bug +api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. +``` \ No newline at end of file diff --git a/changelog/22445.txt b/changelog/22445.txt new file mode 100644 index 000000000000..11b310d6898c --- /dev/null +++ b/changelog/22445.txt @@ -0,0 +1,3 @@ +```release-note:feature +**GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. +``` \ No newline at end of file diff --git a/changelog/22452.txt b/changelog/22452.txt new file mode 100644 index 000000000000..88657b284d3d --- /dev/null +++ b/changelog/22452.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add field that allows rate-limit namespace quotas to be inherited by child namespaces. 
+``` diff --git a/changelog/22458.txt b/changelog/22458.txt new file mode 100644 index 000000000000..6ce09295099a --- /dev/null +++ b/changelog/22458.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes model defaults overwriting input value when user tries to clear form input +``` \ No newline at end of file diff --git a/changelog/22460.txt b/changelog/22460.txt new file mode 100644 index 000000000000..dba53a78ed25 --- /dev/null +++ b/changelog/22460.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: add canonical ARN as entity alias option +``` diff --git a/changelog/22468.txt b/changelog/22468.txt new file mode 100644 index 000000000000..538da1482497 --- /dev/null +++ b/changelog/22468.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary +``` diff --git a/changelog/22471.txt b/changelog/22471.txt new file mode 100644 index 000000000000..67b110cd67d8 --- /dev/null +++ b/changelog/22471.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: enables create and update KV secret workflow when control group present +``` \ No newline at end of file diff --git a/changelog/22474.txt b/changelog/22474.txt new file mode 100644 index 000000000000..9f18050a418d --- /dev/null +++ b/changelog/22474.txt @@ -0,0 +1,3 @@ +```release-note:feature +Add subscribe capability and subscribe_event_types to policies for events. +``` diff --git a/changelog/22484.txt b/changelog/22484.txt new file mode 100644 index 000000000000..6992e7c2fa56 --- /dev/null +++ b/changelog/22484.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Database Static Role Advanced TTL Management**: Adds the ability to rotate +static roles on a defined schedule. +``` diff --git a/changelog/22487.txt b/changelog/22487.txt new file mode 100644 index 000000000000..bc555f05d525 --- /dev/null +++ b/changelog/22487.txt @@ -0,0 +1,6 @@ +```release-note:change +events: `data_path` will include full data path of secret, including name. +``` +```release-note:change +sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`. +``` diff --git a/changelog/22502.txt b/changelog/22502.txt new file mode 100644 index 000000000000..b9d21c2ce277 --- /dev/null +++ b/changelog/22502.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: KV View Secret card will link to list view if input ends in "/" +``` \ No newline at end of file diff --git a/changelog/22516.txt b/changelog/22516.txt new file mode 100644 index 000000000000..661b77d25687 --- /dev/null +++ b/changelog/22516.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.9.0 +``` diff --git a/changelog/22519.txt b/changelog/22519.txt new file mode 100644 index 000000000000..5882cfb25fb9 --- /dev/null +++ b/changelog/22519.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix display for "Last Vault Rotation" timestamp for static database roles which was not rendering or copyable +``` \ No newline at end of file diff --git a/changelog/22521.txt b/changelog/22521.txt new file mode 100644 index 000000000000..9310b64c1406 --- /dev/null +++ b/changelog/22521.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: do not check TLS validity on ACME requests redirected to https +``` diff --git a/changelog/22523.txt b/changelog/22523.txt new file mode 100644 index 000000000000..e53ab652b2e5 --- /dev/null +++ b/changelog/22523.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
+``` diff --git a/changelog/22533.txt b/changelog/22533.txt new file mode 100644 index 000000000000..8c9fb6dbc321 --- /dev/null +++ b/changelog/22533.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Update plugin to v0.15.1 +``` diff --git a/changelog/22540.txt b/changelog/22540.txt new file mode 100644 index 000000000000..191342bd2913 --- /dev/null +++ b/changelog/22540.txt @@ -0,0 +1,3 @@ +```release-note:improvement +events: Allow subscriptions to multiple namespaces +``` diff --git a/changelog/22541.txt b/changelog/22541.txt new file mode 100644 index 000000000000..918af3eac1ee --- /dev/null +++ b/changelog/22541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix blank page or ghost secret when canceling KV secret create +``` diff --git a/changelog/22551.txt b/changelog/22551.txt new file mode 100644 index 000000000000..fa3c9483ae50 --- /dev/null +++ b/changelog/22551.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls +``` \ No newline at end of file diff --git a/changelog/22559.txt b/changelog/22559.txt new file mode 100644 index 000000000000..162e6afe0d7e --- /dev/null +++ b/changelog/22559.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) +``` \ No newline at end of file diff --git a/changelog/22567.txt b/changelog/22567.txt new file mode 100644 index 000000000000..d9e5570139bc --- /dev/null +++ b/changelog/22567.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. +``` \ No newline at end of file diff --git a/changelog/22583.txt b/changelog/22583.txt new file mode 100644 index 000000000000..0bc29d60fea8 --- /dev/null +++ b/changelog/22583.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Reduce overhead for role calculation when using cloud auth methods. +``` \ No newline at end of file diff --git a/changelog/22584.txt b/changelog/22584.txt new file mode 100644 index 000000000000..4820498fa69a --- /dev/null +++ b/changelog/22584.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.2.2 +``` diff --git a/changelog/22593.txt b/changelog/22593.txt new file mode 100644 index 000000000000..8f5ee5f76de5 --- /dev/null +++ b/changelog/22593.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: JSON diff view available in "Create New Version" form for KV v2 +``` diff --git a/changelog/22597.txt b/changelog/22597.txt new file mode 100644 index 000000000000..0c37e561be28 --- /dev/null +++ b/changelog/22597.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. 
+``` diff --git a/changelog/22598.txt b/changelog/22598.txt new file mode 100644 index 000000000000..1c36e9960a2f --- /dev/null +++ b/changelog/22598.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.2.3 +``` diff --git a/changelog/22612.txt b/changelog/22612.txt new file mode 100644 index 000000000000..d7852d1e9c91 --- /dev/null +++ b/changelog/22612.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Update plugin to v0.16.1 +``` diff --git a/changelog/22646.txt b/changelog/22646.txt new file mode 100644 index 000000000000..08673e713b04 --- /dev/null +++ b/changelog/22646.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.16.0 +``` diff --git a/changelog/22651.txt b/changelog/22651.txt new file mode 100644 index 000000000000..5ca281983767 --- /dev/null +++ b/changelog/22651.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/quotas: Add configuration to allow skipping of expensive role calculations +``` \ No newline at end of file diff --git a/changelog/22654.txt b/changelog/22654.txt new file mode 100644 index 000000000000..97c81d7ef8d9 --- /dev/null +++ b/changelog/22654.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis: Update plugin to v0.2.2 +``` diff --git a/changelog/22655.txt b/changelog/22655.txt new file mode 100644 index 000000000000..e9cc88a974b7 --- /dev/null +++ b/changelog/22655.txt @@ -0,0 +1,3 @@ +```release-note:change +database/mongodbatlas: Update plugin to v0.10.1 +``` diff --git a/changelog/22657.txt b/changelog/22657.txt new file mode 100644 index 000000000000..89a8ab440936 --- /dev/null +++ b/changelog/22657.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. +``` diff --git a/changelog/22659.txt b/changelog/22659.txt new file mode 100644 index 000000000000..501fb4ecc866 --- /dev/null +++ b/changelog/22659.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: improved login speed by adding concurrency to LDAP token group searches +``` diff --git a/changelog/22678.txt b/changelog/22678.txt new file mode 100644 index 000000000000..b711e406921c --- /dev/null +++ b/changelog/22678.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.17.0 +``` diff --git a/changelog/22694.txt b/changelog/22694.txt new file mode 100644 index 000000000000..26f61b866066 --- /dev/null +++ b/changelog/22694.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Prevent panic due to nil pointer receiver for audit header formatting. +``` diff --git a/changelog/22696.txt b/changelog/22696.txt new file mode 100644 index 000000000000..3bdeacc8a140 --- /dev/null +++ b/changelog/22696.txt @@ -0,0 +1,3 @@ +```release-note:change +database/elasticsearch: Update plugin to v0.13.3 +``` diff --git a/changelog/22709.txt b/changelog/22709.txt new file mode 100644 index 000000000000..68684f80d17b --- /dev/null +++ b/changelog/22709.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.17.0 +``` diff --git a/changelog/22712.txt b/changelog/22712.txt new file mode 100644 index 000000000000..ece09c9f7153 --- /dev/null +++ b/changelog/22712.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux. 
+``` \ No newline at end of file diff --git a/changelog/22716.txt b/changelog/22716.txt new file mode 100644 index 000000000000..1f6664759377 --- /dev/null +++ b/changelog/22716.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.16.1 +``` diff --git a/changelog/22734.txt b/changelog/22734.txt new file mode 100644 index 000000000000..82067fefbf91 --- /dev/null +++ b/changelog/22734.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.11.2 +``` diff --git a/changelog/22746.txt b/changelog/22746.txt new file mode 100644 index 000000000000..09879609074a --- /dev/null +++ b/changelog/22746.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Update plugin to v0.17.0 +``` diff --git a/changelog/22748.txt b/changelog/22748.txt new file mode 100644 index 000000000000..d466eaa19896 --- /dev/null +++ b/changelog/22748.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.10.1 +``` diff --git a/changelog/22753.txt b/changelog/22753.txt new file mode 100644 index 000000000000..a297337f92b7 --- /dev/null +++ b/changelog/22753.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: fix panic when providing non-PEM formatted public key for import +``` diff --git a/changelog/22757.txt b/changelog/22757.txt new file mode 100644 index 000000000000..5917de17aefb --- /dev/null +++ b/changelog/22757.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.15.1 +``` diff --git a/changelog/22758.txt b/changelog/22758.txt new file mode 100644 index 000000000000..2ce3a1516849 --- /dev/null +++ b/changelog/22758.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.15.1 +``` diff --git a/changelog/22774.txt b/changelog/22774.txt new file mode 100644 index 000000000000..7ef69177a03d --- /dev/null +++ b/changelog/22774.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.14.1 +``` diff --git a/changelog/22790.txt b/changelog/22790.txt new file mode 100644 index 000000000000..1ac145f6dffd --- /dev/null +++ b/changelog/22790.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.16.2 +``` diff --git a/changelog/22795.txt b/changelog/22795.txt new file mode 100644 index 000000000000..372e2d696920 --- /dev/null +++ b/changelog/22795.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.1 +``` diff --git a/changelog/22797.txt b/changelog/22797.txt new file mode 100644 index 000000000000..373a572c87d1 --- /dev/null +++ b/changelog/22797.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kerberos: Update plugin to v0.10.1 +``` diff --git a/changelog/22799.txt b/changelog/22799.txt new file mode 100644 index 000000000000..2242fbf33735 --- /dev/null +++ b/changelog/22799.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.16.2 +``` diff --git a/changelog/22805.txt b/changelog/22805.txt new file mode 100644 index 000000000000..62ef17aa75c4 --- /dev/null +++ b/changelog/22805.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.14.2 +``` diff --git a/changelog/22812.txt b/changelog/22812.txt new file mode 100644 index 000000000000..a0161af8068f --- /dev/null +++ b/changelog/22812.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: allow users to specify files for child process stdout/stderr +``` diff --git a/changelog/22815.txt b/changelog/22815.txt new file mode 100644 index 000000000000..478fa4c8cb0b --- /dev/null +++ b/changelog/22815.txt @@ -0,0 +1,3 @@ 
+```release-note:improvement +events: Enabled by default +``` diff --git a/changelog/22818.txt b/changelog/22818.txt new file mode 100644 index 000000000000..1ef9b6440b99 --- /dev/null +++ b/changelog/22818.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. +``` diff --git a/changelog/22823.txt b/changelog/22823.txt new file mode 100644 index 000000000000..fa98bf501598 --- /dev/null +++ b/changelog/22823.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.6.0 +``` diff --git a/changelog/22824.txt b/changelog/22824.txt new file mode 100644 index 000000000000..5ab3deb632ef --- /dev/null +++ b/changelog/22824.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.16.3 +``` diff --git a/changelog/22832.txt b/changelog/22832.txt new file mode 100644 index 000000000000..7153e7694bf2 --- /dev/null +++ b/changelog/22832.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes form field label tooltip alignment +``` \ No newline at end of file diff --git a/changelog/22835.txt b/changelog/22835.txt new file mode 100644 index 000000000000..c8e3d46cea36 --- /dev/null +++ b/changelog/22835.txt @@ -0,0 +1,3 @@ +```release-note:improvement +events: WebSocket subscriptions add support for boolean filter expressions +``` diff --git a/changelog/22852.txt b/changelog/22852.txt new file mode 100644 index 000000000000..3a667eb23bb0 --- /dev/null +++ b/changelog/22852.txt @@ -0,0 +1,3 @@ +```release-note:security +secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. +``` diff --git a/changelog/22854.txt b/changelog/22854.txt new file mode 100644 index 000000000000..71db25095da7 --- /dev/null +++ b/changelog/22854.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.9.3 +``` diff --git a/changelog/22855.txt b/changelog/22855.txt new file mode 100644 index 000000000000..a911e2112387 --- /dev/null +++ b/changelog/22855.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: don't exclude features present on license +``` \ No newline at end of file diff --git a/changelog/22856.txt b/changelog/22856.txt new file mode 100644 index 000000000000..a4596e3a18b2 --- /dev/null +++ b/changelog/22856.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.16.1 +``` diff --git a/changelog/22871.txt b/changelog/22871.txt new file mode 100644 index 000000000000..0b7048f49ca5 --- /dev/null +++ b/changelog/22871.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.9.4 +``` diff --git a/changelog/22879.txt b/changelog/22879.txt new file mode 100644 index 000000000000..335b099ce46a --- /dev/null +++ b/changelog/22879.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.17.1 +``` diff --git a/changelog/22907.txt b/changelog/22907.txt new file mode 100644 index 000000000000..dfaa4e1b0431 --- /dev/null +++ b/changelog/22907.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.7.3 +``` diff --git a/changelog/22914.txt b/changelog/22914.txt new file mode 100644 index 000000000000..2764d4856938 --- /dev/null +++ b/changelog/22914.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. +``` +```release-note:bug +plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. 
+``` diff --git a/changelog/22926.txt b/changelog/22926.txt new file mode 100644 index 000000000000..69da688a10d5 --- /dev/null +++ b/changelog/22926.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds mount configuration details to Kubernetes secrets engine configuration view +``` \ No newline at end of file diff --git a/changelog/22994.txt b/changelog/22994.txt new file mode 100644 index 000000000000..f84bc5d74d6d --- /dev/null +++ b/changelog/22994.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/azure: Add support for Azure workload identity authentication (see issue +#18257). Update go-kms-wrapping dependency to include [PR +#155](https://github.com/hashicorp/go-kms-wrapping/pull/155) +``` \ No newline at end of file diff --git a/changelog/22996.txt b/changelog/22996.txt new file mode 100644 index 000000000000..7b67605864d7 --- /dev/null +++ b/changelog/22996.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. +``` diff --git a/changelog/22997.txt b/changelog/22997.txt new file mode 100644 index 000000000000..41a162eb623e --- /dev/null +++ b/changelog/22997.txt @@ -0,0 +1,4 @@ +```release-note:change +events: Log level for processing an event dropped from info to debug. +``` + diff --git a/changelog/23007.txt b/changelog/23007.txt new file mode 100644 index 000000000000..02fee8c150b5 --- /dev/null +++ b/changelog/23007.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. +``` diff --git a/changelog/23010.txt b/changelog/23010.txt new file mode 100644 index 000000000000..f6a72ecf9eeb --- /dev/null +++ b/changelog/23010.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies +``` \ No newline at end of file diff --git a/changelog/23013.txt b/changelog/23013.txt new file mode 100644 index 000000000000..78987e659ea1 --- /dev/null +++ b/changelog/23013.txt @@ -0,0 +1,7 @@ +```release-note:bug +storage/consul: fix a bug where an active node in a specific sort of network +partition could continue to write data to Consul after a new leader is elected, +potentially causing data loss or corruption for keys with many concurrent +writers. For Enterprise clusters, this could cause corruption of the merkle trees +leading to failure to complete merkle sync without a full re-index. +``` diff --git a/changelog/23022.txt b/changelog/23022.txt new file mode 100644 index 000000000000..9d58a95d3ed0 --- /dev/null +++ b/changelog/23022.txt @@ -0,0 +1,5 @@ +```release-note:improvement +core: update sys/seal-status (and CLI vault status) to report the type of +the seal when unsealed, as well as the type of the recovery seal if an +auto-seal. +``` \ No newline at end of file diff --git a/changelog/23025.txt b/changelog/23025.txt new file mode 100644 index 000000000000..5392c75f730c --- /dev/null +++ b/changelog/23025.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui (enterprise): Fix error message when generating SSH credential with control group +``` \ No newline at end of file diff --git a/changelog/23042.txt b/changelog/23042.txt new file mode 100644 index 000000000000..da73a307539e --- /dev/null +++ b/changelog/23042.txt @@ -0,0 +1,4 @@ +```release-note:bug +events: Ensure subscription resources are cleaned up on close. 
+``` + diff --git a/changelog/23047.txt b/changelog/23047.txt new file mode 100644 index 000000000000..7c8c4471c69f --- /dev/null +++ b/changelog/23047.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: added a new API field to Vault responses, `mount_type`, returning mount information (e.g. `kv` for KVV1/KVV2) for the mount when appropriate. +``` \ No newline at end of file diff --git a/changelog/23050.txt b/changelog/23050.txt new file mode 100644 index 000000000000..391d6c6c4057 --- /dev/null +++ b/changelog/23050.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 +``` diff --git a/changelog/23059.txt b/changelog/23059.txt new file mode 100644 index 000000000000..96a2dc461208 --- /dev/null +++ b/changelog/23059.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/azure: Added Azure API configurable retry options +``` diff --git a/changelog/23060.txt b/changelog/23060.txt new file mode 100644 index 000000000000..0df1086057e6 --- /dev/null +++ b/changelog/23060.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.2 +``` diff --git a/changelog/23066.txt b/changelog/23066.txt new file mode 100644 index 000000000000..f4636b97dbb0 --- /dev/null +++ b/changelog/23066.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix the issue where the confirm delete dropdown is cut off +``` diff --git a/changelog/23103.txt b/changelog/23103.txt new file mode 100644 index 000000000000..a66533281dfc --- /dev/null +++ b/changelog/23103.txt @@ -0,0 +1,3 @@ +```release-note:bug +cap/ldap: Downgrade go-ldap client from v3.4.5 to v3.4.4 due to a race condition +``` diff --git a/changelog/23118.txt b/changelog/23118.txt new file mode 100644 index 000000000000..f93138652919 --- /dev/null +++ b/changelog/23118.txt @@ -0,0 +1,3 @@ +```release-note:bug +ldaputil: Disable tests for ARM64 +``` diff --git a/changelog/23119.txt b/changelog/23119.txt new file mode 100644 index 000000000000..fd5f694db3a0 --- /dev/null +++ b/changelog/23119.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added allowed_domains_template field for CA type role in SSH engine +``` diff --git a/changelog/23123.txt b/changelog/23123.txt new file mode 100644 index 000000000000..4bfc0c0e7935 --- /dev/null +++ b/changelog/23123.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes filter and search bug in secrets engines +``` diff --git a/changelog/23143.txt b/changelog/23143.txt new file mode 100644 index 000000000000..5db4d66d159e --- /dev/null +++ b/changelog/23143.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Surface warning banner if UI has stopped auto-refreshing token +``` diff --git a/changelog/23155.txt b/changelog/23155.txt new file mode 100644 index 000000000000..0c6914a7820f --- /dev/null +++ b/changelog/23155.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixes password policy listing to include policies with names containing / characters.
+``` \ No newline at end of file diff --git a/changelog/23160.txt b/changelog/23160.txt new file mode 100644 index 000000000000..66e97bfaf653 --- /dev/null +++ b/changelog/23160.txt @@ -0,0 +1,3 @@ +```release-note:improvement +replication: Add re-index status metric to telemetry +``` diff --git a/changelog/23166.txt b/changelog/23166.txt new file mode 100644 index 000000000000..c3377679242d --- /dev/null +++ b/changelog/23166.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update pki mount configuration details to match the new mount configuration details pattern +``` diff --git a/changelog/23169.txt b/changelog/23169.txt new file mode 100644 index 000000000000..4f7d266dfb6f --- /dev/null +++ b/changelog/23169.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System pagination component +``` \ No newline at end of file diff --git a/changelog/23171.txt b/changelog/23171.txt new file mode 100644 index 000000000000..75bd32d4cd24 --- /dev/null +++ b/changelog/23171.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist +``` +```release-note:bug +plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET +``` diff --git a/changelog/23193.txt b/changelog/23193.txt new file mode 100644 index 000000000000..b895907ec6e4 --- /dev/null +++ b/changelog/23193.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add pagination to PKI roles, keys, issuers, and certificates list pages +``` diff --git a/changelog/23200.txt b/changelog/23200.txt new file mode 100644 index 000000000000..245cc694afbb --- /dev/null +++ b/changelog/23200.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Move access to KV V2 version diff view to toolbar in Version History +``` \ No newline at end of file diff --git a/changelog/23215.txt b/changelog/23215.txt new file mode 100644 index 000000000000..8c0ee8ccaf92 --- /dev/null +++ b/changelog/23215.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Containerized plugins can be run with mlock enabled. +``` +```release-note:improvement +plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. 
+``` \ No newline at end of file diff --git a/changelog/23225.txt b/changelog/23225.txt new file mode 100644 index 000000000000..31d5b6490a01 --- /dev/null +++ b/changelog/23225.txt @@ -0,0 +1,3 @@ +```release-note:bug +docs: fix wrong api path for ldap secrets cli-commands +``` diff --git a/changelog/23232.txt b/changelog/23232.txt new file mode 100644 index 000000000000..8084391cc799 --- /dev/null +++ b/changelog/23232.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds tidy_revoked_certs to PKI tidy status page +``` \ No newline at end of file diff --git a/changelog/23240.txt b/changelog/23240.txt new file mode 100644 index 000000000000..da202c7a9111 --- /dev/null +++ b/changelog/23240.txt @@ -0,0 +1,3 @@ +```release-note:bug +mongo-db: allow non-admin database for root credential rotation +``` diff --git a/changelog/23256.txt b/changelog/23256.txt new file mode 100644 index 000000000000..ef21e0bbd36a --- /dev/null +++ b/changelog/23256.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/db: Remove the `service_account_json` parameter when reading DB connection details +``` \ No newline at end of file diff --git a/changelog/23260.txt b/changelog/23260.txt new file mode 100644 index 000000000000..52de9b805275 --- /dev/null +++ b/changelog/23260.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds warning before downloading KV v2 secret values +``` \ No newline at end of file diff --git a/changelog/23272.txt b/changelog/23272.txt new file mode 100644 index 000000000000..39eddda18674 --- /dev/null +++ b/changelog/23272.txt @@ -0,0 +1,3 @@ +```release-note:improvement +.release/linux: add LimitCORE=0 to vault.service +``` \ No newline at end of file diff --git a/changelog/23277.txt b/changelog/23277.txt new file mode 100644 index 000000000000..329b3ebc6fb1 --- /dev/null +++ b/changelog/23277.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add warning message to the namespace picker warning users about the behavior when logging in with a root token. +``` \ No newline at end of file diff --git a/changelog/23278.txt b/changelog/23278.txt new file mode 100644 index 000000000000..cd02679e7687 --- /dev/null +++ b/changelog/23278.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Stop processing in-flight ACME verifications when an active node steps down +``` diff --git a/changelog/23282.txt b/changelog/23282.txt new file mode 100644 index 000000000000..1026ccf41913 --- /dev/null +++ b/changelog/23282.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. +``` \ No newline at end of file diff --git a/changelog/23287.txt b/changelog/23287.txt new file mode 100644 index 000000000000..6d3229fb1b07 --- /dev/null +++ b/changelog/23287.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: fix inaccuracies with unauthenticated_in_flight_requests_access parameter +``` \ No newline at end of file diff --git a/changelog/23297.txt b/changelog/23297.txt new file mode 100644 index 000000000000..64bf55fb2075 --- /dev/null +++ b/changelog/23297.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes policy input toolbar scrolling by default +``` \ No newline at end of file diff --git a/changelog/23331.txt b/changelog/23331.txt new file mode 100644 index 000000000000..f2734a5cbb32 --- /dev/null +++ b/changelog/23331.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. 
+``` diff --git a/changelog/23382.txt b/changelog/23382.txt new file mode 100644 index 000000000000..50a7d4773bc3 --- /dev/null +++ b/changelog/23382.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Makes modals accessible by implementing Helios Design System modal component +``` diff --git a/changelog/23446.txt b/changelog/23446.txt new file mode 100644 index 000000000000..e290eb2951f7 --- /dev/null +++ b/changelog/23446.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where a change on OpenAPI added a double forward slash on some LIST endpoints. +``` diff --git a/changelog/23457.txt b/changelog/23457.txt new file mode 100644 index 000000000000..a41ec10b5029 --- /dev/null +++ b/changelog/23457.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Add Snapshot Inspector Tool**: Add CLI tool to inspect Vault snapshots +``` \ No newline at end of file diff --git a/changelog/23470.txt b/changelog/23470.txt new file mode 100644 index 000000000000..744fa76c7d45 --- /dev/null +++ b/changelog/23470.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix AWS secret engine to allow empty policy_document field. +``` \ No newline at end of file diff --git a/changelog/23500.txt b/changelog/23500.txt new file mode 100644 index 000000000000..52f95c9c4c47 --- /dev/null +++ b/changelog/23500.txt @@ -0,0 +1,3 @@ +```release-note:bug +events: Ignore sending context to give more time for events to send +``` diff --git a/changelog/23503.txt b/changelog/23503.txt new file mode 100644 index 000000000000..962693d1afcf --- /dev/null +++ b/changelog/23503.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: show banner when resultant-acl check fails due to permissions or wrong namespace. +``` \ No newline at end of file diff --git a/changelog/23516.txt b/changelog/23516.txt new file mode 100644 index 000000000000..f87ab2092710 --- /dev/null +++ b/changelog/23516.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized +``` \ No newline at end of file diff --git a/changelog/23528.txt b/changelog/23528.txt new file mode 100644 index 000000000000..ad9ec4f4b7bd --- /dev/null +++ b/changelog/23528.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: update credential rotation deadline when static role rotation period is updated +``` diff --git a/changelog/23534.txt b/changelog/23534.txt new file mode 100644 index 000000000000..5f101cb18a75 --- /dev/null +++ b/changelog/23534.txt @@ -0,0 +1,3 @@ +```release-note:feature +config/listener: allow per-listener configuration settings to redact sensitive parts of response to unauthenticated endpoints. +``` \ No newline at end of file diff --git a/changelog/23547.txt b/changelog/23547.txt new file mode 100644 index 000000000000..f5ddb19938b6 --- /dev/null +++ b/changelog/23547.txt @@ -0,0 +1,3 @@ +```release-note:feature +config/listener: allow per-listener configuration setting to disable replication status endpoints. 
+``` \ No newline at end of file diff --git a/changelog/23549.txt b/changelog/23549.txt new file mode 100644 index 000000000000..078cc232d0ed --- /dev/null +++ b/changelog/23549.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api/plugins: add `tls-server-name` arg for plugin registration +``` diff --git a/changelog/23555.txt b/changelog/23555.txt new file mode 100644 index 000000000000..32405057f54e --- /dev/null +++ b/changelog/23555.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. +``` \ No newline at end of file diff --git a/changelog/23565.txt b/changelog/23565.txt new file mode 100644 index 000000000000..5447d34c7db6 --- /dev/null +++ b/changelog/23565.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix regression that broke the oktaNumberChallenge on the UI. +``` diff --git a/changelog/23571.txt b/changelog/23571.txt new file mode 100644 index 000000000000..62185d25b4d5 --- /dev/null +++ b/changelog/23571.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Reload seal configuration on SIGHUP**: Seal configuration is reloaded on SIGHUP so that it can +be changed without shutting down Vault +``` diff --git a/changelog/23573.txt b/changelog/23573.txt new file mode 100644 index 000000000000..6bb0562b971f --- /dev/null +++ b/changelog/23573.txt @@ -0,0 +1,5 @@ +```release-note:bug +Seal HA (enterprise/beta): Fix rejection of a seal configuration change +from two to one auto seal due to persistence of the previous seal type being +"multiseal". +``` diff --git a/changelog/23580.txt b/changelog/23580.txt new file mode 100644 index 000000000000..f0f9129ba0b4 --- /dev/null +++ b/changelog/23580.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Upgrade HDS version to fix sidebar navigation issues when it collapses in smaller viewports. +``` diff --git a/changelog/23585.txt b/changelog/23585.txt new file mode 100644 index 000000000000..42c9a498c67b --- /dev/null +++ b/changelog/23585.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Assumes version 1 for kv engines when options are null because no version is specified +``` \ No newline at end of file diff --git a/changelog/23598.txt b/changelog/23598.txt new file mode 100644 index 000000000000..9f260d83efdb --- /dev/null +++ b/changelog/23598.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Fix bug reopening 'file' audit devices on SIGHUP. +``` diff --git a/changelog/23620.txt b/changelog/23620.txt new file mode 100644 index 000000000000..60667c28062d --- /dev/null +++ b/changelog/23620.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. +``` \ No newline at end of file diff --git a/changelog/23621.txt b/changelog/23621.txt new file mode 100644 index 000000000000..2af5f337e635 --- /dev/null +++ b/changelog/23621.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault Proxy Static Secret Caching (enterprise)**: Adds support for static secret (KVv1 and KVv2) caching to Vault Proxy.
+``` diff --git a/changelog/23636.txt b/changelog/23636.txt new file mode 100644 index 000000000000..26255607251b --- /dev/null +++ b/changelog/23636.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix bug with sigusr2 where pprof files were not closed correctly +``` diff --git a/changelog/23667.txt b/changelog/23667.txt new file mode 100644 index 000000000000..63cd2cf2c305 --- /dev/null +++ b/changelog/23667.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Secrets Sync UI (enterprise)**: Adds secret syncing for KV v2 secrets to external destinations using the UI. +``` \ No newline at end of file diff --git a/changelog/23673.txt b/changelog/23673.txt new file mode 100644 index 000000000000..33bd6de01903 --- /dev/null +++ b/changelog/23673.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: fix requeueing of rotation entry in cases where rotation fails +``` diff --git a/changelog/23690.txt b/changelog/23690.txt new file mode 100644 index 000000000000..8e0708f28827 --- /dev/null +++ b/changelog/23690.txt @@ -0,0 +1,3 @@ +```release-note:feature +**secrets/aws**: Support issuing an STS Session Token directly from the root credential. +``` \ No newline at end of file diff --git a/changelog/23695.txt b/changelog/23695.txt new file mode 100644 index 000000000000..104670645fb8 --- /dev/null +++ b/changelog/23695.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Decode the connection url for display on the connection details page +``` diff --git a/changelog/23700.txt b/changelog/23700.txt new file mode 100644 index 000000000000..ca7e7c839aa9 --- /dev/null +++ b/changelog/23700.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. +``` diff --git a/changelog/23702.txt b/changelog/23702.txt new file mode 100644 index 000000000000..3fee98a1e341 --- /dev/null +++ b/changelog/23702.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds a warning when whitespace is detected in a key of a KV secret +``` \ No newline at end of file diff --git a/changelog/23703.txt b/changelog/23703.txt new file mode 100644 index 000000000000..57d1fb3c4fa2 --- /dev/null +++ b/changelog/23703.txt @@ -0,0 +1,6 @@ +```release-note:change +Upgrade grpc to v1.58.3 +``` +```release-note:change +Upgrade x/net to v0.17.0 +``` diff --git a/changelog/23723.txt b/changelog/23723.txt new file mode 100644 index 000000000000..25828f99655d --- /dev/null +++ b/changelog/23723.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Do not allow auto rotation on managed_key key types +``` diff --git a/changelog/23726.txt b/changelog/23726.txt new file mode 100644 index 000000000000..f4e21989fcd6 --- /dev/null +++ b/changelog/23726.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issues displaying accurate TLS state in dashboard configuration details +``` \ No newline at end of file diff --git a/changelog/23747.txt b/changelog/23747.txt new file mode 100644 index 000000000000..bf611ed142fc --- /dev/null +++ b/changelog/23747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file +``` \ No newline at end of file diff --git a/changelog/23771.txt b/changelog/23771.txt new file mode 100644 index 000000000000..9b8bd8ad4457 --- /dev/null +++ b/changelog/23771.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/plugin: Fix an issue where external plugins were not reporting logs below INFO level +``` diff --git a/changelog/23781.txt 
b/changelog/23781.txt new file mode 100644 index 000000000000..32d3b51e95e3 --- /dev/null +++ b/changelog/23781.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: Fixes segments fragment loss due to exceeding entry record size limit +``` \ No newline at end of file diff --git a/changelog/23786.txt b/changelog/23786.txt new file mode 100644 index 000000000000..b6e73142eb6c --- /dev/null +++ b/changelog/23786.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/approle: Normalized error response messages when invalid credentials are provided +``` diff --git a/changelog/23797.txt b/changelog/23797.txt new file mode 100644 index 000000000000..32369b0fbd5e --- /dev/null +++ b/changelog/23797.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allow users in userpass auth mount to update their own password +``` \ No newline at end of file diff --git a/changelog/23802.txt b/changelog/23802.txt new file mode 100644 index 000000000000..49caebc4fce0 --- /dev/null +++ b/changelog/23802.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" returning a 500 error code from the secondary when filter paths are enforced +``` diff --git a/changelog/23837.txt b/changelog/23837.txt new file mode 100644 index 000000000000..b3e17a00c928 --- /dev/null +++ b/changelog/23837.txt @@ -0,0 +1,3 @@ +```release-note:change +telemetry: Seal wrap encrypt/decrypt metrics now differentiate between seals using a metrics label of seal name rather than separate metric names. +``` \ No newline at end of file diff --git a/changelog/23849.txt b/changelog/23849.txt new file mode 100644 index 000000000000..e5d89a3030c4 --- /dev/null +++ b/changelog/23849.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.10.2 +``` diff --git a/changelog/23861.txt b/changelog/23861.txt new file mode 100644 index 000000000000..8c4ac70380eb --- /dev/null +++ b/changelog/23861.txt @@ -0,0 +1,4 @@ +```release-note:bug +api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. +``` diff --git a/changelog/23872.txt b/changelog/23872.txt new file mode 100644 index 000000000000..b486fd258a80 --- /dev/null +++ b/changelog/23872.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/etcd: etcd should only return keys when calling List() +``` diff --git a/changelog/23874.txt b/changelog/23874.txt new file mode 100644 index 000000000000..34ac61d56795 --- /dev/null +++ b/changelog/23874.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash +``` \ No newline at end of file diff --git a/changelog/23894.txt b/changelog/23894.txt new file mode 100644 index 000000000000..a94e1428eadd --- /dev/null +++ b/changelog/23894.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Skip unnecessary deriving of policies during Login MFA Check. +``` \ No newline at end of file diff --git a/changelog/23897.txt b/changelog/23897.txt new file mode 100644 index 000000000000..28f2f75c5b03 --- /dev/null +++ b/changelog/23897.txt @@ -0,0 +1,4 @@ +```release-note:feature +cli: introduce a new command group, hcp, which groups subcommands for authenticating users or machines to HCP using +either provided arguments or an HCP token retrieved through browser login.
+``` \ No newline at end of file diff --git a/changelog/23902.txt b/changelog/23902.txt new file mode 100644 index 000000000000..cbfec65096aa --- /dev/null +++ b/changelog/23902.txt @@ -0,0 +1,5 @@ +```release-note:bug +core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. +``` + diff --git a/changelog/23906.txt b/changelog/23906.txt new file mode 100644 index 000000000000..ed3671dbf421 --- /dev/null +++ b/changelog/23906.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix rare panic due to a race condition with metrics collection during seal +``` diff --git a/changelog/23908.txt b/changelog/23908.txt new file mode 100644 index 000000000000..6bd39bb1a0a8 --- /dev/null +++ b/changelog/23908.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Updates OIDC/JWT login error handling to surface all role-related errors +``` \ No newline at end of file diff --git a/changelog/23913.txt b/changelog/23913.txt new file mode 100644 index 000000000000..b01525edf205 --- /dev/null +++ b/changelog/23913.txt @@ -0,0 +1,5 @@ +```release-note:change +sdk: Upgrade packages that the sdk depends on. +This includes github.com/docker/docker to v24.0.7+incompatible, +google.golang.org/grpc to v1.57.2, and golang.org/x/net to v0.17.0. +``` diff --git a/changelog/23921.txt b/changelog/23921.txt new file mode 100644 index 000000000000..cd03142227d0 --- /dev/null +++ b/changelog/23921.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: show error from API when seal fails +``` diff --git a/changelog/23942.txt b/changelog/23942.txt new file mode 100644 index 000000000000..a4d43d48f091 --- /dev/null +++ b/changelog/23942.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix broken GUI when accessing from listener with chroot_namespace defined +``` diff --git a/changelog/23964.txt b/changelog/23964.txt new file mode 100644 index 000000000000..7dcdf884dc90 --- /dev/null +++ b/changelog/23964.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update sidebar Secrets engine to title case. +``` diff --git a/changelog/23994.txt b/changelog/23994.txt new file mode 100644 index 000000000000..6eff0ae1d72b --- /dev/null +++ b/changelog/23994.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Increase base font-size from 14px to 16px and update use of rem vs pixels for size variables +``` diff --git a/changelog/24010.txt b/changelog/24010.txt new file mode 100644 index 000000000000..aa72bc977912 --- /dev/null +++ b/changelog/24010.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. +``` diff --git a/changelog/24027.txt b/changelog/24027.txt new file mode 100644 index 000000000000..d276928f93fb --- /dev/null +++ b/changelog/24027.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. +``` diff --git a/changelog/24054.txt b/changelog/24054.txt new file mode 100644 index 000000000000..2680d114ce45 --- /dev/null +++ b/changelog/24054.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Fix a panic when attempting to export a public RSA key +``` diff --git a/changelog/24056.txt b/changelog/24056.txt new file mode 100644 index 000000000000..baa7fa98bb8e --- /dev/null +++ b/changelog/24056.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/config: Use the correct HCL config value when configuring `log_requests_level`.
+``` \ No newline at end of file diff --git a/changelog/24099.txt b/changelog/24099.txt new file mode 100644 index 000000000000..bc33a184f988 --- /dev/null +++ b/changelog/24099.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Rotate Root for LDAP auth**: Rotate root operations are now supported for the LDAP auth engine. +``` diff --git a/changelog/24103.txt b/changelog/24103.txt new file mode 100644 index 000000000000..f86bfd996949 --- /dev/null +++ b/changelog/24103.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Sort list view of entities and aliases alphabetically using the item name +``` diff --git a/changelog/24108.txt b/changelog/24108.txt new file mode 100644 index 000000000000..0fcb8ac2e51a --- /dev/null +++ b/changelog/24108.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Close rate-limit blocked client purge goroutines when sealing +``` \ No newline at end of file diff --git a/changelog/24136.txt b/changelog/24136.txt new file mode 100644 index 000000000000..eaf2e2521681 --- /dev/null +++ b/changelog/24136.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk: Return error when failure occurs setting up node 0 in NewDockerCluster, instead of ignoring it. +``` \ No newline at end of file diff --git a/changelog/24147.txt b/changelog/24147.txt new file mode 100644 index 000000000000..960ae2250ca4 --- /dev/null +++ b/changelog/24147.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix error when tuning token auth configuration within namespace +``` \ No newline at end of file diff --git a/changelog/24152.txt b/changelog/24152.txt new file mode 100644 index 000000000000..80ef3015377b --- /dev/null +++ b/changelog/24152.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Add a note about eventual consistency with the MongoDB Atlas database secrets engine +``` diff --git a/changelog/24165.txt b/changelog/24165.txt new file mode 100644 index 000000000000..04c0b9223431 --- /dev/null +++ b/changelog/24165.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Fix an error that resulted in the wrong seal type being returned by sys/seal-status while +Vault is in seal migration mode.
+``` diff --git a/changelog/24168.txt b/changelog/24168.txt new file mode 100644 index 000000000000..09f34ce8621c --- /dev/null +++ b/changelog/24168.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: capabilities-self is always called in the user's root namespace +``` \ No newline at end of file diff --git a/changelog/24191.txt b/changelog/24191.txt new file mode 100644 index 000000000000..2fe98e926d05 --- /dev/null +++ b/changelog/24191.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System footer component +``` diff --git a/changelog/24192.txt b/changelog/24192.txt new file mode 100644 index 000000000000..97a26746bd0f --- /dev/null +++ b/changelog/24192.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 +``` diff --git a/changelog/24193.txt b/changelog/24193.txt new file mode 100644 index 000000000000..67ea1d0ae974 --- /dev/null +++ b/changelog/24193.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Handle errors related to expired OCSP server responses +``` diff --git a/changelog/24201.txt b/changelog/24201.txt new file mode 100644 index 000000000000..9253e44ab8c0 --- /dev/null +++ b/changelog/24201.txt @@ -0,0 +1,3 @@ +```release-note:change +events: Source URL is now `vault://{vault node}` +``` diff --git a/changelog/24224.txt b/changelog/24224.txt new file mode 100644 index 000000000000..040b42d94da8 --- /dev/null +++ b/changelog/24224.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix JSON editor in KV V2 unable to handle pasted values +``` diff --git a/changelog/24236.txt b/changelog/24236.txt new file mode 100644 index 000000000000..215c7c6d8f11 --- /dev/null +++ b/changelog/24236.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Containerized plugins can be run fully rootless with the runsc runtime. +``` diff --git a/changelog/24238.txt b/changelog/24238.txt new file mode 100644 index 000000000000..207a61d60952 --- /dev/null +++ b/changelog/24238.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. +``` \ No newline at end of file diff --git a/changelog/24246.txt b/changelog/24246.txt new file mode 100644 index 000000000000..424a006f2da3 --- /dev/null +++ b/changelog/24246.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix a race whereby a new leader may present inconsistent node data to Autopilot. +``` \ No newline at end of file diff --git a/changelog/24250.txt b/changelog/24250.txt new file mode 100644 index 000000000000..e6aca7096ac3 --- /dev/null +++ b/changelog/24250.txt @@ -0,0 +1,6 @@ +```release-note:change +cli: `vault plugin info` and `vault plugin deregister` now require 2 positional arguments instead of accepting either 1 or 2. +``` +```release-note:improvement +cli: Improved error messages for `vault plugin` sub-commands. +``` diff --git a/changelog/24252.txt b/changelog/24252.txt new file mode 100644 index 000000000000..343811bfd050 --- /dev/null +++ b/changelog/24252.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. 
+``` \ No newline at end of file diff --git a/changelog/24256.txt b/changelog/24256.txt new file mode 100644 index 000000000000..74124710b8a8 --- /dev/null +++ b/changelog/24256.txt @@ -0,0 +1,4 @@ +```release-note:bug +api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. +``` diff --git a/changelog/24270.txt b/changelog/24270.txt new file mode 100644 index 000000000000..eb8e4c04fb7c --- /dev/null +++ b/changelog/24270.txt @@ -0,0 +1,3 @@ +```release-note:change +api: add the `enterprise` parameter to the `/sys/health` endpoint +``` diff --git a/changelog/24280.txt b/changelog/24280.txt new file mode 100644 index 000000000000..dd3c42fe4c8d --- /dev/null +++ b/changelog/24280.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: display logs on startup immediately if disable-gated-logs flag is set +``` diff --git a/changelog/24281.txt b/changelog/24281.txt new file mode 100644 index 000000000000..7d24c296a1f2 --- /dev/null +++ b/changelog/24281.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Correctly handle directory redirects from pre-1.15.0 KV v2 list view URLs. +``` diff --git a/changelog/24283.txt b/changelog/24283.txt new file mode 100644 index 000000000000..f8f885f3e11e --- /dev/null +++ b/changelog/24283.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: add subnav for replication items +``` diff --git a/changelog/24290.txt b/changelog/24290.txt new file mode 100644 index 000000000000..3533146b7d23 --- /dev/null +++ b/changelog/24290.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: When a KV v2 secret is an object, fix the details view so it defaults to the read-only JSON editor. +``` \ No newline at end of file diff --git a/changelog/24292.txt b/changelog/24292.txt new file mode 100644 index 000000000000..784e2e38f4c7 --- /dev/null +++ b/changelog/24292.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix payload sent when disabling replication +``` diff --git a/changelog/24297.txt b/changelog/24297.txt new file mode 100644 index 000000000000..d1433cfcd1f9 --- /dev/null +++ b/changelog/24297.txt @@ -0,0 +1,3 @@ +```release-note:change +logging: Vault server, Agent and Proxy now honor the log file value and only add a timestamp on rotation. +``` \ No newline at end of file diff --git a/changelog/24299.txt b/changelog/24299.txt new file mode 100644 index 000000000000..1b295b985687 --- /dev/null +++ b/changelog/24299.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update AlertInline component to use Helios Design System Alert component +``` diff --git a/changelog/24305.txt b/changelog/24305.txt new file mode 100644 index 000000000000..5fcde924fae2 --- /dev/null +++ b/changelog/24305.txt @@ -0,0 +1,3 @@ +```release-note:bug +eventlogger: Update library to v0.2.7 to address race condition +``` diff --git a/changelog/24325.txt b/changelog/24325.txt new file mode 100644 index 000000000000..ab5ce613c404 --- /dev/null +++ b/changelog/24325.txt @@ -0,0 +1,4 @@ +```release-note:change +identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. +``` \ No newline at end of file diff --git a/changelog/24336.txt b/changelog/24336.txt new file mode 100644 index 000000000000..63594dc6cee0 --- /dev/null +++ b/changelog/24336.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts.
+``` diff --git a/changelog/24339.txt b/changelog/24339.txt new file mode 100644 index 000000000000..7c103de7d17c --- /dev/null +++ b/changelog/24339.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Correctly handle redirects from pre-1.15.0 KV v2 edit, create, and show URLs. +``` diff --git a/changelog/24343.txt b/changelog/24343.txt new file mode 100644 index 000000000000..b77b3afc1938 --- /dev/null +++ b/changelog/24343.txt @@ -0,0 +1,5 @@ +```release-note:improvement +api: sys/health and sys/ha-status now expose information about how long +the last heartbeat took, and the estimated clock skew between standby and +active node based on that heartbeat duration. +``` \ No newline at end of file diff --git a/changelog/24352.txt b/changelog/24352.txt new file mode 100644 index 000000000000..c6cf651daeb9 --- /dev/null +++ b/changelog/24352.txt @@ -0,0 +1,3 @@ +```release-note:improvement +events: Add support for event subscription plugins, including SQS +``` diff --git a/changelog/24373.txt b/changelog/24373.txt new file mode 100644 index 000000000000..ae77aee6cac0 --- /dev/null +++ b/changelog/24373.txt @@ -0,0 +1,3 @@ +```release-note:bug +http: Include PATCH in the list of allowed CORS methods +``` \ No newline at end of file diff --git a/changelog/24382.txt b/changelog/24382.txt new file mode 100644 index 000000000000..4c76944e18a9 --- /dev/null +++ b/changelog/24382.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Default Lease Count Quota (enterprise)**: Apply a new global default lease count quota of 300k leases for all +new installs of Vault. +``` diff --git a/changelog/24387.txt b/changelog/24387.txt new file mode 100644 index 000000000000..3e7fe85c2581 --- /dev/null +++ b/changelog/24387.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System Breadcrumbs +``` \ No newline at end of file diff --git a/changelog/24404.txt b/changelog/24404.txt new file mode 100644 index 000000000000..6fab70d0bf12 --- /dev/null +++ b/changelog/24404.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix issue where kv v2 capabilities checks were not passing in the full secret path if the secret was inside a directory. +``` diff --git a/changelog/24441.txt b/changelog/24441.txt new file mode 100644 index 000000000000..5a4d491c64ea --- /dev/null +++ b/changelog/24441.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/ha: fix panic that can occur when an HA cluster contains an active node with version >=1.12.0 and another node with version <1.10 +``` diff --git a/changelog/24472.txt b/changelog/24472.txt new file mode 100644 index 000000000000..538bb2b4b707 --- /dev/null +++ b/changelog/24472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Add new reload/:plugin_name API to reload database plugins by name for a specific mount.
+``` diff --git a/changelog/24476.txt b/changelog/24476.txt new file mode 100644 index 000000000000..797ed9a48d47 --- /dev/null +++ b/changelog/24476.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: improve accessibility - color contrast, labels, and automatic testing +``` diff --git a/changelog/24479.txt b/changelog/24479.txt new file mode 100644 index 000000000000..e053e74d6793 --- /dev/null +++ b/changelog/24479.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/token-capabilities: allow using accessor when listing token capabilities on a path +``` diff --git a/changelog/24492.txt b/changelog/24492.txt new file mode 100644 index 000000000000..d61c901a2c14 --- /dev/null +++ b/changelog/24492.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix navigation items shown to user when chroot_namespace configured +``` diff --git a/changelog/24512.txt b/changelog/24512.txt new file mode 100644 index 000000000000..efed04a22535 --- /dev/null +++ b/changelog/24512.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: Add a warning to the response from sys/plugins/reload/backend if no plugins were reloaded. +``` +```release-note:improvement +secrets/database: Support reloading named database plugins using the sys/plugins/reload/backend API endpoint. +``` diff --git a/changelog/24513.txt b/changelog/24513.txt new file mode 100644 index 000000000000..41b47f6be4e5 --- /dev/null +++ b/changelog/24513.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix KV v2 details view defaulting to JSON view when secret value includes `{` +``` diff --git a/changelog/24529.txt b/changelog/24529.txt new file mode 100644 index 000000000000..97d6904f1442 --- /dev/null +++ b/changelog/24529.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Update references to Key Value secrets engine from 'K/V' to 'KV' +``` diff --git a/changelog/24530.txt b/changelog/24530.txt new file mode 100644 index 000000000000..12525d48b87f --- /dev/null +++ b/changelog/24530.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: obscure JSON values when KV v2 secret has nested objects +``` diff --git a/changelog/24548.txt b/changelog/24548.txt new file mode 100644 index 000000000000..59882fb11b6e --- /dev/null +++ b/changelog/24548.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/template: Added max_connections_per_host to limit total number of connections per Vault host. +``` diff --git a/changelog/24549.txt b/changelog/24549.txt new file mode 100644 index 000000000000..6838b024c782 --- /dev/null +++ b/changelog/24549.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: sys/leader ActiveTime field no longer gets reset when we do an internal state change that doesn't change our active status. +``` diff --git a/changelog/24616.txt b/changelog/24616.txt new file mode 100644 index 000000000000..54f0f1edfcd8 --- /dev/null +++ b/changelog/24616.txt @@ -0,0 +1,3 @@ +```release-note:bug +fairshare: fix a race condition in JobManager.GetWorkerCounts +``` \ No newline at end of file diff --git a/changelog/24649.txt b/changelog/24649.txt new file mode 100644 index 000000000000..2e0161e20d1d --- /dev/null +++ b/changelog/24649.txt @@ -0,0 +1,3 @@ +```release-note:bug +cassandra: Update Cassandra to set consistency prior to calling CreateSession, ensuring consistency setting is correct when opening connection. 
+``` diff --git a/changelog/24660.txt b/changelog/24660.txt new file mode 100644 index 000000000000..415944299e1a --- /dev/null +++ b/changelog/24660.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: The UI can now be used to create or update database roles by an operator without permission on the database connection. +``` diff --git a/changelog/24667.txt b/changelog/24667.txt new file mode 100644 index 000000000000..b3e83d71f49b --- /dev/null +++ b/changelog/24667.txt @@ -0,0 +1,6 @@ +```release-note:improvement +agent: Added a new top-level namespace configuration parameter, which can be used to make requests made by Agent go to that namespace. +``` +```release-note:improvement +proxy: Added a new top-level namespace configuration parameter, and a prepend_configured_namespace API Proxy configuration parameter, which can be used to make requests made to Proxy get proxied to that namespace. +``` diff --git a/changelog/24686.txt b/changelog/24686.txt new file mode 100644 index 000000000000..30ef696f491e --- /dev/null +++ b/changelog/24686.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix incorrectly calculated capabilities on PKI issuer endpoints +``` diff --git a/changelog/24697.txt b/changelog/24697.txt new file mode 100644 index 000000000000..49492d19b290 --- /dev/null +++ b/changelog/24697.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes input for jwks_ca_pem when configuring a JWT auth method +``` \ No newline at end of file diff --git a/changelog/24710.txt b/changelog/24710.txt new file mode 100644 index 000000000000..4985cda86580 --- /dev/null +++ b/changelog/24710.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: Include secret_syncs in activity log responses +``` \ No newline at end of file diff --git a/changelog/24718.txt b/changelog/24718.txt new file mode 100644 index 000000000000..990de52941f9 --- /dev/null +++ b/changelog/24718.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Database Event Notifications**: The database plugin now emits event notifications. +``` diff --git a/changelog/24752.txt b/changelog/24752.txt new file mode 100644 index 000000000000..736684af282a --- /dev/null +++ b/changelog/24752.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Separates out client counts dashboard to overview and entity/non-entity tabs +``` \ No newline at end of file diff --git a/changelog/24790.txt b/changelog/24790.txt new file mode 100644 index 000000000000..14bc913c84a0 --- /dev/null +++ b/changelog/24790.txt @@ -0,0 +1,6 @@ +```release-note:bug +agent: Fixed incorrect parsing of boolean environment variables for configuration. +``` +```release-note:bug +proxy: Fixed incorrect parsing of boolean environment variables for configuration. +``` \ No newline at end of file diff --git a/changelog/24823.txt b/changelog/24823.txt new file mode 100644 index 000000000000..a15c8c7f1513 --- /dev/null +++ b/changelog/24823.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Update icons to use Flight icons where available. +``` \ No newline at end of file diff --git a/changelog/24864.txt b/changelog/24864.txt new file mode 100644 index 000000000000..c29db4f54c05 --- /dev/null +++ b/changelog/24864.txt @@ -0,0 +1,3 @@ +```release-note:change +plugins: `/sys/plugins/runtimes/catalog` response will always include a list of "runtimes", even if empty.
+``` diff --git a/changelog/24878.txt b/changelog/24878.txt new file mode 100644 index 000000000000..d7f03e4d0532 --- /dev/null +++ b/changelog/24878.txt @@ -0,0 +1,6 @@ +```release-note:improvement +plugins: New API `sys/plugins/reload/:type/:name` available in the root namespace for reloading a specific plugin across all namespaces. +``` +```release-note:change +cli: Using `vault plugin reload` with `-plugin` in the root namespace will now reload the plugin across all namespaces instead of just the root namespace. +``` diff --git a/changelog/24891.txt b/changelog/24891.txt new file mode 100644 index 000000000000..6f84e14290a5 --- /dev/null +++ b/changelog/24891.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/pkcs7: Fix slice out-of-bounds panic +``` diff --git a/changelog/24898.txt b/changelog/24898.txt new file mode 100644 index 000000000000..8180d72d8e25 --- /dev/null +++ b/changelog/24898.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/tokens: adds plugin issuer with openid-configuration and keys APIs +``` \ No newline at end of file diff --git a/changelog/24925.txt b/changelog/24925.txt new file mode 100644 index 000000000000..7bce8d0bdebc --- /dev/null +++ b/changelog/24925.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add identity token helpers to consistently apply new plugin WIF fields across integrations. +``` \ No newline at end of file diff --git a/changelog/24929.txt b/changelog/24929.txt new file mode 100644 index 000000000000..c6eac214f618 --- /dev/null +++ b/changelog/24929.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: adds new method to system view to allow plugins to request identity tokens +``` \ No newline at end of file diff --git a/changelog/24947.txt b/changelog/24947.txt new file mode 100644 index 000000000000..498158e2c530 --- /dev/null +++ b/changelog/24947.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed minor bugs with database secrets engine +``` \ No newline at end of file diff --git a/changelog/24954.txt b/changelog/24954.txt new file mode 100644 index 000000000000..8c023f1b57e5 --- /dev/null +++ b/changelog/24954.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: upgrade github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 to +support azure workload identities. +``` \ No newline at end of file diff --git a/changelog/24962.txt b/changelog/24962.txt new file mode 100644 index 000000000000..7a7cffb19d65 --- /dev/null +++ b/changelog/24962.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys: adds configuration of the key used to sign plugin identity tokens during mount enable and tune +``` \ No newline at end of file diff --git a/changelog/24968.txt b/changelog/24968.txt new file mode 100644 index 000000000000..47c71eaa82e5 --- /dev/null +++ b/changelog/24968.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Fix bug where use of 'log_raw' option could result in other devices logging raw audit data +``` diff --git a/changelog/24972.txt b/changelog/24972.txt new file mode 100644 index 000000000000..b2a7bda303cf --- /dev/null +++ b/changelog/24972.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.19.0 +``` diff --git a/changelog/24978.txt b/changelog/24978.txt new file mode 100644 index 000000000000..8bad557e316e --- /dev/null +++ b/changelog/24978.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Added new `plugin_tmpdir` config option for containerized plugins, in addition to the existing `VAULT_PLUGIN_TMPDIR` environment variable. 
+``` diff --git a/changelog/24979.txt b/changelog/24979.txt new file mode 100644 index 000000000000..bbbcedce077a --- /dev/null +++ b/changelog/24979.txt @@ -0,0 +1,3 @@ +```release-note:improvement +oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata +``` \ No newline at end of file diff --git a/changelog/24980.txt b/changelog/24980.txt new file mode 100644 index 000000000000..536bdb32c652 --- /dev/null +++ b/changelog/24980.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: adds plugin identity token to enable and tune commands for secret engines and auth methods +``` \ No newline at end of file diff --git a/changelog/24987.txt b/changelog/24987.txt new file mode 100644 index 000000000000..2eecf033f4d8 --- /dev/null +++ b/changelog/24987.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Plugin Identity Tokens**: Adds secret-less configuration of AWS secret engine using web identity federation. +``` diff --git a/changelog/24990.txt b/changelog/24990.txt new file mode 100644 index 000000000000..8079a4605c50 --- /dev/null +++ b/changelog/24990.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: fixes the plugin register CLI failing to error when the plugin image doesn't exist +``` \ No newline at end of file diff --git a/changelog/24991.txt b/changelog/24991.txt new file mode 100644 index 000000000000..28df55379bee --- /dev/null +++ b/changelog/24991.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Add support for larger transactions when using raft storage. +``` diff --git a/changelog/25001.txt b/changelog/25001.txt new file mode 100644 index 000000000000..de5f82d05562 --- /dev/null +++ b/changelog/25001.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Allows users to search within KV v2 directories from the Dashboard's quick action card. +``` \ No newline at end of file diff --git a/changelog/25004.txt b/changelog/25004.txt new file mode 100644 index 000000000000..9836a6f26aaf --- /dev/null +++ b/changelog/25004.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit/socket: Provide socket-based audit backends with 'prefix' configuration option when supplied. +``` \ No newline at end of file diff --git a/changelog/25014.txt b/changelog/25014.txt new file mode 100644 index 000000000000..780da7a02b0f --- /dev/null +++ b/changelog/25014.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.16.1 +``` diff --git a/changelog/25020.txt b/changelog/25020.txt new file mode 100644 index 000000000000..de1bab99fe01 --- /dev/null +++ b/changelog/25020.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.9.1 +``` diff --git a/changelog/25040.txt b/changelog/25040.txt new file mode 100644 index 000000000000..cca7f2b7f9c2 --- /dev/null +++ b/changelog/25040.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.11.3 +``` diff --git a/changelog/25058.txt b/changelog/25058.txt new file mode 100644 index 000000000000..bd2a704cf0ce --- /dev/null +++ b/changelog/25058.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.16.2 +``` diff --git a/changelog/25080.txt b/changelog/25080.txt new file mode 100644 index 000000000000..ad43618d8281 --- /dev/null +++ b/changelog/25080.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix auto_join not working with mDNS provider.
+``` diff --git a/changelog/25093.txt b/changelog/25093.txt new file mode 100644 index 000000000000..a0691e03e14c --- /dev/null +++ b/changelog/25093.txt @@ -0,0 +1,5 @@ +```release-note:feature +**Request Limiter (enterprise)**: Add adaptive concurrency limits to +write-based HTTP methods and special-case `pki/issue` requests to prevent +overloading the Vault server. +``` diff --git a/changelog/25095.txt b/changelog/25095.txt new file mode 100644 index 000000000000..69251984a99a --- /dev/null +++ b/changelog/25095.txt @@ -0,0 +1,3 @@ +```release-note:improvement +limits: Introduce a reloadable opt-in configuration for the Request Limiter. +``` diff --git a/changelog/25098.txt b/changelog/25098.txt new file mode 100644 index 000000000000..ab487d63148b --- /dev/null +++ b/changelog/25098.txt @@ -0,0 +1,4 @@ +```release-note:improvement +limits: Add a listener configuration option `disable_request_limiter` to allow +disabling the request limiter per-listener. +``` diff --git a/changelog/25105.txt b/changelog/25105.txt new file mode 100644 index 000000000000..4a9ae100c3e3 --- /dev/null +++ b/changelog/25105.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins/database: Reading connection config at `database/config/:name` will now return a computed `running_plugin_version` field if a non-builtin version is running. +``` +```release-note:improvement +plugins: Add new pin version APIs to enforce all plugins of a specific type and name to run the same version. +``` diff --git a/changelog/25106.txt b/changelog/25106.txt new file mode 100644 index 000000000000..d861b1a98109 --- /dev/null +++ b/changelog/25106.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Allows users to dismiss the resultant-acl banner. +``` \ No newline at end of file diff --git a/changelog/25128.txt b/changelog/25128.txt new file mode 100644 index 000000000000..c9003ffea88d --- /dev/null +++ b/changelog/25128.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: By default, environment variables provided during plugin registration will now take precedence over system environment variables. +Use the environment variable `VAULT_PLUGIN_USE_LEGACY_ENV_LAYERING=true` to opt out and keep higher preference for system environment +variables. When this flag is set, Vault will check during unseal for conflicts and print warnings for any plugins with environment +variables that conflict with system environment variables. +``` diff --git a/changelog/25143.txt b/changelog/25143.txt new file mode 100644 index 000000000000..17d9b713c1bd --- /dev/null +++ b/changelog/25143.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.10.0 +``` diff --git a/changelog/25152.txt b/changelog/25152.txt new file mode 100644 index 000000000000..7a89d5fc8289 --- /dev/null +++ b/changelog/25152.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Update the KV secret data when you change the version you're viewing of a nested secret. +``` \ No newline at end of file diff --git a/changelog/25171.txt b/changelog/25171.txt new file mode 100644 index 000000000000..b2c0424dbc82 --- /dev/null +++ b/changelog/25171.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core (enterprise): Improve seal unwrap performance when in degraded mode with one or more unhealthy seals. 
+``` \ No newline at end of file diff --git a/changelog/25173.txt b/changelog/25173.txt new file mode 100644 index 000000000000..4ca773c63dd7 --- /dev/null +++ b/changelog/25173.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Update plugin to v0.18.0 +``` diff --git a/changelog/25187.txt b/changelog/25187.txt new file mode 100644 index 000000000000..e90d97fdc406 --- /dev/null +++ b/changelog/25187.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.17.0 +``` diff --git a/changelog/25189.txt b/changelog/25189.txt new file mode 100644 index 000000000000..b537437ec161 --- /dev/null +++ b/changelog/25189.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.17.0 +``` diff --git a/changelog/25191.txt b/changelog/25191.txt new file mode 100644 index 000000000000..a28e3e2d8eeb --- /dev/null +++ b/changelog/25191.txt @@ -0,0 +1,3 @@ +```release-note:bug +router: Fix missing lock in MatchingSystemView. +``` diff --git a/changelog/25196.txt b/changelog/25196.txt new file mode 100644 index 000000000000..7ed634d7bc25 --- /dev/null +++ b/changelog/25196.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.16.0 +``` diff --git a/changelog/25204.txt b/changelog/25204.txt new file mode 100644 index 000000000000..0e09b6ea830e --- /dev/null +++ b/changelog/25204.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.7.0 +``` diff --git a/changelog/25207.txt b/changelog/25207.txt new file mode 100644 index 000000000000..f67b9fa843de --- /dev/null +++ b/changelog/25207.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.18.0 +``` \ No newline at end of file diff --git a/changelog/25209.txt b/changelog/25209.txt new file mode 100644 index 000000000000..178a09cbc8d3 --- /dev/null +++ b/changelog/25209.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix inconsistent empty state action link styles +``` \ No newline at end of file diff --git a/changelog/25212.txt b/changelog/25212.txt new file mode 100644 index 000000000000..bd1b74f5642a --- /dev/null +++ b/changelog/25212.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added a new config option, `lease_renewal_threshold`, that controls the refresh rate of non-renewable leases in Agent's template engine. 
+``` diff --git a/changelog/25217.txt b/changelog/25217.txt new file mode 100644 index 000000000000..fb60850c0598 --- /dev/null +++ b/changelog/25217.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.17.0 +``` diff --git a/changelog/25231.txt b/changelog/25231.txt new file mode 100644 index 000000000000..315c0e80828a --- /dev/null +++ b/changelog/25231.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.16.0 +``` diff --git a/changelog/25232.txt b/changelog/25232.txt new file mode 100644 index 000000000000..9c76439b729b --- /dev/null +++ b/changelog/25232.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kerberos: Update plugin to v0.11.0 +``` diff --git a/changelog/25233.txt b/changelog/25233.txt new file mode 100644 index 000000000000..dab2a92c96bc --- /dev/null +++ b/changelog/25233.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Update plugin to v0.16.2 +``` diff --git a/changelog/25235.txt b/changelog/25235.txt new file mode 100644 index 000000000000..d4df23ebf49b --- /dev/null +++ b/changelog/25235.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Do not disable JSON display toggle for KV version 2 secrets +``` \ No newline at end of file diff --git a/changelog/25245.txt b/changelog/25245.txt new file mode 100644 index 000000000000..5bf3cc3cae04 --- /dev/null +++ b/changelog/25245.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.15.1 +``` diff --git a/changelog/25251.txt b/changelog/25251.txt new file mode 100644 index 000000000000..e737505e0903 --- /dev/null +++ b/changelog/25251.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.12.0 +``` diff --git a/changelog/25253.txt b/changelog/25253.txt new file mode 100644 index 000000000000..7b989009b9c3 --- /dev/null +++ b/changelog/25253.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.11.0 +``` diff --git a/changelog/25256.txt b/changelog/25256.txt new file mode 100644 index 000000000000..f616e7a487cf --- /dev/null +++ b/changelog/25256.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Do not show resultant-acl banner on namespaces a user has access to +``` \ No newline at end of file diff --git a/changelog/25257.txt b/changelog/25257.txt new file mode 100644 index 000000000000..ae200b764bcd --- /dev/null +++ b/changelog/25257.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Update plugin to v0.16.0 +``` diff --git a/changelog/25258.txt b/changelog/25258.txt new file mode 100644 index 000000000000..8bf8fb7411fd --- /dev/null +++ b/changelog/25258.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.17.0 +``` diff --git a/changelog/25263.txt b/changelog/25263.txt new file mode 100644 index 000000000000..aea6faf033ee --- /dev/null +++ b/changelog/25263.txt @@ -0,0 +1,3 @@ +```release-note:change +database/elasticsearch: Update plugin to v0.14.0 +``` diff --git a/changelog/25264.txt b/changelog/25264.txt new file mode 100644 index 000000000000..827c0ede41d1 --- /dev/null +++ b/changelog/25264.txt @@ -0,0 +1,3 @@ +```release-note:change +database/mongodbatlas: Update plugin to v0.11.0 +``` diff --git a/changelog/25269.txt b/changelog/25269.txt new file mode 100644 index 000000000000..706c5abe4c67 --- /dev/null +++ b/changelog/25269.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix copy button not working on masked input when value is not a string +``` \ No newline at end of file diff --git a/changelog/25270.txt b/changelog/25270.txt new file mode 100644 
index 000000000000..60ccf37dc69f --- /dev/null +++ b/changelog/25270.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/helper/testhelpers: add namespace helpers +``` diff --git a/changelog/25272.txt b/changelog/25272.txt new file mode 100644 index 000000000000..77006057bfcf --- /dev/null +++ b/changelog/25272.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Upgrade Ember data from 4.11.3 to 4.12.4 +``` \ No newline at end of file diff --git a/changelog/25275.txt b/changelog/25275.txt new file mode 100644 index 000000000000..34b04ec1a93c --- /dev/null +++ b/changelog/25275.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.10.1 +``` diff --git a/changelog/25277.txt b/changelog/25277.txt new file mode 100644 index 000000000000..200c136b17fe --- /dev/null +++ b/changelog/25277.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.17.0 +``` diff --git a/changelog/25288.txt b/changelog/25288.txt new file mode 100644 index 000000000000..0b13a0b22073 --- /dev/null +++ b/changelog/25288.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.7.5 +``` diff --git a/changelog/25289.txt b/changelog/25289.txt new file mode 100644 index 000000000000..a230871df50e --- /dev/null +++ b/changelog/25289.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis: Update plugin to v0.2.3 +``` diff --git a/changelog/25296.txt b/changelog/25296.txt new file mode 100644 index 000000000000..914080b26d11 --- /dev/null +++ b/changelog/25296.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.3.0 +``` diff --git a/changelog/25321.txt b/changelog/25321.txt new file mode 100644 index 000000000000..247861c69caa --- /dev/null +++ b/changelog/25321.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Use Hds::Dropdown component to replace list view popup menus +``` \ No newline at end of file diff --git a/changelog/25326.txt b/changelog/25326.txt new file mode 100644 index 000000000000..587636b055a0 --- /dev/null +++ b/changelog/25326.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.0 +``` diff --git a/changelog/25329.txt b/changelog/25329.txt new file mode 100644 index 000000000000..e0f5aab2fac3 --- /dev/null +++ b/changelog/25329.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/helper/testcluster: add some new helpers, improve some error messages. +``` diff --git a/changelog/25335.txt b/changelog/25335.txt new file mode 100644 index 000000000000..b931d47f4a7a --- /dev/null +++ b/changelog/25335.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: redirect back to current route after reauthentication when token expires +``` diff --git a/changelog/25336.txt b/changelog/25336.txt new file mode 100644 index 000000000000..a1f32a444a4c --- /dev/null +++ b/changelog/25336.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: When provided an invalid input with hash_algorithm=none, a lock was not released properly before reporting an error leading to deadlocks on a subsequent key configuration update. 
+``` diff --git a/changelog/25364.txt b/changelog/25364.txt new file mode 100644 index 000000000000..b56fe9fbae02 --- /dev/null +++ b/changelog/25364.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove unnecessary OpenAPI calls for unmanaged auth methods +``` diff --git a/changelog/25387.txt b/changelog/25387.txt new file mode 100644 index 000000000000..46f91f4879ef --- /dev/null +++ b/changelog/25387.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix issue where Vault Agent was unable to render KVv2 secrets with delete_version_after set. +``` diff --git a/changelog/25390.txt b/changelog/25390.txt new file mode 100644 index 000000000000..d78a3432469f --- /dev/null +++ b/changelog/25390.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: prevent writes from impeding leader transfers, e.g. during automated upgrades +``` diff --git a/changelog/25391.txt b/changelog/25391.txt new file mode 100644 index 000000000000..522cbe849fb1 --- /dev/null +++ b/changelog/25391.txt @@ -0,0 +1,4 @@ +```release-note:improvement +command/server: Removed environment variable requirement to generate pprof files using SIGUSR2. Added CPU profile support. +``` diff --git a/changelog/25395.txt b/changelog/25395.txt new file mode 100644 index 000000000000..cd2ca5137e0f --- /dev/null +++ b/changelog/25395.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/file: Fixing spurious deletion of storage keys ending with .temp +``` diff --git a/changelog/25399.txt b/changelog/25399.txt new file mode 100644 index 000000000000..d0b6405cc5fc --- /dev/null +++ b/changelog/25399.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix PKI ca_chain display so value can be copied to clipboard +``` diff --git a/changelog/25418.txt b/changelog/25418.txt new file mode 100644 index 000000000000..7bee2fa79b65 --- /dev/null +++ b/changelog/25418.txt @@ -0,0 +1,4 @@ + +```release-note:improvement +auth/aws: Add inferred_hostname metadata for IAM AWS authentication method. +``` diff --git a/changelog/25421.txt b/changelog/25421.txt new file mode 100644 index 000000000000..06adb1403e30 --- /dev/null +++ b/changelog/25421.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Cache trusted certs to reduce memory usage and improve performance of logins. +``` \ No newline at end of file diff --git a/changelog/25436.txt b/changelog/25436.txt new file mode 100644 index 000000000000..132af39f1466 --- /dev/null +++ b/changelog/25436.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add `deletion_allowed` param to transformations and include `tokenization` as a type option +``` diff --git a/changelog/25439.txt b/changelog/25439.txt new file mode 100644 index 000000000000..7b1775c9b996 --- /dev/null +++ b/changelog/25439.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Deleting a namespace that contains a rate limit quota no longer breaks replication +``` diff --git a/changelog/25443.txt b/changelog/25443.txt new file mode 100644 index 000000000000..301824d8105b --- /dev/null +++ b/changelog/25443.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Resolve potential race condition when auditing entries which use SSCT.
+``` \ No newline at end of file diff --git a/changelog/25448.txt b/changelog/25448.txt new file mode 100644 index 000000000000..537cc8cdd1f8 --- /dev/null +++ b/changelog/25448.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix a deadlock that can occur on performance secondary clusters when there are many mounts and a mount is deleted or filtered +``` diff --git a/changelog/25459.txt b/changelog/25459.txt new file mode 100644 index 000000000000..67787b67a586 --- /dev/null +++ b/changelog/25459.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: flash messages render on right side of page +``` diff --git a/changelog/25479.txt b/changelog/25479.txt new file mode 100644 index 000000000000..5c23d2bcb7ae --- /dev/null +++ b/changelog/25479.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add error message when copy action fails +``` \ No newline at end of file diff --git a/changelog/25497.txt b/changelog/25497.txt new file mode 100644 index 000000000000..d4bfe8fb2db8 --- /dev/null +++ b/changelog/25497.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fixes a high Vault load issue by restarting the Consul template server after backing off instead of immediately. +``` diff --git a/changelog/25499.txt b/changelog/25499.txt new file mode 100644 index 000000000000..f2ef3e54aafd --- /dev/null +++ b/changelog/25499.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add wrapper functions for GET /sys/mounts/:path and GET /sys/auth/:path +``` diff --git a/changelog/25500.txt b/changelog/25500.txt new file mode 100644 index 000000000000..22711e8f4478 --- /dev/null +++ b/changelog/25500.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add granularity param to sync destinations +``` diff --git a/changelog/25509.txt b/changelog/25509.txt new file mode 100644 index 000000000000..e668d1903dc0 --- /dev/null +++ b/changelog/25509.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fixing response fields for rekey operations +``` diff --git a/changelog/25510.txt b/changelog/25510.txt new file mode 100644 index 000000000000..5eda94b0e609 --- /dev/null +++ b/changelog/25510.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fixing approle response duration types +``` diff --git a/changelog/25524.txt b/changelog/25524.txt new file mode 100644 index 000000000000..0a46aa91d615 --- /dev/null +++ b/changelog/25524.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.12.1 +``` diff --git a/changelog/25550.txt b/changelog/25550.txt new file mode 100644 index 000000000000..8a325837e569 --- /dev/null +++ b/changelog/25550.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: added the missing migrate parameter for the unseal endpoint in vault/logical_system_paths.go +``` diff --git a/changelog/25588.txt b/changelog/25588.txt new file mode 100644 index 000000000000..95a45130cd25 --- /dev/null +++ b/changelog/25588.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: replace popup menu on list items (namespaces, auth items, KMIP, K8S, LDAP) +``` \ No newline at end of file diff --git a/changelog/25605.txt b/changelog/25605.txt new file mode 100644 index 000000000000..a152ce45c221 --- /dev/null +++ b/changelog/25605.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Handle a potential panic while formatting audit entries for an audit log +``` diff --git a/changelog/25614.txt b/changelog/25614.txt new file mode 100644 index 000000000000..852db412f929 --- /dev/null +++ b/changelog/25614.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with no active tab when viewing
transit encryption key +``` \ No newline at end of file diff --git a/changelog/25636.txt b/changelog/25636.txt new file mode 100644 index 000000000000..d5528fb5df33 --- /dev/null +++ b/changelog/25636.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: make the best effort timeout for encryption count tracking persistence configurable via an environment variable. +``` \ No newline at end of file diff --git a/changelog/25637.txt b/changelog/25637.txt new file mode 100644 index 000000000000..b0f5ec496e3f --- /dev/null +++ b/changelog/25637.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +secrets/azure: Deprecate field "password_policy" as we are not able to set it anymore with the new MS Graph API. +``` \ No newline at end of file diff --git a/changelog/25640.txt b/changelog/25640.txt new file mode 100644 index 000000000000..8a213a1d8716 --- /dev/null +++ b/changelog/25640.txt @@ -0,0 +1,3 @@ +```release-note:change +events: Remove event notifications websocket endpoint in non-Enterprise +``` diff --git a/changelog/25646.txt b/changelog/25646.txt new file mode 100644 index 000000000000..d8c659a1dd82 --- /dev/null +++ b/changelog/25646.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds allowed_response_headers, plugin_version and user_lockout_config params to auth method configuration +``` \ No newline at end of file diff --git a/changelog/25649.txt b/changelog/25649.txt new file mode 100644 index 000000000000..2ce669886201 --- /dev/null +++ b/changelog/25649.txt @@ -0,0 +1,5 @@ +```release-note:security +auth/cert: compare public keys of trusted non-CA certificates with incoming +client certificates to prevent trusting certs with the same serial number +but not the same public/private key. +``` \ No newline at end of file diff --git a/changelog/25697.txt b/changelog/25697.txt new file mode 100644 index 000000000000..ecc2ac186766 --- /dev/null +++ b/changelog/25697.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/login: Fixed a potential deadlock when a login fails and user lockout is enabled. +``` diff --git a/changelog/25713.txt b/changelog/25713.txt new file mode 100644 index 000000000000..250045059ec5 --- /dev/null +++ b/changelog/25713.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/metrics: add metrics for secret sync client count +``` diff --git a/changelog/25744.txt b/changelog/25744.txt new file mode 100644 index 000000000000..274b228bc208 --- /dev/null +++ b/changelog/25744.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Move CLI token helper functions to importable packages in `api` module.
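Alongside the token helper move just above, the `api` package also gained typed wrapper functions for GET /sys/auth/:path and GET /sys/mounts/:path (PR 25499 earlier in this changelog; PR 25968 below removes the sudo requirement for `GetAuth`/`GetAuthWithContext`). A minimal sketch of calling `GetAuth`, assuming it returns the same `*api.AuthMount` shape that `ListAuth` uses; the "userpass" mount path is only an example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Client settings come from the usual VAULT_ADDR / VAULT_TOKEN env vars.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// GetAuth wraps GET /sys/auth/:path, so a single auth mount can be
	// inspected without listing every mount ("userpass" is an example path).
	auth, err := client.Sys().GetAuth("userpass")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%s accessor=%s\n", auth.Type, auth.Accessor)
}
```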
+``` diff --git a/changelog/25751.txt b/changelog/25751.txt new file mode 100644 index 000000000000..cfde6d9de06e --- /dev/null +++ b/changelog/25751.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: include secret syncs counts in the `vault operator usage` command output +``` \ No newline at end of file diff --git a/changelog/25766.txt b/changelog/25766.txt new file mode 100644 index 000000000000..7166fc3a3559 --- /dev/null +++ b/changelog/25766.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: call resultant-acl without namespace header when user mounted at root namespace +``` diff --git a/changelog/25862.txt b/changelog/25862.txt new file mode 100644 index 000000000000..ea772623b2a7 --- /dev/null +++ b/changelog/25862.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/userpass: Support supplying of a pre-hashed password instead of the password itself +``` \ No newline at end of file diff --git a/changelog/25867.txt b/changelog/25867.txt new file mode 100644 index 000000000000..c7611aaa86c7 --- /dev/null +++ b/changelog/25867.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: remove user_lockout_config settings for unsupported methods +``` diff --git a/changelog/25874.txt b/changelog/25874.txt new file mode 100644 index 000000000000..bf9ae37f02ba --- /dev/null +++ b/changelog/25874.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove leading slash from KV version 2 secret paths +``` diff --git a/changelog/25912.txt b/changelog/25912.txt new file mode 100644 index 000000000000..fdb419c8f463 --- /dev/null +++ b/changelog/25912.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Allow validation with OCSP responses with no NextUpdate time +``` diff --git a/changelog/25937.txt b/changelog/25937.txt new file mode 100644 index 000000000000..d2ff2e8057cc --- /dev/null +++ b/changelog/25937.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.1 +``` diff --git a/changelog/25953.txt b/changelog/25953.txt new file mode 100644 index 000000000000..42132176bc61 --- /dev/null +++ b/changelog/25953.txt @@ -0,0 +1,4 @@ +```release-note:change +core: return an additional "invalid token" error message in 403 response when the provided request token is expired, +exceeded the number of uses, or is a bogus value +``` \ No newline at end of file diff --git a/changelog/25968.txt b/changelog/25968.txt new file mode 100644 index 000000000000..e048b706dea7 --- /dev/null +++ b/changelog/25968.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Do not require sudo for API wrapper functions GetAuth and GetAuthWithContext +``` diff --git a/changelog/25982.txt b/changelog/25982.txt new file mode 100644 index 000000000000..59a7d0512b77 --- /dev/null +++ b/changelog/25982.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Allow cert auth login attempts if ocsp_fail_open is true and OCSP servers are unreachable +``` diff --git a/changelog/25986.txt b/changelog/25986.txt new file mode 100644 index 000000000000..3f64fe3c871a --- /dev/null +++ b/changelog/25986.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Address an issue in which OCSP query responses were not cached +``` diff --git a/changelog/25991.txt b/changelog/25991.txt new file mode 100644 index 000000000000..410732d43f7a --- /dev/null +++ b/changelog/25991.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: panic on unknown Raft operations +``` diff --git a/changelog/25992.txt b/changelog/25992.txt new file mode 100644 index 000000000000..14f528c665bd --- /dev/null +++ b/changelog/25992.txt @@ -0,0
+1,7 @@ +```release-note:improvement + +storage/raft (enterprise): add support for separate entry size limit for mount +and namespace table paths in storage to allow increased mount table size without +allowing other user storage entries to become larger. + +``` \ No newline at end of file diff --git a/changelog/25999.txt b/changelog/25999.txt new file mode 100644 index 000000000000..5999f7976acf --- /dev/null +++ b/changelog/25999.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix kubernetes auth method roles tab +``` diff --git a/changelog/26020.txt b/changelog/26020.txt new file mode 100644 index 000000000000..2833bf64df31 --- /dev/null +++ b/changelog/26020.txt @@ -0,0 +1,7 @@ +```release-note:improvement +core/activity: Include ACME clients in activity log responses +``` + +```release-note:improvement +ui: Display ACME clients on a separate page in the UI. +``` diff --git a/changelog/26073.txt b/changelog/26073.txt new file mode 100644 index 000000000000..90a964db99c3 --- /dev/null +++ b/changelog/26073.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity: alias metadata is now returned when listing entity aliases +``` diff --git a/changelog/26088.txt b/changelog/26088.txt new file mode 100644 index 000000000000..1bce05fa8124 --- /dev/null +++ b/changelog/26088.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: timestamps across multiple audit devices for an audit entry will now match. +``` \ No newline at end of file diff --git a/changelog/26091.txt b/changelog/26091.txt new file mode 100644 index 000000000000..31a219247ae3 --- /dev/null +++ b/changelog/26091.txt @@ -0,0 +1,3 @@ +```release-note:security +auth/cert: validate OCSP response was signed by the expected issuer and serial number matched request +``` diff --git a/changelog/26110.txt b/changelog/26110.txt new file mode 100644 index 000000000000..5a05cc214676 --- /dev/null +++ b/changelog/26110.txt @@ -0,0 +1,3 @@ +```release-note:change +sdk: String templates now have a maximum size of 100,000 characters. +``` \ No newline at end of file diff --git a/changelog/26135.txt b/changelog/26135.txt new file mode 100644 index 000000000000..75cc1dc750e6 --- /dev/null +++ b/changelog/26135.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/azure: Perform validation on Azure account name and container name +``` diff --git a/changelog/26147.txt b/changelog/26147.txt new file mode 100644 index 000000000000..efc179ee70fa --- /dev/null +++ b/changelog/26147.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret/database: Fixed race condition where database mounts may leak connections +``` diff --git a/changelog/26166.txt b/changelog/26166.txt new file mode 100644 index 000000000000..430a5a5fec6f --- /dev/null +++ b/changelog/26166.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Only reload seal configuration when enable_multiseal is set to true. +``` diff --git a/changelog/26167.txt b/changelog/26167.txt new file mode 100644 index 000000000000..f53d18c9fef6 --- /dev/null +++ b/changelog/26167.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/debug: Fix resource leak in CLI debug command. +``` \ No newline at end of file diff --git a/changelog/26172.txt b/changelog/26172.txt new file mode 100644 index 000000000000..86e855dfb0a6 --- /dev/null +++ b/changelog/26172.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Agent will re-trigger auto auth if the token used for rendering templates has been revoked, has exceeded the number of uses, or is a bogus value.
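The userpass entry earlier in this changelog (PR 25862) allows supplying a pre-hashed password so the cleartext never reaches Vault. A hedged sketch via the generic Logical API; the mount path, username, and bcrypt hash here are placeholders, and the `password_hash` field name follows the UI entry for PR 26577 later in this changelog:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a userpass user from a bcrypt hash instead of a plaintext
	// password. The hash below is a placeholder, not a real credential.
	_, err = client.Logical().Write("auth/userpass/users/alice", map[string]interface{}{
		"password_hash": "$2a$10$placeholderplaceholderplaceholderplaceho",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```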
+``` \ No newline at end of file diff --git a/changelog/26200.txt b/changelog/26200.txt new file mode 100644 index 000000000000..3d1e03a257fc --- /dev/null +++ b/changelog/26200.txt @@ -0,0 +1,6 @@ +```release-note:bug +auth/ldap: Fix login error missing entity alias attribute value. +``` +```release-note:bug +auth/ldap: Fix login error for group search anonymous bind. +``` diff --git a/changelog/26243.txt b/changelog/26243.txt new file mode 100644 index 000000000000..9a2dc3963491 --- /dev/null +++ b/changelog/26243.txt @@ -0,0 +1,4 @@ +```release-note:bug +cli: fixed a bug where the Vault CLI would error out if +HOME was not set. +``` diff --git a/changelog/26263.txt b/changelog/26263.txt new file mode 100644 index 000000000000..4d5eb1239357 --- /dev/null +++ b/changelog/26263.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: fixes cases where inputs did not have associated labels +``` \ No newline at end of file diff --git a/changelog/26291.txt b/changelog/26291.txt new file mode 100644 index 000000000000..f5fa6cca1e08 --- /dev/null +++ b/changelog/26291.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.2 +``` diff --git a/changelog/26307.txt b/changelog/26307.txt new file mode 100644 index 000000000000..1684c8d42805 --- /dev/null +++ b/changelog/26307.txt @@ -0,0 +1,4 @@ +```release-note:improvement +proxy: Proxy will re-trigger auto auth if the token used for requests has been revoked, has exceeded the number of uses, +or is an otherwise invalid value. +``` diff --git a/changelog/26325.txt b/changelog/26325.txt new file mode 100644 index 000000000000..cbfc6c1f9c64 --- /dev/null +++ b/changelog/26325.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixed a bug where the replication pages did not update display when navigating between DR and performance +``` diff --git a/changelog/26346.txt b/changelog/26346.txt new file mode 100644 index 000000000000..1f6a8a486a99 --- /dev/null +++ b/changelog/26346.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Update dependencies including D3 libraries +``` diff --git a/changelog/26381.txt b/changelog/26381.txt new file mode 100644 index 000000000000..c1d55e4781b5 --- /dev/null +++ b/changelog/26381.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/seal: During a seal reload through SIGHUP, only write updated seal barrier on an active node +``` diff --git a/changelog/26383.txt b/changelog/26383.txt new file mode 100644 index 000000000000..8b675a9ef416 --- /dev/null +++ b/changelog/26383.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: fixed a bug where LifetimeWatcher routines weren't respecting exponential backoff in the presence of unexpected errors +``` diff --git a/changelog/26384.txt b/changelog/26384.txt new file mode 100644 index 000000000000..56fb0a57757c --- /dev/null +++ b/changelog/26384.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/wif: fix a bug where the namespace was not set for external plugins using workload identity federation +``` diff --git a/changelog/26396.txt b/changelog/26396.txt new file mode 100644 index 000000000000..7f66e5cd58cf --- /dev/null +++ b/changelog/26396.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: show banner instead of permission denied error when batch token is expired +``` diff --git a/changelog/26406.txt b/changelog/26406.txt new file mode 100644 index 000000000000..8e55cb7e40e6 --- /dev/null +++ b/changelog/26406.txt @@ -0,0 +1,3 @@ +```release-note:improvement +replication (enterprise): Periodically write current time on the primary to storage, use that downstream to 
measure replication lag in time, expose that in health and replication status endpoints. +``` diff --git a/changelog/26427.txt b/changelog/26427.txt new file mode 100644 index 000000000000..615e9e15f06a --- /dev/null +++ b/changelog/26427.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: `vault.namespace` no longer gets incorrectly overridden by `auto_auth.namespace`, if set +``` diff --git a/changelog/26464.txt b/changelog/26464.txt new file mode 100644 index 000000000000..c033d8cf4558 --- /dev/null +++ b/changelog/26464.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/decompression: DecompressWithCanary will now chunk the decompression in memory to prevent loading it all at once. +``` diff --git a/changelog/26477.txt b/changelog/26477.txt new file mode 100644 index 000000000000..f24fca805cd2 --- /dev/null +++ b/changelog/26477.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: fixed validation bug which rejected ldap schemed URLs in crl_distribution_points. +``` \ No newline at end of file diff --git a/changelog/26485.txt b/changelog/26485.txt new file mode 100644 index 000000000000..6cc54cfb9984 --- /dev/null +++ b/changelog/26485.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes undefined start time in filename for downloaded client count attribution csv +``` diff --git a/changelog/26507.txt b/changelog/26507.txt new file mode 100644 index 000000000000..3f3c8f17a8b1 --- /dev/null +++ b/changelog/26507.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Plugin Identity Tokens**: Adds secret-less configuration of AWS auth engine using web identity federation. +``` \ No newline at end of file diff --git a/changelog/26519.txt b/changelog/26519.txt new file mode 100644 index 000000000000..2104c03c1880 --- /dev/null +++ b/changelog/26519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: Include ACME client metrics to precomputed queries +``` diff --git a/changelog/26523.txt b/changelog/26523.txt new file mode 100644 index 000000000000..3b3ef6427d4b --- /dev/null +++ b/changelog/26523.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): fix bug where raft followers disagree with the seal type after returning to one seal from two. +``` diff --git a/changelog/26525.txt b/changelog/26525.txt new file mode 100644 index 000000000000..df087f2b208f --- /dev/null +++ b/changelog/26525.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: Include ACME clients in vault operator usage response +``` diff --git a/changelog/26527.txt b/changelog/26527.txt new file mode 100644 index 000000000000..ff3b3c35d034 --- /dev/null +++ b/changelog/26527.txt @@ -0,0 +1,3 @@ +```release-note:change +api: Upgrade from github.com/go-jose/go-jose/v3 v3.0.3 to github.com/go-jose/go-jose/v4 v4.0.1. +``` \ No newline at end of file diff --git a/changelog/26528.txt b/changelog/26528.txt new file mode 100644 index 000000000000..ce43d4c86dac --- /dev/null +++ b/changelog/26528.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.17.1 +``` \ No newline at end of file diff --git a/changelog/26570.txt b/changelog/26570.txt new file mode 100644 index 000000000000..79efe31414f9 --- /dev/null +++ b/changelog/26570.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add metric (vault.agent.authenticated) that is set to 1 when vault agent has a valid token and zero if it does not. 
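The api fix above (PR 26383) concerns the `LifetimeWatcher` renewal loop, which now backs off exponentially on unexpected errors. For context, a minimal sketch of how a watcher is typically driven; the leased secret path `database/creds/my-role` is only illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Read a leased secret, then let a LifetimeWatcher renew it until the
	// lease can no longer be extended.
	secret, err := client.Logical().Read("database/creds/my-role")
	if err != nil {
		log.Fatal(err)
	}
	watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{Secret: secret})
	if err != nil {
		log.Fatal(err)
	}
	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// Renewal stopped: the lease expired or an unrecoverable error
			// occurred. Callers should re-read the secret at this point.
			log.Printf("watcher done: %v", err)
			return
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed at %s", renewal.RenewedAt)
		}
	}
}
```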
+``` \ No newline at end of file diff --git a/changelog/26577.txt b/changelog/26577.txt new file mode 100644 index 000000000000..8293f5403f2b --- /dev/null +++ b/changelog/26577.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update userpass user form to allow setting `password_hash` field. +``` \ No newline at end of file diff --git a/changelog/26607.txt b/changelog/26607.txt new file mode 100644 index 000000000000..b28c3d405977 --- /dev/null +++ b/changelog/26607.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix `redact_version` listener parameter being ignored for some OpenAPI related endpoints. +``` diff --git a/changelog/26616.txt b/changelog/26616.txt new file mode 100644 index 000000000000..af8e85e91940 --- /dev/null +++ b/changelog/26616.txt @@ -0,0 +1,5 @@ +```release-note:bug +core/audit: Audit logging a Vault request/response will now use a minimum 5 second context timeout. +If the existing context deadline occurs later than 5s in the future, it will be used, otherwise a +new context, separate from the original, will be used. +``` \ No newline at end of file diff --git a/changelog/26628.txt b/changelog/26628.txt new file mode 100644 index 000000000000..2ab067f3ef09 --- /dev/null +++ b/changelog/26628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: add support for external_ids in AWS assume-role +``` \ No newline at end of file diff --git a/changelog/26653.txt b/changelog/26653.txt new file mode 100644 index 000000000000..5ba7c2196d51 --- /dev/null +++ b/changelog/26653.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui (enterprise): Update filters on the custom messages list view. +``` \ No newline at end of file diff --git a/changelog/26660.txt b/changelog/26660.txt new file mode 100644 index 000000000000..9490cd0b6efa --- /dev/null +++ b/changelog/26660.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/etcd: Update etcd3 client to v3.5.13 to allow use of TLSv1.3. +``` diff --git a/changelog/26663.txt b/changelog/26663.txt new file mode 100644 index 000000000000..ac29ce0ecee2 --- /dev/null +++ b/changelog/26663.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Show computed values from `sys/internal/ui/mounts` endpoint for auth mount configuration view +``` diff --git a/changelog/26693.txt b/changelog/26693.txt new file mode 100644 index 000000000000..7e768789f352 --- /dev/null +++ b/changelog/26693.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Remove possibility of returning an undefined timezone from date-format helper +``` \ No newline at end of file diff --git a/changelog/26708.txt b/changelog/26708.txt new file mode 100644 index 000000000000..8bcd10d57fd4 --- /dev/null +++ b/changelog/26708.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Upgrade Ember to version 5.4 +``` \ No newline at end of file diff --git a/changelog/26729.txt b/changelog/26729.txt new file mode 100644 index 000000000000..4277b50a98ef --- /dev/null +++ b/changelog/26729.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui (enterprise): Update dashboard to make activity log query using the same start time as the metrics overview +``` \ No newline at end of file diff --git a/changelog/26777.txt b/changelog/26777.txt new file mode 100644 index 000000000000..1d04c1646624 --- /dev/null +++ b/changelog/26777.txt @@ -0,0 +1,4 @@ +```release-note:change +audit: breaking change - Vault now allows audit logs to contain 'correlation-id' and 'x-correlation-id' headers when they
are present in the incoming request. By default they are not HMAC'ed (but can be configured to HMAC by Vault Operators). +``` diff --git a/changelog/26790.txt b/changelog/26790.txt new file mode 100644 index 000000000000..593d2de67d97 --- /dev/null +++ b/changelog/26790.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Refresh model within a namespace on the Secrets Sync overview page. +``` \ No newline at end of file diff --git a/changelog/26796.txt b/changelog/26796.txt new file mode 100644 index 000000000000..1c9809a8c899 --- /dev/null +++ b/changelog/26796.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: sign-intermediate API will truncate notAfter if calculated to go beyond the signing issuer's notAfter. Previously, the notAfter was permitted to go beyond, leading to invalid chains. +``` diff --git a/changelog/26797.txt b/changelog/26797.txt new file mode 100644 index 000000000000..72b6b3802401 --- /dev/null +++ b/changelog/26797.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a new ACME configuration parameter that allows increasing the maximum TTL for ACME leaf certificates +``` diff --git a/changelog/26828.txt b/changelog/26828.txt new file mode 100644 index 000000000000..4c9a004c4601 --- /dev/null +++ b/changelog/26828.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Add support for GCP CloudSQL private IP's. +``` diff --git a/changelog/26841.txt b/changelog/26841.txt new file mode 100644 index 000000000000..a65e5affbef9 --- /dev/null +++ b/changelog/26841.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui (enterprise): Allow HVD users to access Secrets Sync. +``` \ No newline at end of file diff --git a/changelog/26844.txt b/changelog/26844.txt new file mode 100644 index 000000000000..49f7bf2f1611 --- /dev/null +++ b/changelog/26844.txt @@ -0,0 +1,3 @@ +```release-note:bug +auto-auth: Addressed issue where having no permissions to renew a renewable token caused auto-auth to attempt to renew constantly with no backoff +``` diff --git a/changelog/26845.txt b/changelog/26845.txt new file mode 100644 index 000000000000..2f19eaf8f28f --- /dev/null +++ b/changelog/26845.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: deleting a nested secret will no longer redirect you to the nearest path segment +``` \ No newline at end of file diff --git a/changelog/26848.txt b/changelog/26848.txt new file mode 100644 index 000000000000..6cce04d94346 --- /dev/null +++ b/changelog/26848.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Hide dashboard client count card if user does not have permission to view clients. +``` \ No newline at end of file diff --git a/changelog/26858.txt b/changelog/26858.txt new file mode 100644 index 000000000000..911fd20c174d --- /dev/null +++ b/changelog/26858.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix broken help link in console for the web command. +``` diff --git a/changelog/26863.txt b/changelog/26863.txt new file mode 100644 index 000000000000..75d864e6c84a --- /dev/null +++ b/changelog/26863.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Correctly constructs kv-v2 secret paths in nested namespaces.
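The breaking audit change above (PR 26777) lets 'correlation-id' and 'x-correlation-id' headers into audit logs without HMAC'ing by default. A sketch of how an operator could opt one of those headers back into HMAC'ing through the documented audited-request-headers endpoint:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mark the correlation-id header to be HMAC'ed before it is written to
	// audit logs (POST sys/config/auditing/request-headers/:name).
	_, err = client.Logical().Write(
		"sys/config/auditing/request-headers/correlation-id",
		map[string]interface{}{"hmac": true},
	)
	if err != nil {
		log.Fatal(err)
	}
}
```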
+``` \ No newline at end of file diff --git a/changelog/26868.txt b/changelog/26868.txt new file mode 100644 index 000000000000..3a25d05efdbb --- /dev/null +++ b/changelog/26868.txt @@ -0,0 +1,3 @@ +```release-note:change +api: Update backoff/v3 to backoff/v4.3.0 +``` \ No newline at end of file diff --git a/changelog/26872.txt b/changelog/26872.txt new file mode 100644 index 000000000000..3309a2310619 --- /dev/null +++ b/changelog/26872.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Resolved accessibility issues with Web REPL. Associated label and help text with input, added a conditional to show the console/ui-panel only when toggled open, added keyboard focus trap. +``` \ No newline at end of file diff --git a/changelog/26876.txt b/changelog/26876.txt new file mode 100644 index 000000000000..6522b0ecd9a6 --- /dev/null +++ b/changelog/26876.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Add missing field delegated_auth_accessors to GET /sys/mounts/:path API response +``` diff --git a/changelog/26877.txt b/changelog/26877.txt new file mode 100644 index 000000000000..959e78251480 --- /dev/null +++ b/changelog/26877.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.18.0 +``` diff --git a/changelog/26878.txt b/changelog/26878.txt new file mode 100644 index 000000000000..a8cfc576480b --- /dev/null +++ b/changelog/26878.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/metrics: store cluster name in unencrypted storage to prevent blank cluster name +``` diff --git a/changelog/26890.txt b/changelog/26890.txt new file mode 100644 index 000000000000..74d06a9cf781 --- /dev/null +++ b/changelog/26890.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.3 +``` diff --git a/changelog/26896.txt b/changelog/26896.txt new file mode 100644 index 000000000000..6147953d0b27 --- /dev/null +++ b/changelog/26896.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Update vault-plugin-secrets-azure to 0.17.2 to include a bug fix for azure role creation +``` diff --git a/changelog/26985.txt b/changelog/26985.txt new file mode 100644 index 000000000000..7894bd3d407d --- /dev/null +++ b/changelog/26985.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Prevent perpetual loading screen when Vault needs initialization +``` diff --git a/changelog/26993.txt b/changelog/26993.txt new file mode 100644 index 000000000000..35acaa79a8ad --- /dev/null +++ b/changelog/26993.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update PGP display and show error for Generate Operation Token flow with PGP +``` \ No newline at end of file diff --git a/changelog/27014.txt b/changelog/27014.txt new file mode 100644 index 000000000000..94f6ebbe075a --- /dev/null +++ b/changelog/27014.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Address a data race updating a seal's last seen healthy time attribute +``` diff --git a/changelog/27019.txt b/changelog/27019.txt new file mode 100644 index 000000000000..722e0d46c9ec --- /dev/null +++ b/changelog/27019.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix link to v2 generic secrets engine from secrets list page. 
+``` \ No newline at end of file diff --git a/changelog/27093.txt b/changelog/27093.txt new file mode 100644 index 000000000000..a24becec3eac --- /dev/null +++ b/changelog/27093.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix error in cross-signing using ed25519 keys +``` diff --git a/changelog/27094.txt b/changelog/27094.txt new file mode 100644 index 000000000000..9cd743f55f94 --- /dev/null +++ b/changelog/27094.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix KVv2 json editor to allow null values. +``` \ No newline at end of file diff --git a/changelog/27117.txt b/changelog/27117.txt new file mode 100644 index 000000000000..21ced168556a --- /dev/null +++ b/changelog/27117.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis: Update plugin to v0.3.0 +``` diff --git a/changelog/27120.txt b/changelog/27120.txt new file mode 100644 index 000000000000..3a9630b986c5 --- /dev/null +++ b/changelog/27120.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix KVv2 cursor jumping inside json editor after initial input. +``` \ No newline at end of file diff --git a/changelog/27123.txt b/changelog/27123.txt new file mode 100644 index 000000000000..f23dd835c5ed --- /dev/null +++ b/changelog/27123.txt @@ -0,0 +1,6 @@ +```release-note:improvement +agent/sink: Allow configuration of the user and group ID of the file sink. +``` +```release-note:improvement +proxy/sink: Allow configuration of the user and group ID of the file sink. +``` diff --git a/changelog/27130.txt b/changelog/27130.txt new file mode 100644 index 000000000000..ba3352c4128e --- /dev/null +++ b/changelog/27130.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/centrify: Remove the deprecated Centrify auth method plugin +``` diff --git a/changelog/27131.txt b/changelog/27131.txt new file mode 100644 index 000000000000..465da55fe963 --- /dev/null +++ b/changelog/27131.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix configuration link from Secret Engine list view for Ember engines. 
+``` \ No newline at end of file diff --git a/changelog/27132.txt b/changelog/27132.txt new file mode 100644 index 000000000000..2c5194504527 --- /dev/null +++ b/changelog/27132.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.11.0 +``` diff --git a/changelog/27133.txt b/changelog/27133.txt new file mode 100644 index 000000000000..d19c4b849b3d --- /dev/null +++ b/changelog/27133.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.18.0 +``` diff --git a/changelog/27134.txt b/changelog/27134.txt new file mode 100644 index 000000000000..e477acb87d17 --- /dev/null +++ b/changelog/27134.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Update plugin to v0.17.0 +``` diff --git a/changelog/27136.txt b/changelog/27136.txt new file mode 100644 index 000000000000..24be2b0e0a29 --- /dev/null +++ b/changelog/27136.txt @@ -0,0 +1,3 @@ +```release-note:change +database/elasticsearch: Update plugin to v0.15.0 +``` diff --git a/changelog/27137.txt b/changelog/27137.txt new file mode 100644 index 000000000000..e977dd429865 --- /dev/null +++ b/changelog/27137.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.13.0 +``` diff --git a/changelog/27139.txt b/changelog/27139.txt new file mode 100644 index 000000000000..1b7915fc6d02 --- /dev/null +++ b/changelog/27139.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.4.0 +``` diff --git a/changelog/27140.txt b/changelog/27140.txt new file mode 100644 index 000000000000..4910718fb825 --- /dev/null +++ b/changelog/27140.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Update plugin to v0.18.0 +``` diff --git a/changelog/27141.txt b/changelog/27141.txt new file mode 100644 index 000000000000..b34464a80947 --- /dev/null +++ b/changelog/27141.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.19.0 +``` diff --git a/changelog/27142.txt b/changelog/27142.txt new file mode 100644 index 000000000000..9a36667fc35f --- /dev/null +++ b/changelog/27142.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.16.0 +``` diff --git a/changelog/27143.txt b/changelog/27143.txt new file mode 100644 index 000000000000..9e0f83835572 --- /dev/null +++ b/changelog/27143.txt @@ -0,0 +1,3 @@ +```release-note:change +database/mongodbatlas: Update plugin to v0.12.0 +``` diff --git a/changelog/27145.txt b/changelog/27145.txt new file mode 100644 index 000000000000..45bca4ba1ebc --- /dev/null +++ b/changelog/27145.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.11.0 +``` diff --git a/changelog/27146.txt b/changelog/27146.txt new file mode 100644 index 000000000000..0e106b92f682 --- /dev/null +++ b/changelog/27146.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.18.0 +``` diff --git a/changelog/27147.txt b/changelog/27147.txt new file mode 100644 index 000000000000..30d13d3661ef --- /dev/null +++ b/changelog/27147.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.8.0 +``` diff --git a/changelog/27149.txt b/changelog/27149.txt new file mode 100644 index 000000000000..653851c940a3 --- /dev/null +++ b/changelog/27149.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.12.0 +``` diff --git a/changelog/27159.txt b/changelog/27159.txt new file mode 100644 index 000000000000..a21f894d051b --- /dev/null +++ b/changelog/27159.txt @@ -0,0 +1,3 @@ +```release-note:change 
+secrets/kv: Update plugin to v0.19.0 +``` diff --git a/changelog/27161.txt b/changelog/27161.txt new file mode 100644 index 000000000000..5764f028bdd3 --- /dev/null +++ b/changelog/27161.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.17.0 +``` diff --git a/changelog/27163.txt b/changelog/27163.txt new file mode 100644 index 000000000000..307cb66bf902 --- /dev/null +++ b/changelog/27163.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.17.0 +``` diff --git a/changelog/27164.txt b/changelog/27164.txt new file mode 100644 index 000000000000..4c942e1b9950 --- /dev/null +++ b/changelog/27164.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Update plugin to v0.19.0 +``` diff --git a/changelog/27172.txt b/changelog/27172.txt new file mode 100644 index 000000000000..60e88ec4797d --- /dev/null +++ b/changelog/27172.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.18.0 +``` diff --git a/changelog/27177.txt b/changelog/27177.txt new file mode 100644 index 000000000000..c01050b6fa2a --- /dev/null +++ b/changelog/27177.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kerberos: Update plugin to v0.12.0 +``` diff --git a/changelog/27178.txt b/changelog/27178.txt new file mode 100644 index 000000000000..c84c67f34e27 --- /dev/null +++ b/changelog/27178.txt @@ -0,0 +1,3 @@ +```release-note:change +ui/kubernetes: Update the roles filter-input to use explicit search. +``` diff --git a/changelog/27184.txt b/changelog/27184.txt new file mode 100644 index 000000000000..500045efb5af --- /dev/null +++ b/changelog/27184.txt @@ -0,0 +1,3 @@ +```release-note:change +core/identity: improve performance for secondary nodes receiving identity related updates through replication +``` diff --git a/changelog/27186.txt b/changelog/27186.txt new file mode 100644 index 000000000000..b8d6adaf2534 --- /dev/null +++ b/changelog/27186.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.19.0 +``` diff --git a/changelog/27187.txt b/changelog/27187.txt new file mode 100644 index 000000000000..a723b1b4c5b0 --- /dev/null +++ b/changelog/27187.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.8.0 +``` diff --git a/changelog/27202.txt b/changelog/27202.txt new file mode 100644 index 000000000000..224f976bba09 --- /dev/null +++ b/changelog/27202.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Merge error messages returned in login failures and include error when present +``` diff --git a/changelog/27203.txt b/changelog/27203.txt new file mode 100644 index 000000000000..b0dee9ee38f4 --- /dev/null +++ b/changelog/27203.txt @@ -0,0 +1,3 @@ +```release-note:feature +**LDAP Secrets engine hierarchical path support**: Hierarchical path handling is now supported for role and set APIs. +``` diff --git a/changelog/27211.txt b/changelog/27211.txt new file mode 100644 index 000000000000..26bf725ebff3 --- /dev/null +++ b/changelog/27211.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter. +``` diff --git a/changelog/27262.txt b/changelog/27262.txt new file mode 100644 index 000000000000..93c2fbe3f0d1 --- /dev/null +++ b/changelog/27262.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui/secrets-sync: Hide Secrets Sync from the sidebar nav if user does not have access to the feature. 
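The transit entry above (PR 27211) restores the documented precedence: `hash_algorithm` wins when present, and the deprecated `algorithm` parameter is only a fallback. A sketch of an HMAC verify call naming the hash explicitly; the key name and HMAC value are placeholders:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Verify an HMAC, naming the hash via hash_algorithm rather than the
	// deprecated algorithm parameter. Transit expects base64-encoded input.
	resp, err := client.Logical().Write("transit/verify/my-key", map[string]interface{}{
		"input":          base64.StdEncoding.EncodeToString([]byte("hello world")),
		"hmac":           "vault:v1:placeholder-hmac-value",
		"hash_algorithm": "sha2-256",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", resp.Data["valid"])
}
```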
+``` diff --git a/changelog/27263.txt b/changelog/27263.txt new file mode 100644 index 000000000000..cb008c59faee --- /dev/null +++ b/changelog/27263.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Do not show resultant-ACL banner when ancestor namespace grants wildcard access. +``` diff --git a/changelog/27265.txt b/changelog/27265.txt new file mode 100644 index 000000000000..926b2c7a07d8 --- /dev/null +++ b/changelog/27265.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fixed an erroneous warning appearing about `-address` not being set when it is. +``` diff --git a/changelog/27277.txt b/changelog/27277.txt new file mode 100644 index 000000000000..1a7abc5a6f05 --- /dev/null +++ b/changelog/27277.txt @@ -0,0 +1,4 @@ +```release-note:bug +storage/raft (enterprise): Fix a regression introduced in 1.15.8 that causes +autopilot to fail to discover new server versions and so not trigger an upgrade. +``` \ No newline at end of file diff --git a/changelog/27289.txt b/changelog/27289.txt new file mode 100644 index 000000000000..3e10cf0a02f6 --- /dev/null +++ b/changelog/27289.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allow users to wrap inputted data again instead of resetting form +``` diff --git a/changelog/27346.txt b/changelog/27346.txt new file mode 100644 index 000000000000..17f50216cb91 --- /dev/null +++ b/changelog/27346.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update language in Transit secret engine to reflect that not all keys are for encryption +``` diff --git a/changelog/27348.txt b/changelog/27348.txt new file mode 100644 index 000000000000..ec7ece0b851a --- /dev/null +++ b/changelog/27348.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Mask obfuscated fields when creating/editing a Secrets sync destination. +``` diff --git a/changelog/27350.txt b/changelog/27350.txt new file mode 100644 index 000000000000..ddf95bc0052b --- /dev/null +++ b/changelog/27350.txt @@ -0,0 +1,4 @@ +```release-note:change +activity log: Deprecated the field "default_report_months". Instead, the billing start time will be used to determine the start time +when querying the activity log endpoints. +``` \ No newline at end of file diff --git a/changelog/27352.txt b/changelog/27352.txt new file mode 100644 index 000000000000..70a7fa366126 --- /dev/null +++ b/changelog/27352.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix issue where a month without new clients breaks the client count dashboard +``` diff --git a/changelog/27366.txt b/changelog/27366.txt new file mode 100644 index 000000000000..16b936e5dfb5 --- /dev/null +++ b/changelog/27366.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix a bug where disabling TTL on the AWS credential form would still send TTL value +``` \ No newline at end of file diff --git a/changelog/27379.txt b/changelog/27379.txt new file mode 100644 index 000000000000..1744873c2839 --- /dev/null +++ b/changelog/27379.txt @@ -0,0 +1,5 @@ +```release-note:change +activity: The startTime will be set to the start of the current billing period by default. +The endTime will be set to the end of the current month. This applies to /sys/internal/counters/activity, +/sys/internal/counters/activity/export, and the vault operator usage command that utilizes /sys/internal/counters/activity.
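Given the new defaults described in the activity entry above (PR 27379), clients can omit the time range entirely and still query the current billing period. A sketch of both query styles against the activity endpoint; the explicit timestamps are illustrative RFC3339 values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// No parameters: startTime defaults to the billing period start and
	// endTime to the end of the current month.
	defaulted, err := client.Logical().Read("sys/internal/counters/activity")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(defaulted.Data["start_time"], defaulted.Data["end_time"])

	// An explicit range still overrides the defaults.
	explicit, err := client.Logical().ReadWithData("sys/internal/counters/activity",
		map[string][]string{
			"start_time": {"2024-01-01T00:00:00Z"},
			"end_time":   {"2024-06-30T23:59:59Z"},
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(explicit.Data["start_time"], explicit.Data["end_time"])
}
```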
+``` diff --git a/changelog/27394.txt b/changelog/27394.txt new file mode 100644 index 000000000000..81a04305f487 --- /dev/null +++ b/changelog/27394.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Vault Minimal Version**: Add the ability to build a minimal version of Vault +with only core features using the BUILD_MINIMAL environment variable. +``` diff --git a/changelog/27405.txt b/changelog/27405.txt new file mode 100644 index 000000000000..ac5162e8010e --- /dev/null +++ b/changelog/27405.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: AWS credentials form sets credential_type from backing role +``` diff --git a/changelog/27424.txt b/changelog/27424.txt new file mode 100644 index 000000000000..fcfb93ea4bc8 --- /dev/null +++ b/changelog/27424.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Allow creation of session_token type roles for AWS secret backend +``` \ No newline at end of file diff --git a/changelog/27426.txt b/changelog/27426.txt new file mode 100644 index 000000000000..ac18d438622d --- /dev/null +++ b/changelog/27426.txt @@ -0,0 +1,4 @@ +```release-note:change +activity log: Deprecates the current_billing_period field for /sys/internal/counters/activity. The default start time +will automatically be set to the billing period start date. +``` \ No newline at end of file diff --git a/changelog/27435.txt b/changelog/27435.txt new file mode 100644 index 000000000000..916a81c9017e --- /dev/null +++ b/changelog/27435.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/pkcs7: Fix parsing certain messages containing only certificates +``` diff --git a/changelog/27455.txt b/changelog/27455.txt new file mode 100644 index 000000000000..38b7c92ae6db --- /dev/null +++ b/changelog/27455.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Uses the internal/counters/activity/export endpoint for client count export data. +``` \ No newline at end of file diff --git a/changelog/27457.txt b/changelog/27457.txt new file mode 100644 index 000000000000..e3cf89a76524 --- /dev/null +++ b/changelog/27457.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/helper: Allow setting environment variables when using NewTestDockerCluster +``` diff --git a/changelog/27459.txt b/changelog/27459.txt new file mode 100644 index 000000000000..d46570f5a411 --- /dev/null +++ b/changelog/27459.txt @@ -0,0 +1,4 @@ +```release-note:bug +config: Vault TCP listener config now correctly supports the documented proxy_protocol_behavior +setting of 'deny_unauthorized' +``` diff --git a/changelog/27464.txt b/changelog/27464.txt new file mode 100644 index 000000000000..ff9d9f508bc7 --- /dev/null +++ b/changelog/27464.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Improve autopilot logging on startup to show config values clearly and avoid spurious logs +``` diff --git a/changelog/27474.txt b/changelog/27474.txt new file mode 100644 index 000000000000..86c773517a3e --- /dev/null +++ b/changelog/27474.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft (enterprise): Fix issue with namespace cache not getting cleared on snapshot restore, resulting in namespaces not found in the snapshot being inaccurately represented by API responses.
+``` diff --git a/changelog/27479.txt b/changelog/27479.txt new file mode 100644 index 000000000000..355fbbafbe44 --- /dev/null +++ b/changelog/27479.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Ensure token expired banner displays when batch token expires +``` \ No newline at end of file diff --git a/changelog/27498.txt b/changelog/27498.txt new file mode 100644 index 000000000000..ac191560732d --- /dev/null +++ b/changelog/27498.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.21.0 +``` diff --git a/changelog/27510.txt b/changelog/27510.txt new file mode 100644 index 000000000000..af574898b604 --- /dev/null +++ b/changelog/27510.txt @@ -0,0 +1,6 @@ +```release-note:improvement +agent: Add the ability to dump pprof to the filesystem using SIGUSR2 +``` +```release-note:improvement +proxy: Add the ability to dump pprof to the filesystem using SIGUSR2 +``` diff --git a/changelog/27518.txt b/changelog/27518.txt new file mode 100644 index 000000000000..c8412c62b02e --- /dev/null +++ b/changelog/27518.txt @@ -0,0 +1,7 @@ +```release-note:bug +agent: Fixed an issue causing excessive CPU usage during normal operation +``` + +```release-note:bug +proxy: Fixed an issue causing excessive CPU usage during normal operation +``` \ No newline at end of file diff --git a/changelog/27531.txt b/changelog/27531.txt new file mode 100644 index 000000000000..3dda984d1d7d --- /dev/null +++ b/changelog/27531.txt @@ -0,0 +1,5 @@ +```release-note:bug +core/audit: Audit logging a Vault request/response checks if the existing context +is cancelled and will now use a new context with a 5 second timeout. +If the existing context is cancelled, a new context will be used. +``` \ No newline at end of file diff --git a/changelog/27538.txt b/changelog/27538.txt new file mode 100644 index 000000000000..e1d14481502c --- /dev/null +++ b/changelog/27538.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Creates separate section for updating sensitive creds for Secrets sync create/edit view. +``` diff --git a/changelog/27547.txt b/changelog/27547.txt new file mode 100644 index 000000000000..ca18d264a43f --- /dev/null +++ b/changelog/27547.txt @@ -0,0 +1,4 @@ +```release-note:improvement +activity log: Changes how new client counts in the current month are estimated, in order to return more +visibly sensible totals. +``` \ No newline at end of file diff --git a/changelog/27559.txt b/changelog/27559.txt new file mode 100644 index 000000000000..a9afccdc98d5 --- /dev/null +++ b/changelog/27559.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Remove deprecated `current_billing_period` from dashboard activity log request +``` diff --git a/changelog/27563.txt b/changelog/27563.txt new file mode 100644 index 000000000000..294b5e7cf642 --- /dev/null +++ b/changelog/27563.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/azure: Fix invalid account name initialization bug +``` \ No newline at end of file diff --git a/changelog/27569.txt b/changelog/27569.txt new file mode 100644 index 000000000000..cb81aa23df73 --- /dev/null +++ b/changelog/27569.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix cursor jump on KVv2 json editor that would occur after pressing ENTER.
+``` diff --git a/changelog/27574.txt b/changelog/27574.txt new file mode 100644 index 000000000000..8c1f888242c8 --- /dev/null +++ b/changelog/27574.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Display an error and force a timeout when TOTP passcode is incorrect +``` \ No newline at end of file diff --git a/changelog/27578.txt b/changelog/27578.txt new file mode 100644 index 000000000000..3a1b62a2a512 --- /dev/null +++ b/changelog/27578.txt @@ -0,0 +1,3 @@ +```release-note:change +cli: The undocumented `-dev-three-node` and `-dev-four-cluster` CLI options have been removed. +``` diff --git a/changelog/27589.txt b/changelog/27589.txt new file mode 100644 index 000000000000..844857ff1e77 --- /dev/null +++ b/changelog/27589.txt @@ -0,0 +1,4 @@ +```release-note:bug +core/config: fix issue when using `proxy_protocol_behavior` with `deny_unauthorized`, +which caused the Vault TCP listener to close after receiving an untrusted upstream proxy connection. +``` \ No newline at end of file diff --git a/changelog/27605.txt b/changelog/27605.txt new file mode 100644 index 000000000000..b449daf78920 --- /dev/null +++ b/changelog/27605.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Bump raft to v1.7.0 which includes pre-vote. This should make clusters more stable during network partitions. +``` diff --git a/changelog/27620.txt b/changelog/27620.txt new file mode 100644 index 000000000000..e808a0b4e0e7 --- /dev/null +++ b/changelog/27620.txt @@ -0,0 +1,5 @@ +```release-note:feature +**AWS secrets engine STS session tags support**: Adds support for setting STS +session tags when generating temporary credentials using the AWS secrets
engine. +``` diff --git a/changelog/27631.txt b/changelog/27631.txt new file mode 100644 index 000000000000..d71fbb5508a3 --- /dev/null +++ b/changelog/27631.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed an issue with performance standbys not being able to handle rotate root requests. +``` \ No newline at end of file diff --git a/changelog/27633.txt b/changelog/27633.txt new file mode 100644 index 000000000000..1f5156b3bdcb --- /dev/null +++ b/changelog/27633.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: make authLock and mountsLock in Core configurable via the detect_deadlocks configuration parameter. +``` \ No newline at end of file diff --git a/changelog/27652.txt b/changelog/27652.txt new file mode 100644 index 000000000000..7c9d257fd906 --- /dev/null +++ b/changelog/27652.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.19.2 +``` diff --git a/changelog/27656.txt b/changelog/27656.txt new file mode 100644 index 000000000000..e4445f826af8 --- /dev/null +++ b/changelog/27656.txt @@ -0,0 +1,3 @@ +```release-note:improvement +license utilization reporting (enterprise): Auto-roll billing start date. +``` \ No newline at end of file diff --git a/changelog/27660.txt b/changelog/27660.txt new file mode 100644 index 000000000000..e754dbbfa360 --- /dev/null +++ b/changelog/27660.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix HTTP redirects in namespaces to use the correct path and (in the case of event subscriptions) the correct URI scheme. +``` \ No newline at end of file diff --git a/changelog/27681.txt b/changelog/27681.txt new file mode 100644 index 000000000000..13f5f829d523 --- /dev/null +++ b/changelog/27681.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: fix a server side error when reading a certificate that lacks a serial number.
+``` \ No newline at end of file diff --git a/changelog/27694.txt b/changelog/27694.txt new file mode 100644 index 000000000000..6f5d03e9161a --- /dev/null +++ b/changelog/27694.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fixed issue with `vault hcp connect` where HCP resources with uppercase letters were inaccessible when entering the correct project name. +``` \ No newline at end of file diff --git a/changelog/27696.txt b/changelog/27696.txt new file mode 100644 index 000000000000..42956c336015 --- /dev/null +++ b/changelog/27696.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Use the subject's serial number, not the issuer's, within error message text in OCSP request errors +``` diff --git a/changelog/27724.txt b/changelog/27724.txt new file mode 100644 index 000000000000..0e58502bbce9 --- /dev/null +++ b/changelog/27724.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.18.0 +``` diff --git a/changelog/27726.txt b/changelog/27726.txt new file mode 100644 index 000000000000..80f2cc850fe6 --- /dev/null +++ b/changelog/27726.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft/autopilot: Fixed panic that may occur during shutdown +``` diff --git a/changelog/27728.txt b/changelog/27728.txt new file mode 100644 index 000000000000..6c1c18ef50a6 --- /dev/null +++ b/changelog/27728.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix namespace picker not working on small screens where the sidebar is collapsed by default. +``` \ No newline at end of file diff --git a/changelog/27730.txt b/changelog/27730.txt new file mode 100644 index 000000000000..d2653062939b --- /dev/null +++ b/changelog/27730.txt @@ -0,0 +1,3 @@ +```release-note:bug +proxy/cache (enterprise): Fixed an issue where cached static secrets could fail to update if the secrets belonged to a non-root namespace. +``` diff --git a/changelog/27742.txt b/changelog/27742.txt new file mode 100644 index 000000000000..6f958a02a7b5 --- /dev/null +++ b/changelog/27742.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix issue where enabling then disabling "Tidy ACME" in PKI results in a failed API call. +``` \ No newline at end of file diff --git a/changelog/27750.txt b/changelog/27750.txt new file mode 100644 index 000000000000..04c24fe59e7f --- /dev/null +++ b/changelog/27750.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Fixed an issue where deleted/reassigned entity-aliases were not removed from the in-memory database. +``` diff --git a/changelog/27790.txt b/changelog/27790.txt new file mode 100644 index 000000000000..1475d0831a2b --- /dev/null +++ b/changelog/27790.txt @@ -0,0 +1,3 @@ +```release-note:change +activity (enterprise): filter all fields in client count responses by the request namespace +``` \ No newline at end of file diff --git a/changelog/27796.txt b/changelog/27796.txt new file mode 100644 index 000000000000..7a1e7ebac3b0 --- /dev/null +++ b/changelog/27796.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: simplify the date range editing experience in the client counts dashboard. 
+``` \ No newline at end of file diff --git a/changelog/27799.txt b/changelog/27799.txt new file mode 100644 index 000000000000..217f6d78d91c --- /dev/null +++ b/changelog/27799.txt @@ -0,0 +1,3 @@ +```release-note:change +consul-template: updated to version 0.39.1 +``` \ No newline at end of file diff --git a/changelog/27809.txt b/changelog/27809.txt new file mode 100644 index 000000000000..332c9155d95a --- /dev/null +++ b/changelog/27809.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Ensure that any underlying errors from audit devices are logged even if we consider auditing to be a success. +``` \ No newline at end of file diff --git a/changelog/27816.txt b/changelog/27816.txt new file mode 100644 index 000000000000..92dd2d7bb90c --- /dev/null +++ b/changelog/27816.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove initial start/end parameters on the activity call for client counts dashboard. +``` \ No newline at end of file diff --git a/changelog/27830.txt b/changelog/27830.txt new file mode 100644 index 000000000000..6a3d7e3041f7 --- /dev/null +++ b/changelog/27830.txt @@ -0,0 +1,3 @@ +```release-note:change +activity (enterprise): remove deprecated fields distinct_entities and non_entity_tokens +``` \ No newline at end of file diff --git a/changelog/27831.txt b/changelog/27831.txt new file mode 100644 index 000000000000..27224ef7db66 --- /dev/null +++ b/changelog/27831.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: For the AWS and SSH secrets engines, hide mount configuration details behind a toggle and display either the configuration details or a CTA. +``` \ No newline at end of file diff --git a/changelog/27846.txt b/changelog/27846.txt new file mode 100644 index 000000000000..50cba99062fb --- /dev/null +++ b/changelog/27846.txt @@ -0,0 +1,7 @@ +```release-note:change +activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) now requires the `sudo` ACL capability. +``` + +```release-note:improvement +activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) can now be called in non-root namespaces. Resulting records will be filtered to include the requested namespace (via `X-Vault-Namespace` header or within the path) and all child namespaces. +``` diff --git a/changelog/27858.txt b/changelog/27858.txt new file mode 100644 index 000000000000..398d94ac438a --- /dev/null +++ b/changelog/27858.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: fixes an issue where not supplying an external id was interpreted as an empty external id +``` diff --git a/changelog/27859.txt b/changelog/27859.txt new file mode 100644 index 000000000000..d6836641fae7 --- /dev/null +++ b/changelog/27859.txt @@ -0,0 +1,4 @@ +```release-note:improvement +audit: sinks (file, socket, syslog) will attempt to log errors to the server operational +log before returning (if there are errors to log, and the context is done). +``` diff --git a/changelog/27881.txt b/changelog/27881.txt new file mode 100644 index 000000000000..0b3f9aa7fd69 --- /dev/null +++ b/changelog/27881.txt @@ -0,0 +1,4 @@ +```release-note:bug +sys: Fix a bug where mounts of external plugins that were registered before Vault v1.0.0 could not be tuned to +use versioned plugins. 
+``` \ No newline at end of file diff --git a/changelog/27883.txt b/changelog/27883.txt new file mode 100644 index 000000000000..63752f78f109 --- /dev/null +++ b/changelog/27883.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Added API documentation for Azure Secrets Engine delete role +``` \ No newline at end of file diff --git a/changelog/27891.txt b/changelog/27891.txt new file mode 100644 index 000000000000..c5962285e7dc --- /dev/null +++ b/changelog/27891.txt @@ -0,0 +1,3 @@ +```release-note:bug +proxy/cache (enterprise): Fixed an issue where Proxy would not correctly update KV secrets when talking to a perf standby. Proxy will now attempt to forward requests to update secrets triggered by events to the active node. Note that this requires `allow_forwarding_via_header` to be configured on the cluster. +``` diff --git a/changelog/27902.txt b/changelog/27902.txt new file mode 100644 index 000000000000..e058e8d96272 --- /dev/null +++ b/changelog/27902.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/cert: Cache full list of role trust information separately to avoid +eviction, and avoid duplicate loading during multiple simultaneous logins on +the same role. +``` \ No newline at end of file diff --git a/changelog/27918.txt b/changelog/27918.txt new file mode 100644 index 000000000000..bdf34609efe3 --- /dev/null +++ b/changelog/27918.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Move secret-engine configuration create/edit from route `vault/settings/secrets/configure/<backend>` to `vault/secrets/<backend>/configuration/edit` +``` \ No newline at end of file diff --git a/changelog/27929.txt b/changelog/27929.txt new file mode 100644 index 000000000000..d957bcace4d8 --- /dev/null +++ b/changelog/27929.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: prevent JWKS from being generated by multiple concurrent requests +``` diff --git a/changelog/27939.txt b/changelog/27939.txt new file mode 100644 index 000000000000..093b742b90bc --- /dev/null +++ b/changelog/27939.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix 500 errors that occurred querying `sys/internal/ui/mounts` for a mount prefixed by a namespace path when path filters are configured. +``` \ No newline at end of file diff --git a/changelog/27950.txt b/changelog/27950.txt new file mode 100644 index 000000000000..27a1e53b1743 --- /dev/null +++ b/changelog/27950.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database/hana: Update HANA db client to v1.10.1 +``` diff --git a/changelog/27952.txt b/changelog/27952.txt new file mode 100644 index 000000000000..aa7d2ba84f9d --- /dev/null +++ b/changelog/27952.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Internal implementation changes to the audit subsystem which improve performance. +``` diff --git a/changelog/27966.txt b/changelog/27966.txt new file mode 100644 index 000000000000..616bfe5c8acf --- /dev/null +++ b/changelog/27966.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/metrics: ensure core HA metrics are always output to Prometheus. 
+``` \ No newline at end of file diff --git a/changelog/28016.txt b/changelog/28016.txt new file mode 100644 index 000000000000..ef8f06b873a1 --- /dev/null +++ b/changelog/28016.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.9.0 +``` diff --git a/changelog/28024.txt b/changelog/28024.txt new file mode 100644 index 000000000000..8d1fbaa0e2a5 --- /dev/null +++ b/changelog/28024.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/postgres: Add new fields to the plugin's config endpoint for client certificate authentication. +``` diff --git a/changelog/28036.txt b/changelog/28036.txt new file mode 100644 index 000000000000..f47891e46c8d --- /dev/null +++ b/changelog/28036.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update the client count dashboard to use API namespace filtering and other UX improvements +``` \ No newline at end of file diff --git a/changelog/28042.txt b/changelog/28042.txt new file mode 100644 index 000000000000..f49cad8ac597 --- /dev/null +++ b/changelog/28042.txt @@ -0,0 +1,3 @@ +```release-note:bug +activity: The sys/internal/counters/activity endpoint will return current month data when the end_date parameter is set to a future date. +``` diff --git a/changelog/28056.txt b/changelog/28056.txt new file mode 100644 index 000000000000..f39fbe2921c5 --- /dev/null +++ b/changelog/28056.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Adds TRACE logging to log request/response under certain circumstances, and further improvements to the audit subsystem. +``` \ No newline at end of file diff --git a/changelog/28059.txt b/changelog/28059.txt new file mode 100644 index 000000000000..550fd75af5c6 --- /dev/null +++ b/changelog/28059.txt @@ -0,0 +1,3 @@ +```release-note:bug +command: The `vault secrets move` and `vault auth move` commands will no longer attempt to write to storage on performance standby nodes. +``` diff --git a/changelog/28062.txt b/changelog/28062.txt new file mode 100644 index 000000000000..c6a1fbef1980 --- /dev/null +++ b/changelog/28062.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: Ensure client count queries that include the current month return consistent results by sorting the clients before performing estimation +``` \ No newline at end of file diff --git a/changelog/28064.txt b/changelog/28064.txt new file mode 100644 index 000000000000..6f18843cad3a --- /dev/null +++ b/changelog/28064.txt @@ -0,0 +1,7 @@ +```release-note:improvement +activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) now includes identity metadata about entity clients. +``` + +```release-note:change +activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) now responds with a status of 204 instead of 400 when no data exists within the time range specified by `start_time` and `end_time`. +``` diff --git a/changelog/28068.txt b/changelog/28068.txt new file mode 100644 index 000000000000..ec608a67a108 --- /dev/null +++ b/changelog/28068.txt @@ -0,0 +1,7 @@ +```release-note:improvement +cli: `vault operator usage` will now include a warning if the specified usage period contains estimated client counts. +``` + +```release-note:improvement +activity: `/sys/internal/counters/activity` will now include a warning if the specified usage period contains estimated client counts. 
+``` \ No newline at end of file diff --git a/changelog/28076.txt b/changelog/28076.txt new file mode 100644 index 000000000000..bb9adb94294e --- /dev/null +++ b/changelog/28076.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: corrected invalid JSON in sample payload for azure secrets engine create/update role +``` \ No newline at end of file diff --git a/changelog/28093.txt b/changelog/28093.txt new file mode 100644 index 000000000000..830f53baa64e --- /dev/null +++ b/changelog/28093.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes toast (flash) alert message saying "created" when deleting a kv v2 secret +``` \ No newline at end of file diff --git a/changelog/28139.txt b/changelog/28139.txt new file mode 100644 index 000000000000..f538ddf769dd --- /dev/null +++ b/changelog/28139.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Skip connection verification on reading existing DB connection configuration +``` \ No newline at end of file diff --git a/changelog/28148.txt b/changelog/28148.txt new file mode 100644 index 000000000000..141e6a0129d2 --- /dev/null +++ b/changelog/28148.txt @@ -0,0 +1,3 @@ +```release-note:feature +**WIF fields for AWS secrets engine**: Add WIF fields to AWS secrets engine. +``` diff --git a/changelog/28186.txt b/changelog/28186.txt new file mode 100644 index 000000000000..ee17101e4b5d --- /dev/null +++ b/changelog/28186.txt @@ -0,0 +1,3 @@ +```release-note:improvement +raft/autopilot: Persist Raft server versions so autopilot always knows the versions of all servers in the cluster. Include server versions in the Raft bootstrap challenge answer so autopilot immediately knows the versions of new nodes. +``` \ No newline at end of file diff --git a/changelog/28199.txt b/changelog/28199.txt new file mode 100644 index 000000000000..9b94d97f8c5f --- /dev/null +++ b/changelog/28199.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Self-Managed Static Roles**: Self-Managed Static Roles are now supported for select SQL database engines (Postgres, Oracle). Requires Vault Enterprise. +``` \ No newline at end of file diff --git a/changelog/28204.txt b/changelog/28204.txt new file mode 100644 index 000000000000..beaef7968c73 --- /dev/null +++ b/changelog/28204.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes renew-self being called right after login for non-renewable tokens +``` \ No newline at end of file diff --git a/changelog/28207.txt b/changelog/28207.txt new file mode 100644 index 000000000000..0c3e07b517e8 --- /dev/null +++ b/changelog/28207.txt @@ -0,0 +1,3 @@ +```release-note:bug +proxy/cache (enterprise): Fixed an issue where Proxy with static secret caching enabled would not correctly handle requests to older secret versions for KVv2 secrets. Proxy's static secret cache now properly handles all requests relating to older versions for KVv2 secrets. +``` diff --git a/changelog/28212.txt b/changelog/28212.txt new file mode 100644 index 000000000000..a5e1a2b19a04 --- /dev/null +++ b/changelog/28212.txt @@ -0,0 +1,3 @@ +```release-note:feature +**KV v2 Patch/Subkey (enterprise)**: Adds GUI support to read the subkeys of a KV v2 secret and patch (partially update) secret data. +``` diff --git a/changelog/28230.txt b/changelog/28230.txt new file mode 100644 index 000000000000..13934ace8c25 --- /dev/null +++ b/changelog/28230.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed an issue where the maximum request duration timeout was not being added to all requests containing the strings sys/monitor and sys/events. 
With this change, the timeout is now added to all requests except those to the monitor and events endpoints. +``` diff --git a/changelog/28237.txt b/changelog/28237.txt new file mode 100644 index 000000000000..8b5728f3f434 --- /dev/null +++ b/changelog/28237.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Key Usage can now be set on intermediate and root CAs, and on CSRs generated by the PKI secrets engine. +``` diff --git a/changelog/28263.txt b/changelog/28263.txt new file mode 100644 index 000000000000..704ff2bb30a9 --- /dev/null +++ b/changelog/28263.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.19.0 +``` diff --git a/changelog/28264.txt b/changelog/28264.txt new file mode 100644 index 000000000000..03a508fa8808 --- /dev/null +++ b/changelog/28264.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kerberos: Update plugin to v0.13.0 +``` diff --git a/changelog/28266.txt b/changelog/28266.txt new file mode 100644 index 000000000000..084977f1851c --- /dev/null +++ b/changelog/28266.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.19.0 +``` diff --git a/changelog/28267.txt b/changelog/28267.txt new file mode 100644 index 000000000000..def6986c7d63 --- /dev/null +++ b/changelog/28267.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.20.0 +``` diff --git a/changelog/28268.txt b/changelog/28268.txt new file mode 100644 index 000000000000..b622284ade72 --- /dev/null +++ b/changelog/28268.txt @@ -0,0 +1,3 @@ +```release-note:change +database/mongodbatlas: Update plugin to v0.13.0 +``` diff --git a/changelog/28269.txt b/changelog/28269.txt new file mode 100644 index 000000000000..336e88a44014 --- /dev/null +++ b/changelog/28269.txt @@ -0,0 +1,3 @@ +```release-note:change +sdk: Upgrade to go-secure-stdlib/plugincontainer@v0.4.0, which also bumps github.com/docker/docker to v26.1.5+incompatible +``` diff --git a/changelog/28271.txt b/changelog/28271.txt new file mode 100644 index 000000000000..6cc6148f3d20 --- /dev/null +++ b/changelog/28271.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Update plugin to v0.18.0 +``` diff --git a/changelog/28275.txt b/changelog/28275.txt new file mode 100644 index 000000000000..85564eed28af --- /dev/null +++ b/changelog/28275.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.12.0 +``` diff --git a/changelog/28277.txt b/changelog/28277.txt new file mode 100644 index 000000000000..60985a1d9c25 --- /dev/null +++ b/changelog/28277.txt @@ -0,0 +1,3 @@ +```release-note:change +database/elasticsearch: Update plugin to v0.16.0 +``` diff --git a/changelog/28286.txt b/changelog/28286.txt new file mode 100644 index 000000000000..c7010a199241 --- /dev/null +++ b/changelog/28286.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Internal implementation changes to the audit subsystem which improve reliability. 
+``` \ No newline at end of file diff --git a/changelog/28287.txt b/changelog/28287.txt new file mode 100644 index 000000000000..7e6a6b8e750b --- /dev/null +++ b/changelog/28287.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.9.0 +``` diff --git a/changelog/28289.txt b/changelog/28289.txt new file mode 100644 index 000000000000..e203556fb134 --- /dev/null +++ b/changelog/28289.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.20.0 +``` diff --git a/changelog/28293.txt b/changelog/28293.txt new file mode 100644 index 000000000000..4db93f90dcf1 --- /dev/null +++ b/changelog/28293.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.5.0 +``` diff --git a/changelog/28294.txt b/changelog/28294.txt new file mode 100644 index 000000000000..13168cf2d746 --- /dev/null +++ b/changelog/28294.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.19.0 +``` diff --git a/changelog/28300.txt b/changelog/28300.txt new file mode 100644 index 000000000000..4357135fb9ef --- /dev/null +++ b/changelog/28300.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.18.0 +``` diff --git a/changelog/28307.txt b/changelog/28307.txt new file mode 100644 index 000000000000..9f1f3be31624 --- /dev/null +++ b/changelog/28307.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.17.0 +``` diff --git a/changelog/28309.txt b/changelog/28309.txt new file mode 100644 index 000000000000..94574b312103 --- /dev/null +++ b/changelog/28309.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: ocsp_ca_certificates field was not honored when validating OCSP responses signed by a CA that did not issue the certificate. +``` \ No newline at end of file diff --git a/changelog/28312.txt b/changelog/28312.txt new file mode 100644 index 000000000000..e2959b09ac22 --- /dev/null +++ b/changelog/28312.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.10.0 +``` diff --git a/changelog/28324.txt b/changelog/28324.txt new file mode 100644 index 000000000000..a38b971b6b1c --- /dev/null +++ b/changelog/28324.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Update plugin to v0.20.0 +``` diff --git a/changelog/28325.txt b/changelog/28325.txt new file mode 100644 index 000000000000..b00c20b19227 --- /dev/null +++ b/changelog/28325.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.14.0 +``` diff --git a/changelog/28327.txt b/changelog/28327.txt new file mode 100644 index 000000000000..4d77459f3568 --- /dev/null +++ b/changelog/28327.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.12.0 +``` diff --git a/changelog/28334.txt b/changelog/28334.txt new file mode 100644 index 000000000000..6a126a0e02a5 --- /dev/null +++ b/changelog/28334.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.20.0 +``` diff --git a/changelog/28348.txt b/changelog/28348.txt new file mode 100644 index 000000000000..7b8177491b8c --- /dev/null +++ b/changelog/28348.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.13.0 +``` diff --git a/changelog/28349.txt b/changelog/28349.txt new file mode 100644 index 000000000000..c81df34fc003 --- /dev/null +++ b/changelog/28349.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.22.0 +``` diff --git a/changelog/28360.txt b/changelog/28360.txt new file mode 100644 index 
000000000000..722c8361eaaa --- /dev/null +++ b/changelog/28360.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.19.0 +``` diff --git a/changelog/28361.txt b/changelog/28361.txt new file mode 100644 index 000000000000..075bb21414f3 --- /dev/null +++ b/changelog/28361.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.19.0 +``` diff --git a/changelog/28366.txt b/changelog/28366.txt new file mode 100644 index 000000000000..5f2d65783986 --- /dev/null +++ b/changelog/28366.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Update plugin to v0.19.0 +``` diff --git a/changelog/28371.txt b/changelog/28371.txt new file mode 100644 index 000000000000..c719c4be56c0 --- /dev/null +++ b/changelog/28371.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix UI improperly checking capabilities for enabling performance and dr replication +``` diff --git a/changelog/28404.txt b/changelog/28404.txt new file mode 100644 index 000000000000..cca52cce8b67 --- /dev/null +++ b/changelog/28404.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis: Update plugin to v0.4.0 +``` diff --git a/changelog/28441.txt b/changelog/28441.txt new file mode 100644 index 000000000000..e78ed504af22 --- /dev/null +++ b/changelog/28441.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth: Updated error handling for missing login credentials in AppRole and UserPass auth methods to return a 400 error instead of a 500 error. +``` diff --git a/changelog/28450.txt b/changelog/28450.txt new file mode 100644 index 000000000000..38f27478e854 --- /dev/null +++ b/changelog/28450.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: During certificate validation, OCSP requests are debug logged even if Vault's log level is above DEBUG. +``` \ No newline at end of file diff --git a/changelog/28466.txt b/changelog/28466.txt new file mode 100644 index 000000000000..be9016ec05e8 --- /dev/null +++ b/changelog/28466.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ssh: Add a flag, `allow_empty_principals` to allow keys or certs to apply to any user/principal. +``` diff --git a/changelog/28488.txt b/changelog/28488.txt new file mode 100644 index 000000000000..bc297fc2f20d --- /dev/null +++ b/changelog/28488.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Track the last time auto-tidy ran to address auto-tidy not running if the auto-tidy interval is longer than scheduled Vault restarts. +``` diff --git a/changelog/28494.txt b/changelog/28494.txt new file mode 100644 index 000000000000..50b9223968a4 --- /dev/null +++ b/changelog/28494.txt @@ -0,0 +1,3 @@ +```release-note:bug +proxy/cache (enterprise): Fixed a data race that could occur while tracking capabilities in Proxy's static secret cache. +``` diff --git a/changelog/28498.txt b/changelog/28498.txt new file mode 100644 index 000000000000..0a6810ad7392 --- /dev/null +++ b/changelog/28498.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix token TTL calculation so that it uses `max_lease_ttl` tune value for tokens created via `auth/token/create`. 
+``` diff --git a/changelog/28509.txt b/changelog/28509.txt new file mode 100644 index 000000000000..821f8411464f --- /dev/null +++ b/changelog/28509.txt @@ -0,0 +1,3 @@ +```release-note:bug +databases: fix issue where local timezone was getting lost when using a rotation schedule cron +``` diff --git a/changelog/28519.txt b/changelog/28519.txt new file mode 100644 index 000000000000..9cd11f74ff36 --- /dev/null +++ b/changelog/28519.txt @@ -0,0 +1,3 @@ +```release-note:bug +database/postgresql: Fix potential error revoking privileges in postgresql database secrets engine when a schema contains special characters +``` diff --git a/changelog/28526.txt b/changelog/28526.txt new file mode 100644 index 000000000000..b22b093efc9c --- /dev/null +++ b/changelog/28526.txt @@ -0,0 +1,3 @@ +```release-note:improvement +physical/raft: Log when the MAP_POPULATE mmap flag gets disabled before opening the database. +``` \ No newline at end of file diff --git a/changelog/28539.txt b/changelog/28539.txt new file mode 100644 index 000000000000..ebbc5476135b --- /dev/null +++ b/changelog/28539.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix `default_role` input missing from oidc auth method configuration form +``` diff --git a/changelog/28564.txt b/changelog/28564.txt new file mode 100644 index 000000000000..22e6b74198e3 --- /dev/null +++ b/changelog/28564.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: log at level ERROR rather than INFO when all seals are unhealthy. +``` diff --git a/changelog/28597.txt b/changelog/28597.txt new file mode 100644 index 000000000000..774c200f1adc --- /dev/null +++ b/changelog/28597.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: When using ocsp_ca_certificates, an error was produced even though extra certificate validation succeeded. +``` diff --git a/changelog/6483.txt b/changelog/6483.txt new file mode 100644 index 000000000000..2f0dbed1fdc8 --- /dev/null +++ b/changelog/6483.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/consul: Consul service registration tags are now case-sensitive. +``` \ No newline at end of file diff --git a/changelog/9733.txt b/changelog/9733.txt new file mode 100644 index 000000000000..f345cbd5278e --- /dev/null +++ b/changelog/9733.txt @@ -0,0 +1,3 @@ +```release-note:improvement +db/cassandra: Add `disable_host_initial_lookup` option to backend, allowing the disabling of initial host lookup. +``` diff --git a/changelog/README.md b/changelog/README.md index cbf841f6c77b..6c249c687fd1 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -24,7 +24,7 @@ Release notes are text files with three lines: might be warranted. - `deprecation` - Announcement of a planned future removal of a feature. Only use this if a deprecation notice also exists [in the - docs](https://www.vaultproject.io/docs/deprecation). + docs](https://developer.hashicorp.com/vault/docs/deprecation). - `feature` - Large topical additions for a major release. These are rarely in minor releases. Formatting for `feature` entries differs from normal changelog formatting - see the [new features @@ -36,6 +36,8 @@ Release notes are text files with three lines: 3. An ending code block. +If more than one area is impacted, use separate code blocks for each entry. + This should be in a file named after the pull request number (e.g., `12345.txt`). There are many examples in this folder; check one out if you're stuck! 
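For illustration, a complete two-area entry following the convention the README change above adds ("use separate code blocks for each entry") would look like the sketch below; the wording mirrors a real entry in this changeset (changelog/27510.txt), and per the README the file would be named after its pull request number (e.g., `12345.txt`):

```release-note:improvement
agent: Add the ability to dump pprof to the filesystem using SIGUSR2
```

```release-note:improvement
proxy: Add the ability to dump pprof to the filesystem using SIGUSR2
```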
diff --git a/changelog/_22733.txt b/changelog/_22733.txt new file mode 100644 index 000000000000..039e423596fe --- /dev/null +++ b/changelog/_22733.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes long namespace names overflow in the sidebar +``` diff --git a/changelog/_23945.txt b/changelog/_23945.txt new file mode 100644 index 000000000000..030b677aec70 --- /dev/null +++ b/changelog/_23945.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Custom messages (enterprise)**: Introduces custom messages settings, allowing users to view, and operators to configure system-wide messages. +``` diff --git a/changelog/_go-ver-1140.txt b/changelog/_go-ver-1140.txt index d1b6a1489383..052a277ab431 100644 --- a/changelog/_go-ver-1140.txt +++ b/changelog/_go-ver-1140.txt @@ -1,3 +1,3 @@ ```release-note:change -core: Bump Go version to 1.20.1. +core: Bump Go version to 1.20.5. ``` diff --git a/changelog/_go-ver-1150.txt b/changelog/_go-ver-1150.txt new file mode 100644 index 000000000000..6df482655f34 --- /dev/null +++ b/changelog/_go-ver-1150.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.21.1. +``` diff --git a/changelog/_go-ver-1160.txt b/changelog/_go-ver-1160.txt new file mode 100644 index 000000000000..3b7b96eb0d89 --- /dev/null +++ b/changelog/_go-ver-1160.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.21.5. +``` diff --git a/changelog/_go-ver-1170.txt b/changelog/_go-ver-1170.txt new file mode 100644 index 000000000000..e06311e84e5b --- /dev/null +++ b/changelog/_go-ver-1170.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.22.2 +``` diff --git a/changelog/_go-ver-1180.txt b/changelog/_go-ver-1180.txt new file mode 100644 index 000000000000..45b3d2ef5f47 --- /dev/null +++ b/changelog/_go-ver-1180.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.22.7 +``` diff --git a/changelog/pki-ui-improvements.txt b/changelog/pki-ui-improvements.txt new file mode 100644 index 000000000000..d824033f2e3c --- /dev/null +++ b/changelog/pki-ui-improvements.txt @@ -0,0 +1,3 @@ +```release-note:feature +**NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience +``` \ No newline at end of file diff --git a/command/agent.go b/command/agent.go index 9772ff33f61f..2e5f550a55f9 100644 --- a/command/agent.go +++ b/command/agent.go @@ -1,15 +1,15 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "context" "crypto/tls" + "errors" "flag" "fmt" "io" - "io/ioutil" "net" "net/http" "os" @@ -17,42 +17,28 @@ import ( "sort" "strings" "sync" + "sync/atomic" "time" - token_file "github.com/hashicorp/vault/command/agent/auth/token-file" - + systemd "github.com/coreos/go-systemd/daemon" + "github.com/hashicorp/cli" ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" - - "github.com/hashicorp/vault/command/agent/sink/inmem" - - systemd "github.com/coreos/go-systemd/daemon" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/gatedwriter" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/command/agent/auth/alicloud" - "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/auth/aws" - "github.com/hashicorp/vault/command/agent/auth/azure" - "github.com/hashicorp/vault/command/agent/auth/cert" - "github.com/hashicorp/vault/command/agent/auth/cf" - "github.com/hashicorp/vault/command/agent/auth/gcp" - "github.com/hashicorp/vault/command/agent/auth/jwt" - "github.com/hashicorp/vault/command/agent/auth/kerberos" - "github.com/hashicorp/vault/command/agent/auth/kubernetes" - "github.com/hashicorp/vault/command/agent/auth/oci" - "github.com/hashicorp/vault/command/agent/cache" - "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/command/agent/cache/keymanager" agentConfig "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agent/exec" "github.com/hashicorp/vault/command/agent/template" - "github.com/hashicorp/vault/command/agent/winsvc" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/useragent" @@ -62,9 +48,10 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/version" "github.com/kr/pretty" - "github.com/mitchellh/cli" "github.com/oklog/run" "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" "google.golang.org/grpc/test/bufconn" ) @@ -77,6 +64,7 @@ const ( // flagNameAgentExitAfterAuth is used as an Agent specific flag to indicate // that agent should exit after a single successful auth flagNameAgentExitAfterAuth = "exit-after-auth" + nameAgent = "agent" ) type AgentCommand struct { @@ -87,18 +75,18 @@ type AgentCommand struct { ShutdownCh chan struct{} SighupCh chan struct{} + SigUSR2Ch chan struct{} tlsReloadFuncsLock sync.RWMutex tlsReloadFuncs []reloadutil.ReloadFunc logWriter io.Writer logGate *gatedwriter.Writer - logger log.Logger + logger hclog.Logger // Telemetry object 
metricsHelper *metricsutil.MetricsHelper - - cleanupGuard sync.Once + cleanupGuard sync.Once startedCh chan struct{} // for tests reloadedCh chan struct{} // for tests @@ -225,7 +213,21 @@ func (c *AgentCommand) Run(args []string) int { c.outputErrors(err) return 1 } + + // Update the logger and then base the log writer on that logger. + // Log writer is supplied to consul-template runners for templates and execs. + // We want to ensure that consul-template will honor the settings, for example + // if the -log-format is JSON we want JSON, not a mix of JSON and non-JSON messages. c.logger = l + c.logWriter = l.StandardWriter(&hclog.StandardLoggerOptions{ + InferLevels: true, + InferLevelsWithTimestamp: true, + }) + + // release log gate if the disable-gated-logs flag is set + if c.logFlags.flagDisableGatedLogs { + c.logGate.Flush() + } infoKeys := make([]string, 0, 10) info := make(map[string]string) @@ -258,7 +260,7 @@ func (c *AgentCommand) Run(args []string) int { // Ignore any setting of Agent's address. This client is used by the Agent // to reach out to Vault. This should never loop back to agent. - c.flagAgentAddress = "" + c.flagAgentProxyAddress = "" client, err := c.Client() if err != nil { c.UI.Error(fmt.Sprintf( @@ -278,7 +280,17 @@ func (c *AgentCommand) Run(args []string) int { } } - // ctx and cancelFunc are passed to the AuthHandler, SinkServer, and + if config.IsDefaultListerDefined() { + // Notably, we cannot know for sure if they are using the API proxy functionality unless + // we log on each API proxy call, which would be too noisy. + // A customer could have a listener defined but only be using e.g. the cache-clear API, + // even though the API proxy is something they have available. + c.UI.Warn("==> Note: Vault Agent will be deprecating API proxy functionality in a future " + + "release, and this functionality has moved to a new subcommand, vault proxy. If you rely on this " + + "functionality, plan to move to Vault Proxy instead.") + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, ExecServer and // TemplateServer that periodically listen for ctx.Done() to fire and shut // down accordingly. ctx, cancelFunc := context.WithCancel(context.Background()) @@ -290,7 +302,7 @@ func (c *AgentCommand) Run(args []string) int { Ui: c.UI, ServiceName: "vault", DisplayName: "Vault", - UserAgent: useragent.String(), + UserAgent: useragent.AgentString(), ClusterName: config.ClusterName, }) if err != nil { @@ -299,14 +311,28 @@ func (c *AgentCommand) Run(args []string) int { } c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + var templateNamespace string + // This indicates whether the namespace for the client has been set by environment variable. + // If it has, we don't touch it. + namespaceSetByEnvironmentVariable := client.Namespace() != "" + + if !namespaceSetByEnvironmentVariable && config.Vault != nil && config.Vault.Namespace != "" { + client.SetNamespace(config.Vault.Namespace) + } + var method auth.AuthMethod var sinks []*sink.SinkConfig - var templateNamespace string if config.AutoAuth != nil { - if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { + // Note: This will set the namespace header to the value in config.AutoAuth.Method.Namespace + // only if it hasn't been set by config.Vault.Namespace above. In that case, the config value + // present at config.AutoAuth.Method.Namespace will still be used for auto-auth. 
+ if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { client.SetNamespace(config.AutoAuth.Method.Namespace) } - templateNamespace = client.Headers().Get(consts.NamespaceHeaderName) + templateNamespace = client.Namespace() + if !namespaceSetByEnvironmentVariable && config.Vault != nil && config.Vault.Namespace != "" { + templateNamespace = config.Vault.Namespace + } sinkClient, err := client.CloneWithHeaders() if err != nil { @@ -353,39 +379,9 @@ func (c *AgentCommand) Run(args []string) int { MountPath: config.AutoAuth.Method.MountPath, Config: config.AutoAuth.Method.Config, } - switch config.AutoAuth.Method.Type { - case "alicloud": - method, err = alicloud.NewAliCloudAuthMethod(authConfig) - case "aws": - method, err = aws.NewAWSAuthMethod(authConfig) - case "azure": - method, err = azure.NewAzureAuthMethod(authConfig) - case "cert": - method, err = cert.NewCertAuthMethod(authConfig) - case "cf": - method, err = cf.NewCFAuthMethod(authConfig) - case "gcp": - method, err = gcp.NewGCPAuthMethod(authConfig) - case "jwt": - method, err = jwt.NewJWTAuthMethod(authConfig) - case "kerberos": - method, err = kerberos.NewKerberosAuthMethod(authConfig) - case "kubernetes": - method, err = kubernetes.NewKubernetesAuthMethod(authConfig) - case "approle": - method, err = approle.NewApproleAuthMethod(authConfig) - case "oci": - method, err = oci.NewOCIAuthMethod(authConfig, config.Vault.Address) - case "token_file": - method, err = token_file.NewTokenFileAuthMethod(authConfig) - case "pcf": // Deprecated. - method, err = cf.NewCFAuthMethod(authConfig) - default: - c.UI.Error(fmt.Sprintf("Unknown auth method %q", config.AutoAuth.Method.Type)) - return 1 - } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) if err != nil { - c.UI.Error(fmt.Errorf("Error creating %s auth method: %w", config.AutoAuth.Method.Type, err).Error()) + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) return 1 } } @@ -505,10 +501,12 @@ func (c *AgentCommand) Run(args []string) int { // The API proxy to be used, if listeners are configured apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: proxyClient, - Logger: apiProxyLogger, - EnforceConsistency: enforceConsistency, - WhenInconsistentAction: whenInconsistent, + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.AgentProxyStringWithProxiedUserAgent, + UserAgentString: useragent.AgentProxyString(), }) if err != nil { c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) @@ -522,10 +520,12 @@ func (c *AgentCommand) Run(args []string) int { // Create the lease cache proxier and set its underlying proxier to // the API proxier. 
leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: proxyClient, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: useragent.ProxyAPIProxyString(), }) if err != nil { c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) @@ -534,155 +534,99 @@ func (c *AgentCommand) Run(args []string) int { // Configure persistent storage and add to LeaseCache if config.Cache.Persist != nil { - if config.Cache.Persist.Path == "" { - c.UI.Error("must specify persistent cache path") - return 1 - } - - // Set AAD based on key protection type - var aad string - switch config.Cache.Persist.Type { - case "kubernetes": - aad, err = getServiceAccountJWT(config.Cache.Persist.ServiceAccountTokenFile) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to read service account token from %s: %s", config.Cache.Persist.ServiceAccountTokenFile, err)) - return 1 - } - default: - c.UI.Error(fmt.Sprintf("persistent key protection type %q not supported", config.Cache.Persist.Type)) - return 1 - } - - // Check if bolt file exists already - dbFileExists, err := cacheboltdb.DBFileExists(config.Cache.Persist.Path) + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) if err != nil { - c.UI.Error(fmt.Sprintf("failed to check if bolt file exists at path %s: %s", config.Cache.Persist.Path, err)) + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) return 1 } - if dbFileExists { - // Open the bolt file, but wait to setup Encryption - ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: config.Cache.Persist.Path, - Logger: cacheLogger.Named("cacheboltdb"), - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error opening persistent cache: %v", err)) - return 1 - } + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() + } + } + } - // Get the token from bolt for retrieving the encryption key, - // then setup encryption so that restore is possible - token, err := ps.GetRetrievalToken() - if err != nil { - c.UI.Error(fmt.Sprintf("Error getting retrieval token from persistent cache: %v", err)) - } + // Create the AuthHandler, SinkServer, TemplateServer, and ExecServer now so that we can pass AuthHandler struct + // values into the Proxy http.Handler. We will wait to actually start these servers + // once we have configured the handlers for each listener below + authInProgress := &atomic.Bool{} + invalidTokenErrCh := make(chan error) + var ah *auth.AuthHandler + var ss *sink.SinkServer + var ts *template.Server + var es *exec.Server + if method != nil { + enableTemplateTokenCh := len(config.Templates) > 0 + enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 - if err := ps.Close(); err != nil { - c.UI.Warn(fmt.Sprintf("Failed to close persistent cache file after getting retrieval token: %s", err)) - } + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. 
+ ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } - km, err := keymanager.NewPassthroughKeyManager(ctx, token) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) - return 1 - } + // Override the set namespace with the auto-auth specific namespace + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + ahClient.SetNamespace(config.AutoAuth.Method.Namespace) + } - // Open the bolt file with the wrapper provided - ps, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: config.Cache.Persist.Path, - Logger: cacheLogger.Named("cacheboltdb"), - Wrapper: km.Wrapper(), - AAD: aad, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error opening persistent cache with wrapper: %v", err)) - return 1 - } + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } - // Restore anything in the persistent cache to the memory cache - if err := leaseCache.Restore(ctx, ps); err != nil { - c.UI.Error(fmt.Sprintf("Error restoring in-memory cache from persisted file: %v", err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - cacheLogger.Info("loaded memcache from persistent storage") + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } - // Check for previous auto-auth token - oldTokenBytes, err := ps.GetAutoAuthToken(ctx) - if err != nil { - c.UI.Error(fmt.Sprintf("Error in fetching previous auto-auth token: %s", err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - if len(oldTokenBytes) > 0 { - oldToken, err := cachememdb.Deserialize(oldTokenBytes) - if err != nil { - c.UI.Error(fmt.Sprintf("Error in deserializing previous auto-auth token cache entry: %s", err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - previousToken = oldToken.Token - } + ah = auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + EnableTemplateTokenCh: enableTemplateTokenCh, + EnableExecTokenCh: enableEnvTemplateTokenCh, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.AgentAutoAuthString(), + MetricsSignifier: "agent", + }) - // If keep_after_import true, set persistent storage layer in - // leaseCache, else remove db file - if config.Cache.Persist.KeepAfterImport { - defer ps.Close() - leaseCache.SetPersistentStorage(ps) - } else { - if err := ps.Close(); err != nil { - c.UI.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err)) - } - dbFile := filepath.Join(config.Cache.Persist.Path, cacheboltdb.DatabaseFileName) - if err := os.Remove(dbFile); err != nil { - c.UI.Error(fmt.Sprintf("failed to remove persistent storage file %s: %s", dbFile, err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - } - } else { - km, err := keymanager.NewPassthroughKeyManager(ctx, nil) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) - return 1 - } - ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: config.Cache.Persist.Path, - Logger: cacheLogger.Named("cacheboltdb"), - Wrapper: km.Wrapper(), - AAD: aad, - }) - if err != nil { - 
c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) - return 1 - } - cacheLogger.Info("configured persistent storage", "path", config.Cache.Persist.Path) + ss = sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) - // Stash the key material in bolt - token, err := km.RetrievalToken(ctx) - if err != nil { - c.UI.Error(fmt.Sprintf("Error getting persistent key: %s", err)) - return 1 - } - if err := ps.StoreRetrievalToken(token); err != nil { - c.UI.Error(fmt.Sprintf("Error setting key in persistent cache: %v", err)) - return 1 - } + ts = template.NewServer(&template.ServerConfig{ + Logger: c.logger.Named("template.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + AgentConfig: c.config, + Namespace: templateNamespace, + ExitAfterAuth: config.ExitAfterAuth, + }) - defer ps.Close() - leaseCache.SetPersistentStorage(ps) - } + es, err = exec.NewServer(&exec.ServerConfig{ + AgentConfig: c.config, + Namespace: templateNamespace, + Logger: c.logger.Named("exec.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + }) + if err != nil { + c.logger.Error("could not create exec server", "error", err) + return 1 } } var listeners []net.Listener // If there are templates, add an in-process listener - if len(config.Templates) > 0 { + if len(config.Templates) > 0 || len(config.EnvTemplates) > 0 { config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) } @@ -703,6 +647,7 @@ func (c *AgentCommand) Run(args []string) int { lnBundle, err := cache.StartListener(lnConfig) if err != nil { c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + c.tlsReloadFuncsLock.Unlock() return 1 } @@ -715,31 +660,31 @@ func (c *AgentCommand) Run(args []string) int { listeners = append(listeners, ln) - proxyVaultToken := true - var inmemSink sink.Sink + apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") + inmemSink, err := inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) + useAutoAuthToken := false + forceAutoAuthToken := false if config.APIProxy != nil { - if config.APIProxy.UseAutoAuthToken { - apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") - inmemSink, err = inmem.New(&sink.SinkConfig{ - Logger: apiProxyLogger, - }, leaseCache) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) - return 1 - } - sinks = append(sinks, &sink.SinkConfig{ - Logger: apiProxyLogger, - Sink: inmemSink, - }) - } - proxyVaultToken = !config.APIProxy.ForceAutoAuthToken + useAutoAuthToken = config.APIProxy.UseAutoAuthToken + forceAutoAuthToken = config.APIProxy.ForceAutoAuthToken } var muxHandler http.Handler if leaseCache != nil { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, forceAutoAuthToken, useAutoAuthToken, authInProgress, invalidTokenErrCh) } else { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, forceAutoAuthToken, useAutoAuthToken, authInProgress, 
invalidTokenErrCh) } // Parse 'require_request_header' listener config option, and wrap @@ -815,6 +760,16 @@ func (c *AgentCommand) Run(args []string) int { case c.reloadedCh <- struct{}{}: default: } + case <-c.SigUSR2Ch: + pprofPath := filepath.Join(os.TempDir(), "vault-agent-pprof") + cpuProfileDuration := time.Second * 1 + err := WritePprofToFile(pprofPath, cpuProfileDuration) + if err != nil { + c.logger.Error(err.Error()) + continue + } + + c.logger.Info(fmt.Sprintf("Wrote pprof files to: %s", pprofPath)) case <-ctx.Done(): return nil } @@ -845,50 +800,6 @@ func (c *AgentCommand) Run(args []string) int { // Start auto-auth and sink servers if method != nil { - enableTokenCh := len(config.Templates) > 0 - - // Auth Handler is going to set its own retry values, so we want to - // work on a copy of the client to not affect other subsystems. - ahClient, err := c.client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) - return 1 - } - - if config.DisableIdleConnsAutoAuth { - ahClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - ahClient.SetDisableKeepAlives(true) - } - - ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ - Logger: c.logger.Named("auth.handler"), - Client: ahClient, - WrapTTL: config.AutoAuth.Method.WrapTTL, - MinBackoff: config.AutoAuth.Method.MinBackoff, - MaxBackoff: config.AutoAuth.Method.MaxBackoff, - EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - EnableTemplateTokenCh: enableTokenCh, - Token: previousToken, - ExitOnError: config.AutoAuth.Method.ExitOnError, - }) - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: c.logger.Named("sink.server"), - Client: ahClient, - ExitAfterAuth: config.ExitAfterAuth, - }) - - ts := template.NewServer(&template.ServerConfig{ - Logger: c.logger.Named("template.server"), - LogLevel: c.logger.GetLevel(), - LogWriter: c.logWriter, - AgentConfig: c.config, - Namespace: templateNamespace, - ExitAfterAuth: config.ExitAfterAuth, - }) g.Add(func() error { return ah.Run(ctx, method) @@ -902,7 +813,7 @@ func (c *AgentCommand) Run(args []string) int { }) g.Add(func() error { - err := ss.Run(ctx, ah.OutputCh, sinks) + err := ss.Run(ctx, ah.OutputCh, sinks, ah.AuthInProgress) c.logger.Info("sinks finished, exiting") // Start goroutine to drain from ah.OutputCh from this point onward @@ -933,7 +844,7 @@ func (c *AgentCommand) Run(args []string) int { }) g.Add(func() error { - return ts.Run(ctx, ah.TemplateTokenCh, config.Templates) + return ts.Run(ctx, ah.TemplateTokenCh, config.Templates, ah.AuthInProgress, ah.InvalidToken) }, func(error) { // Let the lease cache know this is a shutdown; no need to evict // everything @@ -944,17 +855,30 @@ func (c *AgentCommand) Run(args []string) int { ts.Stop() }) + g.Add(func() error { + return es.Run(ctx, ah.ExecTokenCh) + }, func(err error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + es.Close() + }) + } // Server configuration output padding := 24 sort.Strings(infoKeys) + caser := cases.Title(language.English) c.UI.Output("==> Vault Agent configuration:\n") for _, k := range infoKeys { c.UI.Output(fmt.Sprintf( "%s%s: %s", strings.Repeat(" ", padding-len(k)), - strings.Title(k), + caser.String(k), info[k])) } c.UI.Output("") @@ -979,11 +903,21 @@ func (c *AgentCommand) Run(args []string) int { var exitCode int if err := g.Run(); err != nil { - 
c.logger.Error("runtime error encountered", "error", err) - c.UI.Error("Error encountered during run, refer to logs for more details.") - exitCode = 1 + var processExitError *exec.ProcessExitError + if errors.As(err, &processExitError) { + exitCode = processExitError.ExitCode + } else { + exitCode = 1 + } + + if exitCode != 0 { + c.logger.Error("runtime error encountered", "error", err, "exitCode", exitCode) + c.UI.Error("Error encountered during run, refer to logs for more details.") + } } + c.notifySystemd(systemd.SdNotifyStopping) + return exitCode } @@ -1121,7 +1055,12 @@ func (c *AgentCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { // Don't do anything as the flag is already set from the command line case flagEnvSet: // Use value from env var - *fVar.Target = flagEnvValue != "" + val, err := parseutil.ParseBool(flagEnvValue) + if err != nil { + c.logger.Error("error parsing bool from environment variable, using default instead", "environment variable", fVar.EnvVar, "provided value", flagEnvValue, "default", fVar.Default, "err", err) + val = fVar.Default + } + *fVar.Target = val case configVal: // Use value from config *fVar.Target = configVal @@ -1162,19 +1101,6 @@ func (c *AgentCommand) removePidFile(pidPath string) error { return os.Remove(pidPath) } -// GetServiceAccountJWT reads the service account jwt from `tokenFile`. Default is -// the default service account file path in kubernetes. -func getServiceAccountJWT(tokenFile string) (string, error) { - if len(tokenFile) == 0 { - tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" - } - token, err := ioutil.ReadFile(tokenFile) - if err != nil { - return "", err - } - return strings.TrimSpace(string(token)), nil -} - func (c *AgentCommand) handleMetrics() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { @@ -1200,7 +1126,7 @@ func (c *AgentCommand) handleMetrics() http.Handler { w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) switch v := resp.Data[logical.HTTPRawBody].(type) { case string: - w.WriteHeader((status)) + w.WriteHeader(status) w.Write([]byte(v)) case []byte: w.WriteHeader(status) @@ -1231,42 +1157,44 @@ func (c *AgentCommand) handleQuit(enabled bool) http.Handler { } // newLogger creates a logger based on parsed config field on the Agent Command struct. 
-func (c *AgentCommand) newLogger() (log.InterceptLogger, error) { +func (c *AgentCommand) newLogger() (hclog.InterceptLogger, error) { if c.config == nil { return nil, fmt.Errorf("cannot create logger, no config") } - var errors error + var errs *multierror.Error // Parse all the log related config logLevel, err := logging.ParseLogLevel(c.config.LogLevel) if err != nil { - errors = multierror.Append(errors, err) + errs = multierror.Append(errs, err) } logFormat, err := logging.ParseLogFormat(c.config.LogFormat) if err != nil { - errors = multierror.Append(errors, err) + errs = multierror.Append(errs, err) } logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) if err != nil { - errors = multierror.Append(errors, err) + errs = multierror.Append(errs, err) } - if errors != nil { - return nil, errors + if errs != nil { + return nil, errs } - logCfg := &logging.LogConfig{ - Name: "agent", - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: c.config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: c.config.LogRotateBytes, - LogRotateMaxFiles: c.config.LogRotateMaxFiles, + logCfg, err := logging.NewLogConfig(nameAgent) + if err != nil { + return nil, err } + logCfg.Name = nameAgent + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = c.config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = c.config.LogRotateBytes + logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles l, err := logging.Setup(logCfg, c.logWriter) if err != nil { @@ -1278,20 +1206,20 @@ func (c *AgentCommand) newLogger() (log.InterceptLogger, error) { // loadConfig attempts to generate an Agent config from the file(s) specified. func (c *AgentCommand) loadConfig(paths []string) (*agentConfig.Config, error) { - var errors error + var errs *multierror.Error cfg := agentConfig.NewConfig() for _, configPath := range paths { configFromPath, err := agentConfig.LoadConfig(configPath) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) + errs = multierror.Append(errs, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) } else { cfg = cfg.Merge(configFromPath) } } - if errors != nil { - return nil, errors + if errs != nil { + return nil, errs } if err := cfg.ValidateConfig(); err != nil { diff --git a/command/agent/README.md b/command/agent/README.md index 02ef02159f01..e46109810ef6 100644 --- a/command/agent/README.md +++ b/command/agent/README.md @@ -12,4 +12,4 @@ addressing the following challenges: See the usage documentation on the Vault website here: -- https://www.vaultproject.io/docs/agent/ +- https://developer.hashicorp.com/vault/docs/agent-and-proxy/agent diff --git a/command/agent/agent_auto_auth_self_heal_test.go b/command/agent/agent_auto_auth_self_heal_test.go new file mode 100644 index 000000000000..2eeb6d19d933 --- /dev/null +++ b/command/agent/agent_auto_auth_self_heal_test.go @@ -0,0 +1,413 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package agent + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/template" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + tokenfile "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/sdk/helper/pointerutil" + "github.com/stretchr/testify/require" +) + +// TestAutoAuthSelfHealing_TokenFileAuth_SinkOutput tests that +// if the token is revoked, Auto Auth is re-triggered and a valid new token +// is written to a sink, and the template is correctly rendered with the new token +func TestAutoAuthSelfHealing_TokenFileAuth_SinkOutput(t *testing.T) { + // Unset the environment variable so that agent picks up the right test cluster address + t.Setenv(api.EnvVaultAddress, "") + + cluster := minimal.NewTestSoloCluster(t, nil) + logger := corehelpers.NewTestLogger(t) + serverClient := cluster.Cores[0].Client + + // Create token + secret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + require.NotNil(t, secret) + require.NotNil(t, secret.Auth) + require.NotEmpty(t, secret.Auth.ClientToken) + token := secret.Auth.ClientToken + + // Write token to the auto-auth token file + pathVaultToken := makeTempFile(t, "token-file", token) + + // Give us some leeway of 3 errors, 1 from each of: auth handler, sink server, template server.
+ errCh := make(chan error, 3) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // Create auth handler + am, err := tokenfile.NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{ + "token_file_path": pathVaultToken, + }, + }) + require.NoError(t, err) + + // Create sink file + pathSinkFile := makeTempFile(t, "sink-file", "") + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: serverClient, + EnableExecTokenCh: true, + EnableTemplateTokenCh: true, + EnableReauthOnNewCredentials: true, + ExitOnError: false, + } + ah := auth.NewAuthHandler(ahConfig) + go func() { + errCh <- ah.Run(ctx, am) + }() + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": pathSinkFile, + }, + } + fs, err := file.NewFileSink(config) + require.NoError(t, err) + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: serverClient, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) + }() + + // Create template server + sc := &template.ServerConfig{ + Logger: logger.Named("template.server"), + AgentConfig: &agentConfig.Config{ + Vault: &agentConfig.Vault{ + Address: serverClient.Address(), + TLSSkipVerify: true, + }, + TemplateConfig: &agentConfig.TemplateConfig{ + StaticSecretRenderInt: 1 * time.Second, + }, + AutoAuth: &agentConfig.AutoAuth{ + Sinks: []*agentConfig.Sink{ + { + Type: "file", + Config: map[string]interface{}{ + "path": pathSinkFile, + }, + }, + }, + }, + ExitAfterAuth: false, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + ExitAfterAuth: false, + } + + pathTemplateOutput := makeTempFile(t, "template-output", "") + templateTest := &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(`{{ with secret "auth/token/lookup-self" }}{{ .Data.id }}{{ end }}`), + Destination: pointerutil.StringPtr(pathTemplateOutput), + } + templatesToRender := []*ctconfig.TemplateConfig{templateTest} + + server := template.NewServer(sc) + go func() { + errCh <- server.Run(ctx, ah.TemplateTokenCh, templatesToRender, ah.AuthInProgress, ah.InvalidToken) + }() + + // Send token to template channel, and wait for the template to render + ah.TemplateTokenCh <- token + err = waitForFileContent(t, pathTemplateOutput, token) + require.NoError(t, err) + + // Revoke Token + err = serverClient.Auth().Token().RevokeOrphan(token) + require.NoError(t, err) + + // Create new token + tokenSecret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + require.NotNil(t, tokenSecret) + require.NotNil(t, tokenSecret.Auth) + require.NotEmpty(t, tokenSecret.Auth.ClientToken) + newToken := tokenSecret.Auth.ClientToken + + // Write token to file + err = os.WriteFile(pathVaultToken, []byte(newToken), 0o600) + require.NoError(t, err) + + // Wait for auto-auth to complete and verify token has been written to the sink + // and the template has been re-rendered + err = waitForFileContent(t, pathSinkFile, newToken) + require.NoError(t, err) + + err = waitForFileContent(t, pathTemplateOutput, newToken) + require.NoError(t, err) + + // Calling cancel will stop the 'Run' funcs we started in goroutines; we should + // then check that there were no errors in our channel. + cancel() + wrapUpTimeout := 5 * time.Second + for { + select { + case <-time.After(wrapUpTimeout): + t.Fatal("test timed out") + case err := <-errCh: + require.NoError(t, err) + case <-ctx.Done(): + // We can finish the test ourselves + return + } + } +}
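The test above and the one below pivot on a single distinction: an invalid (e.g. revoked) token should re-trigger auto-auth, while a permission-denied on a secret path should not, since re-authenticating the same way would yield a token with the same policies. A minimal sketch of that distinction via the public API (illustrative only; not Agent's actual detection logic):

package main

import (
	"github.com/hashicorp/vault/api"
)

// tokenNeedsReauth sketches the check conceptually: if the token cannot
// even look itself up, it is invalid and re-authentication can help; if
// lookup-self succeeds, a 403 on a data path is a policy problem that
// self-healing auto-auth deliberately leaves alone.
func tokenNeedsReauth(client *api.Client) bool {
	_, err := client.Auth().Token().LookupSelf()
	return err != nil
}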
+ +// Test_NoAutoAuthSelfHealing_BadPolicy tests that auto auth +// is not re-triggered if a token with incorrect policy access +// is used to render a template +func Test_NoAutoAuthSelfHealing_BadPolicy(t *testing.T) { + // Unset the environment variable so that agent picks up the right test cluster address + t.Setenv(api.EnvVaultAddress, "") + + policyName := "kv-access" + + cluster := minimal.NewTestSoloCluster(t, nil) + logger := corehelpers.NewTestLogger(t) + serverClient := cluster.Cores[0].Client + + // Write a policy with correct access to the secrets + err := serverClient.Sys().PutPolicy(policyName, ` +path "/kv/*" { + capabilities = ["create", "read", "update", "delete", "list"] +} +path "/secret/*" { + capabilities = ["create", "read", "update", "delete", "list"] +}`) + require.NoError(t, err) + + // Create a token without enough policy access to the kv secrets + secret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + }) + require.NoError(t, err) + require.NotNil(t, secret) + require.NotNil(t, secret.Auth) + require.NotEmpty(t, secret.Auth.ClientToken) + require.Len(t, secret.Auth.Policies, 1) + require.Contains(t, secret.Auth.Policies, "default") + token := secret.Auth.ClientToken + + // Write token to vault-token file + pathVaultToken := makeTempFile(t, "vault-token", token) + + // Give us some leeway of 3 errors, 1 from each of: auth handler, sink server, template server. + errCh := make(chan error, 3) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // Create auth handler + am, err := tokenfile.NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{ + "token_file_path": pathVaultToken, + }, + }) + require.NoError(t, err) + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: serverClient, + EnableExecTokenCh: true, + EnableReauthOnNewCredentials: true, + ExitOnError: false, + } + ah := auth.NewAuthHandler(ahConfig) + go func() { + errCh <- ah.Run(ctx, am) + }() + + // Create sink file + pathSinkFile := makeTempFile(t, "sink-file", "") + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": pathSinkFile, + }, + } + fs, err := file.NewFileSink(config) + require.NoError(t, err) + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: serverClient, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) + }() + + // Create template server + sc := template.ServerConfig{ + Logger: logger.Named("template.server"), + AgentConfig: &agentConfig.Config{ + Vault: &agentConfig.Vault{ + Address: serverClient.Address(), + TLSSkipVerify: true, + }, + TemplateConfig: &agentConfig.TemplateConfig{ + StaticSecretRenderInt: 1 * time.Second, + }, + // Need to create at least one sink output so that it does not exit after rendering + AutoAuth: &agentConfig.AutoAuth{ + Sinks: []*agentConfig.Sink{ + { + Type: "file", + Config: map[string]interface{}{ + "path": pathSinkFile, + }, + }, + }, + }, + ExitAfterAuth: false, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + ExitAfterAuth: false, + } + +
pathTemplateDestination := makeTempFile(t, "kv-data", "") + templateTest := &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(`"{{ with secret "secret/data/otherapp" }}{{ .Data.data.username }}{{ end }}"`), + Destination: pointerutil.StringPtr(pathTemplateDestination), + } + templatesToRender := []*ctconfig.TemplateConfig{templateTest} + + server := template.NewServer(&sc) + go func() { + errCh <- server.Run(ctx, ah.TemplateTokenCh, templatesToRender, ah.AuthInProgress, ah.InvalidToken) + }() + + // Send token to the template channel + ah.TemplateTokenCh <- token + + // Create new token with the correct policy access + tokenSecret, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{policyName}, + }) + require.NoError(t, err) + require.NotNil(t, tokenSecret) + require.NotNil(t, tokenSecret.Auth) + require.NotEmpty(t, tokenSecret.Auth.ClientToken) + require.Len(t, tokenSecret.Auth.Policies, 2) + require.Contains(t, tokenSecret.Auth.Policies, "default") + require.Contains(t, tokenSecret.Auth.Policies, policyName) + newToken := tokenSecret.Auth.ClientToken + + // Write new token to token file (where Agent would re-auto-auth from if + // it were triggered) + err = os.WriteFile(pathVaultToken, []byte(newToken), 0o600) + require.NoError(t, err) + + // Wait for any potential *incorrect* re-triggers of auto auth + time.Sleep(time.Second * 3) + + // Auto auth should not have been re-triggered because of just a permission denied error. + // Verify that the new token has NOT been written to the token sink + tokenInSink, err := os.ReadFile(pathSinkFile) + require.NoError(t, err) + require.Equal(t, token, string(tokenInSink)) + + // Validate that the template still hasn't been rendered. + templateContent, err := os.ReadFile(pathTemplateDestination) + require.NoError(t, err) + require.Equal(t, "", string(templateContent)) + + cancel() + wrapUpTimeout := 5 * time.Second + for { + select { + case <-time.After(wrapUpTimeout): + t.Fatal("test timed out") + case err := <-errCh: + require.NoError(t, err) + case <-ctx.Done(): + // We can finish the test ourselves + return + } + } +} + +// waitForFileContent waits for the file at filePath to exist and contain expectedContent, +// or it returns an error. It polls for five seconds at 100ms intervals, returning nil once +// the content matches and a non-nil error if it never does. +func waitForFileContent(t *testing.T, filePath, expectedContent string) error { + t.Helper() + + var err error + var content []byte + tick := time.Tick(100 * time.Millisecond) + timeout := time.After(5 * time.Second) + // We need to wait for the files to be updated... + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for file content, last error: %w", err) + case <-tick: + } + + // Assign with '=' rather than ':=' so the error stays visible to the + // timeout branch above instead of being shadowed inside the loop. + content, err = os.ReadFile(filePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } + return err + } + + stringContent := string(content) + if stringContent != expectedContent { + err = fmt.Errorf("content not yet the same, expectedContent=%s, content=%s", expectedContent, stringContent) + continue + } + + return nil + } +} + +// makeTempFile creates a temp file with the specified name, populates it with the +// supplied contents, and closes it. The path to the file is returned; the file is +// automatically removed when the test that created it finishes.
+func makeTempFile(t *testing.T, name, contents string) string { + t.Helper() + + f, err := os.Create(filepath.Join(t.TempDir(), name)) + require.NoError(t, err) + path := f.Name() + + _, err = f.WriteString(contents) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + + return path +} diff --git a/command/agent/alicloud_end_to_end_test.go b/command/agent/alicloud_end_to_end_test.go index 969b066335d0..220d98130f3f 100644 --- a/command/agent/alicloud_end_to_end_test.go +++ b/command/agent/alicloud_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -15,17 +15,15 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" "github.com/aliyun/alibaba-cloud-sdk-go/services/sts" - hclog "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" vaultalicloud "github.com/hashicorp/vault-plugin-auth-alicloud" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentalicloud "github.com/hashicorp/vault/command/agent/auth/alicloud" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentalicloud "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) @@ -49,9 +47,7 @@ func TestAliCloudEndToEnd(t *testing.T) { } testhelpers.SkipUnlessEnvVarsSet(t, credNames) - logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "alicloud": vaultalicloud.Factory, }, @@ -91,7 +87,7 @@ func TestAliCloudEndToEnd(t *testing.T) { }() am, err := agentalicloud.NewAliCloudAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.alicloud"), + Logger: cluster.Logger.Named("auth.alicloud"), MountPath: "auth/alicloud", Config: map[string]interface{}{ "role": "test", @@ -104,7 +100,7 @@ func TestAliCloudEndToEnd(t *testing.T) { } ahConfig := &auth.AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), + Logger: cluster.Logger.Named("auth.handler"), Client: client, } @@ -133,7 +129,7 @@ func TestAliCloudEndToEnd(t *testing.T) { t.Logf("output: %s", tokenSinkFileName) config := &sink.SinkConfig{ - Logger: logger.Named("sink.file"), + Logger: cluster.Logger.Named("sink.file"), Config: map[string]interface{}{ "path": tokenSinkFileName, }, @@ -147,11 +143,11 @@ func TestAliCloudEndToEnd(t *testing.T) { config.Sink = fs ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: logger.Named("sink.server"), + Logger: cluster.Logger.Named("sink.server"), Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/approle_end_to_end_test.go b/command/agent/approle_end_to_end_test.go index a0e51f0bb9b0..f77feec18c7c 100644 --- a/command/agent/approle_end_to_end_test.go +++ b/command/agent/approle_end_to_end_test.go @@ 
-1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -16,10 +16,10 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent/auth" - agentapprole "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -72,7 +72,6 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo coreConfig := &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -257,7 +256,7 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { @@ -411,9 +410,6 @@ func TestAppRoleLongRoleName(t *testing.T) { approleName := strings.Repeat("a", 5000) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -477,9 +473,6 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, var err error logger := logging.NewVaultLogger(log.Trace) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -646,7 +639,7 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { diff --git a/command/agent/auth/auth.go b/command/agent/auth/auth.go deleted file mode 100644 index 80bc5bbbb86b..000000000000 --- a/command/agent/auth/auth.go +++ /dev/null @@ -1,518 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package auth - -import ( - "context" - "encoding/json" - "errors" - "math/rand" - "net/http" - "time" - - "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/jsonutil" -) - -const ( - defaultMinBackoff = 1 * time.Second - defaultMaxBackoff = 5 * time.Minute -) - -// AuthMethod is the interface that auto-auth methods implement for the agent -// to use. -type AuthMethod interface { - // Authenticate returns a mount path, header, request body, and error. - // The header may be nil if no special header is needed. 
- Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) - NewCreds() chan struct{} - CredSuccess() - Shutdown() -} - -// AuthMethodWithClient is an extended interface that can return an API client -// for use during the authentication call. -type AuthMethodWithClient interface { - AuthMethod - AuthClient(client *api.Client) (*api.Client, error) -} - -type AuthConfig struct { - Logger hclog.Logger - MountPath string - WrapTTL time.Duration - Config map[string]interface{} -} - -// AuthHandler is responsible for keeping a token alive and renewed and passing -// new tokens to the sink server -type AuthHandler struct { - OutputCh chan string - TemplateTokenCh chan string - token string - logger hclog.Logger - client *api.Client - random *rand.Rand - wrapTTL time.Duration - maxBackoff time.Duration - minBackoff time.Duration - enableReauthOnNewCredentials bool - enableTemplateTokenCh bool - exitOnError bool -} - -type AuthHandlerConfig struct { - Logger hclog.Logger - Client *api.Client - WrapTTL time.Duration - MaxBackoff time.Duration - MinBackoff time.Duration - Token string - EnableReauthOnNewCredentials bool - EnableTemplateTokenCh bool - ExitOnError bool -} - -func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { - ah := &AuthHandler{ - // This is buffered so that if we try to output after the sink server - // has been shut down, during agent shutdown, we won't block - OutputCh: make(chan string, 1), - TemplateTokenCh: make(chan string, 1), - token: conf.Token, - logger: conf.Logger, - client: conf.Client, - random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - wrapTTL: conf.WrapTTL, - minBackoff: conf.MinBackoff, - maxBackoff: conf.MaxBackoff, - enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, - enableTemplateTokenCh: conf.EnableTemplateTokenCh, - exitOnError: conf.ExitOnError, - } - - return ah -} - -func backoff(ctx context.Context, backoff *agentBackoff) bool { - if backoff.exitOnErr { - return false - } - - select { - case <-time.After(backoff.current): - case <-ctx.Done(): - } - - // Increase exponential backoff for the next time if we don't - // successfully auth/renew/etc. 
- backoff.next() - return true -} - -func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { - if am == nil { - return errors.New("auth handler: nil auth method") - } - - if ah.minBackoff <= 0 { - ah.minBackoff = defaultMinBackoff - } - - backoffCfg := newAgentBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError) - - if backoffCfg.min >= backoffCfg.max { - return errors.New("auth handler: min_backoff cannot be greater than max_backoff") - } - - ah.logger.Info("starting auth handler") - defer func() { - am.Shutdown() - close(ah.OutputCh) - close(ah.TemplateTokenCh) - ah.logger.Info("auth handler stopped") - }() - - credCh := am.NewCreds() - if !ah.enableReauthOnNewCredentials { - realCredCh := credCh - credCh = nil - if realCredCh != nil { - go func() { - for { - select { - case <-ctx.Done(): - return - case <-realCredCh: - } - } - }() - } - } - if credCh == nil { - credCh = make(chan struct{}) - } - - var watcher *api.LifetimeWatcher - first := true - - for { - select { - case <-ctx.Done(): - return nil - - default: - } - - var clientToUse *api.Client - var err error - var path string - var data map[string]interface{} - var header http.Header - var isTokenFileMethod bool - - switch am.(type) { - case AuthMethodWithClient: - clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client) - if err != nil { - ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - - return err - } - default: - clientToUse = ah.client - } - - // Disable retry on the client to ensure our backoffOrQuit function is - // the only source of retry/backoff. - clientToUse.SetMaxRetries(0) - - var secret *api.Secret = new(api.Secret) - if first && ah.token != "" { - ah.logger.Debug("using preloaded token") - - first = false - ah.logger.Debug("lookup-self with preloaded token") - clientToUse.SetToken(ah.token) - - secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) - if err != nil { - ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - duration, _ := secret.Data["ttl"].(json.Number).Int64() - secret.Auth = &api.SecretAuth{ - ClientToken: secret.Data["id"].(string), - LeaseDuration: int(duration), - Renewable: secret.Data["renewable"].(bool), - } - } else { - ah.logger.Info("authenticating") - - path, header, data, err = am.Authenticate(ctx, ah.client) - if err != nil { - ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - } - - if ah.wrapTTL > 0 { - wrapClient, err := clientToUse.Clone() - if err != nil { - ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - wrapClient.SetWrappingLookupFunc(func(string, string) string { - return ah.wrapTTL.String() - }) - clientToUse = wrapClient - } - for key, values := range header { - for _, value := range values { - clientToUse.AddHeader(key, value) - } - } - - // This should only happen if there's no preloaded token (regular auto-auth login) - // or if a preloaded token has expired and is now 
switching to auto-auth. - if secret.Auth == nil { - isTokenFileMethod = path == "auth/token/lookup-self" - if isTokenFileMethod { - token, _ := data["token"].(string) - lookupSelfClient, err := clientToUse.Clone() - if err != nil { - ah.logger.Error("failed to clone client to perform token lookup") - return err - } - lookupSelfClient.SetToken(token) - secret, err = lookupSelfClient.Auth().Token().LookupSelf() - } else { - secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) - } - - // Check errors/sanity - if err != nil { - ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - } - - var leaseDuration int - - switch { - case ah.wrapTTL > 0: - if secret.WrapInfo == nil { - ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - if secret.WrapInfo.Token == "" { - ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) - if err != nil { - ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") - ah.OutputCh <- string(wrappedResp) - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- string(wrappedResp) - } - - am.CredSuccess() - backoffCfg.reset() - - select { - case <-ctx.Done(): - ah.logger.Info("shutdown triggered") - continue - - case <-credCh: - ah.logger.Info("auth method found new credentials, re-authenticating") - continue - } - - default: - // We handle the token_file method specially, as it's the only - // auth method that isn't actually authenticating, i.e. the secret - // returned does not have an Auth struct attached - isTokenFileMethod := path == "auth/token/lookup-self" - if isTokenFileMethod { - // We still check the response of the request to ensure the token is valid - // i.e. 
if the token is invalid, we will fail in the authentication step - if secret == nil || secret.Data == nil { - ah.logger.Error("token file validation failed, token may be invalid", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - token, ok := secret.Data["id"].(string) - if !ok || token == "" { - ah.logger.Error("token file validation returned empty client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - duration, _ := secret.Data["ttl"].(json.Number).Int64() - leaseDuration = int(duration) - renewable, _ := secret.Data["renewable"].(bool) - secret.Auth = &api.SecretAuth{ - ClientToken: token, - LeaseDuration: int(duration), - Renewable: renewable, - } - ah.logger.Info("authentication successful, sending token to sinks") - ah.OutputCh <- token - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- token - } - - tokenType := secret.Data["type"].(string) - if tokenType == "batch" { - ah.logger.Info("note that this token type is batch, and batch tokens cannot be renewed", "ttl", leaseDuration) - } - } else { - if secret == nil || secret.Auth == nil { - ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - if secret.Auth.ClientToken == "" { - ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - leaseDuration = secret.LeaseDuration - ah.logger.Info("authentication successful, sending token to sinks") - ah.OutputCh <- secret.Auth.ClientToken - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- secret.Auth.ClientToken - } - } - - am.CredSuccess() - backoffCfg.reset() - } - - if watcher != nil { - watcher.Stop() - } - - watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ - Secret: secret, - }) - if err != nil { - ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - metrics.IncrCounter([]string{"agent", "auth", "success"}, 1) - // We don't want to trigger the renewal process for tokens with - // unlimited TTL, such as the root token. 
- if leaseDuration == 0 && isTokenFileMethod { - ah.logger.Info("not starting token renewal process, as token has unlimited TTL") - } else { - ah.logger.Info("starting renewal process") - go watcher.Renew() - } - - LifetimeWatcherLoop: - for { - select { - case <-ctx.Done(): - ah.logger.Info("shutdown triggered, stopping lifetime watcher") - watcher.Stop() - break LifetimeWatcherLoop - - case err := <-watcher.DoneCh(): - ah.logger.Info("lifetime watcher done channel triggered") - if err != nil { - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - ah.logger.Error("error renewing token", "error", err) - } - break LifetimeWatcherLoop - - case <-watcher.RenewCh(): - metrics.IncrCounter([]string{"agent", "auth", "success"}, 1) - ah.logger.Info("renewed auth token") - - case <-credCh: - ah.logger.Info("auth method found new credentials, re-authenticating") - break LifetimeWatcherLoop - } - } - } -} - -// agentBackoff tracks exponential backoff state. -type agentBackoff struct { - min time.Duration - max time.Duration - current time.Duration - exitOnErr bool -} - -func newAgentBackoff(min, max time.Duration, exitErr bool) *agentBackoff { - if max <= 0 { - max = defaultMaxBackoff - } - - if min <= 0 { - min = defaultMinBackoff - } - - return &agentBackoff{ - current: min, - max: max, - min: min, - exitOnErr: exitErr, - } -} - -// next determines the next backoff duration that is roughly twice -// the current value, capped to a max value, with a measure of randomness. -func (b *agentBackoff) next() { - maxBackoff := 2 * b.current - - if maxBackoff > b.max { - maxBackoff = b.max - } - - // Trim a random amount (0-25%) off the doubled duration - trim := rand.Int63n(int64(maxBackoff) / 4) - b.current = maxBackoff - time.Duration(trim) -} - -func (b *agentBackoff) reset() { - b.current = b.min -} - -func (b agentBackoff) String() string { - return b.current.Truncate(10 * time.Millisecond).String() -} diff --git a/command/agent/auth/auth_test.go b/command/agent/auth/auth_test.go deleted file mode 100644 index 4425416759dc..000000000000 --- a/command/agent/auth/auth_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package auth - -import ( - "context" - "net/http" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -type userpassTestMethod struct{} - -func newUserpassTestMethod(t *testing.T, client *api.Client) AuthMethod { - err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ - Type: "userpass", - Config: api.AuthConfigInput{ - DefaultLeaseTTL: "1s", - MaxLeaseTTL: "3s", - }, - }) - if err != nil { - t.Fatal(err) - } - - return &userpassTestMethod{} -} - -func (u *userpassTestMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { - _, err := client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ - "password": "bar", - }) - if err != nil { - return "", nil, nil, err - } - return "auth/userpass/login/foo", nil, map[string]interface{}{ - "password": "bar", - }, nil -} - -func (u *userpassTestMethod) NewCreds() chan struct{} { - return nil -} - -func (u *userpassTestMethod) CredSuccess() { -} - -func (u *userpassTestMethod) Shutdown() { -} - -func TestAuthHandler(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - coreConfig := &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "userpass": userpass.Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - client := cluster.Cores[0].Client - - ctx, cancelFunc := context.WithCancel(context.Background()) - - ah := NewAuthHandler(&AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), - Client: client, - }) - - am := newUserpassTestMethod(t, client) - errCh := make(chan error) - go func() { - errCh <- ah.Run(ctx, am) - }() - - // Consume tokens so we don't block - stopTime := time.Now().Add(5 * time.Second) - closed := false -consumption: - for { - select { - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - break consumption - case <-ah.OutputCh: - case <-ah.TemplateTokenCh: - // Nothing - case <-time.After(stopTime.Sub(time.Now())): - if !closed { - cancelFunc() - closed = true - } - } - } -} - -func TestAgentBackoff(t *testing.T) { - max := 1024 * time.Second - backoff := newAgentBackoff(defaultMinBackoff, max, false) - - // Test initial value - if backoff.current != defaultMinBackoff { - t.Fatalf("expected 1s initial backoff, got: %v", backoff.current) - } - - // Test that backoff values are in expected range (75-100% of 2*previous) - for i := 0; i < 9; i++ { - old := backoff.current - backoff.next() - - expMax := 2 * old - expMin := 3 * expMax / 4 - - if backoff.current < expMin || backoff.current > expMax { - t.Fatalf("expected backoff in range %v to %v, got: %v", expMin, expMax, backoff) - } - } - - // Test that backoff is capped - for i := 0; i < 100; i++ { - backoff.next() - if backoff.current > max { - t.Fatalf("backoff exceeded max of 100s: %v", backoff) - } - } - - // Test reset - backoff.reset() - if backoff.current != defaultMinBackoff { - t.Fatalf("expected 1s backoff after reset, got: %v", backoff.current) - } -} - -func TestAgentMinBackoffCustom(t *testing.T) { - type test struct { - 
minBackoff time.Duration - want time.Duration - } - - tests := []test{ - {minBackoff: 0 * time.Second, want: 1 * time.Second}, - {minBackoff: 1 * time.Second, want: 1 * time.Second}, - {minBackoff: 5 * time.Second, want: 5 * time.Second}, - {minBackoff: 10 * time.Second, want: 10 * time.Second}, - } - - for _, test := range tests { - max := 1024 * time.Second - backoff := newAgentBackoff(test.minBackoff, max, false) - - // Test initial value - if backoff.current != test.want { - t.Fatalf("expected %d initial backoff, got: %v", test.want, backoff.current) - } - - // Test that backoff values are in expected range (75-100% of 2*previous) - for i := 0; i < 5; i++ { - old := backoff.current - backoff.next() - - expMax := 2 * old - expMin := 3 * expMax / 4 - - if backoff.current < expMin || backoff.current > expMax { - t.Fatalf("expected backoff in range %v to %v, got: %v", expMin, expMax, backoff) - } - } - - // Test that backoff is capped - for i := 0; i < 100; i++ { - backoff.next() - if backoff.current > max { - t.Fatalf("backoff exceeded max of 100s: %v", backoff) - } - } - - // Test reset - backoff.reset() - if backoff.current != test.want { - t.Fatalf("expected %d backoff after reset, got: %v", test.want, backoff.current) - } - } -} diff --git a/command/agent/auth/azure/azure.go b/command/agent/auth/azure/azure.go deleted file mode 100644 index d4689f0d555e..000000000000 --- a/command/agent/auth/azure/azure.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package azure - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "net/http" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/helper/useragent" - "github.com/hashicorp/vault/sdk/helper/jsonutil" -) - -const ( - instanceEndpoint = "http://169.254.169.254/metadata/instance" - identityEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // minimum version 2018-02-01 needed for identity metadata - // regional availability: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service - apiVersion = "2018-02-01" -) - -type azureMethod struct { - logger hclog.Logger - mountPath string - - role string - resource string - objectID string - clientID string -} - -func NewAzureAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - if conf.Config == nil { - return nil, errors.New("empty config data") - } - - a := &azureMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - } - - roleRaw, ok := conf.Config["role"] - if !ok { - return nil, errors.New("missing 'role' value") - } - a.role, ok = roleRaw.(string) - if !ok { - return nil, errors.New("could not convert 'role' config value to string") - } - - resourceRaw, ok := conf.Config["resource"] - if !ok { - return nil, errors.New("missing 'resource' value") - } - a.resource, ok = resourceRaw.(string) - if !ok { - return nil, errors.New("could not convert 'resource' config value to string") - } - - objectIDRaw, ok := conf.Config["object_id"] - if ok { - a.objectID, ok = objectIDRaw.(string) - if !ok { - return nil, errors.New("could not convert 'object_id' config value to string") - } - } - - clientIDRaw, ok := conf.Config["client_id"] - if ok { - a.clientID, ok = clientIDRaw.(string) - if !ok { - return nil, errors.New("could not convert 'client_id' config 
value to string") - } - } - - switch { - case a.role == "": - return nil, errors.New("'role' value is empty") - case a.resource == "": - return nil, errors.New("'resource' value is empty") - case a.objectID != "" && a.clientID != "": - return nil, errors.New("only one of 'object_id' or 'client_id' may be provided") - } - - return a, nil -} - -func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (retPath string, header http.Header, retData map[string]interface{}, retErr error) { - a.logger.Trace("beginning authentication") - - // Fetch instance data - var instance struct { - Compute struct { - Name string - ResourceGroupName string - SubscriptionID string - VMScaleSetName string - } - } - - body, err := getMetadataInfo(ctx, instanceEndpoint, "", "", "") - if err != nil { - retErr = err - return - } - - err = jsonutil.DecodeJSON(body, &instance) - if err != nil { - retErr = fmt.Errorf("error parsing instance metadata response: %w", err) - return - } - - // Fetch JWT - var identity struct { - AccessToken string `json:"access_token"` - } - - body, err = getMetadataInfo(ctx, identityEndpoint, a.resource, a.objectID, a.clientID) - if err != nil { - retErr = err - return - } - - err = jsonutil.DecodeJSON(body, &identity) - if err != nil { - retErr = fmt.Errorf("error parsing identity metadata response: %w", err) - return - } - - // Attempt login - data := map[string]interface{}{ - "role": a.role, - "vm_name": instance.Compute.Name, - "vmss_name": instance.Compute.VMScaleSetName, - "resource_group_name": instance.Compute.ResourceGroupName, - "subscription_id": instance.Compute.SubscriptionID, - "jwt": identity.AccessToken, - } - - return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil -} - -func (a *azureMethod) NewCreds() chan struct{} { - return nil -} - -func (a *azureMethod) CredSuccess() { -} - -func (a *azureMethod) Shutdown() { -} - -func getMetadataInfo(ctx context.Context, endpoint, resource, objectID, clientID string) ([]byte, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, err - } - - q := req.URL.Query() - q.Add("api-version", apiVersion) - if resource != "" { - q.Add("resource", resource) - } - if objectID != "" { - q.Add("object_id", objectID) - } - if clientID != "" { - q.Add("client_id", clientID) - } - req.URL.RawQuery = q.Encode() - req.Header.Set("Metadata", "true") - req.Header.Set("User-Agent", useragent.String()) - req = req.WithContext(ctx) - - client := cleanhttp.DefaultClient() - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("error fetching metadata from %s: %w", endpoint, err) - } - - if resp == nil { - return nil, fmt.Errorf("empty response fetching metadata from %s", endpoint) - } - - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error reading metadata from %s: %w", endpoint, err) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("error response in metadata from %s: %s", endpoint, body) - } - - return body, nil -} diff --git a/command/agent/auth/cert/cert.go b/command/agent/auth/cert/cert.go deleted file mode 100644 index 96f43de69f2e..000000000000 --- a/command/agent/auth/cert/cert.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cert - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/sdk/helper/consts" -) - -type certMethod struct { - logger hclog.Logger - mountPath string - name string - - caCert string - clientCert string - clientKey string - - // Client is the cached client to use if cert info was provided. - client *api.Client -} - -var _ auth.AuthMethodWithClient = &certMethod{} - -func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - - // Not concerned if the conf.Config is empty as the 'name' - // parameter is optional when using TLS Auth - - c := &certMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - } - - if conf.Config != nil { - nameRaw, ok := conf.Config["name"] - if !ok { - nameRaw = "" - } - c.name, ok = nameRaw.(string) - if !ok { - return nil, errors.New("could not convert 'name' config value to string") - } - - caCertRaw, ok := conf.Config["ca_cert"] - if ok { - c.caCert, ok = caCertRaw.(string) - if !ok { - return nil, errors.New("could not convert 'ca_cert' config value to string") - } - } - - clientCertRaw, ok := conf.Config["client_cert"] - if ok { - c.clientCert, ok = clientCertRaw.(string) - if !ok { - return nil, errors.New("could not convert 'cert_file' config value to string") - } - } - - clientKeyRaw, ok := conf.Config["client_key"] - if ok { - c.clientKey, ok = clientKeyRaw.(string) - if !ok { - return nil, errors.New("could not convert 'cert_key' config value to string") - } - } - } - - return c, nil -} - -func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { - c.logger.Trace("beginning authentication") - - authMap := map[string]interface{}{} - - if c.name != "" { - authMap["name"] = c.name - } - - return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil -} - -func (c *certMethod) NewCreds() chan struct{} { - return nil -} - -func (c *certMethod) CredSuccess() {} - -func (c *certMethod) Shutdown() {} - -// AuthClient uses the existing client's address and returns a new client with -// the auto-auth method's certificate information if that's provided in its -// config map. 
-func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { - c.logger.Trace("deriving auth client to use") - - clientToAuth := client - - if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { - // Return cached client if present - if c.client != nil { - return c.client, nil - } - - config := api.DefaultConfig() - if config.Error != nil { - return nil, config.Error - } - config.Address = client.Address() - - t := &api.TLSConfig{ - CACert: c.caCert, - ClientCert: c.clientCert, - ClientKey: c.clientKey, - } - - // Setup TLS config - if err := config.ConfigureTLS(t); err != nil { - return nil, err - } - - var err error - clientToAuth, err = api.NewClient(config) - if err != nil { - return nil, err - } - if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" { - clientToAuth.SetNamespace(ns) - } - - // Cache the client for future use - c.client = clientToAuth - } - - return clientToAuth, nil -} diff --git a/command/agent/auth/cert/cert_test.go b/command/agent/auth/cert/cert_test.go deleted file mode 100644 index a5d5e6a5f74d..000000000000 --- a/command/agent/auth/cert/cert_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cert - -import ( - "context" - "os" - "path" - "reflect" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" -) - -func TestCertAuthMethod_Authenticate(t *testing.T) { - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "foo", - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - loginPath, _, authMap, err := method.Authenticate(context.Background(), client) - if err != nil { - t.Fatal(err) - } - - expectedLoginPath := path.Join(config.MountPath, "/login") - if loginPath != expectedLoginPath { - t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath) - } - - expectedAuthMap := map[string]interface{}{ - "name": config.Config["name"], - } - if !reflect.DeepEqual(authMap, expectedAuthMap) { - t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) - } -} - -func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "without-certs", - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client != clientToUse { - t.Fatal("error: expected AuthClient to return back original client") - } -} - -func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { - clientCert, err := os.Open("./test-fixtures/keys/cert.pem") - if err != nil { - t.Fatal(err) - } - defer clientCert.Close() - - clientKey, err := os.Open("./test-fixtures/keys/key.pem") - if err != nil { - t.Fatal(err) - } - defer clientKey.Close() - - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "with-certs", - "client_cert": clientCert.Name(), - "client_key": clientKey.Name(), - }, - } - - method, err := 
NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client == clientToUse { - t.Fatal("expected client from AuthClient to be different from original client") - } - - // Call AuthClient again to get back the cached client - cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if cachedClient != clientToUse { - t.Fatal("expected client from AuthClient to return back a cached client") - } -} diff --git a/command/agent/auto_auth_preload_token_end_to_end_test.go b/command/agent/auto_auth_preload_token_end_to_end_test.go index 2ad81d3e6408..e0d78589b512 100644 --- a/command/agent/auto_auth_preload_token_end_to_end_test.go +++ b/command/agent/auto_auth_preload_token_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -10,13 +10,13 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent/auth" - agentAppRole "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentAppRole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -26,7 +26,6 @@ import ( func TestTokenPreload_UsingAutoAuth(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, LogicalBackends: map[string]logical.Factory{ "kv": vault.LeasedPassthroughBackendFactory, }, @@ -185,7 +184,7 @@ func TestTokenPreload_UsingAutoAuth(t *testing.T) { }() go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/aws_end_to_end_test.go b/command/agent/aws_end_to_end_test.go index 5b23461fa4ba..231d06938471 100644 --- a/command/agent/aws_end_to_end_test.go +++ b/command/agent/aws_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -14,14 +14,14 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" - hclog "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" vaultaws "github.com/hashicorp/vault/builtin/credential/aws" - "github.com/hashicorp/vault/command/agent/auth" - agentaws "github.com/hashicorp/vault/command/agent/auth/aws" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentaws "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" @@ -61,7 +61,6 @@ func TestAWSEndToEnd(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "aws": vaultaws.Factory, }, @@ -164,7 +163,7 @@ func TestAWSEndToEnd(t *testing.T) { Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/cache/api_proxy.go b/command/agent/cache/api_proxy.go deleted file mode 100644 index 4581117a0645..000000000000 --- a/command/agent/cache/api_proxy.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "fmt" - "sync" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/http" -) - -type EnforceConsistency int - -const ( - EnforceConsistencyNever EnforceConsistency = iota - EnforceConsistencyAlways -) - -type WhenInconsistentAction int - -const ( - WhenInconsistentFail WhenInconsistentAction = iota - WhenInconsistentRetry - WhenInconsistentForward -) - -// APIProxy is an implementation of the proxier interface that is used to -// forward the request to Vault and get the response. 
-type APIProxy struct { - client *api.Client - logger hclog.Logger - enforceConsistency EnforceConsistency - whenInconsistentAction WhenInconsistentAction - l sync.RWMutex - lastIndexStates []string -} - -var _ Proxier = &APIProxy{} - -type APIProxyConfig struct { - Client *api.Client - Logger hclog.Logger - EnforceConsistency EnforceConsistency - WhenInconsistentAction WhenInconsistentAction -} - -func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { - if config.Client == nil { - return nil, fmt.Errorf("nil API client") - } - return &APIProxy{ - client: config.Client, - logger: config.Logger, - enforceConsistency: config.EnforceConsistency, - whenInconsistentAction: config.WhenInconsistentAction, - }, nil -} - -func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - client, err := ap.client.Clone() - if err != nil { - return nil, err - } - client.SetToken(req.Token) - - // Derive and set a logger for the client - clientLogger := ap.logger.Named("client") - client.SetLogger(clientLogger) - - // http.Transport will transparently request gzip and decompress the response, but only if - // the client doesn't manually set the header. Removing any Accept-Encoding header allows the - // transparent compression to occur. - req.Request.Header.Del("Accept-Encoding") - client.SetHeaders(req.Request.Header) - - fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) - fwReq.BodyBytes = req.RequestBody - - query := req.Request.URL.Query() - if len(query) != 0 { - fwReq.Params = query - } - - var newState string - manageState := ap.enforceConsistency == EnforceConsistencyAlways && - req.Request.Header.Get(http.VaultIndexHeaderName) == "" && - req.Request.Header.Get(http.VaultForwardHeaderName) == "" && - req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" - - if manageState { - client = client.WithResponseCallbacks(api.RecordState(&newState)) - ap.l.RLock() - lastStates := ap.lastIndexStates - ap.l.RUnlock() - if len(lastStates) != 0 { - client = client.WithRequestCallbacks(api.RequireState(lastStates...)) - switch ap.whenInconsistentAction { - case WhenInconsistentFail: - // In this mode we want to delegate handling of inconsistency - // failures to the external client talking to Agent. - client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) - case WhenInconsistentRetry: - // In this mode we want to handle retries due to inconsistency - // internally. This is the default api.Client behaviour so - // we needn't do anything. - case WhenInconsistentForward: - fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) - } - } - } - - // Make the request to Vault and get the response - ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) - - resp, err := client.RawRequestWithContext(ctx, fwReq) - if resp == nil && err != nil { - // We don't want to cache nil responses, so we simply return the error - return nil, err - } - - if newState != "" { - ap.l.Lock() - // We want to be using the "newest" states seen, but newer isn't well - // defined here. There can be two states S1 and S2 which aren't strictly ordered: - // S1 could have a newer localindex and S2 could have a newer replicatedindex. So - // we need to merge them. But we can't merge them because we wouldn't be able to - // "sign" the resulting header because we don't have access to the HMAC key that - // Vault uses to do so. 
So instead we compare any of the 0-2 saved states - // we have to the new header, keeping the newest 1-2 of these, and sending - // them to Vault to evaluate. - ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) - ap.l.Unlock() - } - - // Before error checking from the request call, we'd want to initialize a SendResponse to - // potentially return - sendResponse, newErr := NewSendResponse(resp, nil) - if newErr != nil { - return nil, newErr - } - - // Bubble back the api.Response as well for error checking/handling at the handler layer. - return sendResponse, err -} diff --git a/command/agent/cache/cache_test.go b/command/agent/cache/cache_test.go deleted file mode 100644 index 19671b90b211..000000000000 --- a/command/agent/cache/cache_test.go +++ /dev/null @@ -1,1242 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "encoding/json" - "fmt" - "io" - "math/rand" - "net" - "net/http" - "sync" - "testing" - "time" - - "github.com/go-test/deep" - "github.com/hashicorp/go-hclog" - kv "github.com/hashicorp/vault-plugin-secrets-kv" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/command/agent/sink/mock" - "github.com/hashicorp/vault/helper/namespace" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { - t.Helper() - for val, valType := range sampleSpace { - index, err := leaseCache.db.Get(valType, val) - if err != nil { - t.Fatal(err) - } - if expected[val] == "" && index != nil { - t.Fatalf("failed to evict index from the cache: type: %q, value: %q", valType, val) - } - if expected[val] != "" && index == nil { - t.Fatalf("evicted an undesired index from cache: type: %q, value: %q", valType, val) - } - } -} - -func TestCache_AutoAuthTokenStripping(t *testing.T) { - response1 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup-self"}}` - response2 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup"}}` - response3 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` - response4 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` - responses := []*SendResponse{ - newTestSendResponse(http.StatusOK, response1), - newTestSendResponse(http.StatusOK, response2), - newTestSendResponse(http.StatusOK, response3), - newTestSendResponse(http.StatusOK, response4), - } - - leaseCache := testNewLeaseCache(t, responses) - - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - - cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - ctx := namespace.RootContext(nil) - - // Create a muxer and add paths relevant for the lease cache layer - mux := http.NewServeMux() - mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) - server := &http.Server{ - 
Handler: mux, - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: cacheLogger.StandardLogger(nil), - } - go server.Serve(listener) - - testClient, err := client.Clone() - if err != nil { - t.Fatal(err) - } - - if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { - t.Fatal(err) - } - - // Empty the token in the client. Auto-auth token should be put to use. - testClient.SetToken("") - secret, err := testClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup-self" { - t.Fatalf("failed to strip off auto-auth token on lookup-self") - } - - secret, err = testClient.Auth().Token().Lookup("") - if err != nil { - t.Fatal(err) - } - if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup" { - t.Fatalf("failed to strip off auto-auth token on lookup") - } - - secret, err = testClient.Auth().Token().RenewSelf(1) - if err != nil { - t.Fatal(err) - } - if secret.Auth == nil { - secretJson, _ := json.Marshal(secret) - t.Fatalf("Expected secret to have Auth but was %s", secretJson) - } - if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" { - t.Fatalf("failed to strip off auto-auth token on renew-self") - } - - secret, err = testClient.Auth().Token().Renew("testid", 1) - if err != nil { - t.Fatal(err) - } - if secret.Auth == nil { - secretJson, _ := json.Marshal(secret) - t.Fatalf("Expected secret to have Auth but was %s", secretJson) - } - if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" { - t.Fatalf("failed to strip off auto-auth token on renew") - } -} - -func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) { - leaseCache := &mockTokenVerifierProxier{} - dummyToken := "DUMMY" - realToken := "testid" - - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - - cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - ctx := namespace.RootContext(nil) - - // Create a muxer and add paths relevant for the lease cache layer - mux := http.NewServeMux() - // mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false)) - server := &http.Server{ - Handler: mux, - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: cacheLogger.StandardLogger(nil), - } - go server.Serve(listener) - - testClient, err := client.Clone() - if err != nil { - t.Fatal(err) - } - - if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { - t.Fatal(err) - } - - // Set a dummy token in the client. The auto-auth (real) token should be - // used in its place when the request is proxied.
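// (Editor's sketch of the flow this test asserts, using names from the test
// above: the client presents dummyToken, ProxyHandler swaps in realToken from
// mock.NewSink(realToken) before proxying, and mockTokenVerifierProxier
// records the token it actually received:
//
//	testClient.SetToken(dummyToken)               // what the app presents
//	_, _ = testClient.Auth().Token().LookupSelf() // proxied with realToken
//	// leaseCache.currentToken should now equal realToken, not dummyToken
//
// so the assertion below checks the substitution, not the lookup result.)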
- testClient.SetToken(dummyToken) - _, err = testClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - if leaseCache.currentToken != realToken { - t.Fatalf("failed to use real token from auto-auth") - } -} - -func TestCache_ConcurrentRequests(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - wg := &sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - key := fmt.Sprintf("kv/foo/%d_%d", i, rand.Int()) - _, err := testClient.Logical().Write(key, map[string]interface{}{ - "key": key, - }) - if err != nil { - t.Fatal(err) - } - secret, err := testClient.Logical().Read(key) - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Data["key"].(string) != key { - t.Fatal(fmt.Sprintf("failed to read value for key: %q", key)) - } - }(i) - - } - wg.Wait() -} - -func TestCache_TokenRevocations_RevokeOrphan(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke-orphan the intermediate token. This should result in its own - // eviction and evictions of the revoked token's leases. All other things - // including the child tokens and leases of the child tokens should be - // untouched. 
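// (Editor's note: the hierarchy built above, which all of these revocation
// tests share, is:
//
//	token1 (test client's root token) ── lease1
//	└── token2 ── lease2
//	    └── token3 ── lease3
//
// A revoke-orphan on token2 removes only token2 and lease2; token3 is
// orphaned rather than revoked, so it and lease3 survive, as the expected
// map below reflects.)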
- testClient.SetToken(token2) - err = testClient.Auth().Token().RevokeOrphan(token2) - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = map[string]string{ - token1: "token", - lease1: "lease", - token3: "token", - lease3: "lease", - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_LeafLevelToken(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke the leaf token. This should evict the leaf token's own entry - // along with its leases; the parent tokens and their leases should be - // left untouched.
- testClient.SetToken(token3) - err = testClient.Auth().Token().RevokeSelf("") - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = map[string]string{ - token1: "token", - lease1: "lease", - token2: "token", - lease2: "lease", - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_IntermediateLevelToken(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke the second level token. This should evict all the leases - // belonging to this token, evict entries for all the child tokens and - // their respective leases. 
- testClient.SetToken(token2) - err = testClient.Auth().Token().RevokeSelf("") - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = map[string]string{ - token1: "token", - lease1: "lease", - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_TopLevelToken(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke the top level token. This should evict all the leases belonging - // to this token, evict entries for all the child tokens and their - // respective leases. 
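// (Editor's note: this is the cascading counterpart to the revoke-orphan
// case sketched earlier — revoking top-level token1 takes down token2,
// token3, and all three leases, which is why the expected map is rebuilt
// empty below.)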
- testClient.SetToken(token1) - err = testClient.Auth().Token().RevokeSelf("") - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = make(map[string]string) - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_Shutdown(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - ctx, rootCancelFunc := context.WithCancel(namespace.RootContext(nil)) - cleanup, _, testClient, leaseCache := setupClusterAndAgent(ctx, t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - rootCancelFunc() - time.Sleep(1 * time.Second) - - // Ensure that all the entries are now gone - expected = make(map[string]string) - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_BaseContextCancellation(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - 
t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Cancel the base context of the lease cache. This should trigger - // evictions of all the entries from the cache. - leaseCache.baseCtxInfo.CancelFunc() - time.Sleep(1 * time.Second) - - // Ensure that all the entries are now gone - expected = make(map[string]string) - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_NonCacheable(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": kv.Factory, - }, - } - - cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - // Query mounts first - origMounts, err := testClient.Sys().ListMounts() - if err != nil { - t.Fatal(err) - } - - // Mount a kv backend - if err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - Options: map[string]string{ - "version": "2", - }, - }); err != nil { - t.Fatal(err) - } - - // Query mounts again - newMounts, err := testClient.Sys().ListMounts() - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal(origMounts, newMounts); diff == nil { - t.Logf("response #1: %#v", origMounts) - t.Logf("response #2: %#v", newMounts) - t.Fatal("expected requests to be not cached") - } - - // Query a non-existing mount, expect an error from api.Response - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - r := testClient.NewRequest("GET", "/v1/kv-invalid") - - apiResp, err := testClient.RawRequestWithContext(ctx, r) - if apiResp != nil { - defer apiResp.Body.Close() - } - if apiResp.Error() == nil || (apiResp != nil && apiResp.StatusCode != 404) { - t.Fatalf("expected an error response and a 404 from requesting an invalid path, got: %#v", apiResp) - } - if err == nil { - t.Fatal("expected an error from requesting an invalid path") - } -} - -func TestCache_Caching_AuthResponse(t *testing.T) { - cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token := resp.Auth.ClientToken - testClient.SetToken(token) - - authTokeCreateReq := func(t *testing.T, policies map[string]interface{}) *api.Secret { - resp, err := testClient.Logical().Write("auth/token/create", policies) - if err != nil { - t.Fatal(err) - } - if resp.Auth == nil || resp.Auth.ClientToken == "" { - t.Fatalf("expected a valid client token in the response, got = %#v", resp) - } - - return resp - } - - // Test on auth response by creating a child token - { - proxiedResp := authTokeCreateReq(t, map[string]interface{}{ - "policies": "default", - }) - - cachedResp := 
authTokeCreateReq(t, map[string]interface{}{ - "policies": "default", - }) - - if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { - t.Fatal(diff) - } - } - - // Test on *non-renewable* auth response by creating a child root token - { - proxiedResp := authTokeCreateReq(t, nil) - - cachedResp := authTokeCreateReq(t, nil) - - if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { - t.Fatal(diff) - } - } -} - -func TestCache_Caching_LeaseResponse(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - cleanup, client, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - err := client.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Test proxy by issuing two different requests - { - // Write data to the lease-kv backend - _, err := testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - _, err = testClient.Logical().Write("kv/foobar", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - firstResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - - secondResp, err := testClient.Logical().Read("kv/foobar") - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal(firstResp, secondResp); diff == nil { - t.Logf("response: %#v", firstResp) - t.Fatal("expected proxied responses, got cached response on second request") - } - } - - // Test caching behavior by issuing the same request twice - { - _, err := testClient.Logical().Write("kv/baz", map[string]interface{}{ - "value": "foo", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - proxiedResp, err := testClient.Logical().Read("kv/baz") - if err != nil { - t.Fatal(err) - } - - cachedResp, err := testClient.Logical().Read("kv/baz") - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal(proxiedResp, cachedResp); diff != nil { - t.Fatal(diff) - } - } -} - -func TestCache_Caching_CacheClear(t *testing.T) { - t.Run("request_path", func(t *testing.T) { - testCachingCacheClearCommon(t, "request_path") - }) - - t.Run("lease", func(t *testing.T) { - testCachingCacheClearCommon(t, "lease") - }) - - t.Run("token", func(t *testing.T) { - testCachingCacheClearCommon(t, "token") - }) - - t.Run("token_accessor", func(t *testing.T) { - testCachingCacheClearCommon(t, "token_accessor") - }) - - t.Run("all", func(t *testing.T) { - testCachingCacheClearCommon(t, "all") - }) -} - -func testCachingCacheClearCommon(t *testing.T, clearType string) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - cleanup, client, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - err := client.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Write data to the lease-kv backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Proxy this request, agent should cache the 
response - resp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - gotLeaseID := resp.LeaseID - - // Verify the entry exists - idx, err := leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID) - if err != nil { - t.Fatal(err) - } - - if idx == nil { - t.Fatalf("expected cached entry, got: %v", idx) - } - - data := map[string]interface{}{ - "type": clearType, - } - - // We need to set the value here depending on what we're trying to test. - // Some values are static, but others are dynamically generated at runtime. - switch clearType { - case "request_path": - data["value"] = "/v1/kv/foo" - case "lease": - data["value"] = resp.LeaseID - case "token": - data["value"] = testClient.Token() - case "token_accessor": - lookupResp, err := client.Auth().Token().Lookup(testClient.Token()) - if err != nil { - t.Fatal(err) - } - data["value"] = lookupResp.Data["accessor"] - case "all": - default: - t.Fatalf("invalid type provided: %v", clearType) - } - - r := testClient.NewRequest("PUT", consts.AgentPathCacheClear) - if err := r.SetJSONBody(data); err != nil { - t.Fatal(err) - } - - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - apiResp, err := testClient.RawRequestWithContext(ctx, r) - if apiResp != nil { - defer apiResp.Body.Close() - } - if apiResp != nil && apiResp.StatusCode == 404 { - _, parseErr := api.ParseSecret(apiResp.Body) - switch parseErr { - case nil: - case io.EOF: - default: - t.Fatal(err) - } - } - if err != nil { - t.Fatal(err) - } - - time.Sleep(100 * time.Millisecond) - - // Verify the entry is cleared - idx, err = leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID) - if err != nil { - t.Fatal(err) - } - - if idx != nil { - t.Fatalf("expected entry to be nil, got: %v", idx) - } -} - -func TestCache_AuthTokenCreateOrphan(t *testing.T) { - t.Run("create", func(t *testing.T) { - t.Run("managed", func(t *testing.T) { - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - reqOpts := &api.TokenCreateRequest{ - Policies: []string{"default"}, - NoParent: true, - } - resp, err := testClient.Auth().Token().Create(reqOpts) - if err != nil { - t.Fatal(err) - } - token := resp.Auth.ClientToken - - idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - t.Fatal(err) - } - if idx == nil { - t.Fatalf("expected entry to be non-nil, got: %#v", idx) - } - }) - - t.Run("non-managed", func(t *testing.T) { - cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - reqOpts := &api.TokenCreateRequest{ - Policies: []string{"default"}, - NoParent: true, - } - - // Use the test client but set the token to one that's not managed by agent - testClient.SetToken(clusterClient.Token()) - - resp, err := testClient.Auth().Token().Create(reqOpts) - if err != nil { - t.Fatal(err) - } - token := resp.Auth.ClientToken - - idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - t.Fatal(err) - } - if idx == nil { - t.Fatalf("expected entry to be non-nil, got: %#v", idx) - } - }) - }) - - t.Run("create-orphan", func(t *testing.T) { - t.Run("managed", func(t *testing.T) { - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - reqOpts := &api.TokenCreateRequest{ - Policies: []string{"default"}, - } - resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) - if err != nil { - t.Fatal(err) - 
} - token := resp.Auth.ClientToken - - idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - t.Fatal(err) - } - if idx == nil { - t.Fatalf("expected entry to be non-nil, got: %#v", idx) - } - }) - - t.Run("non-managed", func(t *testing.T) { - cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - reqOpts := &api.TokenCreateRequest{ - Policies: []string{"default"}, - } - - // Use the test client but set the token to one that's not managed by agent - testClient.SetToken(clusterClient.Token()) - - resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) - if err != nil { - t.Fatal(err) - } - token := resp.Auth.ClientToken - - idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - t.Fatal(err) - } - if idx == nil { - t.Fatalf("expected entry to be non-nil, got: %#v", idx) - } - }) - }) -} diff --git a/command/agent/cache/cachememdb/cache_memdb.go b/command/agent/cache/cachememdb/cache_memdb.go deleted file mode 100644 index 93aa2bf78faf..000000000000 --- a/command/agent/cache/cachememdb/cache_memdb.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cachememdb - -import ( - "errors" - "fmt" - "sync/atomic" - - memdb "github.com/hashicorp/go-memdb" -) - -const ( - tableNameIndexer = "indexer" -) - -// CacheMemDB is the underlying cache database for storing indexes. -type CacheMemDB struct { - db *atomic.Value -} - -// New creates a new instance of CacheMemDB. -func New() (*CacheMemDB, error) { - db, err := newDB() - if err != nil { - return nil, err - } - - c := &CacheMemDB{ - db: new(atomic.Value), - } - c.db.Store(db) - - return c, nil -} - -func newDB() (*memdb.MemDB, error) { - cacheSchema := &memdb.DBSchema{ - Tables: map[string]*memdb.TableSchema{ - tableNameIndexer: { - Name: tableNameIndexer, - Indexes: map[string]*memdb.IndexSchema{ - // This index enables fetching the cached item based on the - // identifier of the index. - IndexNameID: { - Name: IndexNameID, - Unique: true, - Indexer: &memdb.StringFieldIndex{ - Field: "ID", - }, - }, - // This index enables fetching all the entries in cache for - // a given request path, in a given namespace. - IndexNameRequestPath: { - Name: IndexNameRequestPath, - Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - &memdb.StringFieldIndex{ - Field: "RequestPath", - }, - }, - }, - }, - // This index enables fetching all the entries in cache - // belonging to the leases of a given token. - IndexNameLeaseToken: { - Name: IndexNameLeaseToken, - Unique: false, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "LeaseToken", - }, - }, - // This index enables fetching all the entries in cache - // that are tied to the given token, regardless of the - // entries belonging to the token or belonging to the - // lease. - IndexNameToken: { - Name: IndexNameToken, - Unique: true, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "Token", - }, - }, - // This index enables fetching all the entries in cache for - // the given parent token. - IndexNameTokenParent: { - Name: IndexNameTokenParent, - Unique: false, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "TokenParent", - }, - }, - // This index enables fetching all the entries in cache for - // the given accessor. 
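// (Editor's sketch of how the compound request_path index above is queried,
// assuming a populated CacheMemDB c; both helpers are defined later in this
// file:
//
//	// exact match on namespace + request path
//	idx, err := c.Get(IndexNameRequestPath, "root/", "/v1/kv/foo")
//	// prefix scan: GetByPrefix appends "_prefix" to the index name so
//	// go-memdb walks every entry under the given namespace and path prefix
//	idxes, err := c.GetByPrefix(IndexNameRequestPath, "root/", "/v1/kv")
//
// values must be supplied in schema order: namespace first, then path.)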
- IndexNameTokenAccessor: { - Name: IndexNameTokenAccessor, - Unique: true, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "TokenAccessor", - }, - }, - // This index enables fetching all the entries in cache for - // the given lease identifier. - IndexNameLease: { - Name: IndexNameLease, - Unique: true, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "Lease", - }, - }, - }, - }, - }, - } - - db, err := memdb.NewMemDB(cacheSchema) - if err != nil { - return nil, err - } - return db, nil -} - -// Get returns the index based on the indexer and the index values provided. -func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, error) { - if !validIndexName(indexName) { - return nil, fmt.Errorf("invalid index name %q", indexName) - } - - txn := c.db.Load().(*memdb.MemDB).Txn(false) - - raw, err := txn.First(tableNameIndexer, indexName, indexValues...) - if err != nil { - return nil, err - } - - if raw == nil { - return nil, nil - } - - index, ok := raw.(*Index) - if !ok { - return nil, errors.New("unable to parse index value from the cache") - } - - return index, nil -} - -// Set stores the index into the cache. -func (c *CacheMemDB) Set(index *Index) error { - if index == nil { - return errors.New("nil index provided") - } - - txn := c.db.Load().(*memdb.MemDB).Txn(true) - defer txn.Abort() - - if err := txn.Insert(tableNameIndexer, index); err != nil { - return fmt.Errorf("unable to insert index into cache: %v", err) - } - - txn.Commit() - - return nil -} - -// GetByPrefix returns all the cached indexes based on the index name and the -// value prefix. -func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ([]*Index, error) { - if !validIndexName(indexName) { - return nil, fmt.Errorf("invalid index name %q", indexName) - } - - indexName = indexName + "_prefix" - - // Get all the objects - txn := c.db.Load().(*memdb.MemDB).Txn(false) - - iter, err := txn.Get(tableNameIndexer, indexName, indexValues...) - if err != nil { - return nil, err - } - - var indexes []*Index - for { - obj := iter.Next() - if obj == nil { - break - } - index, ok := obj.(*Index) - if !ok { - return nil, fmt.Errorf("failed to cast cached index") - } - - indexes = append(indexes, index) - } - - return indexes, nil -} - -// Evict removes an index from the cache based on index name and value. -func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error { - index, err := c.Get(indexName, indexValues...) - if err != nil { - return fmt.Errorf("unable to fetch index on cache deletion: %v", err) - } - - if index == nil { - return nil - } - - txn := c.db.Load().(*memdb.MemDB).Txn(true) - defer txn.Abort() - - if err := txn.Delete(tableNameIndexer, index); err != nil { - return fmt.Errorf("unable to delete index from cache: %v", err) - } - - txn.Commit() - - return nil -} - -// Flush resets the underlying cache object. -func (c *CacheMemDB) Flush() error { - newDB, err := newDB() - if err != nil { - return err - } - - c.db.Store(newDB) - - return nil -} diff --git a/command/agent/cache/cachememdb/cache_memdb_test.go b/command/agent/cache/cachememdb/cache_memdb_test.go deleted file mode 100644 index 87b8eee798b2..000000000000 --- a/command/agent/cache/cachememdb/cache_memdb_test.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cachememdb - -import ( - "context" - "testing" - - "github.com/go-test/deep" -) - -func testContextInfo() *ContextInfo { - ctx, cancelFunc := context.WithCancel(context.Background()) - - return &ContextInfo{ - Ctx: ctx, - CancelFunc: cancelFunc, - } -} - -func TestNew(t *testing.T) { - _, err := New() - if err != nil { - t.Fatal(err) - } -} - -func TestCacheMemDB_Get(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Test invalid index name - _, err = cache.Get("foo", "bar") - if err == nil { - t.Fatal("expected error") - } - - // Test on empty cache - index, err := cache.Get(IndexNameID, "foo") - if err != nil { - t.Fatal(err) - } - if index != nil { - t.Fatalf("expected nil index, got: %v", index) - } - - // Populate cache - in := &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Token: "test_token", - TokenAccessor: "test_accessor", - Lease: "test_lease", - Response: []byte("hello world"), - } - - if err := cache.Set(in); err != nil { - t.Fatal(err) - } - - testCases := []struct { - name string - indexName string - indexValues []interface{} - }{ - { - "by_index_id", - "id", - []interface{}{in.ID}, - }, - { - "by_request_path", - "request_path", - []interface{}{in.Namespace, in.RequestPath}, - }, - { - "by_lease", - "lease", - []interface{}{in.Lease}, - }, - { - "by_token", - "token", - []interface{}{in.Token}, - }, - { - "by_token_accessor", - "token_accessor", - []interface{}{in.TokenAccessor}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - out, err := cache.Get(tc.indexName, tc.indexValues...) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(in, out); diff != nil { - t.Fatal(diff) - } - }) - } -} - -func TestCacheMemDB_GetByPrefix(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Test invalid index name - _, err = cache.GetByPrefix("foo", "bar", "baz") - if err == nil { - t.Fatal("expected error") - } - - // Test on empty cache - index, err := cache.GetByPrefix(IndexNameRequestPath, "foo", "bar") - if err != nil { - t.Fatal(err) - } - if index != nil { - t.Fatalf("expected nil index, got: %v", index) - } - - // Populate cache - in := &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path/1", - Token: "test_token", - TokenParent: "test_token_parent", - TokenAccessor: "test_accessor", - Lease: "path/to/test_lease/1", - LeaseToken: "test_lease_token", - Response: []byte("hello world"), - } - - if err := cache.Set(in); err != nil { - t.Fatal(err) - } - - // Populate cache - in2 := &Index{ - ID: "test_id_2", - Namespace: "test_ns/", - RequestPath: "/v1/request/path/2", - Token: "test_token2", - TokenParent: "test_token_parent", - TokenAccessor: "test_accessor2", - Lease: "path/to/test_lease/2", - LeaseToken: "test_lease_token", - Response: []byte("hello world"), - } - - if err := cache.Set(in2); err != nil { - t.Fatal(err) - } - - testCases := []struct { - name string - indexName string - indexValues []interface{} - }{ - { - "by_request_path", - "request_path", - []interface{}{"test_ns/", "/v1/request/path"}, - }, - { - "by_lease", - "lease", - []interface{}{"path/to/test_lease"}, - }, - { - "by_token_parent", - "token_parent", - []interface{}{"test_token_parent"}, - }, - { - "by_lease_token", - "lease_token", - []interface{}{"test_lease_token"}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - out, err := 
cache.GetByPrefix(tc.indexName, tc.indexValues...) - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal([]*Index{in, in2}, out); diff != nil { - t.Fatal(diff) - } - }) - } -} - -func TestCacheMemDB_Set(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - testCases := []struct { - name string - index *Index - wantErr bool - }{ - { - "nil", - nil, - true, - }, - { - "empty_fields", - &Index{}, - true, - }, - { - "missing_required_fields", - &Index{ - Lease: "foo", - }, - true, - }, - { - "all_fields", - &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Token: "test_token", - TokenAccessor: "test_accessor", - Lease: "test_lease", - RenewCtxInfo: testContextInfo(), - }, - false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - if err := cache.Set(tc.index); (err != nil) != tc.wantErr { - t.Fatalf("CacheMemDB.Set() error = %v, wantErr = %v", err, tc.wantErr) - } - }) - } -} - -func TestCacheMemDB_Evict(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Test on empty cache - if err := cache.Evict(IndexNameID, "foo"); err != nil { - t.Fatal(err) - } - - testIndex := &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Token: "test_token", - TokenAccessor: "test_token_accessor", - Lease: "test_lease", - RenewCtxInfo: testContextInfo(), - } - - testCases := []struct { - name string - indexName string - indexValues []interface{} - insertIndex *Index - wantErr bool - }{ - { - "empty_params", - "", - []interface{}{""}, - nil, - true, - }, - { - "invalid_params", - "foo", - []interface{}{"bar"}, - nil, - true, - }, - { - "by_id", - "id", - []interface{}{"test_id"}, - testIndex, - false, - }, - { - "by_request_path", - "request_path", - []interface{}{"test_ns/", "/v1/request/path"}, - testIndex, - false, - }, - { - "by_token", - "token", - []interface{}{"test_token"}, - testIndex, - false, - }, - { - "by_token_accessor", - "token_accessor", - []interface{}{"test_accessor"}, - testIndex, - false, - }, - { - "by_lease", - "lease", - []interface{}{"test_lease"}, - testIndex, - false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - if tc.insertIndex != nil { - if err := cache.Set(tc.insertIndex); err != nil { - t.Fatal(err) - } - } - - if err := cache.Evict(tc.indexName, tc.indexValues...); (err != nil) != tc.wantErr { - t.Fatal(err) - } - - // Verify that the cache doesn't contain the entry any more - index, err := cache.Get(tc.indexName, tc.indexValues...) 
- if (err != nil) != tc.wantErr { - t.Fatal(err) - } - - if index != nil { - t.Fatalf("expected nil entry, got = %#v", index) - } - }) - } -} - -func TestCacheMemDB_Flush(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Populate cache - in := &Index{ - ID: "test_id", - Token: "test_token", - Lease: "test_lease", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Response: []byte("hello world"), - } - - if err := cache.Set(in); err != nil { - t.Fatal(err) - } - - // Reset the cache - if err := cache.Flush(); err != nil { - t.Fatal(err) - } - - // Check the cache doesn't contain inserted index - out, err := cache.Get(IndexNameID, "test_id") - if err != nil { - t.Fatal(err) - } - if out != nil { - t.Fatalf("expected cache to be empty, got = %v", out) - } -} diff --git a/command/agent/cache/cachememdb/index.go b/command/agent/cache/cachememdb/index.go deleted file mode 100644 index a7da2edc2514..000000000000 --- a/command/agent/cache/cachememdb/index.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cachememdb - -import ( - "context" - "encoding/json" - "net/http" - "time" -) - -// Index holds the response to be cached along with multiple other values that -// serve as pointers to refer back to this index. -type Index struct { - // ID is a value that uniquely represents the request held by this - // index. This is computed by serializing and hashing the incoming request. - // Required: true, Unique: true - ID string - - // Token is the token that fetched the response held by this index - // Required: true, Unique: true - Token string - - // TokenParent is the parent token of the token held by this index - // Required: false, Unique: false - TokenParent string - - // TokenAccessor is the accessor of the token being cached in this index - // Required: true, Unique: true - TokenAccessor string - - // Namespace is the namespace that was provided in the request path as the - // Vault namespace to query - Namespace string - - // RequestPath is the path of the request that resulted in the response - // held by this index. - // Required: true, Unique: false - RequestPath string - - // Lease is the identifier of the lease in Vault, that belongs to the - // response held by this index. - // Required: false, Unique: true - Lease string - - // LeaseToken is the identifier of the token that created the lease held by - // this index. - // Required: false, Unique: false - LeaseToken string - - // Response is the serialized response object that the agent is caching. - Response []byte - - // RenewCtxInfo holds the context and the corresponding cancel func for the - // goroutine that manages the renewal of the secret belonging to the - // response in this index. - RenewCtxInfo *ContextInfo - - // RequestMethod is the HTTP method of the request - RequestMethod string - - // RequestToken is the token used in the request - RequestToken string - - // RequestHeader is the header used in the request - RequestHeader http.Header - - // LastRenewed is the timestamp of last renewal - LastRenewed time.Time - - // Type is the index type (token, auth-lease, secret-lease) - Type string -} - -type IndexName uint32 - -const ( - // IndexNameID is the ID of the index constructed from the serialized request. - IndexNameID = "id" - - // IndexNameLease is the lease of the index. - IndexNameLease = "lease" - - // IndexNameRequestPath is the request path of the index.
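// (Editor's aside: an Index survives persistence without its renewal
// context — the Serialize/Deserialize helpers later in this file drop
// RenewCtxInfo before marshaling, roughly:
//
//	b, _ := in.Serialize()   // RenewCtxInfo is nil'd on the value copy
//	out, _ := Deserialize(b) // out.RenewCtxInfo == nil
//
// so entries restored from storage need their renewal contexts rebuilt.)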
- IndexNameRequestPath = "request_path" - - // IndexNameToken is the token of the index. - IndexNameToken = "token" - - // IndexNameTokenAccessor is the token accessor of the index. - IndexNameTokenAccessor = "token_accessor" - - // IndexNameTokenParent is the token parent of the index. - IndexNameTokenParent = "token_parent" - - // IndexNameLeaseToken is the token that created the lease. - IndexNameLeaseToken = "lease_token" -) - -func validIndexName(indexName string) bool { - switch indexName { - case "id": - case "lease": - case "request_path": - case "token": - case "token_accessor": - case "token_parent": - case "lease_token": - default: - return false - } - return true -} - -type ContextInfo struct { - Ctx context.Context - CancelFunc context.CancelFunc - DoneCh chan struct{} -} - -func NewContextInfo(ctx context.Context) *ContextInfo { - if ctx == nil { - return nil - } - - ctxInfo := new(ContextInfo) - ctxInfo.Ctx, ctxInfo.CancelFunc = context.WithCancel(ctx) - ctxInfo.DoneCh = make(chan struct{}) - return ctxInfo -} - -// Serialize returns a json marshal'ed Index object, without the RenewCtxInfo -func (i Index) Serialize() ([]byte, error) { - i.RenewCtxInfo = nil - - indexBytes, err := json.Marshal(i) - if err != nil { - return nil, err - } - - return indexBytes, nil -} - -// Deserialize converts json bytes to an Index object -// Note: RenewCtxInfo will need to be reconstructed elsewhere. -func Deserialize(indexBytes []byte) (*Index, error) { - index := new(Index) - if err := json.Unmarshal(indexBytes, index); err != nil { - return nil, err - } - return index, nil -} diff --git a/command/agent/cache/keymanager/manager.go b/command/agent/cache/keymanager/manager.go deleted file mode 100644 index 0cecc03a11f7..000000000000 --- a/command/agent/cache/keymanager/manager.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package keymanager - -import ( - "context" - - wrapping "github.com/hashicorp/go-kms-wrapping/v2" -) - -const ( - KeyID = "root" -) - -type KeyManager interface { - // Returns a wrapping.Wrapper which can be used to perform key-related operations. - Wrapper() wrapping.Wrapper - // RetrievalToken is the material returned which can be used to source back the - // encryption key. Depending on the implementation, the token can be the - // encryption key itself or a token/identifier used to exchange the token. - RetrievalToken(ctx context.Context) ([]byte, error) -} diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go deleted file mode 100644 index 286f498e0051..000000000000 --- a/command/agent/cache/lease_cache.go +++ /dev/null @@ -1,1311 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "bufio" - "bytes" - "context" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/base62" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/helper/namespace" - nshelper "github.com/hashicorp/vault/helper/namespace" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/cryptoutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/locksutil" - "github.com/hashicorp/vault/sdk/logical" - gocache "github.com/patrickmn/go-cache" - "go.uber.org/atomic" -) - -const ( - vaultPathTokenCreate = "/v1/auth/token/create" - vaultPathTokenRevoke = "/v1/auth/token/revoke" - vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self" - vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor" - vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan" - vaultPathTokenLookup = "/v1/auth/token/lookup" - vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self" - vaultPathTokenRenew = "/v1/auth/token/renew" - vaultPathTokenRenewSelf = "/v1/auth/token/renew-self" - vaultPathLeaseRevoke = "/v1/sys/leases/revoke" - vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force" - vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix" -) - -var ( - contextIndexID = contextIndex{} - errInvalidType = errors.New("invalid type provided") - revocationPaths = []string{ - strings.TrimPrefix(vaultPathTokenRevoke, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"), - } -) - -type contextIndex struct{} - -type cacheClearRequest struct { - Type string `json:"type"` - Value string `json:"value"` - Namespace string `json:"namespace"` -} - -// LeaseCache is an implementation of Proxier that handles -// the caching of responses. It passes the incoming request -// to an underlying Proxier implementation. -type LeaseCache struct { - client *api.Client - proxier Proxier - logger hclog.Logger - db *cachememdb.CacheMemDB - baseCtxInfo *cachememdb.ContextInfo - l *sync.RWMutex - - // idLocks is used during cache lookup to ensure that identical requests made - // in parallel won't trigger multiple renewal goroutines. - idLocks []*locksutil.LockEntry - - // inflightCache keeps track of inflight requests - inflightCache *gocache.Cache - - // ps is the persistent storage for tokens and leases - ps *cacheboltdb.BoltStorage - - // shuttingDown is used to determine if cache needs to be evicted or not - // when the context is cancelled - shuttingDown atomic.Bool -} - -// LeaseCacheConfig is the configuration for initializing a new -// LeaseCache.
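// (Editor's sketch of typical wiring, assuming an api.Client, a Proxier such
// as APIProxy, and an optional BoltStorage already exist; the field names are
// exactly those of the struct below:
//
//	lc, err := NewLeaseCache(&LeaseCacheConfig{
//		Client:      client,
//		BaseContext: ctx,
//		Proxier:     proxier,
//		Logger:      logger.Named("leasecache"), // name is illustrative
//		Storage:     boltStorage,                // optional persistence
//	})
//
// NewLeaseCache, defined below, rejects a nil Client, Proxier, or Logger.)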
-type LeaseCacheConfig struct { - Client *api.Client - BaseContext context.Context - Proxier Proxier - Logger hclog.Logger - Storage *cacheboltdb.BoltStorage -} - -type inflightRequest struct { - // ch is closed by the request that ends up processing the set of - // parallel request - ch chan struct{} - - // remaining is the number of remaining inflight request that needs to - // be processed before this object can be cleaned up - remaining *atomic.Uint64 -} - -func newInflightRequest() *inflightRequest { - return &inflightRequest{ - ch: make(chan struct{}), - remaining: atomic.NewUint64(0), - } -} - -// NewLeaseCache creates a new instance of a LeaseCache. -func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) { - if conf == nil { - return nil, errors.New("nil configuration provided") - } - - if conf.Proxier == nil || conf.Logger == nil { - return nil, fmt.Errorf("missing configuration required params: %v", conf) - } - - if conf.Client == nil { - return nil, fmt.Errorf("nil API client") - } - - db, err := cachememdb.New() - if err != nil { - return nil, err - } - - // Create a base context for the lease cache layer - baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext) - - return &LeaseCache{ - client: conf.Client, - proxier: conf.Proxier, - logger: conf.Logger, - db: db, - baseCtxInfo: baseCtxInfo, - l: &sync.RWMutex{}, - idLocks: locksutil.CreateLocks(), - inflightCache: gocache.New(gocache.NoExpiration, gocache.NoExpiration), - ps: conf.Storage, - }, nil -} - -// SetShuttingDown is a setter for the shuttingDown field -func (c *LeaseCache) SetShuttingDown(in bool) { - c.shuttingDown.Store(in) -} - -// SetPersistentStorage is a setter for the persistent storage field in -// LeaseCache -func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) { - c.ps = storageIn -} - -// checkCacheForRequest checks the cache for a particular request based on its -// computed ID. It returns a non-nil *SendResponse if an entry is found. -func (c *LeaseCache) checkCacheForRequest(id string) (*SendResponse, error) { - index, err := c.db.Get(cachememdb.IndexNameID, id) - if err != nil { - return nil, err - } - - if index == nil { - return nil, nil - } - - // Cached request is found, deserialize the response - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - c.logger.Error("failed to deserialize response", "error", err) - return nil, err - } - - sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response) - if err != nil { - c.logger.Error("failed to create new send response", "error", err) - return nil, err - } - sendResp.CacheMeta.Hit = true - - respTime, err := http.ParseTime(resp.Header.Get("Date")) - if err != nil { - c.logger.Error("failed to parse cached response date", "error", err) - return nil, err - } - sendResp.CacheMeta.Age = time.Now().Sub(respTime) - - return sendResp, nil -} - -// Send performs a cache lookup on the incoming request. If it's a cache hit, -// it will return the cached response, otherwise it will delegate to the -// underlying Proxier and cache the received response. -func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - // Compute the index ID - id, err := computeIndexID(req) - if err != nil { - c.logger.Error("failed to compute cache key", "error", err) - return nil, err - } - - // Check the inflight cache to see if there are other inflight requests - // of the same kind, based on the computed ID. 
If so, we increment a counter - - var inflight *inflightRequest - - defer func() { - // Cleanup on the cache if there are no remaining inflight requests. - // This is the last step, so we defer the call first - if inflight != nil && inflight.remaining.Load() == 0 { - c.inflightCache.Delete(id) - } - }() - - idLock := locksutil.LockForKey(c.idLocks, id) - - // Briefly grab an ID-based lock in here to emulate a load-or-store behavior - // and prevent concurrent cacheable requests from being proxied twice if - // they both miss the cache due to it being clean when peeking the cache - // entry. - idLock.Lock() - inflightRaw, found := c.inflightCache.Get(id) - if found { - idLock.Unlock() - inflight = inflightRaw.(*inflightRequest) - inflight.remaining.Inc() - defer inflight.remaining.Dec() - - // If found it means that there's an inflight request being processed. - // We wait until that's finished before proceeding further. - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-inflight.ch: - } - } else { - inflight = newInflightRequest() - inflight.remaining.Inc() - defer inflight.remaining.Dec() - - c.inflightCache.Set(id, inflight, gocache.NoExpiration) - idLock.Unlock() - - // Signal that the processing request is done - defer close(inflight.ch) - } - - // Check if the response for this request is already in the cache - cachedResp, err := c.checkCacheForRequest(id) - if err != nil { - return nil, err - } - if cachedResp != nil { - c.logger.Debug("returning cached response", "path", req.Request.URL.Path) - return cachedResp, nil - } - - c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) - - // Pass the request down and get a response - resp, err := c.proxier.Send(ctx, req) - if err != nil { - return resp, err - } - - // If this is a non-2xx or if the returned response does not contain JSON payload, - // we skip caching - if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" { - return resp, err - } - - // Get the namespace from the request header - namespace := req.Request.Header.Get(consts.NamespaceHeaderName) - // We need to populate an empty value since go-memdb will skip over indexes - // that contain empty values. - if namespace == "" { - namespace = "root/" - } - - // Build the index to cache based on the response received - index := &cachememdb.Index{ - ID: id, - Namespace: namespace, - RequestPath: req.Request.URL.Path, - LastRenewed: time.Now().UTC(), - } - - secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) - if err != nil { - c.logger.Error("failed to parse response as secret", "error", err) - return nil, err - } - - isRevocation, err := c.handleRevocationRequest(ctx, req, resp) - if err != nil { - c.logger.Error("failed to process the response", "error", err) - return nil, err - } - - // If this is a revocation request, do not go through cache logic. 
- if isRevocation { - return resp, nil - } - - // Fast path for responses with no secrets - if secret == nil { - c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Short-circuit if the secret is not renewable - tokenRenewable, err := secret.TokenIsRenewable() - if err != nil { - c.logger.Error("failed to parse renewable param", "error", err) - return nil, err - } - if !secret.Renewable && !tokenRenewable { - c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - var renewCtxInfo *cachememdb.ContextInfo - switch { - case secret.LeaseID != "": - c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) - entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) - if err != nil { - return nil, err - } - // If the lease belongs to a token that is not managed by the agent, - // return the response without caching it. - if entry == nil { - c.logger.Debug("pass-through lease response; token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Derive a context for renewal using the token's context - renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) - - index.Lease = secret.LeaseID - index.LeaseToken = req.Token - - index.Type = cacheboltdb.LeaseType - - case secret.Auth != nil: - c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) - - // Check if this token creation request resulted in a non-orphan token, and if so - // correctly set the parentCtx to the request's token context. - var parentCtx context.Context - if !secret.Auth.Orphan { - entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) - if err != nil { - return nil, err - } - // If parent token is not managed by the agent, child shouldn't be - // either. - if entry == nil { - c.logger.Debug("pass-through auth response; parent token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) - parentCtx = entry.RenewCtxInfo.Ctx - - index.TokenParent = req.Token - } - - renewCtxInfo = c.createCtxInfo(parentCtx) - index.Token = secret.Auth.ClientToken - index.TokenAccessor = secret.Auth.Accessor - - index.Type = cacheboltdb.LeaseType - - default: - // We shouldn't be hitting this, but will err on the side of caution and - // simply proxy. 
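-		// (For example, a response that reports "renewable": true but carries
-		// neither a lease ID nor an auth block would land here.)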
- c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Serialize the response to store it in the cached index - var respBytes bytes.Buffer - err = resp.Response.Write(&respBytes) - if err != nil { - c.logger.Error("failed to serialize response", "error", err) - return nil, err - } - - // Reset the response body for upper layers to read - if resp.Response.Body != nil { - resp.Response.Body.Close() - } - resp.Response.Body = ioutil.NopCloser(bytes.NewReader(resp.ResponseBody)) - - // Set the index's Response - index.Response = respBytes.Bytes() - - // Store the index ID in the lifetimewatcher context - renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) - - // Store the lifetime watcher context in the index - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: renewCtx, - CancelFunc: renewCtxInfo.CancelFunc, - DoneCh: renewCtxInfo.DoneCh, - } - - // Add extra information necessary for restoring from persisted cache - index.RequestMethod = req.Request.Method - index.RequestToken = req.Token - index.RequestHeader = req.Request.Header - - // Store the index in the cache - c.logger.Debug("storing response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path) - err = c.Set(ctx, index) - if err != nil { - c.logger.Error("failed to cache the proxied response", "error", err) - return nil, err - } - - // Start renewing the secret in the response - go c.startRenewing(renewCtx, index, req, secret) - - return resp, nil -} - -func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo { - if ctx == nil { - c.l.RLock() - ctx = c.baseCtxInfo.Ctx - c.l.RUnlock() - } - return cachememdb.NewContextInfo(ctx) -} - -func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { - defer func() { - id := ctx.Value(contextIndexID).(string) - if c.shuttingDown.Load() { - c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) - return - } - c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) - err := c.Evict(index) - if err != nil { - c.logger.Error("failed to evict index", "id", id, "error", err) - return - } - }() - - client, err := c.client.Clone() - if err != nil { - c.logger.Error("failed to create API client in the lifetime watcher", "error", err) - return - } - client.SetToken(req.Token) - client.SetHeaders(req.Request.Header) - - watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ - Secret: secret, - }) - if err != nil { - c.logger.Error("failed to create secret lifetime watcher", "error", err) - return - } - - c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path) - go watcher.Start() - defer watcher.Stop() - - for { - select { - case <-ctx.Done(): - // This is the case which captures context cancellations from token - // and leases. Since all the contexts are derived from the agent's - // context, this will also cover the shutdown scenario. 
- c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path) - return - case err := <-watcher.DoneCh(): - // This case covers renewal completion and renewal errors - if err != nil { - c.logger.Error("failed to renew secret", "error", err) - return - } - c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path) - return - case <-watcher.RenewCh(): - c.logger.Debug("secret renewed", "path", req.Request.URL.Path) - if c.ps != nil { - if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil { - c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID) - } - } - case <-index.RenewCtxInfo.DoneCh: - // This case indicates the renewal process to shutdown and evict - // the cache entry. This is triggered when a specific secret - // renewal needs to be killed without affecting any of the derived - // context renewals. - c.logger.Debug("done channel closed") - return - } - } -} - -func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error { - idLock := locksutil.LockForKey(c.idLocks, index.ID) - idLock.Lock() - defer idLock.Unlock() - - getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID) - if err != nil { - return err - } - index.LastRenewed = t - if err := c.Set(ctx, getIndex); err != nil { - return err - } - return nil -} - -// computeIndexID results in a value that uniquely identifies a request -// received by the agent. It does so by SHA256 hashing the serialized request -// object containing the request path, query parameters and body parameters. -func computeIndexID(req *SendRequest) (string, error) { - var b bytes.Buffer - - cloned := req.Request.Clone(context.Background()) - cloned.Header.Del(vaulthttp.VaultIndexHeaderName) - cloned.Header.Del(vaulthttp.VaultForwardHeaderName) - cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName) - // Serialize the request - if err := cloned.Write(&b); err != nil { - return "", fmt.Errorf("failed to serialize request: %v", err) - } - - // Reset the request body after it has been closed by Write - req.Request.Body = ioutil.NopCloser(bytes.NewReader(req.RequestBody)) - - // Append req.Token into the byte slice. This is needed since auto-auth'ed - // requests sets the token directly into SendRequest.Token - if _, err := b.Write([]byte(req.Token)); err != nil { - return "", fmt.Errorf("failed to write token to hash input: %w", err) - } - - return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil -} - -// HandleCacheClear returns a handlerFunc that can perform cache clearing operations. 
-func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the cache is not enabled, return a 200 - if c == nil { - return - } - - // Only handle POST/PUT requests - switch r.Method { - case http.MethodPost: - case http.MethodPut: - default: - return - } - - req := new(cacheClearRequest) - if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil { - if err == io.EOF { - err = errors.New("empty JSON provided") - } - logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err)) - return - } - - c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value) - - in, err := parseCacheClearInput(req) - if err != nil { - c.logger.Error("unable to parse clear input", "error", err) - logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err)) - return - } - - if err := c.handleCacheClear(ctx, in); err != nil { - // Default to 500 on error, unless the user provided an invalid type, - // which would then be a 400. - httpStatus := http.StatusInternalServerError - if err == errInvalidType { - httpStatus = http.StatusBadRequest - } - logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err)) - return - } - - return - }) -} - -func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error { - if in == nil { - return errors.New("no value(s) provided to clear corresponding cache entries") - } - - switch in.Type { - case "request_path": - // For this particular case, we need to ensure that there are 2 provided - // indexers for the proper lookup. - if in.RequestPath == "" { - return errors.New("request path not provided") - } - - // The first value provided for this case will be the namespace, but if it's - // an empty value we need to overwrite it with "root/" to ensure proper - // cache lookup. 
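-		// For example, {"type": "request_path", "namespace": "", "value": "/v1/sample/api"}
-		// is looked up as ("root/", "/v1/sample/api").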
- if in.Namespace == "" { - in.Namespace = "root/" - } - - // Find all the cached entries which has the given request path and - // cancel the contexts of all the respective lifetime watchers - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath) - if err != nil { - return err - } - for _, index := range indexes { - index.RenewCtxInfo.CancelFunc() - } - - case "token": - if in.Token == "" { - return errors.New("token not provided") - } - - // Get the context for the given token and cancel its context - index, err := c.db.Get(cachememdb.IndexNameToken, in.Token) - if err != nil { - return err - } - if index == nil { - return nil - } - - c.logger.Debug("canceling context of index attached to token") - - index.RenewCtxInfo.CancelFunc() - - case "token_accessor": - if in.TokenAccessor == "" { - return errors.New("token accessor not provided") - } - - // Get the cached index and cancel the corresponding lifetime watcher - // context - index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor) - if err != nil { - return err - } - if index == nil { - return nil - } - - c.logger.Debug("canceling context of index attached to accessor") - - index.RenewCtxInfo.CancelFunc() - - case "lease": - if in.Lease == "" { - return errors.New("lease not provided") - } - - // Get the cached index and cancel the corresponding lifetime watcher - // context - index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease) - if err != nil { - return err - } - if index == nil { - return nil - } - - c.logger.Debug("canceling context of index attached to accessor") - - index.RenewCtxInfo.CancelFunc() - - case "all": - // Cancel the base context which triggers all the goroutines to - // stop and evict entries from cache. - c.logger.Debug("canceling base context") - c.l.Lock() - c.baseCtxInfo.CancelFunc() - // Reset the base context - baseCtx, baseCancel := context.WithCancel(ctx) - c.baseCtxInfo = &cachememdb.ContextInfo{ - Ctx: baseCtx, - CancelFunc: baseCancel, - } - c.l.Unlock() - - // Reset the memdb instance (and persistent storage if enabled) - if err := c.Flush(); err != nil { - return err - } - - default: - return errInvalidType - } - - c.logger.Debug("successfully cleared matching cache entries") - - return nil -} - -// handleRevocationRequest checks whether the originating request is a -// revocation request, and if so perform applicable cache cleanups. -// Returns true is this is a revocation request. -func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) { - // Lease and token revocations return 204's on success. Fast-path if that's - // not the case. - if resp.Response.StatusCode != http.StatusNoContent { - return false, nil - } - - _, path := deriveNamespaceAndRevocationPath(req) - - switch { - case path == vaultPathTokenRevoke: - // Get the token from the request body - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - tokenRaw, ok := jsonBody["token"] - if !ok { - return false, fmt.Errorf("failed to get token from request body") - } - token, ok := tokenRaw.(string) - if !ok { - return false, fmt.Errorf("expected token in the request body to be string") - } - - // Clear the cache entry associated with the token and all the other - // entries belonging to the leases derived from this token. 
- in := &cacheClearInput{ - Type: "token", - Token: token, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeSelf: - // Clear the cache entry associated with the token and all the other - // entries belonging to the leases derived from this token. - in := &cacheClearInput{ - Type: "token", - Token: req.Token, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeAccessor: - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - accessorRaw, ok := jsonBody["accessor"] - if !ok { - return false, fmt.Errorf("failed to get accessor from request body") - } - accessor, ok := accessorRaw.(string) - if !ok { - return false, fmt.Errorf("expected accessor in the request body to be string") - } - - in := &cacheClearInput{ - Type: "token_accessor", - TokenAccessor: accessor, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeOrphan: - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - tokenRaw, ok := jsonBody["token"] - if !ok { - return false, fmt.Errorf("failed to get token from request body") - } - token, ok := tokenRaw.(string) - if !ok { - return false, fmt.Errorf("expected token in the request body to be string") - } - - // Kill the lifetime watchers of all the leases attached to the revoked - // token - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token) - if err != nil { - return false, err - } - for _, index := range indexes { - index.RenewCtxInfo.CancelFunc() - } - - // Kill the lifetime watchers of the revoked token - index, err := c.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - return false, err - } - if index == nil { - return true, nil - } - - // Indicate the lifetime watcher goroutine for this index to return. - // This will not affect the child tokens because the context is not - // getting cancelled. - close(index.RenewCtxInfo.DoneCh) - - // Clear the parent references of the revoked token in the entries - // belonging to the child tokens of the revoked token. - indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token) - if err != nil { - return false, err - } - for _, index := range indexes { - index.TokenParent = "" - err = c.db.Set(index) - if err != nil { - c.logger.Error("failed to persist index", "error", err) - return false, err - } - } - - case path == vaultPathLeaseRevoke: - // TODO: Should lease present in the URL itself be considered here? 
-		// Get the lease from the request body
-		jsonBody := map[string]interface{}{}
-		if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
-			return false, err
-		}
-		leaseIDRaw, ok := jsonBody["lease_id"]
-		if !ok {
-			return false, fmt.Errorf("failed to get lease_id from request body")
-		}
-		leaseID, ok := leaseIDRaw.(string)
-		if !ok {
-			return false, fmt.Errorf("expected lease_id in the request body to be string")
-		}
-		in := &cacheClearInput{
-			Type:  "lease",
-			Lease: leaseID,
-		}
-		if err := c.handleCacheClear(ctx, in); err != nil {
-			return false, err
-		}
-
-	case strings.HasPrefix(path, vaultPathLeaseRevokeForce):
-		// Trim the URL path to get the request path prefix
-		prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce)
-		// Get all the cache indexes that use the request path containing the
-		// prefix and cancel the lifetime watcher context of each.
-		indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
-		if err != nil {
-			return false, err
-		}
-
-		_, tokenNSID := namespace.SplitIDFromString(req.Token)
-		for _, index := range indexes {
-			_, leaseNSID := namespace.SplitIDFromString(index.Lease)
-			// Only evict leases that match the token's namespace
-			if tokenNSID == leaseNSID {
-				index.RenewCtxInfo.CancelFunc()
-			}
-		}
-
-	case strings.HasPrefix(path, vaultPathLeaseRevokePrefix):
-		// Trim the URL path to get the request path prefix
-		prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix)
-		// Get all the cache indexes that use the request path containing the
-		// prefix and cancel the lifetime watcher context of each.
-		indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
-		if err != nil {
-			return false, err
-		}
-
-		_, tokenNSID := namespace.SplitIDFromString(req.Token)
-		for _, index := range indexes {
-			_, leaseNSID := namespace.SplitIDFromString(index.Lease)
-			// Only evict leases that match the token's namespace
-			if tokenNSID == leaseNSID {
-				index.RenewCtxInfo.CancelFunc()
-			}
-		}
-
-	default:
-		return false, nil
-	}
-
-	c.logger.Debug("triggered cache eviction from revocation request")
-
-	return true, nil
-}
-
-// Set stores the index in the cachememdb, and also stores it in the persistent
-// cache (if enabled)
-func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error {
-	if err := c.db.Set(index); err != nil {
-		return err
-	}
-
-	if c.ps != nil {
-		plaintext, err := index.Serialize()
-		if err != nil {
-			return err
-		}
-
-		if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil {
-			return err
-		}
-		c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID)
-	}
-
-	return nil
-}
-
-// Evict removes an Index from the cachememdb, and also removes it from the
-// persistent cache (if enabled)
-func (c *LeaseCache) Evict(index *cachememdb.Index) error {
-	if err := c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil {
-		return err
-	}
-
-	if c.ps != nil {
-		if err := c.ps.Delete(index.ID, index.Type); err != nil {
-			return err
-		}
-		c.logger.Trace("deleted item from persistent storage", "id", index.ID)
-	}
-
-	return nil
-}
-
-// Flush the cachememdb and persistent cache (if enabled)
-func (c *LeaseCache) Flush() error {
-	if err := c.db.Flush(); err != nil {
-		return err
-	}
-
-	if c.ps != nil {
-		c.logger.Trace("clearing persistent storage")
-		return c.ps.Clear()
-	}
-
-	return nil
-}
-
-// Restore loads the cachememdb from the persistent storage passed in.
Loads -// tokens first, since restoring a lease's renewal context and watcher requires -// looking up the token in the cachememdb. -func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error { - var errs *multierror.Error - - // Process tokens first - tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - if err := c.restoreTokens(tokens); err != nil { - errs = multierror.Append(errs, err) - } - } - - // Then process leases - leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - for _, lease := range leases { - newIndex, err := cachememdb.Deserialize(lease) - if err != nil { - errs = multierror.Append(errs, err) - continue - } - - c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath) - - // Check if this lease has already expired - expired, err := c.hasExpired(time.Now().UTC(), newIndex) - if err != nil { - c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err) - } - if expired { - continue - } - - if err := c.restoreLeaseRenewCtx(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - if err := c.db.Set(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath) - } - } - - return errs.ErrorOrNil() -} - -func (c *LeaseCache) restoreTokens(tokens [][]byte) error { - var errors *multierror.Error - - for _, token := range tokens { - newIndex, err := cachememdb.Deserialize(token) - if err != nil { - errors = multierror.Append(errors, err) - continue - } - newIndex.RenewCtxInfo = c.createCtxInfo(nil) - if err := c.db.Set(newIndex); err != nil { - errors = multierror.Append(errors, err) - continue - } - c.logger.Trace("restored token", "id", newIndex.ID) - } - - return errors.ErrorOrNil() -} - -// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts -// the watcher go routine -func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error { - if index.Response == nil { - return fmt.Errorf("cached response was nil for %s", index.ID) - } - - // Parse the secret to determine which type it is - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - c.logger.Error("failed to deserialize response", "error", err) - return err - } - secret, err := api.ParseSecret(resp.Body) - if err != nil { - c.logger.Error("failed to parse response as secret", "error", err) - return err - } - - var renewCtxInfo *cachememdb.ContextInfo - switch { - case secret.LeaseID != "": - entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) - if err != nil { - return err - } - - if entry == nil { - return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) - } - - // Derive a context for renewal using the token's context - renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) - - case secret.Auth != nil: - var parentCtx context.Context - if !secret.Auth.Orphan { - entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) - if err != nil { - return err - } - // If parent token is not managed by the agent, child shouldn't be - // either. 
-			if entry == nil {
-				return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
-			}
-
-			c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath)
-			parentCtx = entry.RenewCtxInfo.Ctx
-		}
-		renewCtxInfo = c.createCtxInfo(parentCtx)
-	default:
-		return fmt.Errorf("unknown cached index item: %s", index.ID)
-	}
-
-	renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
-	index.RenewCtxInfo = &cachememdb.ContextInfo{
-		Ctx:        renewCtx,
-		CancelFunc: renewCtxInfo.CancelFunc,
-		DoneCh:     renewCtxInfo.DoneCh,
-	}
-
-	sendReq := &SendRequest{
-		Token: index.RequestToken,
-		Request: &http.Request{
-			Header: index.RequestHeader,
-			Method: index.RequestMethod,
-			URL: &url.URL{
-				Path: index.RequestPath,
-			},
-		},
-	}
-	go c.startRenewing(renewCtx, index, sendReq, secret)
-
-	return nil
-}
-
-// deriveNamespaceAndRevocationPath returns the namespace and relative path for
-// revocation paths.
-//
-// If the path contains a namespace, but it's not a revocation path, it will be
-// returned as-is, since there's no way to tell where the namespace ends and
-// where the request path begins purely based off a string.
-//
-// Case 1: /v1/ns1/leases/revoke              -> ns1/, /v1/leases/revoke
-// Case 2: ns1/ (header) + /v1/leases/revoke  -> ns1/, /v1/leases/revoke
-// Case 3: /v1/ns1/foo/bar                    -> root/, /v1/ns1/foo/bar
-// Case 4: ns1/ (header) + /v1/foo/bar        -> ns1/, /v1/foo/bar
-func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) {
-	namespace := "root/"
-	nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName)
-	if nsHeader != "" {
-		namespace = nsHeader
-	}
-
-	fullPath := req.Request.URL.Path
-	nonVersionedPath := strings.TrimPrefix(fullPath, "/v1")
-
-	for _, pathToCheck := range revocationPaths {
-		// We use strings.Index here because these paths can have variable
-		// suffixes, e.g. /v1/sys/leases/revoke-prefix/:prefix
-		i := strings.Index(nonVersionedPath, pathToCheck)
-		// If there's no match, move on to the next check
-		if i == -1 {
-			continue
-		}
-
-		// If the index is 0, this is a relative path with no namespace prepended,
-		// so we can break early
-		if i == 0 {
-			break
-		}
-
-		// We need to turn /ns1 into ns1/; Canonicalize takes care of that
-		namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i])
-
-		// If it's root, we replace, otherwise we join
-		if namespace == "root/" {
-			namespace = namespaceInPath
-		} else {
-			namespace = namespace + namespaceInPath
-		}
-
-		return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:])
-	}
-
-	return namespace, fmt.Sprintf("/v1%s", nonVersionedPath)
-}
-
-// RegisterAutoAuthToken adds the provided auto-auth token into the cache. This
-// is primarily used to register the auto-auth token and should only be called
-// within a sink's WriteToken func.
-func (c *LeaseCache) RegisterAutoAuthToken(token string) error {
-	// Get the token from the cache
-	oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token)
-	if err != nil {
-		return err
-	}
-
-	// If the index is found, just keep it in the cache and ignore the incoming
-	// token (since they're the same)
-	if oldIndex != nil {
-		c.logger.Trace("auto-auth token already exists in cache; no need to store it again")
-		return nil
-	}
-
-	// The following randomly generated values are required for the index stored
-	// by the cache, but are not actually used. We use random values to prevent
-	// accidental access.
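-	// (base62.Random(5) yields a 5-character base62 string, roughly 30 bits
-	// of randomness, which is plenty for values that are only stored and
-	// never matched against incoming requests.)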
- id, err := base62.Random(5) - if err != nil { - return err - } - namespace, err := base62.Random(5) - if err != nil { - return err - } - requestPath, err := base62.Random(5) - if err != nil { - return err - } - - index := &cachememdb.Index{ - ID: id, - Token: token, - Namespace: namespace, - RequestPath: requestPath, - Type: cacheboltdb.TokenType, - } - - // Derive a context off of the lease cache's base context - ctxInfo := c.createCtxInfo(nil) - - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: ctxInfo.Ctx, - CancelFunc: ctxInfo.CancelFunc, - DoneCh: ctxInfo.DoneCh, - } - - // Store the index in the cache - c.logger.Debug("storing auto-auth token into the cache") - err = c.Set(c.baseCtxInfo.Ctx, index) - if err != nil { - c.logger.Error("failed to cache the auto-auth token", "error", err) - return err - } - - return nil -} - -type cacheClearInput struct { - Type string - - RequestPath string - Namespace string - Token string - TokenAccessor string - Lease string -} - -func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) { - if req == nil { - return nil, errors.New("nil request options provided") - } - - if req.Type == "" { - return nil, errors.New("no type provided") - } - - in := &cacheClearInput{ - Type: req.Type, - Namespace: req.Namespace, - } - - switch req.Type { - case "request_path": - in.RequestPath = req.Value - case "token": - in.Token = req.Value - case "token_accessor": - in.TokenAccessor = req.Value - case "lease": - in.Lease = req.Value - } - - return in, nil -} - -func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) { - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - return false, fmt.Errorf("failed to deserialize response: %w", err) - } - secret, err := api.ParseSecret(resp.Body) - if err != nil { - return false, fmt.Errorf("failed to parse response as secret: %w", err) - } - - elapsed := currentTime.Sub(index.LastRenewed) - var leaseDuration int - switch { - case secret.LeaseID != "": - leaseDuration = secret.LeaseDuration - case secret.Auth != nil: - leaseDuration = secret.Auth.LeaseDuration - default: - return false, errors.New("secret without lease encountered in expiration check") - } - - if int(elapsed.Seconds()) > leaseDuration { - c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration) - return true, nil - } - return false, nil -} diff --git a/command/agent/cache/lease_cache_test.go b/command/agent/cache/lease_cache_test.go deleted file mode 100644 index 6ce6183120f6..000000000000 --- a/command/agent/cache/lease_cache_test.go +++ /dev/null @@ -1,1222 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/go-test/deep" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/command/agent/cache/keymanager" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -func testNewLeaseCache(t *testing.T, responses []*SendResponse) *LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - lc, err := NewLeaseCache(&LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: newMockProxier(responses), - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - }) - if err != nil { - t.Fatal(err) - } - return lc -} - -func testNewLeaseCacheWithDelay(t *testing.T, cacheable bool, delay int) *LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - - lc, err := NewLeaseCache(&LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: &mockDelayProxier{cacheable, delay}, - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - }) - if err != nil { - t.Fatal(err) - } - - return lc -} - -func testNewLeaseCacheWithPersistence(t *testing.T, responses []*SendResponse, storage *cacheboltdb.BoltStorage) *LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - require.NoError(t, err) - - lc, err := NewLeaseCache(&LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: newMockProxier(responses), - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - Storage: storage, - }) - require.NoError(t, err) - - return lc -} - -func TestCache_ComputeIndexID(t *testing.T) { - type args struct { - req *http.Request - } - tests := []struct { - name string - req *SendRequest - want string - wantErr bool - }{ - { - "basic", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "test", - }, - }, - }, - "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", - false, - }, - { - "ignore consistency headers", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "test", - }, - Header: http.Header{ - vaulthttp.VaultIndexHeaderName: []string{"foo"}, - vaulthttp.VaultInconsistentHeaderName: []string{"foo"}, - vaulthttp.VaultForwardHeaderName: []string{"foo"}, - }, - }, - }, - "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := computeIndexID(tt.req) - if (err != nil) != tt.wantErr { - t.Errorf("actual_error: %v, expected_error: %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, string(tt.want)) { - t.Errorf("bad: index id; actual: %q, expected: %q", got, string(tt.want)) - } - }) - } -} - -func TestLeaseCache_EmptyToken(t *testing.T) { - responses := []*SendResponse{ - newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), - } - 
lc := testNewLeaseCache(t, responses) - - // Even if the send request doesn't have a token on it, a successful - // cacheable response should result in the index properly getting populated - // with a token and memdb shouldn't complain while inserting the index. - urlPath := "http://example.com/v1/sample/api" - sendReq := &SendRequest{ - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatalf("expected a non empty response") - } -} - -func TestLeaseCache_SendCacheable(t *testing.T) { - // Emulate 2 responses from the api proxy. One returns a new token and the - // other returns a lease. - responses := []*SendResponse{ - newTestSendResponse(http.StatusCreated, `{"auth": {"client_token": "testtoken", "renewable": true}}`), - newTestSendResponse(http.StatusOK, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), - } - - lc := testNewLeaseCache(t, responses) - // Register an token so that the token and lease requests are cached - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - // Make a request. A response with a new token is returned to the lease - // cache and that will be cached. - urlPath := "http://example.com/v1/sample/api" - sendReq := &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Send the same request again to get the cached response - sendReq = &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Check TokenParent - cachedItem, err := lc.db.Get(cachememdb.IndexNameToken, "testtoken") - if err != nil { - t.Fatal(err) - } - if cachedItem == nil { - t.Fatalf("expected token entry from cache") - } - if cachedItem.TokenParent != "autoauthtoken" { - t.Fatalf("unexpected value for tokenparent: %s", cachedItem.TokenParent) - } - - // Modify the request a little bit to ensure the second response is - // returned to the lease cache. - sendReq = &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Make the same request again and ensure that the same response is returned - // again. 
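-	// (The index ID hashes the serialized request, including method, path,
-	// headers, body, and the request token, so this identical request maps
-	// to the same cache entry.)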
- sendReq = &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } -} - -func TestLeaseCache_SendNonCacheable(t *testing.T) { - responses := []*SendResponse{ - newTestSendResponse(http.StatusOK, `{"value": "output"}`), - newTestSendResponse(http.StatusNotFound, `{"value": "invalid"}`), - newTestSendResponse(http.StatusOK, `Hello`), - newTestSendResponse(http.StatusTemporaryRedirect, ""), - } - - lc := testNewLeaseCache(t, responses) - - // Send a request through the lease cache which is not cacheable (there is - // no lease information or auth information in the response) - sendReq := &SendRequest{ - Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Since the response is non-cacheable, the second response will be - // returned. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Since the response is non-cacheable, the third response will be - // returned. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", "http://example.com", nil), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[2].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Since the response is non-cacheable, the fourth response will be - // returned. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", "http://example.com", nil), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[3].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } -} - -func TestLeaseCache_SendNonCacheableNonTokenLease(t *testing.T) { - // Create the cache - responses := []*SendResponse{ - newTestSendResponse(http.StatusOK, `{"value": "output", "lease_id": "foo"}`), - newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), - } - lc := testNewLeaseCache(t, responses) - - // Send a request through lease cache which returns a response containing - // lease_id. Response will not be cached because it doesn't belong to a - // token that is managed by the lease cache. 
- urlPath := "http://example.com/v1/sample/api" - sendReq := &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - idx, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath) - if err != nil { - t.Fatal(err) - } - if idx != nil { - t.Fatalf("expected nil entry, got: %#v", idx) - } - - // Verify that the response is not cached by sending the same request and - // by expecting a different response. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - idx, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath) - if err != nil { - t.Fatal(err) - } - if idx != nil { - t.Fatalf("expected nil entry, got: %#v", idx) - } -} - -func TestLeaseCache_HandleCacheClear(t *testing.T) { - lc := testNewLeaseCache(t, nil) - - handler := lc.HandleCacheClear(context.Background()) - ts := httptest.NewServer(handler) - defer ts.Close() - - // Test missing body, should return 400 - resp, err := http.Post(ts.URL, "application/json", nil) - if err != nil { - t.Fatal() - } - if resp.StatusCode != http.StatusBadRequest { - t.Fatalf("status code mismatch: expected = %v, got = %v", http.StatusBadRequest, resp.StatusCode) - } - - testCases := []struct { - name string - reqType string - reqValue string - expectedStatusCode int - }{ - { - "invalid_type", - "foo", - "", - http.StatusBadRequest, - }, - { - "invalid_value", - "", - "bar", - http.StatusBadRequest, - }, - { - "all", - "all", - "", - http.StatusOK, - }, - { - "by_request_path", - "request_path", - "foo", - http.StatusOK, - }, - { - "by_token", - "token", - "foo", - http.StatusOK, - }, - { - "by_lease", - "lease", - "foo", - http.StatusOK, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - reqBody := fmt.Sprintf("{\"type\": \"%s\", \"value\": \"%s\"}", tc.reqType, tc.reqValue) - resp, err := http.Post(ts.URL, "application/json", strings.NewReader(reqBody)) - if err != nil { - t.Fatal(err) - } - if tc.expectedStatusCode != resp.StatusCode { - t.Fatalf("status code mismatch: expected = %v, got = %v", tc.expectedStatusCode, resp.StatusCode) - } - }) - } -} - -func TestCache_DeriveNamespaceAndRevocationPath(t *testing.T) { - tests := []struct { - name string - req *SendRequest - wantNamespace string - wantRelativePath string - }{ - { - "non_revocation_full_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns1/sys/mounts", - }, - }, - }, - "root/", - "/v1/ns1/sys/mounts", - }, - { - "non_revocation_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/sys/mounts", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/sys/mounts", - }, - { - "non_revocation_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns2/sys/mounts", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/ns2/sys/mounts", 
- }, - { - "revocation_full_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns1/sys/leases/revoke", - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke", - }, - { - "revocation_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/sys/leases/revoke", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke", - }, - { - "revocation_relative_partial_ns", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns2/sys/leases/revoke", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/ns2/", - "/v1/sys/leases/revoke", - }, - { - "revocation_prefix_full_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns1/sys/leases/revoke-prefix/foo", - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke-prefix/foo", - }, - { - "revocation_prefix_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/sys/leases/revoke-prefix/foo", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke-prefix/foo", - }, - { - "revocation_prefix_partial_ns", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns2/sys/leases/revoke-prefix/foo", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/ns2/", - "/v1/sys/leases/revoke-prefix/foo", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotNamespace, gotRelativePath := deriveNamespaceAndRevocationPath(tt.req) - if gotNamespace != tt.wantNamespace { - t.Errorf("deriveNamespaceAndRevocationPath() gotNamespace = %v, want %v", gotNamespace, tt.wantNamespace) - } - if gotRelativePath != tt.wantRelativePath { - t.Errorf("deriveNamespaceAndRevocationPath() gotRelativePath = %v, want %v", gotRelativePath, tt.wantRelativePath) - } - }) - } -} - -func TestLeaseCache_Concurrent_NonCacheable(t *testing.T) { - lc := testNewLeaseCacheWithDelay(t, false, 50) - - // We are going to send 100 requests, each taking 50ms to process. If these - // requests are processed serially, it will take ~5seconds to finish. we - // use a ContextWithTimeout to tell us if this is the case by giving ample - // time for it process them concurrently but time out if they get processed - // serially. - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - wgDoneCh := make(chan struct{}) - errCh := make(chan error) - - go func() { - var wg sync.WaitGroup - // 100 concurrent requests - for i := 0; i < 100; i++ { - wg.Add(1) - - go func() { - defer wg.Done() - - // Send a request through the lease cache which is not cacheable (there is - // no lease information or auth information in the response) - sendReq := &SendRequest{ - Request: httptest.NewRequest("GET", "http://example.com", nil), - } - - _, err := lc.Send(ctx, sendReq) - if err != nil { - errCh <- err - } - }() - } - - wg.Wait() - close(wgDoneCh) - }() - - select { - case <-ctx.Done(): - t.Fatalf("request timed out: %s", ctx.Err()) - case <-wgDoneCh: - case err := <-errCh: - t.Fatal(err) - } -} - -func TestLeaseCache_Concurrent_Cacheable(t *testing.T) { - lc := testNewLeaseCacheWithDelay(t, true, 50) - - if err := lc.RegisterAutoAuthToken("autoauthtoken"); err != nil { - t.Fatal(err) - } - - // We are going to send 100 requests, each taking 50ms to process. 
If these - // requests are processed serially, it will take ~5seconds to finish, so we - // use a ContextWithTimeout to tell us if this is the case. - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - var cacheCount atomic.Uint32 - wgDoneCh := make(chan struct{}) - errCh := make(chan error) - - go func() { - var wg sync.WaitGroup - // Start 100 concurrent requests - for i := 0; i < 100; i++ { - wg.Add(1) - - go func() { - defer wg.Done() - - sendReq := &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", nil), - } - - resp, err := lc.Send(ctx, sendReq) - if err != nil { - errCh <- err - } - - if resp.CacheMeta != nil && resp.CacheMeta.Hit { - cacheCount.Inc() - } - }() - } - - wg.Wait() - close(wgDoneCh) - }() - - select { - case <-ctx.Done(): - t.Fatalf("request timed out: %s", ctx.Err()) - case <-wgDoneCh: - case err := <-errCh: - t.Fatal(err) - } - - // Ensure that all but one request got proxied. The other 99 should be - // returned from the cache. - if cacheCount.Load() != 99 { - t.Fatalf("Should have returned a cached response 99 times, got %d", cacheCount.Load()) - } -} - -func setupBoltStorage(t *testing.T) (tempCacheDir string, boltStorage *cacheboltdb.BoltStorage) { - t.Helper() - - km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) - require.NoError(t, err) - - tempCacheDir, err = ioutil.TempDir("", "agent-cache-test") - require.NoError(t, err) - boltStorage, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: tempCacheDir, - Logger: hclog.Default(), - Wrapper: km.Wrapper(), - }) - require.NoError(t, err) - require.NotNil(t, boltStorage) - // The calling function should `defer boltStorage.Close()` and `defer os.RemoveAll(tempCacheDir)` - return tempCacheDir, boltStorage -} - -func compareBeforeAndAfter(t *testing.T, before, after *LeaseCache, beforeLen, afterLen int) { - beforeDB, err := before.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, beforeDB, beforeLen) - afterDB, err := after.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, afterDB, afterLen) - for _, cachedItem := range beforeDB { - if strings.Contains(cachedItem.RequestPath, "expect-missing") { - continue - } - restoredItem, err := after.db.Get(cachememdb.IndexNameID, cachedItem.ID) - require.NoError(t, err) - - assert.NoError(t, err) - assert.Equal(t, cachedItem.ID, restoredItem.ID) - assert.Equal(t, cachedItem.Lease, restoredItem.Lease) - assert.Equal(t, cachedItem.LeaseToken, restoredItem.LeaseToken) - assert.Equal(t, cachedItem.Namespace, restoredItem.Namespace) - assert.Equal(t, cachedItem.RequestHeader, restoredItem.RequestHeader) - assert.Equal(t, cachedItem.RequestMethod, restoredItem.RequestMethod) - assert.Equal(t, cachedItem.RequestPath, restoredItem.RequestPath) - assert.Equal(t, cachedItem.RequestToken, restoredItem.RequestToken) - assert.Equal(t, cachedItem.Response, restoredItem.Response) - assert.Equal(t, cachedItem.Token, restoredItem.Token) - assert.Equal(t, cachedItem.TokenAccessor, restoredItem.TokenAccessor) - assert.Equal(t, cachedItem.TokenParent, restoredItem.TokenParent) - - // check what we can in the renewal context - assert.NotEmpty(t, restoredItem.RenewCtxInfo.CancelFunc) - assert.NotZero(t, restoredItem.RenewCtxInfo.DoneCh) - require.NotEmpty(t, restoredItem.RenewCtxInfo.Ctx) - assert.Equal(t, - cachedItem.RenewCtxInfo.Ctx.Value(contextIndexID), - 
restoredItem.RenewCtxInfo.Ctx.Value(contextIndexID), - ) - } -} - -func TestLeaseCache_PersistAndRestore(t *testing.T) { - // Emulate responses from the api proxy. The first two use the auto-auth - // token, and the others use another token. - // The test re-sends each request to ensure that the response is cached - // so the number of responses and cacheTests specified should always be equal. - responses := []*SendResponse{ - newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 600}}`), - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 600}`), - // The auth token will get manually deleted from the bolt DB storage, causing both of the following two responses - // to be missing from the cache after a restore, because the lease is a child of the auth token. - newTestSendResponse(202, `{"auth": {"client_token": "testtoken2", "renewable": true, "orphan": true, "lease_duration": 600}}`), - newTestSendResponse(203, `{"lease_id": "secret2-lease", "renewable": true, "data": {"number": "two"}, "lease_duration": 600}`), - // 204 No content gets special handling - avoid. - newTestSendResponse(250, `{"auth": {"client_token": "testtoken3", "renewable": true, "orphan": true, "lease_duration": 600}}`), - newTestSendResponse(251, `{"lease_id": "secret3-lease", "renewable": true, "data": {"number": "three"}, "lease_duration": 600}`), - } - - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - // Register an auto-auth token so that the token and lease requests are cached - err := lc.RegisterAutoAuthToken("autoauthtoken") - require.NoError(t, err) - - cacheTests := []struct { - token string - method string - urlPath string - body string - deleteFromPersistentStore bool // If true, will be deleted from bolt DB to induce an error on restore - expectMissingAfterRestore bool // If true, the response is not expected to be present in the restored cache - }{ - { - // Make a request. A response with a new token is returned to the - // lease cache and that will be cached. - token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input"}`, - }, - { - // Modify the request a little bit to ensure the second response is - // returned to the lease cache. 
- token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input_changed"}`, - }, - { - // Simulate an approle login to get another token - method: "PUT", - urlPath: "http://example.com/v1/auth/approle-expect-missing/login", - body: `{"role_id": "my role", "secret_id": "my secret"}`, - deleteFromPersistentStore: true, - expectMissingAfterRestore: true, - }, - { - // Test caching with the token acquired from the approle login - token: "testtoken2", - method: "GET", - urlPath: "http://example.com/v1/sample-expect-missing/api", - body: `{"second": "input"}`, - // This will be missing from the restored cache because its parent token was deleted - expectMissingAfterRestore: true, - }, - { - // Simulate another approle login to get another token - method: "PUT", - urlPath: "http://example.com/v1/auth/approle/login", - body: `{"role_id": "my role", "secret_id": "my secret"}`, - }, - { - // Test caching with the token acquired from the latest approle login - token: "testtoken3", - method: "GET", - urlPath: "http://example.com/v1/sample3/api", - body: `{"third": "input"}`, - }, - } - - var deleteIDs []string - for i, ct := range cacheTests { - // Send once to cache - sendReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - if ct.deleteFromPersistentStore { - deleteID, err := computeIndexID(sendReq) - require.NoError(t, err) - deleteIDs = append(deleteIDs, deleteID) - // Now reset the body after calculating the index - sendReq.Request = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, responses[i].Response.StatusCode, resp.Response.StatusCode, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - - // Send again to test cache. If this isn't cached, the response returned - // will be the next in the list and the status code will not match. - sendCacheReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - respCached, err := lc.Send(context.Background(), sendCacheReq) - require.NoError(t, err, "failed to send request %+v", ct) - assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") - require.NotNil(t, respCached.CacheMeta) - assert.True(t, respCached.CacheMeta.Hit) - } - - require.NotEmpty(t, deleteIDs) - for _, deleteID := range deleteIDs { - err = boltStorage.Delete(deleteID, cacheboltdb.LeaseType) - require.NoError(t, err) - } - - // Now we know the cache is working, so try restoring from the persisted - // cache's storage. Responses 3 and 4 have been cleared from the cache, so - // re-send those. - restoredCache := testNewLeaseCache(t, responses[2:4]) - - err = restoredCache.Restore(context.Background(), boltStorage) - errors, ok := err.(*multierror.Error) - require.True(t, ok) - assert.Len(t, errors.Errors, 1) - assert.Contains(t, errors.Error(), "could not find parent Token testtoken2") - - // Now compare the cache contents before and after - compareBeforeAndAfter(t, lc, restoredCache, 7, 5) - - // And finally send the cache requests once to make sure they're all being - // served from the restoredCache unless they were intended to be missing after restore. 
- for i, ct := range cacheTests { - sendCacheReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - respCached, err := restoredCache.Send(context.Background(), sendCacheReq) - require.NoError(t, err, "failed to send request %+v", ct) - assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") - if ct.expectMissingAfterRestore { - require.Nil(t, respCached.CacheMeta) - } else { - require.NotNil(t, respCached.CacheMeta) - assert.True(t, respCached.CacheMeta.Hit) - } - } -} - -func TestLeaseCache_PersistAndRestore_WithManyDependencies(t *testing.T) { - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - - var requests []*SendRequest - var responses []*SendResponse - var orderedRequestPaths []string - - // helper func to generate new auth leases with a child secret lease attached - authAndSecretLease := func(id int, parentToken, newToken string) { - t.Helper() - path := fmt.Sprintf("/v1/auth/approle-%d/login", id) - orderedRequestPaths = append(orderedRequestPaths, path) - requests = append(requests, &SendRequest{ - Token: parentToken, - Request: httptest.NewRequest("PUT", "http://example.com"+path, strings.NewReader("")), - }) - responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"auth": {"client_token": "%s", "renewable": true, "lease_duration": 600}}`, newToken))) - - // Fetch a leased secret using the new token - path = fmt.Sprintf("/v1/kv/%d", id) - orderedRequestPaths = append(orderedRequestPaths, path) - requests = append(requests, &SendRequest{ - Token: newToken, - Request: httptest.NewRequest("GET", "http://example.com"+path, strings.NewReader("")), - }) - responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"lease_id": "secret-%d-lease", "renewable": true, "data": {"number": %d}, "lease_duration": 600}`, id, id))) - } - - // Pathological case: a long chain of child tokens - authAndSecretLease(0, "autoauthtoken", "many-ancestors-token;0") - for i := 1; i <= 50; i++ { - // Create a new generation of child token - authAndSecretLease(i, fmt.Sprintf("many-ancestors-token;%d", i-1), fmt.Sprintf("many-ancestors-token;%d", i)) - } - - // Lots of sibling tokens with auto auth token as their parent - for i := 51; i <= 100; i++ { - authAndSecretLease(i, "autoauthtoken", fmt.Sprintf("many-siblings-token;%d", i)) - } - - // Also create some extra siblings for an auth token further down the chain - for i := 101; i <= 110; i++ { - authAndSecretLease(i, "many-ancestors-token;25", fmt.Sprintf("many-siblings-for-ancestor-token;%d", i)) - } - - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - // Register an auto-auth token so that the token and lease requests are cached - err := lc.RegisterAutoAuthToken("autoauthtoken") - require.NoError(t, err) - - for _, req := range requests { - // Send once to cache - resp, err := lc.Send(context.Background(), req) - require.NoError(t, err) - assert.Equal(t, 200, resp.Response.StatusCode, "expected success") - assert.Nil(t, resp.CacheMeta) - } - - // Ensure leases are retrieved in the correct order - var processed int - - leases, err := boltStorage.GetByType(context.Background(), cacheboltdb.LeaseType) - require.NoError(t, err) - for _, lease := range leases { - index, err := cachememdb.Deserialize(lease) - require.NoError(t, err) - require.Equal(t, orderedRequestPaths[processed], index.RequestPath) - processed++ - } - - assert.Equal(t, 
len(orderedRequestPaths), processed) - - restoredCache := testNewLeaseCache(t, nil) - err = restoredCache.Restore(context.Background(), boltStorage) - require.NoError(t, err) - - // Now compare the cache contents before and after - compareBeforeAndAfter(t, lc, restoredCache, 223, 223) -} - -func TestEvictPersistent(t *testing.T) { - ctx := context.Background() - - responses := []*SendResponse{ - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), - } - - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - // Populate the cache by sending a request through - sendReq := &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", strings.NewReader(`{"value": "some_input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, resp.Response.StatusCode, 201, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - - // Check bolt for the cached lease - secrets, err := lc.ps.GetByType(ctx, cacheboltdb.LeaseType) - require.NoError(t, err) - assert.Len(t, secrets, 1) - - // Call clear for the request path - err = lc.handleCacheClear(context.Background(), &cacheClearInput{ - Type: "request_path", - RequestPath: "/v1/sample/api", - }) - require.NoError(t, err) - - time.Sleep(2 * time.Second) - - // Check that cached item is gone - secrets, err = lc.ps.GetByType(ctx, cacheboltdb.LeaseType) - require.NoError(t, err) - assert.Len(t, secrets, 0) -} - -func TestRegisterAutoAuth_sameToken(t *testing.T) { - // If the auto-auth token already exists in the cache, it should not be - // stored again in a new index. - lc := testNewLeaseCache(t, nil) - err := lc.RegisterAutoAuthToken("autoauthtoken") - assert.NoError(t, err) - - oldTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken") - assert.NoError(t, err) - oldTokenID := oldTokenIndex.ID - - // Register the same token again - err = lc.RegisterAutoAuthToken("autoauthtoken") - assert.NoError(t, err) - - // Check that there's only one index for autoauthtoken - entries, err := lc.db.GetByPrefix(cachememdb.IndexNameToken, "autoauthtoken") - assert.NoError(t, err) - assert.Len(t, entries, 1) - - newTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken") - assert.NoError(t, err) - - // Compare the IDs, since those are randomly generated when an index for a - // token is added to the cache; if a new token had been added, the IDs would - // not match. 
- assert.Equal(t, oldTokenID, newTokenIndex.ID) -} - -func Test_hasExpired(t *testing.T) { - responses := []*SendResponse{ - newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 60}}`), - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 60}`), - } - lc := testNewLeaseCache(t, responses) - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - cacheTests := []struct { - token string - urlPath string - leaseType string - wantStatusCode int - }{ - { - // auth lease - token: "autoauthtoken", - urlPath: "/v1/sample/auth", - leaseType: cacheboltdb.LeaseType, - wantStatusCode: responses[0].Response.StatusCode, - }, - { - // secret lease - token: "autoauthtoken", - urlPath: "/v1/sample/secret", - leaseType: cacheboltdb.LeaseType, - wantStatusCode: responses[1].Response.StatusCode, - }, - } - - for _, ct := range cacheTests { - // Send once to cache - urlPath := "http://example.com" + ct.urlPath - sendReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - - // Get the Index out of the mem cache - index, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", ct.urlPath) - require.NoError(t, err) - assert.Equal(t, ct.leaseType, index.Type) - - // The lease duration is 60 seconds, so time.Now() should still be within it - expired, err := lc.hasExpired(time.Now().UTC(), index) - require.NoError(t, err) - assert.False(t, expired) - - // In 90 seconds the index should be "expired" - futureTime := time.Now().UTC().Add(time.Second * 90) - expired, err = lc.hasExpired(futureTime, index) - require.NoError(t, err) - assert.True(t, expired) - } -} - -func TestLeaseCache_hasExpired_wrong_type(t *testing.T) { - index := &cachememdb.Index{ - Type: cacheboltdb.TokenType, - Response: []byte(`HTTP/0.0 200 OK -Content-Type: application/json -Date: Tue, 02 Mar 2021 17:54:16 GMT - -{}`), - } - - lc := testNewLeaseCache(t, nil) - expired, err := lc.hasExpired(time.Now().UTC(), index) - assert.False(t, expired) - assert.EqualError(t, err, `secret without lease encountered in expiration check`) -} - -func TestLeaseCacheRestore_expired(t *testing.T) { - // Emulate 2 responses from the API proxy, both already expired - responses := []*SendResponse{ - newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": -600}}`), - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": -600}`), - } - - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - // Register an auto-auth token so that the token and lease requests are cached in mem - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - cacheTests := []struct { - token string - method string - urlPath string - body string - wantStatusCode int - }{ - { - // Make a request. A response with a new token is returned to the - // lease cache and that will be cached. 
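- // Note that both responses above carry a negative lease_duration (-600),
- // so every cached lease entry is already expired; Restore should drop
- // them and keep only the auto-auth token.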
- token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input"}`, - wantStatusCode: responses[0].Response.StatusCode, - }, - { - // Modify the request a little bit to ensure the second response is - // returned to the lease cache. - token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input_changed"}`, - wantStatusCode: responses[1].Response.StatusCode, - }, - } - - for _, ct := range cacheTests { - // Send once to cache - sendReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - } - - // Restore from the persisted cache's storage - restoredCache := testNewLeaseCache(t, nil) - - err := restoredCache.Restore(context.Background(), boltStorage) - assert.NoError(t, err) - - // The original mem cache should have all three items - beforeDB, err := lc.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, beforeDB, 3) - - // There should only be one item in the restored cache: the autoauth token - afterDB, err := restoredCache.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, afterDB, 1) - - // Just verify that the one item in the restored mem cache matches one in the original mem cache, and that it's the auto-auth token - beforeItem, err := lc.db.Get(cachememdb.IndexNameID, afterDB[0].ID) - require.NoError(t, err) - assert.NotNil(t, beforeItem) - - assert.Equal(t, "autoauthtoken", afterDB[0].Token) - assert.Equal(t, cacheboltdb.TokenType, afterDB[0].Type) -} diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go deleted file mode 100644 index c8ed72219148..000000000000 --- a/command/agent/cache/listener.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/hashicorp/go-secure-stdlib/reloadutil" - "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/internalshared/listenerutil" -) - -type ListenerBundle struct { - Listener net.Listener - TLSConfig *tls.Config - TLSReloadFunc reloadutil.ReloadFunc -} - -func StartListener(lnConfig *configutil.Listener) (*ListenerBundle, error) { - addr := lnConfig.Address - - var ln net.Listener - var err error - switch lnConfig.Type { - case "tcp": - if addr == "" { - addr = "127.0.0.1:8200" - } - - bindProto := "tcp" - // If they've passed 0.0.0.0, we only want to bind on IPv4 - // rather than golang's dual stack default - if strings.HasPrefix(addr, "0.0.0.0:") { - bindProto = "tcp4" - } - - ln, err = net.Listen(bindProto, addr) - if err != nil { - return nil, err - } - ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)} - - case "unix": - var uConfig *listenerutil.UnixSocketsConfig - if lnConfig.SocketMode != "" && - lnConfig.SocketUser != "" && - lnConfig.SocketGroup != "" { - uConfig = &listenerutil.UnixSocketsConfig{ - Mode: lnConfig.SocketMode, - User: lnConfig.SocketUser, - Group: lnConfig.SocketGroup, - } - } - ln, err = listenerutil.UnixSocketListener(addr, uConfig) - if err != nil { - return nil, err - } - - default: - return nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type) - } - - props := map[string]string{"addr": ln.Addr().String()} - tlsConf, reloadFunc, err := listenerutil.TLSConfig(lnConfig, props, nil) - if err != nil { - return nil, err - } - if tlsConf != nil { - ln = tls.NewListener(ln, tlsConf) - } - - cfg := &ListenerBundle{ - Listener: ln, - TLSConfig: tlsConf, - TLSReloadFunc: reloadFunc, - } - - return cfg, nil -} diff --git a/command/agent/cache/proxy.go b/command/agent/cache/proxy.go deleted file mode 100644 index 4dcd1803389f..000000000000 --- a/command/agent/cache/proxy.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "bytes" - "context" - "io" - "net/http" - "time" - - "github.com/hashicorp/vault/api" -) - -// SendRequest is the input for Proxier.Send. -type SendRequest struct { - Token string - Request *http.Request - - // RequestBody is the stored body bytes from Request.Body. It is set here to - // avoid reading and re-setting the stream multiple times. - RequestBody []byte -} - -// SendResponse is the output from Proxier.Send. -type SendResponse struct { - Response *api.Response - - // ResponseBody is the stored body bytes from Response.Body. It is set here to - // avoid reading and re-setting the stream multiple times. - ResponseBody []byte - CacheMeta *CacheMeta -} - -// CacheMeta contains metadata information about the response, -// such as whether it was a cache hit or miss, and the age of the -// cached entry. -type CacheMeta struct { - Hit bool - Age time.Duration -} - -// Proxier is the interface implemented by different components that are -// responsible for performing specific tasks, such as caching and proxying. All -// these tasks combined together would serve the request received by the agent. -type Proxier interface { - Send(ctx context.Context, req *SendRequest) (*SendResponse, error) -} - -// NewSendResponse creates a new SendResponse and takes care of initializing its -// fields properly. 
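-// A minimal usage sketch (hypothetical caller; client, ctx, and req here are
-// assumptions, not part of this package):
-//
-//	apiResp, _ := client.RawRequestWithContext(ctx, req) // returns *api.Response
-//	sendResp, err := NewSendResponse(apiResp, nil)       // nil: body is read from apiResp.Body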
-func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) { - resp := &SendResponse{ - Response: apiResponse, - CacheMeta: &CacheMeta{}, - } - - // If a response body is provided separately, we set that as the SendResponse.ResponseBody; - // otherwise we read the response body from apiResponse with io.ReadAll. - switch { - case len(responseBody) > 0: - resp.ResponseBody = responseBody - case apiResponse.Body != nil: - respBody, err := io.ReadAll(apiResponse.Body) - if err != nil { - return nil, err - } - // Close the old body - apiResponse.Body.Close() - - // Re-set the response body after reading from the Reader - apiResponse.Body = io.NopCloser(bytes.NewReader(respBody)) - - resp.ResponseBody = respBody - } - - return resp, nil -} diff --git a/command/agent/cache/testing.go b/command/agent/cache/testing.go deleted file mode 100644 index f68ae6e49d4f..000000000000 --- a/command/agent/cache/testing.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cache - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "math/rand" - "net/http" - "strings" - "time" - - "github.com/hashicorp/vault/api" -) - -// mockProxier is a mock implementation of the Proxier interface, used for testing purposes. -// Each call to its Send method returns the next of the provided responses, and Send errors -// once they are exhausted. This lets tests control what the next/underlying Proxier layer -// will return. -type mockProxier struct { - proxiedResponses []*SendResponse - responseIndex int -} - -func newMockProxier(responses []*SendResponse) *mockProxier { - return &mockProxier{ - proxiedResponses: responses, - } -} - -func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - if p.responseIndex >= len(p.proxiedResponses) { - return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses)) - } - resp := p.proxiedResponses[p.responseIndex] - - p.responseIndex++ - - return resp, nil -} - -func (p *mockProxier) ResponseIndex() int { - return p.responseIndex -} - -func newTestSendResponse(status int, body string) *SendResponse { - resp := &SendResponse{ - Response: &api.Response{ - Response: &http.Response{ - StatusCode: status, - Header: http.Header{}, - }, - }, - } - resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat)) - - if body != "" { - resp.Response.Body = ioutil.NopCloser(strings.NewReader(body)) - resp.ResponseBody = []byte(body) - } - - if json.Valid([]byte(body)) { - resp.Response.Header.Set("content-type", "application/json") - } - - return resp -} - -type mockTokenVerifierProxier struct { - currentToken string -} - -func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - p.currentToken = req.Token - resp := newTestSendResponse(http.StatusOK, - `{"data": {"id": "`+p.currentToken+`"}}`) - - return resp, nil -} - -func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string { - return p.currentToken -} - -type mockDelayProxier struct { - cacheableResp bool - delay int -} - -func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - if p.delay > 0 { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(time.Duration(p.delay) * time.Millisecond): - } - } - - // If this is a cacheable response, we return a unique response every time - if 
p.cacheableResp { - rand.Seed(time.Now().Unix()) - s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int()) - return newTestSendResponse(http.StatusOK, s), nil - } - - return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil -} diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go index c3444ed58ef5..72e7bdc9375e 100644 --- a/command/agent/cache_end_to_end_test.go +++ b/command/agent/cache_end_to_end_test.go @@ -1,28 +1,28 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent import ( "context" "fmt" - "io/ioutil" "net" "net/http" "os" "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent/auth" - agentapprole "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/cache" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" - "github.com/hashicorp/vault/command/agent/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -44,9 +44,6 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { var err error logger := logging.NewVaultLogger(log.Trace) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), LogicalBackends: map[string]logical.Factory{ "kv": vault.LeasedPassthroughBackendFactory, }, @@ -125,7 +122,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { } roleID1 := resp.Data["role_id"].(string) - rolef, err := ioutil.TempFile("", "auth.role-id.test.") + rolef, err := os.CreateTemp("", "auth.role-id.test.") if err != nil { t.Fatal(err) } @@ -134,7 +131,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { defer os.Remove(role) t.Logf("input role_id_file_path: %s", role) - secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + secretf, err := os.CreateTemp("", "auth.secret-id.test.") if err != nil { t.Fatal(err) } @@ -145,7 +142,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // We close these right away because we're just basically testing // permissions and finding a usable file name - ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + ouf, err := os.CreateTemp("", "auth.tokensink.test.") if err != nil { t.Fatal(err) } @@ -166,8 +163,10 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // Create the API proxier apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: client, - Logger: cacheLogger.Named("apiproxy"), + Client: client, + Logger: cacheLogger.Named("apiproxy"), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -176,10 +175,12 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // 
Create the lease cache proxier and set its underlying proxier to // the API proxier. leaseCache, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: client, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), + Client: client, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: "test", }) if err != nil { t.Fatal(err) @@ -240,7 +241,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { inmemSinkConfig.Sink = inmemSink go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config, inmemSinkConfig}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config, inmemSinkConfig}, ah.AuthInProgress) }() defer func() { select { @@ -268,13 +269,13 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { t.Fatal("expected notexist err") } - if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { + if err := os.WriteFile(role, []byte(roleID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 1", "path", role) } - if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { + if err := os.WriteFile(secret, []byte(secretID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 1", "path", secret) @@ -286,7 +287,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { if time.Now().After(timeout) { t.Fatal("did not find a written token after timeout") } - val, err := ioutil.ReadFile(out) + val, err := os.ReadFile(out) if err == nil { os.Remove(out) if len(val) == 0 { @@ -317,8 +318,8 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { mux := http.NewServeMux() mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - // Passing a non-nil inmemsink tells the agent to use the auto-auth token - mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true)) + // Setting useAutoAuthToken to true to ensure that the auto-auth token is used + mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, false, true, nil, nil)) server := &http.Server{ Handler: mux, ReadHeaderTimeout: 10 * time.Second, diff --git a/command/agent/cert_end_to_end_test.go b/command/agent/cert_end_to_end_test.go index 9b2729e69aec..41275f860ce0 100644 --- a/command/agent/cert_end_to_end_test.go +++ b/command/agent/cert_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -12,16 +12,14 @@ import ( "testing" "time" - "github.com/hashicorp/vault/builtin/logical/pki" - - hclog "github.com/hashicorp/go-hclog" - + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" vaultcert "github.com/hashicorp/vault/builtin/credential/cert" - "github.com/hashicorp/vault/command/agent/auth" - agentcert "github.com/hashicorp/vault/command/agent/auth/cert" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentcert "github.com/hashicorp/vault/command/agentproxyshared/auth/cert" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/dhutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -68,7 +66,6 @@ func TestCertEndToEnd(t *testing.T) { func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "cert": vaultcert.Factory, }, @@ -201,7 +198,7 @@ func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) { Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { @@ -308,7 +305,6 @@ func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) { func TestCertEndToEnd_CertsInConfig(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "cert": vaultcert.Factory, }, @@ -428,7 +424,7 @@ func TestCertEndToEnd_CertsInConfig(t *testing.T) { t.Fatal(err) } defer os.Remove(leafCertFile.Name()) - if _, err := leafCertFile.Write([]byte(leafCertPEM)); err != nil { + if _, err := leafCertFile.WriteString(leafCertPEM); err != nil { t.Fatal(err) } if err := leafCertFile.Close(); err != nil { @@ -440,7 +436,7 @@ func TestCertEndToEnd_CertsInConfig(t *testing.T) { t.Fatal(err) } defer os.Remove(leafCertKeyFile.Name()) - if _, err := leafCertKeyFile.Write([]byte(leafCertKeyPEM)); err != nil { + if _, err := leafCertKeyFile.WriteString(leafCertKeyPEM); err != nil { t.Fatal(err) } if err := leafCertKeyFile.Close(); err != nil { @@ -540,7 +536,7 @@ func TestCertEndToEnd_CertsInConfig(t *testing.T) { Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/cf_end_to_end_test.go b/command/agent/cf_end_to_end_test.go index 3ccd3be61b94..7922d55525b1 100644 --- a/command/agent/cf_end_to_end_test.go +++ b/command/agent/cf_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -10,15 +10,15 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" credCF "github.com/hashicorp/vault-plugin-auth-cf" "github.com/hashicorp/vault-plugin-auth-cf/testing/certificates" cfAPI "github.com/hashicorp/vault-plugin-auth-cf/testing/cf" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentcf "github.com/hashicorp/vault/command/agent/auth/cf" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentcf "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -29,9 +29,6 @@ func TestCFEndToEnd(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cf": credCF.Factory, }, @@ -153,7 +150,7 @@ func TestCFEndToEnd(t *testing.T) { Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/config/config.go b/command/agent/config/config.go index f701af4fc387..d1597cece8fa 100644 --- a/command/agent/config/config.go +++ b/command/agent/config/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package config @@ -12,16 +12,21 @@ import ( "os" "path/filepath" "strings" + "syscall" "time" ctconfig "github.com/hashicorp/consul-template/config" + ctsignals "github.com/hashicorp/consul-template/signals" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/pointerutil" "github.com/mitchellh/mapstructure" + "k8s.io/utils/strings/slices" ) // Config is the configuration for Vault Agent. 
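// Fields tagged hcl:"-" (for example Cache.UseAutoAuthToken) are derived during parsing rather than decoded directly from HCL.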
@@ -31,7 +36,7 @@ type Config struct { AutoAuth *AutoAuth `hcl:"auto_auth"` ExitAfterAuth bool `hcl:"exit_after_auth"` Cache *Cache `hcl:"cache"` - APIProxy *APIProxy `hcl:"api_proxy""` + APIProxy *APIProxy `hcl:"api_proxy"` Vault *Vault `hcl:"vault"` TemplateConfig *TemplateConfig `hcl:"template_config"` Templates []*ctconfig.TemplateConfig `hcl:"templates"` @@ -43,11 +48,15 @@ type Config struct { DisableKeepAlivesAPIProxy bool `hcl:"-"` DisableKeepAlivesTemplating bool `hcl:"-"` DisableKeepAlivesAutoAuth bool `hcl:"-"` + Exec *ExecConfig `hcl:"exec,optional"` + EnvTemplates []*ctconfig.TemplateConfig `hcl:"env_template,optional"` } const ( DisableIdleConnsEnv = "VAULT_AGENT_DISABLE_IDLE_CONNECTIONS" DisableKeepAlivesEnv = "VAULT_AGENT_DISABLE_KEEP_ALIVES" + + DefaultTemplateConfigMaxConnsPerHost = 10 ) func (c *Config) Prune() { @@ -81,6 +90,7 @@ type Vault struct { ClientCert string `hcl:"client_cert"` ClientKey string `hcl:"client_key"` TLSServerName string `hcl:"tls_server_name"` + Namespace string `hcl:"namespace"` Retry *Retry `hcl:"retry"` } @@ -105,22 +115,13 @@ type APIProxy struct { // Cache contains any configuration needed for Cache mode type Cache struct { - UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` - UseAutoAuthToken bool `hcl:"-"` - ForceAutoAuthToken bool `hcl:"-"` - EnforceConsistency string `hcl:"enforce_consistency"` - WhenInconsistent string `hcl:"when_inconsistent"` - Persist *Persist `hcl:"persist"` - InProcDialer transportDialer `hcl:"-"` -} - -// Persist contains configuration needed for persistent caching -type Persist struct { - Type string - Path string `hcl:"path"` - KeepAfterImport bool `hcl:"keep_after_import"` - ExitOnErr bool `hcl:"exit_on_err"` - ServiceAccountTokenFile string `hcl:"service_account_token_file"` + UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` + UseAutoAuthToken bool `hcl:"-"` + ForceAutoAuthToken bool `hcl:"-"` + EnforceConsistency string `hcl:"enforce_consistency"` + WhenInconsistent string `hcl:"when_inconsistent"` + Persist *agentproxyshared.PersistConfig `hcl:"persist"` + InProcDialer transportDialer `hcl:"-"` } // AutoAuth is the configured authentication method and sinks @@ -166,6 +167,17 @@ type TemplateConfig struct { ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` StaticSecretRenderIntRaw interface{} `hcl:"static_secret_render_interval"` StaticSecretRenderInt time.Duration `hcl:"-"` + MaxConnectionsPerHostRaw interface{} `hcl:"max_connections_per_host"` + MaxConnectionsPerHost int `hcl:"-"` + LeaseRenewalThreshold *float64 `hcl:"lease_renewal_threshold"` +} + +type ExecConfig struct { + Command []string `hcl:"command,attr" mapstructure:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes,optional" mapstructure:"restart_on_secret_changes"` + RestartStopSignal os.Signal `hcl:"-" mapstructure:"restart_stop_signal"` + ChildProcessStdout string `mapstructure:"child_process_stdout"` + ChildProcessStderr string `mapstructure:"child_process_stderr"` } func NewConfig() *Config { @@ -265,9 +277,33 @@ func (c *Config) Merge(c2 *Config) *Config { result.PidFile = c2.PidFile } + result.Exec = c.Exec + if c2.Exec != nil { + result.Exec = c2.Exec + } + + for _, envTmpl := range c.EnvTemplates { + result.EnvTemplates = append(result.EnvTemplates, envTmpl) + } + + for _, envTmpl := range c2.EnvTemplates { + result.EnvTemplates = append(result.EnvTemplates, envTmpl) + } + return result } +// IsDefaultListerDefined returns true if a default listener has been defined +// in this config +func 
(c *Config) IsDefaultListerDefined() bool { + for _, l := range c.Listeners { + if l.Role != "metrics_only" { + return true + } + } + return false +} + // ValidateConfig validates an Agent configuration after it has been fully merged together, to // ensure that required combinations of configs are there func (c *Config) ValidateConfig() error { @@ -282,7 +318,7 @@ func (c *Config) ValidateConfig() error { } if c.Cache != nil { - if len(c.Listeners) < 1 && len(c.Templates) < 1 { + if len(c.Listeners) < 1 && len(c.Templates) < 1 && len(c.EnvTemplates) < 1 { return fmt.Errorf("enabling the cache requires at least 1 template or 1 listener to be defined") } @@ -314,7 +350,8 @@ func (c *Config) ValidateConfig() error { if c.AutoAuth != nil { if len(c.AutoAuth.Sinks) == 0 && (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) && - len(c.Templates) == 0 { + len(c.Templates) == 0 && + len(c.EnvTemplates) == 0 { return fmt.Errorf("auto_auth requires at least one sink or at least one template or api_proxy.use_auto_auth_token=true") } } @@ -323,6 +360,126 @@ func (c *Config) ValidateConfig() error { return fmt.Errorf("no auto_auth, cache, or listener block found in config") } + return c.validateEnvTemplateConfig() +} + +func (c *Config) validateEnvTemplateConfig() error { + // if we are not in env-template mode, exit early + if c.Exec == nil && len(c.EnvTemplates) == 0 { + return nil + } + + if c.Exec == nil { + return fmt.Errorf("a top-level 'exec' element must be specified with 'env_template' entries") + } + + if len(c.EnvTemplates) == 0 { + return fmt.Errorf("must specify at least one 'env_template' element with a top-level 'exec' element") + } + + if c.APIProxy != nil { + return fmt.Errorf("'api_proxy' cannot be specified with 'env_template' entries") + } + + if len(c.Templates) > 0 { + return fmt.Errorf("'template' cannot be specified with 'env_template' entries") + } + + if len(c.Exec.Command) == 0 { + return fmt.Errorf("'exec' requires a non-empty 'command' field") + } + + if !slices.Contains([]string{"always", "never"}, c.Exec.RestartOnSecretChanges) { + return fmt.Errorf("'exec.restart_on_secret_changes' unexpected value: %q", c.Exec.RestartOnSecretChanges) + } + + uniqueKeys := make(map[string]struct{}) + + for _, template := range c.EnvTemplates { + // Required: + // - the key (environment variable name) + // - either "contents" or "source" + // Optional / permitted: + // - error_on_missing_key + // - error_fatal + // - left_delimiter + // - right_delimiter + // - ExtFuncMap + // - function_denylist / function_blacklist + + if template.MapToEnvironmentVariable == nil { + return fmt.Errorf("env_template: an environment variable name is required") + } + + key := *template.MapToEnvironmentVariable + + if _, exists := uniqueKeys[key]; exists { + return fmt.Errorf("env_template: duplicate environment variable name: %q", key) + } + + uniqueKeys[key] = struct{}{} + + if template.Contents == nil && template.Source == nil { + return fmt.Errorf("env_template[%s]: either 'contents' or 'source' must be specified", key) + } + + if template.Contents != nil && template.Source != nil { + return fmt.Errorf("env_template[%s]: 'contents' and 'source' cannot be specified together", key) + } + + if template.Backup != nil { + return fmt.Errorf("env_template[%s]: 'backup' is not allowed", key) + } + + if template.Command != nil { + return fmt.Errorf("env_template[%s]: 'command' is not allowed", key) + } + + if template.CommandTimeout != nil { + return fmt.Errorf("env_template[%s]: 'command_timeout' is not 
allowed", key) + } + + if template.CreateDestDirs != nil { + return fmt.Errorf("env_template[%s]: 'create_dest_dirs' is not allowed", key) + } + + if template.Destination != nil { + return fmt.Errorf("env_template[%s]: 'destination' is not allowed", key) + } + + if template.Exec != nil { + return fmt.Errorf("env_template[%s]: 'exec' is not allowed", key) + } + + if template.Perms != nil { + return fmt.Errorf("env_template[%s]: 'perms' is not allowed", key) + } + + if template.User != nil { + return fmt.Errorf("env_template[%s]: 'user' is not allowed", key) + } + + if template.Uid != nil { + return fmt.Errorf("env_template[%s]: 'uid' is not allowed", key) + } + + if template.Group != nil { + return fmt.Errorf("env_template[%s]: 'group' is not allowed", key) + } + + if template.Gid != nil { + return fmt.Errorf("env_template[%s]: 'gid' is not allowed", key) + } + + if template.Wait != nil { + return fmt.Errorf("env_template[%s]: 'wait' is not allowed", key) + } + + if template.SandboxPath != nil { + return fmt.Errorf("env_template[%s]: 'sandbox_path' is not allowed", key) + } + } + return nil } @@ -488,7 +645,15 @@ func LoadConfigFile(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'template': %w", err) } - if result.Cache != nil && result.APIProxy == nil { + if err := parseExec(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'exec': %w", err) + } + + if err := parseEnvTemplates(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'env_template': %w", err) + } + + if result.Cache != nil && result.APIProxy == nil && (result.Cache.UseAutoAuthToken || result.Cache.ForceAutoAuthToken) { result.APIProxy = &APIProxy{ UseAutoAuthToken: result.Cache.UseAutoAuthToken, ForceAutoAuthToken: result.Cache.ForceAutoAuthToken, @@ -737,7 +902,7 @@ func parsePersist(result *Config, list *ast.ObjectList) error { item := persistList.Items[0] - var p Persist + var p agentproxyshared.PersistConfig err := hcl.DecodeObject(&p, item.Val) if err != nil { return err @@ -941,6 +1106,9 @@ func parseTemplateConfig(result *Config, list *ast.ObjectList) error { templateConfigList := list.Filter(name) if len(templateConfigList.Items) == 0 { + result.TemplateConfig = &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + } return nil } @@ -966,6 +1134,17 @@ func parseTemplateConfig(result *Config, list *ast.ObjectList) error { result.TemplateConfig.StaticSecretRenderIntRaw = nil } + if result.TemplateConfig.MaxConnectionsPerHostRaw != nil { + var err error + if result.TemplateConfig.MaxConnectionsPerHost, err = parseutil.SafeParseInt(result.TemplateConfig.MaxConnectionsPerHostRaw); err != nil { + return err + } + + result.TemplateConfig.MaxConnectionsPerHostRaw = nil + } else { + result.TemplateConfig.MaxConnectionsPerHost = DefaultTemplateConfigMaxConnsPerHost + } + return nil } @@ -1035,3 +1214,121 @@ func parseTemplates(result *Config, list *ast.ObjectList) error { result.Templates = tcs return nil } + +func parseExec(result *Config, list *ast.ObjectList) error { + name := "exec" + + execList := list.Filter(name) + if len(execList.Items) == 0 { + return nil + } + + if len(execList.Items) > 1 { + return fmt.Errorf("at most one %q block is allowed", name) + } + + item := execList.Items[0] + var shadow interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return errors.New("error converting config") + } + 
+ var execConfig ExecConfig + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ctsignals.StringToSignalFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &execConfig, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + + // if the user does not specify a restart signal, default to SIGTERM + if execConfig.RestartStopSignal == nil { + execConfig.RestartStopSignal = syscall.SIGTERM + } + + if execConfig.RestartOnSecretChanges == "" { + execConfig.RestartOnSecretChanges = "always" + } + + result.Exec = &execConfig + return nil +} + +func parseEnvTemplates(result *Config, list *ast.ObjectList) error { + name := "env_template" + + envTemplateList := list.Filter(name) + + if len(envTemplateList.Items) < 1 { + return nil + } + + envTemplates := make([]*ctconfig.TemplateConfig, 0, len(envTemplateList.Items)) + + for _, item := range envTemplateList.Items { + var shadow interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]any) + if !ok { + return errors.New("error converting config") + } + + var templateConfig ctconfig.TemplateConfig + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ctsignals.StringToSignalFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &templateConfig, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + + // parse the keys in the item for the environment variable name + if numberOfKeys := len(item.Keys); numberOfKeys != 1 { + return fmt.Errorf("expected one and only one environment variable name, got %d", numberOfKeys) + } + + // hcl parses this with extra quotes if quoted in config file + environmentVariableName := strings.Trim(item.Keys[0].Token.Text, `"`) + + templateConfig.MapToEnvironmentVariable = pointerutil.StringPtr(environmentVariableName) + + envTemplates = append(envTemplates, &templateConfig) + } + + result.EnvTemplates = envTemplates + return nil +} diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index 375040198d56..6c12ebe5def3 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -1,19 +1,26 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package config import ( "os" + "syscall" "testing" "time" "github.com/go-test/deep" ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/pointerutil" + "golang.org/x/exp/slices" ) +func FloatPtr(t float64) *float64 { + return &t +} + func TestLoadConfigFile_AgentCache(t *testing.T) { config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") if err != nil { @@ -80,7 +87,7 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -88,6 +95,9 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { ServiceAccountTokenFile: "/tmp/serviceaccount/token", }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", CACert: "config_ca_cert", @@ -185,7 +195,7 @@ func TestLoadConfigDir_AgentCache(t *testing.T) { UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -193,6 +203,9 @@ func TestLoadConfigDir_AgentCache(t *testing.T) { ServiceAccountTokenFile: "/tmp/serviceaccount/token", }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", CACert: "config_ca_cert", @@ -263,6 +276,9 @@ func TestLoadConfigDir_AutoAuthAndListener(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -326,6 +342,9 @@ func TestLoadConfigDir_VaultBlock(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -385,7 +404,7 @@ func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -393,6 +412,9 @@ func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { ServiceAccountTokenFile: "/tmp/serviceaccount/token", }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", CACert: "config_ca_cert", @@ -472,6 +494,9 @@ func TestLoadConfigFile(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -520,6 +545,9 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -559,6 +587,9 @@ func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -598,6 +629,9 @@ func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: 
DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -613,8 +647,7 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { } expected := &Config{ - APIProxy: &APIProxy{}, - Cache: &Cache{}, + Cache: &Cache{}, SharedConfig: &configutil.SharedConfig{ PidFile: "./pidfile", Listeners: []*configutil.Listener{ @@ -625,6 +658,9 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -759,6 +795,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -802,6 +841,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { UseAutoAuthTokenRaw: "force", ForceAutoAuthToken: true, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -845,6 +887,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { UseAutoAuthTokenRaw: "true", ForceAutoAuthToken: false, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -886,6 +931,9 @@ func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { EnforceConsistency: "always", WhenInconsistent: "forward", }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -931,15 +979,14 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: false, - ForceAutoAuthToken: false, - }, Cache: &Cache{ UseAutoAuthToken: false, UseAutoAuthTokenRaw: "false", ForceAutoAuthToken: false, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -955,9 +1002,8 @@ func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { } expected := &Config{ - APIProxy: &APIProxy{}, Cache: &Cache{ - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: false, @@ -975,6 +1021,9 @@ func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -1000,12 +1049,21 @@ func TestLoadConfigFile_TemplateConfig(t *testing.T) { TemplateConfig{ ExitOnRetryFailure: true, StaticSecretRenderInt: 1 * time.Minute, + MaxConnectionsPerHost: 100, + LeaseRenewalThreshold: FloatPtr(0.8), }, }, "empty": { "./test-fixtures/config-template_config-empty.hcl", TemplateConfig{ - ExitOnRetryFailure: false, + ExitOnRetryFailure: false, + MaxConnectionsPerHost: 10, + }, + }, + "missing": { + "./test-fixtures/config-template_config-missing.hcl", + TemplateConfig{ + MaxConnectionsPerHost: 10, }, }, } @@ -1141,6 +1199,9 @@ func TestLoadConfigFile_Template(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Templates: tc.expectedTemplates, } @@ -1237,6 +1298,9 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { }, Sinks: nil, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Templates: tc.expectedTemplates, } @@ -1248,6 +1312,46 @@ func TestLoadConfigFile_Template_NoSinks(t 
*testing.T) { } } +// TestLoadConfigFile_Template_WithCache ensures that a cache {} stanza is +// permitted in Vault Agent configuration alongside template(s) +func TestLoadConfigFile_Template_WithCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-template-with-cache.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + Cache: &Cache{}, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, + Templates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + func TestLoadConfigFile_Vault_Retry(t *testing.T) { config, err := LoadConfigFile("./test-fixtures/config-vault-retry.hcl") if err != nil { @@ -1279,6 +1383,9 @@ func TestLoadConfigFile_Vault_Retry(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1324,6 +1431,9 @@ func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1355,11 +1465,13 @@ func TestLoadConfigFile_EnforceConsistency(t *testing.T) { }, PidFile: "", }, - APIProxy: &APIProxy{}, Cache: &Cache{ EnforceConsistency: "always", WhenInconsistent: "retry", }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -1389,6 +1501,9 @@ func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { EnforceConsistency: "always", WhenInconsistent: "retry", }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, } config.Prune() @@ -1432,6 +1547,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1481,6 +1599,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1530,6 +1651,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1579,6 +1703,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1628,6 +1755,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: 
DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1677,6 +1807,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Empty(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1732,6 +1865,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1788,6 +1924,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1837,6 +1976,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1886,6 +2028,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1935,6 +2080,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1984,6 +2132,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -2033,6 +2184,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Empty(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -2088,6 +2242,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -2108,3 +2265,189 @@ func TestLoadConfigFile_Bad_Value_Disable_Keep_Alives(t *testing.T) { t.Fatal("should have error, it didn't") } } + +// TestLoadConfigFile_EnvTemplates_Simple loads and validates an env_template config +func TestLoadConfigFile_EnvTemplates_Simple(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedKey := "MY_DATABASE_USER" + found := false + for _, envTemplate := range cfg.EnvTemplates { + if *envTemplate.MapToEnvironmentVariable == expectedKey { + found = true + } + } + if !found { + t.Fatalf("expected environment variable name to be populated") + } +} + +// TestLoadConfigFile_EnvTemplates_Complex loads and validates an env_template config +func TestLoadConfigFile_EnvTemplates_Complex(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") + if err != nil { + 
t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := cfg.ValidateConfig(); err != nil {
+		t.Fatalf("validation error: %s", err)
+	}
+
+	expectedKeys := []string{
+		"FOO_PASSWORD",
+		"FOO_USER",
+	}
+
+	envExists := func(key string) bool {
+		for _, envTmpl := range cfg.EnvTemplates {
+			if *envTmpl.MapToEnvironmentVariable == key {
+				return true
+			}
+		}
+		return false
+	}
+
+	for _, expected := range expectedKeys {
+		if !envExists(expected) {
+			t.Fatalf("expected environment variable %s", expected)
+		}
+	}
+}
+
+// TestLoadConfigFile_EnvTemplates_WithSource loads and validates an
+// env_template config with "source" instead of "contents"
+func TestLoadConfigFile_EnvTemplates_WithSource(t *testing.T) {
+	cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-with-source.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := cfg.ValidateConfig(); err != nil {
+		t.Fatalf("validation error: %s", err)
+	}
+}
+
+// TestLoadConfigFile_EnvTemplates_NoName ensures that an env_template with no name triggers an error
+func TestLoadConfigFile_EnvTemplates_NoName(t *testing.T) {
+	_, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-no-name.hcl")
+	if err == nil {
+		t.Fatalf("expected error")
+	}
+}
+
+// TestLoadConfigFile_EnvTemplates_ExecInvalidSignal ensures that an invalid signal triggers an error
+func TestLoadConfigFile_EnvTemplates_ExecInvalidSignal(t *testing.T) {
+	_, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-invalid-signal.hcl")
+	if err == nil {
+		t.Fatalf("expected error")
+	}
+}
+
+// TestLoadConfigFile_EnvTemplates_ExecSimple validates the exec section with default parameters
+func TestLoadConfigFile_EnvTemplates_ExecSimple(t *testing.T) {
+	cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := cfg.ValidateConfig(); err != nil {
+		t.Fatalf("validation error: %s", err)
+	}
+
+	expectedCmd := []string{"/path/to/my/app", "arg1", "arg2"}
+	if !slices.Equal(cfg.Exec.Command, expectedCmd) {
+		t.Fatal("exec.command does not have expected value")
+	}
+
+	// check defaults
+	if cfg.Exec.RestartOnSecretChanges != "always" {
+		t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'always', got %q", cfg.Exec.RestartOnSecretChanges)
+	}
+
+	if cfg.Exec.RestartStopSignal != syscall.SIGTERM {
+		t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGTERM', got %q", cfg.Exec.RestartStopSignal)
+	}
+}
+
+// TestLoadConfigFile_EnvTemplates_ExecComplex validates the exec section with non-default parameters
+func TestLoadConfigFile_EnvTemplates_ExecComplex(t *testing.T) {
+	cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := cfg.ValidateConfig(); err != nil {
+		t.Fatalf("validation error: %s", err)
+	}
+
+	if !slices.Equal(cfg.Exec.Command, []string{"env"}) {
+		t.Fatal("exec.command does not have expected value")
+	}
+
+	if cfg.Exec.RestartOnSecretChanges != "never" {
+		t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'never', got %q", cfg.Exec.RestartOnSecretChanges)
+	}
+
+	if cfg.Exec.RestartStopSignal != syscall.SIGINT {
+		t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGINT', got %q", cfg.Exec.RestartStopSignal)
+	}
+}
+
+// TestLoadConfigFile_Bad_EnvTemplates_MissingExec ensures that ValidateConfig
+// errors when "env_template" stanza(s) are specified but "exec" is missing
+func TestLoadConfigFile_Bad_EnvTemplates_MissingExec(t *testing.T) {
+	config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-missing-exec.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := config.ValidateConfig(); err == nil {
+		t.Fatal("expected an error from ValidateConfig: exec section is missing")
+	}
+}
+
+// TestLoadConfigFile_Bad_EnvTemplates_WithProxy ensures that ValidateConfig
+// errors when both env_template and api_proxy stanzas are present
+func TestLoadConfigFile_Bad_EnvTemplates_WithProxy(t *testing.T) {
+	config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-proxy.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := config.ValidateConfig(); err == nil {
+		t.Fatal("expected an error from ValidateConfig: listener / api_proxy are not compatible with env_template")
+	}
+}
+
+// TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates ensures that
+// ValidateConfig errors when both env_template and template stanzas are present
+func TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates(t *testing.T) {
+	config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-file-templates.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := config.ValidateConfig(); err == nil {
+		t.Fatal("expected an error from ValidateConfig: file template stanza is not compatible with env_template")
+	}
+}
+
+// TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields ensures that
+// ValidateConfig errors for disallowed env_template fields
+func TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields(t *testing.T) {
+	config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-disalowed-fields.hcl")
+	if err != nil {
+		t.Fatalf("error loading config file: %s", err)
+	}
+
+	if err := config.ValidateConfig(); err == nil {
+		t.Fatal("expected an error from ValidateConfig: disallowed fields specified in env_template")
+	}
+}
diff --git a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl
index 7d2bf5c2d3a9..b35a06f2848b 100644
--- a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl
+++ b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 pid_file = "./pidfile"
diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl
index d3d5d426695a..1d618befa974 100644
--- a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl
+++ b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 pid_file = "./pidfile"
diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl
index 5c2b3fb79a79..eea90860cbe9 100644
--- a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl
+++ b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl index 8a39837fa721..e90ba98bb0aa 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl index d1cae7512970..39f2bc740266 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl index 38b9c2c12207..f5d39af89eee 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl index 9112183ea842..46f2802689ad 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl index 34c292e74f9b..10b7e54a7a54 100644 --- a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl +++ b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = ["foo","caching","templating"] diff --git a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl index 087e2ffa010d..47f1eb2e52cd 100644 --- a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl +++ b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = ["foo","caching","templating"] diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl new file mode 100644 index 000000000000..4355fd078f9e --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + + # Error: destination and create_dest_dirs are not allowed in env_template + destination = "/path/on/disk/where/template/will/render.txt" + create_dest_dirs = true +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl new file mode 100644 index 000000000000..7cbbc09318bb --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" + error_on_missing_key = false +} + + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "notasignal" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl new file mode 100644 index 000000000000..8fbbd83bae5c --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +# Error: missing a required "exec" section! diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl new file mode 100644 index 000000000000..7c7363a46548 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +vault { + address = "http://localhost:8200" +} + +env_template { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" + error_on_missing_key = false +} + + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl new file mode 100644 index 000000000000..ace9410bd0b8 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +# Error: template is incompatible with env_template! +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl new file mode 100644 index 000000000000..ac0824441af0 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} + +# Error: api_proxy is incompatible with env_template +api_proxy { + use_auto_auth_token = "force" + enforce_consistency = "always" + when_inconsistent = "forward" +} + +# Error: listener is incompatible with env_template +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl index cb9696dfb3f1..89c766d5ceb7 100644 --- a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl +++ b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl index a3e4e5b99a2a..79b2009630a6 100644 --- a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl +++ b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl index 252216e21d7f..7fbaa7418e80 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl index 429645527a15..5d280bd20355 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl index 80486b346b1c..e951427430eb 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl index cebcdfbc5868..bbc945ccac3d 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl index 4ea525753b6c..6661966c2816 100644 --- a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl index 45c71412bdd5..b654e202da14 100644 --- a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl +++ b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl index 3e0abfb6b9eb..0ad5203fd87d 100644 --- a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl +++ b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl index f40715e6e418..f85799bb93c4 100644 --- a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl +++ b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache-persist-false.hcl b/command/agent/config/test-fixtures/config-cache-persist-false.hcl index 77bb926cffdf..f48dfd857638 100644 --- a/command/agent/config/test-fixtures/config-cache-persist-false.hcl +++ b/command/agent/config/test-fixtures/config-cache-persist-false.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-cache.hcl b/command/agent/config/test-fixtures/config-cache.hcl index 87fa5afad861..148ef6e7ccbf 100644 --- a/command/agent/config/test-fixtures/config-cache.hcl +++ b/command/agent/config/test-fixtures/config-cache.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl index c2e662a67206..318c36973743 100644 --- a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl +++ b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 api_proxy { enforce_consistency = "always" diff --git a/command/agent/config/test-fixtures/config-consistency.hcl b/command/agent/config/test-fixtures/config-consistency.hcl index 535181197c6c..7ed752297358 100644 --- a/command/agent/config/test-fixtures/config-consistency.hcl +++ b/command/agent/config/test-fixtures/config-consistency.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 cache { enforce_consistency = "always" diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl index c900df6b1aa3..1bd5e93c770f 100644 --- a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl +++ b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl index 2e942da66381..b5d7425ebe46 100644 --- a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl +++ b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl index 767cdd9e4a5f..f65b4c6ddbcb 100644 --- a/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl +++ b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl index f4d0f47f9612..57929cd55c82 100644 --- a/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl +++ b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 cache { use_auto_auth_token = true diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl index 18729533f441..b99ee93f907a 100644 --- a/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl +++ b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 vault { address = "http://127.0.0.1:1111" diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl index c900df6b1aa3..1bd5e93c770f 100644 --- a/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl +++ b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl index f312d420c8e7..b6869a2000ab 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = ["auto-auth","caching","templating","proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl index abb1756697fa..02bda0b0a6ed 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = ["auto-auth"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl index 95a36e925a58..624d1bd1c86e 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = ["caching"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl index 3e490bfbc4c2..6b7ac26df736 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = [] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl index 88da2effcc71..2219b84eb285 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = ["proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl index 6e51c91a0e4b..4f819c7a443e 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_idle_connections = ["templating"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl index 8c1c6d58282a..356c79ff5bb0 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = ["auto-auth","caching","templating","proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl index d77dfb278378..a7648c480ffe 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = ["auto-auth"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl index 386267e3e801..4f93218ee501 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = ["caching"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl index b4239a5261ed..b0969776fde3 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = [] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl index 8c82a92b6e15..138254652579 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = ["proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl index 01ec09504544..9e154a9ce467 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" disable_keep_alives = ["templating"] diff --git a/command/agent/config/test-fixtures/config-embedded-type.hcl b/command/agent/config/test-fixtures/config-embedded-type.hcl index 2ce3b401ca7e..cf3c182a85f3 100644 --- a/command/agent/config/test-fixtures/config-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-embedded-type.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" log_file = "/var/log/vault/vault-agent.log" diff --git a/command/agent/config/test-fixtures/config-env-templates-complex.hcl b/command/agent/config/test-fixtures/config-env-templates-complex.hcl new file mode 100644 index 000000000000..adcd4b0dccc0 --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-complex.hcl @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +cache {} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "SIGINT" +} diff --git a/command/agent/config/test-fixtures/config-env-templates-simple.hcl b/command/agent/config/test-fixtures/config-env-templates-simple.hcl new file mode 100644 index 000000000000..3ca1a190980e --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-simple.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +env_template "MY_DATABASE_USER" { + contents = "{{ with secret \"secret/db-secret\" }}{{ .Data.data.user }}{{ end }}" +} + +exec { + command = ["/path/to/my/app", "arg1", "arg2"] +} diff --git a/command/agent/config/test-fixtures/config-env-templates-with-source.hcl b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl new file mode 100644 index 000000000000..7643ff28d826 --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + method { + type = "token_file" + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +env_template "MY_PASSWORD" { + source = "/path/on/disk/to/template.ctmpl" +} + +exec { + command = ["/path/to/my/app", "arg1", "arg2"] +} diff --git a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl index bbda08c01b63..d6b32c70de5a 100644 --- a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl +++ b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl index b166dabde418..a7fbccd4ba97 100644 --- a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl +++ b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-method-wrapping.hcl b/command/agent/config/test-fixtures/config-method-wrapping.hcl index 8142a19dd80f..0012bb5708a0 100644 --- a/command/agent/config/test-fixtures/config-method-wrapping.hcl +++ b/command/agent/config/test-fixtures/config-method-wrapping.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-full-nosink.hcl b/command/agent/config/test-fixtures/config-template-full-nosink.hcl index 579aae1e86ca..cda6d020c619 100644 --- a/command/agent/config/test-fixtures/config-template-full-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-full-nosink.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-full.hcl b/command/agent/config/test-fixtures/config-template-full.hcl index b7641cd66403..649510d167c6 100644 --- a/command/agent/config/test-fixtures/config-template-full.hcl +++ b/command/agent/config/test-fixtures/config-template-full.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-many-nosink.hcl b/command/agent/config/test-fixtures/config-template-many-nosink.hcl index 2f8352d1b000..2882d76de0f2 100644 --- a/command/agent/config/test-fixtures/config-template-many-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-many-nosink.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-many.hcl b/command/agent/config/test-fixtures/config-template-many.hcl index 3a3ce77f1f46..992381704990 100644 --- a/command/agent/config/test-fixtures/config-template-many.hcl +++ b/command/agent/config/test-fixtures/config-template-many.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-min-nosink.hcl b/command/agent/config/test-fixtures/config-template-min-nosink.hcl index 064b7a452f69..395be10e367e 100644 --- a/command/agent/config/test-fixtures/config-template-min-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-min-nosink.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-min.hcl b/command/agent/config/test-fixtures/config-template-min.hcl index 34435da638c5..523a81e46bfb 100644 --- a/command/agent/config/test-fixtures/config-template-min.hcl +++ b/command/agent/config/test-fixtures/config-template-min.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-template-with-cache.hcl b/command/agent/config/test-fixtures/config-template-with-cache.hcl new file mode 100644 index 000000000000..14e8ab119978 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-with-cache.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } +} + +cache {} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template_config-empty.hcl b/command/agent/config/test-fixtures/config-template_config-empty.hcl index ac22dcc5cd6c..b497032a74ab 100644 --- a/command/agent/config/test-fixtures/config-template_config-empty.hcl +++ b/command/agent/config/test-fixtures/config-template_config-empty.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 vault { address = "http://127.0.0.1:1111" diff --git a/command/agent/config/test-fixtures/config-template_config-missing.hcl b/command/agent/config/test-fixtures/config-template_config-missing.hcl new file mode 100644 index 000000000000..4673e0c1dfdc --- /dev/null +++ b/command/agent/config/test-fixtures/config-template_config-missing.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +vault { + address = "http://127.0.0.1:1111" + retry { + num_retries = 5 + } +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template_config.hcl b/command/agent/config/test-fixtures/config-template_config.hcl index b550890018d4..be6bd384d816 100644 --- a/command/agent/config/test-fixtures/config-template_config.hcl +++ b/command/agent/config/test-fixtures/config-template_config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 vault { address = "http://127.0.0.1:1111" @@ -11,6 +11,8 @@ vault { template_config { exit_on_retry_failure = true static_secret_render_interval = 60 + max_connections_per_host = 100 + lease_renewal_threshold = 0.8 } template { diff --git a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl index 72c44e1f97bd..b6bf1abe8fec 100644 --- a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl +++ b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config-vault-retry.hcl b/command/agent/config/test-fixtures/config-vault-retry.hcl index 5e4ee234304e..aedbfdc52052 100644 --- a/command/agent/config/test-fixtures/config-vault-retry.hcl +++ b/command/agent/config/test-fixtures/config-vault-retry.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" diff --git a/command/agent/config/test-fixtures/config.hcl b/command/agent/config/test-fixtures/config.hcl index 18ec360309c9..f6ca0e684e03 100644 --- a/command/agent/config/test-fixtures/config.hcl +++ b/command/agent/config/test-fixtures/config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 pid_file = "./pidfile" log_file = "/var/log/vault/vault-agent.log" diff --git a/command/agent/doc.go b/command/agent/doc.go index e9f0f0b98fe3..785fe94060a5 100644 --- a/command/agent/doc.go +++ b/command/agent/doc.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 /* Package agent implements a daemon mode of Vault designed to provide helper diff --git a/command/agent/exec/childprocessstate_enumer.go b/command/agent/exec/childprocessstate_enumer.go new file mode 100644 index 000000000000..154606ed62f6 --- /dev/null +++ b/command/agent/exec/childprocessstate_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=childProcessState -trimprefix=childProcessState"; DO NOT EDIT. + +package exec + +import ( + "fmt" +) + +const _childProcessStateName = "NotStartedRunningRestartingStopped" + +var _childProcessStateIndex = [...]uint8{0, 10, 17, 27, 34} + +func (i childProcessState) String() string { + if i >= childProcessState(len(_childProcessStateIndex)-1) { + return fmt.Sprintf("childProcessState(%d)", i) + } + return _childProcessStateName[_childProcessStateIndex[i]:_childProcessStateIndex[i+1]] +} + +var _childProcessStateValues = []childProcessState{0, 1, 2, 3} + +var _childProcessStateNameToValueMap = map[string]childProcessState{ + _childProcessStateName[0:10]: 0, + _childProcessStateName[10:17]: 1, + _childProcessStateName[17:27]: 2, + _childProcessStateName[27:34]: 3, +} + +// childProcessStateString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
+func childProcessStateString(s string) (childProcessState, error) {
+	if val, ok := _childProcessStateNameToValueMap[s]; ok {
+		return val, nil
+	}
+	return 0, fmt.Errorf("%s does not belong to childProcessState values", s)
+}
+
+// childProcessStateValues returns all values of the enum
+func childProcessStateValues() []childProcessState {
+	return _childProcessStateValues
+}
+
+// IsAchildProcessState returns "true" if the value is listed in the enum definition. "false" otherwise
+func (i childProcessState) IsAchildProcessState() bool {
+	for _, v := range _childProcessStateValues {
+		if i == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/command/agent/exec/exec.go b/command/agent/exec/exec.go
new file mode 100644
index 000000000000..f515114bd203
--- /dev/null
+++ b/command/agent/exec/exec.go
@@ -0,0 +1,394 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package exec
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/consul-template/child"
+	ctconfig "github.com/hashicorp/consul-template/config"
+	"github.com/hashicorp/consul-template/manager"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/command/agent/config"
+	"github.com/hashicorp/vault/command/agent/internal/ctmanager"
+	"github.com/hashicorp/vault/helper/useragent"
+	"github.com/hashicorp/vault/sdk/helper/backoff"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/pointerutil"
+	"golang.org/x/exp/slices"
+)
+
+//go:generate enumer -type=childProcessState -trimprefix=childProcessState
+type childProcessState uint8
+
+const (
+	childProcessStateNotStarted childProcessState = iota
+	childProcessStateRunning
+	childProcessStateRestarting
+	childProcessStateStopped
+)
+
+type ServerConfig struct {
+	Logger      hclog.Logger
+	AgentConfig *config.Config
+
+	Namespace string
+
+	// LogLevel is needed to set the internal Consul Template Runner's log level
+	// to match the log level of Vault Agent. The internal Runner creates its own
+	// logger and can't be set externally or copied from the Template Server.
+	//
+	// LogWriter is needed to initialize Consul Template's internal logger to use
+	// the same io.Writer that Vault Agent itself is using.
+	LogLevel  hclog.Level
+	LogWriter io.Writer
+}
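ServerConfig is the seam between the agent's parsed configuration and this process supervisor. As a rough sketch of how a caller might wire it up (hypothetical wiring; NewServer and Run are defined below in this diff, and the agent's real startup path is outside it):

```go
package main

import (
	"context"
	"os"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/command/agent/config"
	"github.com/hashicorp/vault/command/agent/exec"
)

func main() {
	// Hypothetical config path; the file would carry env_template and exec stanzas.
	cfg, err := config.LoadConfigFile("agent-config.hcl")
	if err != nil {
		panic(err)
	}

	logger := hclog.New(&hclog.LoggerOptions{Name: "agent.exec"})

	srv, err := exec.NewServer(&exec.ServerConfig{
		Logger:      logger,
		AgentConfig: cfg,
		LogLevel:    hclog.Info,
		LogWriter:   os.Stderr,
	})
	if err != nil {
		panic(err)
	}

	// Run blocks: it consumes Vault tokens (normally fed by auto-auth) and
	// supervises the child process until the context is cancelled.
	tokenCh := make(chan string, 1)
	if err := srv.Run(context.Background(), tokenCh); err != nil {
		logger.Error("exec server exited", "error", err)
	}
}
```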
+
+type Server struct {
+	// config holds the ServerConfig used to create it. It's passed along in
+	// other methods.
+	config *ServerConfig
+
+	// runner is the consul-template runner
+	runner *manager.Runner
+
+	// numberOfTemplates is the count of templates determined by consul-template;
+	// we keep the value to ensure all templates have been rendered before
+	// starting the child process.
+	// NOTE: each template may have more than one TemplateConfig, so the numbers may not match up
+	numberOfTemplates int
+
+	logger hclog.Logger
+
+	childProcess       *child.Child
+	childProcessState  childProcessState
+	childProcessLock   sync.Mutex
+	childProcessStdout io.WriteCloser
+	childProcessStderr io.WriteCloser
+
+	// exit channel of the child process
+	childProcessExitCh chan int
+
+	// lastRenderedEnvVars is the cached value of all environment variables
+	// rendered by the templating engine; it is used for detecting changes
+	lastRenderedEnvVars []string
+}
+
+type ProcessExitError struct {
+	ExitCode int
+}
+
+func (e *ProcessExitError) Error() string {
+	return fmt.Sprintf("process exited with %d", e.ExitCode)
+}
+
+func NewServer(cfg *ServerConfig) (*Server, error) {
+	var err error
+
+	childProcessStdout := os.Stdout
+	childProcessStderr := os.Stderr
+
+	if cfg.AgentConfig.Exec != nil {
+		if cfg.AgentConfig.Exec.ChildProcessStdout != "" {
+			childProcessStdout, err = os.OpenFile(cfg.AgentConfig.Exec.ChildProcessStdout, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
+			if err != nil {
+				return nil, fmt.Errorf("could not open %q, %w", cfg.AgentConfig.Exec.ChildProcessStdout, err)
+			}
+		}
+
+		if cfg.AgentConfig.Exec.ChildProcessStderr != "" {
+			childProcessStderr, err = os.OpenFile(cfg.AgentConfig.Exec.ChildProcessStderr, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
+			if err != nil {
+				return nil, fmt.Errorf("could not open %q, %w", cfg.AgentConfig.Exec.ChildProcessStderr, err)
+			}
+		}
+	}
+
+	server := Server{
+		logger:             cfg.Logger,
+		config:             cfg,
+		childProcessState:  childProcessStateNotStarted,
+		childProcessExitCh: make(chan int),
+		childProcessStdout: childProcessStdout,
+		childProcessStderr: childProcessStderr,
+	}
+
+	return &server, nil
+}
+
+func (s *Server) Run(ctx context.Context, incomingVaultToken chan string) error {
+	latestToken := new(string)
+	s.logger.Info("starting exec server")
+	defer func() {
+		s.logger.Info("exec server stopped")
+	}()
+
+	if len(s.config.AgentConfig.EnvTemplates) == 0 || s.config.AgentConfig.Exec == nil {
+		s.logger.Info("no env templates or exec config, exiting")
+		<-ctx.Done()
+		return nil
+	}
+
+	managerConfig := ctmanager.ManagerConfig{
+		AgentConfig: s.config.AgentConfig,
+		Namespace:   s.config.Namespace,
+		LogLevel:    s.config.LogLevel,
+		LogWriter:   s.config.LogWriter,
+	}
+
+	runnerConfig, err := ctmanager.NewConfig(managerConfig, s.config.AgentConfig.EnvTemplates)
+	if err != nil {
+		return fmt.Errorf("template server failed to generate runner config: %w", err)
+	}
+
+	// We leave this in "dry" mode, as there are no files to render; we will get
+	// the rendered contents of the environment variables from the incoming events
+	s.runner, err = manager.NewRunner(runnerConfig, true)
+	if err != nil {
+		return fmt.Errorf("template server failed to create: %w", err)
+	}
+
+	// prevent the templates from being rendered to stdout in "dry" mode
+	s.runner.SetOutStream(io.Discard)
+
+	s.numberOfTemplates = len(s.runner.TemplateConfigMapping())
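Run's event loop below coalesces bursts of render events before restarting the child, using a single resettable timer. The debounce pattern in isolation (a standalone sketch with the same 2-second quiet window; the event source here is invented):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan string)
	var debounce *time.Timer

	// Invented event burst: three events 100ms apart should produce one flush,
	// roughly two seconds after the last event.
	go func() {
		for _, e := range []string{"a", "b", "c"} {
			events <- e
			time.Sleep(100 * time.Millisecond)
		}
	}()

	for i := 0; i < 3; i++ {
		e := <-events
		if debounce != nil {
			debounce.Stop() // cancel the pending flush; a newer event supersedes it
		}
		debounce = time.AfterFunc(2*time.Second, func() {
			fmt.Println("flush, last event:", e)
		})
	}
	time.Sleep(3 * time.Second) // let the final flush fire
}
```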
Only the last + // event in a batch will contain the latest set of all secrets and the + // corresponding environment variables. This timer will fire after 2 seconds + // unless an event comes in which resets the timer back to 2 seconds. + var debounceTimer *time.Timer + + // capture the errors related to restarting the child process + restartChildProcessErrCh := make(chan error) + + // create exponential backoff object to calculate backoff time before restarting a failed + // consul template server + restartBackoff := backoff.NewBackoff(math.MaxInt, consts.DefaultMinBackoff, consts.DefaultMaxBackoff) + + for { + select { + case <-ctx.Done(): + s.runner.Stop() + s.childProcessLock.Lock() + if s.childProcess != nil { + s.childProcess.Stop() + } + s.childProcessState = childProcessStateStopped + s.close() + s.childProcessLock.Unlock() + return nil + + case token := <-incomingVaultToken: + if token != *latestToken { + s.logger.Info("exec server received new token") + + s.runner.Stop() + *latestToken = token + newTokenConfig := ctconfig.Config{ + Vault: &ctconfig.VaultConfig{ + Token: latestToken, + ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), + }, + } + + // got a new auth token, merge it in with the existing config + runnerConfig = runnerConfig.Merge(&newTokenConfig) + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + s.logger.Error("template server failed with new Vault token", "error", err) + continue + } + + // prevent the templates from being rendered to stdout in "dry" mode + s.runner.SetOutStream(io.Discard) + + go s.runner.Start() + } + + case err := <-s.runner.ErrCh: + s.logger.Error("template server error", "error", err.Error()) + s.runner.StopImmediately() + + // Return after stopping the runner if exit on retry failure was specified + if s.config.AgentConfig.TemplateConfig != nil && s.config.AgentConfig.TemplateConfig.ExitOnRetryFailure { + return fmt.Errorf("template server: %w", err) + } + + // Calculate the amount of time to backoff using exponential backoff + sleep, err := restartBackoff.Next() + if err != nil { + s.logger.Error("template server: reached maximum number restart attempts") + restartBackoff.Reset() + } + + // Sleep for the calculated backoff time then attempt to create a new runner + s.logger.Warn(fmt.Sprintf("template server restart: retry attempt after %s", sleep)) + time.Sleep(sleep) + + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + return fmt.Errorf("template server failed to create: %w", err) + } + go s.runner.Start() + + case <-s.runner.TemplateRenderedCh(): + // A template has been rendered, figure out what to do + s.logger.Trace("template rendered") + events := s.runner.RenderEvents() + + // This checks if we've finished rendering the initial set of templates, + // for every consecutive re-render len(events) should equal s.numberOfTemplates + if len(events) < s.numberOfTemplates { + // Not all templates have been rendered yet + continue + } + + // assume the renders are finished, until we find otherwise + doneRendering := true + var renderedEnvVars []string + for _, event := range events { + // This template hasn't been rendered + if event.LastWouldRender.IsZero() { + doneRendering = false + break + } else { + for _, tcfg := range event.TemplateConfigs { + envVar := fmt.Sprintf("%s=%s", *tcfg.MapToEnvironmentVariable, event.Contents) + renderedEnvVars = append(renderedEnvVars, envVar) + } + } + } + if !doneRendering { + continue + } + + // sort the environment variables for 
+
+			// sort the environment variables for a deterministic output and easy comparison
+			sort.Strings(renderedEnvVars)
+
+			s.logger.Trace("done rendering templates")
+
+			// don't restart the process unless a change is detected
+			if slices.Equal(s.lastRenderedEnvVars, renderedEnvVars) {
+				continue
+			}
+
+			s.lastRenderedEnvVars = renderedEnvVars
+
+			s.logger.Debug("detected a change in the environment variables: restarting the child process")
+
+			// if a timer exists, stop it
+			if debounceTimer != nil {
+				debounceTimer.Stop()
+			}
+			debounceTimer = time.AfterFunc(2*time.Second, func() {
+				if err := s.restartChildProcess(renderedEnvVars); err != nil {
+					restartChildProcessErrCh <- fmt.Errorf("unable to restart the child process: %w", err)
+				}
+			})
+
+		case err := <-restartChildProcessErrCh:
+			// catch the error from restarting
+			return err
+
+		case exitCode := <-s.childProcessExitCh:
+			// process exited on its own
+			return &ProcessExitError{ExitCode: exitCode}
+		}
+	}
+}
+
+func (s *Server) restartChildProcess(newEnvVars []string) error {
+	s.childProcessLock.Lock()
+	defer s.childProcessLock.Unlock()
+
+	switch s.config.AgentConfig.Exec.RestartOnSecretChanges {
+	case "always":
+		if s.childProcessState == childProcessStateRunning {
+			// process is running, need to kill it first
+			s.logger.Info("stopping process", "process_id", s.childProcess.Pid())
+			s.childProcessState = childProcessStateRestarting
+			s.childProcess.Stop()
+		}
+	case "never":
+		if s.childProcessState == childProcessStateRunning {
+			s.logger.Info("detected update, but not restarting process", "process_id", s.childProcess.Pid())
+			return nil
+		}
+	default:
+		return fmt.Errorf("invalid value for restart_on_secret_changes: %q", s.config.AgentConfig.Exec.RestartOnSecretChanges)
+	}
+
+	args, subshell, err := child.CommandPrep(s.config.AgentConfig.Exec.Command)
+	if err != nil {
+		return fmt.Errorf("unable to parse command: %w", err)
+	}
+
+	childInput := &child.NewInput{
+		Stdin:        os.Stdin,
+		Stdout:       s.childProcessStdout,
+		Stderr:       s.childProcessStderr,
+		Command:      args[0],
+		Args:         args[1:],
+		Timeout:      0, // let it run forever
+		Env:          append(os.Environ(), newEnvVars...),
+		ReloadSignal: nil, // can't reload w/ new env vars
+		KillSignal:   s.config.AgentConfig.Exec.RestartStopSignal,
+		KillTimeout:  30 * time.Second,
+		Splay:        0,
+		Setpgid:      subshell,
+		Logger:       s.logger.StandardLogger(nil),
+	}
+
+	proc, err := child.New(childInput)
+	if err != nil {
+		return err
+	}
+	s.childProcess = proc
+
+	if err := s.childProcess.Start(); err != nil {
+		return fmt.Errorf("error starting the child process: %w", err)
+	}
+
+	s.childProcessState = childProcessStateRunning
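The goroutine below is what feeds childProcessExitCh when the child exits on its own; Run then surfaces that as a *ProcessExitError. On the caller side, the child's exit code can be recovered with errors.As (a hypothetical sketch; only Run and ProcessExitError come from this diff):

```go
package supervisor

import (
	"context"
	"errors"
	"log"
	"os"

	"github.com/hashicorp/vault/command/agent/exec"
)

// runAndMirrorExit runs the exec server and, if the supervised child exits on
// its own, exits this process with the same code.
func runAndMirrorExit(ctx context.Context, srv *exec.Server, tokens chan string) {
	if err := srv.Run(ctx, tokens); err != nil {
		var exitErr *exec.ProcessExitError
		if errors.As(err, &exitErr) {
			os.Exit(exitErr.ExitCode) // propagate the child's exit code
		}
		log.Fatal(err)
	}
}
```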
+ go func() { + select { + case exitCode, ok := <-proc.ExitCh(): + // ignore ExitCh channel closures caused by our restarts + if ok { + s.childProcessExitCh <- exitCode + } + } + }() + + return nil +} + +func (s *Server) Close() { + s.childProcessLock.Lock() + defer s.childProcessLock.Unlock() + s.close() +} + +func (s *Server) close() { + if s.childProcessStdout != os.Stdout { + _ = s.childProcessStdout.Close() + } + if s.childProcessStderr != os.Stderr { + _ = s.childProcessStderr.Close() + } +} diff --git a/command/agent/exec/exec_test.go b/command/agent/exec/exec_test.go new file mode 100644 index 000000000000..a825037faa46 --- /dev/null +++ b/command/agent/exec/exec_test.go @@ -0,0 +1,580 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package exec + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + "testing" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +func fakeVaultServer(t *testing.T) *httptest.Server { + t.Helper() + + firstRequest := true + + mux := http.NewServeMux() + mux.HandleFunc("/v1/kv/my-app/creds", func(w http.ResponseWriter, r *http.Request) { + // change the password on the second request to re-render the template + var password string + + if firstRequest { + password = "s3cr3t" + } else { + password = "s3cr3t-two" + } + + firstRequest = false + + fmt.Fprintf(w, `{ + "request_id": "8af096e9-518c-7351-eff5-5ba20554b21f", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "data": { + "password": "%s", + "user": "app-user" + }, + "metadata": { + "created_time": "2019-10-07T22:18:44.233247Z", + "deletion_time": "", + "destroyed": false, + "version": 3 + } + }, + "wrap_info": null, + "warnings": null, + "auth": null + }`, + password, + ) + }) + + return httptest.NewServer(mux) +} + +// TestExecServer_Run tests various scenarios of using vault agent as a process +// supervisor. At its core is a sample application referred to as 'test app', +// compiled from ./test-app/main.go. Each test case verifies that the test app +// is started and/or stopped correctly by exec.Server.Run(). There are 3 +// high-level scenarios we want to test for: +// +// 1. test app is started and is injected with environment variables +// 2. test app exits early (either with zero or non-zero exit code) +// 3. 
test app needs to be stopped (and restarted) by exec.Server +func TestExecServer_Run(t *testing.T) { + // we must build a test-app binary since 'go run' does not propagate signals correctly + goBinary, err := exec.LookPath("go") + if err != nil { + t.Fatalf("could not find go binary on path: %s", err) + } + + testAppBinary := filepath.Join(os.TempDir(), "test-app") + + if err := exec.Command(goBinary, "build", "-o", testAppBinary, "./test-app").Run(); err != nil { + t.Fatalf("could not build the test application: %s", err) + } + defer func() { + if err := os.Remove(testAppBinary); err != nil { + t.Fatalf("could not remove %q test application: %s", testAppBinary, err) + } + }() + + testCases := map[string]struct { + // skip this test case + skip bool + skipReason string + + // inputs to the exec server + envTemplates []*ctconfig.TemplateConfig + staticSecretRenderInterval time.Duration + + // test app parameters + testAppArgs []string + testAppStopSignal os.Signal + + // simulate a shutdown of the agent, which, in turn, stops the test app + simulateShutdown bool + simulateShutdownWaitDuration time.Duration + + // expected results + expected map[string]string + expectedTestDuration time.Duration + expectedError error + }{ + "ensure_environment_variables_are_injected": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }, { + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), + }}, + testAppArgs: []string{"--stop-after", "10s"}, + testAppStopSignal: syscall.SIGTERM, + expected: map[string]string{ + "MY_USER": "app-user", + "MY_PASSWORD": "s3cr3t", + }, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "password_changes_test_app_should_restart": { + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }, { + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), + }}, + staticSecretRenderInterval: 5 * time.Second, + testAppArgs: []string{"--stop-after", "15s", "--sleep-after-stop-signal", "0s"}, + testAppStopSignal: syscall.SIGTERM, + expected: map[string]string{ + "MY_USER": "app-user", + "MY_PASSWORD": "s3cr3t-two", + }, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "test_app_exits_early": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "1s"}, + testAppStopSignal: syscall.SIGTERM, + expectedTestDuration: 15 * time.Second, + expectedError: &ProcessExitError{0}, + }, + + "test_app_exits_early_non_zero": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "1s", "--exit-code", "5"}, + testAppStopSignal: syscall.SIGTERM, + expectedTestDuration: 15 * 
time.Second, + expectedError: &ProcessExitError{5}, + }, + + "send_sigterm_expect_test_app_exit": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s"}, + testAppStopSignal: syscall.SIGTERM, + simulateShutdown: true, + simulateShutdownWaitDuration: 3 * time.Second, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "send_sigusr1_expect_test_app_exit": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s", "--use-sigusr1"}, + testAppStopSignal: syscall.SIGUSR1, + simulateShutdown: true, + simulateShutdownWaitDuration: 3 * time.Second, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "test_app_ignores_stop_signal": { + skip: true, + skipReason: "This test currently fails with 'go test -race' (see hashicorp/consul-template/issues/1753).", + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "60s", "--sleep-after-stop-signal", "60s"}, + testAppStopSignal: syscall.SIGTERM, + simulateShutdown: true, + simulateShutdownWaitDuration: 32 * time.Second, // the test app should be stopped immediately after 30s + expectedTestDuration: 45 * time.Second, + expectedError: nil, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.skip { + t.Skip(testCase.skipReason) + } + + t.Logf("test case %s: begin", name) + defer t.Logf("test case %s: end", name) + + fakeVault := fakeVaultServer(t) + defer fakeVault.Close() + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), testCase.expectedTestDuration) + defer cancelContextFunc() + + port := findOpenPort(t) + + testAppCommand := []string{ + testAppBinary, + "--port", + strconv.Itoa(port), + } + + execServer, err := NewServer(&ServerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: fakeVault.URL, + Retry: &config.Retry{ + NumRetries: 3, + }, + }, + Exec: &config.ExecConfig{ + RestartOnSecretChanges: "always", + Command: append(testAppCommand, testCase.testAppArgs...), + RestartStopSignal: testCase.testAppStopSignal, + }, + EnvTemplates: testCase.envTemplates, + TemplateConfig: &config.TemplateConfig{ + ExitOnRetryFailure: true, + StaticSecretRenderInt: testCase.staticSecretRenderInterval, + }, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + }) + if err != nil { + t.Fatalf("could not create exec server: %q", err) + } + + // start the exec server + var ( + execServerErrCh = make(chan error) + execServerTokenCh = make(chan string, 1) + ) + go func() { + execServerErrCh <- execServer.Run(ctx, execServerTokenCh) + }() + + // send a dummy token to kick off the server + execServerTokenCh <- "my-token" + + // ensure the test app is running after 500ms + var ( + testAppAddr = fmt.Sprintf("http://localhost:%d", port) + testAppStartedCh = make(chan error) + ) + if 
testCase.expectedError == nil { + time.AfterFunc(500*time.Millisecond, func() { + _, err := retryablehttp.Head(testAppAddr) + testAppStartedCh <- err + }) + } + + select { + case <-ctx.Done(): + t.Fatal("timeout reached before templates were rendered") + + case err := <-execServerErrCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("exec server did not expect an error, got: %v", err) + } + + if !errors.Is(err, testCase.expectedError) { + t.Fatalf("exec server expected error %v; got %v", testCase.expectedError, err) + } + + t.Log("exec server exited as expected") + + return + + case err := <-testAppStartedCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("test app could not be started: %s", err) + } + + t.Log("test app started successfully") + } + + // expect the test app to restart after staticSecretRenderInterval + debounce timer due to a password change + if testCase.staticSecretRenderInterval != 0 { + t.Logf("sleeping for %v to wait for application restart", testCase.staticSecretRenderInterval+5*time.Second) + time.Sleep(testCase.staticSecretRenderInterval + 5*time.Second) + } + + // simulate a shutdown of the agent, which, in turn, stops the test app + if testCase.simulateShutdown { + cancelContextFunc() + + time.Sleep(testCase.simulateShutdownWaitDuration) + + // check if the test app is still alive + if _, err := http.Head(testAppAddr); err == nil { + t.Fatalf("the test app is still alive %v after a simulated shutdown!", testCase.simulateShutdownWaitDuration) + } + + return + } + + // verify the environment variables + t.Logf("verifying test-app's environment variables") + + resp, err := retryablehttp.Get(testAppAddr) + if err != nil { + t.Fatalf("error making request to the test app: %s", err) + } + defer resp.Body.Close() + + decoder := json.NewDecoder(resp.Body) + var response struct { + EnvironmentVariables map[string]string `json:"environment_variables"` + ProcessID int `json:"process_id"` + } + if err := decoder.Decode(&response); err != nil { + t.Fatalf("unable to parse response from test app: %s", err) + } + + for key, expectedValue := range testCase.expected { + actualValue, ok := response.EnvironmentVariables[key] + if !ok { + t.Fatalf("expected the test app to return %q environment variable", key) + } + if expectedValue != actualValue { + t.Fatalf("expected environment variable %s to have a value of %q but it has a value of %q", key, expectedValue, actualValue) + } + } + }) + } +} + +func TestExecServer_LogFiles(t *testing.T) { + goBinary, err := exec.LookPath("go") + if err != nil { + t.Fatalf("could not find go binary on path: %s", err) + } + + testAppBinary := filepath.Join(os.TempDir(), "test-app") + + if err := exec.Command(goBinary, "build", "-o", testAppBinary, "./test-app").Run(); err != nil { + t.Fatalf("could not build the test application: %s", err) + } + t.Cleanup(func() { + if err := os.Remove(testAppBinary); err != nil { + t.Fatalf("could not remove %q test application: %s", testAppBinary, err) + } + }) + + testCases := map[string]struct { + testAppArgs []string + stderrFile string + stdoutFile string + + expectedError error + }{ + "can_log_stderr_to_file": { + stderrFile: "vault-exec-test.stderr.log", + }, + "can_log_stdout_to_file": { + stdoutFile: "vault-exec-test.stdout.log", + testAppArgs: []string{"-log-to-stdout"}, + }, + "cant_open_file": { + stderrFile: "/file/does/not/exist", + expectedError: os.ErrNotExist, + }, + } + + for tcName, testCase := range testCases { + t.Run(tcName, func(t *testing.T) { + fakeVault := 
fakeVaultServer(t) + defer fakeVault.Close() + + port := findOpenPort(t) + testAppCommand := []string{ + testAppBinary, + "--port", + strconv.Itoa(port), + "--stop-after", + "60s", + } + + execConfig := &config.ExecConfig{ + RestartOnSecretChanges: "always", + Command: append(testAppCommand, testCase.testAppArgs...), + } + + if testCase.stdoutFile != "" { + execConfig.ChildProcessStdout = filepath.Join(os.TempDir(), testCase.stdoutFile) + t.Cleanup(func() { + _ = os.Remove(execConfig.ChildProcessStdout) + }) + } + + if testCase.stderrFile != "" { + // joining an absolute path such as "/file/does/not/exist" still yields + // a path inside a nonexistent directory, which is what "cant_open_file" needs + execConfig.ChildProcessStderr = filepath.Join(os.TempDir(), testCase.stderrFile) + t.Cleanup(func() { + _ = os.Remove(execConfig.ChildProcessStderr) + }) + } + + execServer, err := NewServer(&ServerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: fakeVault.URL, + Retry: &config.Retry{ + NumRetries: 3, + }, + }, + Exec: execConfig, + EnvTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + TemplateConfig: &config.TemplateConfig{ + ExitOnRetryFailure: true, + StaticSecretRenderInt: 5 * time.Second, + }, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + }) + if err != nil { + if testCase.expectedError != nil { + if errors.Is(err, testCase.expectedError) { + t.Log("test passes! caught expected err") + return + } else { + t.Fatalf("caught error %q did not match expected error %q", err, testCase.expectedError) + } + } + t.Fatalf("could not create exec server: %q", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // start the exec server + var ( + execServerErrCh = make(chan error) + execServerTokenCh = make(chan string, 1) + ) + go func() { + execServerErrCh <- execServer.Run(ctx, execServerTokenCh) + }() + + // send a dummy token to kick off the server + execServerTokenCh <- "my-token" + + // ensure the test app is running after 500ms + var ( + testAppAddr = fmt.Sprintf("http://localhost:%d", port) + testAppStartedCh = make(chan error) + ) + time.AfterFunc(500*time.Millisecond, func() { + _, err := retryablehttp.Head(testAppAddr) + testAppStartedCh <- err + }) + + select { + case <-ctx.Done(): + t.Fatal("timeout reached before templates were rendered") + + case err := <-execServerErrCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("exec server did not expect an error, got: %v", err) + } + + if !errors.Is(err, testCase.expectedError) { + t.Fatalf("exec server expected error %v; got %v", testCase.expectedError, err) + } + + t.Log("exec server exited as expected") + + return + + case err := <-testAppStartedCh: + if err != nil { + t.Fatalf("test app could not be started: %s", err) + } + t.Log("test app started successfully") + } + + // let the app run a bit + time.Sleep(5 * time.Second) + // stop the app + cancel() + // wait for app to stop + time.Sleep(5 * time.Second) + + // check if the files have content + if testCase.stdoutFile != "" { + stdoutInfo, err := os.Stat(execConfig.ChildProcessStdout) + if err != nil { + t.Fatalf("error calling stat on stdout file: %q", err) + } + if stdoutInfo.Size() == 0 { + t.Fatalf("stdout log file does not have any data!") + } + } + + if testCase.stderrFile != "" { + stderrInfo, err := os.Stat(execConfig.ChildProcessStderr) + if err != nil { + t.Fatalf("error calling stat on stderr file: %q", err) + } + if stderrInfo.Size() == 0 { + t.Fatalf("stderr log file does not have 
any data!") + } + } + }) + } +} + +// findOpenPort generates a random open port, using Go's :0 to find a port, +// for us to then use as the test binary's port to use. +// This is a little race-y, as something else could open the port before +// we use it, but we're not the process that needs to open the port, +// and we still need to be able to access it. +// We should be fine so long as we don't make the tests parallel. +func findOpenPort(t *testing.T) int { + t.Helper() + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + port := ln.Addr().(*net.TCPAddr).Port + err = ln.Close() + if err != nil { + t.Fatal(err) + } + return port +} diff --git a/command/agent/exec/test-app/main.go b/command/agent/exec/test-app/main.go new file mode 100644 index 000000000000..db8845aa6055 --- /dev/null +++ b/command/agent/exec/test-app/main.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package main + +// This is a test application that is used by TestExecServer_Run to verify +// the behavior of vault agent running as a process supervisor. +// +// The app will automatically exit after 1 minute or the --stop-after interval, +// whichever comes first. It also can serve its loaded environment variables on +// the given --port. This app will also return the given --exit-code and +// terminate on SIGTERM unless --use-sigusr1 is specified. + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +var ( + port uint + sleepAfterStopSignal time.Duration + useSigusr1StopSignal bool + stopAfter time.Duration + exitCode int + logToStdout bool +) + +func init() { + flag.UintVar(&port, "port", 34000, "port to run the test app on") + flag.DurationVar(&sleepAfterStopSignal, "sleep-after-stop-signal", 1*time.Second, "time to sleep after getting the signal before exiting") + flag.BoolVar(&useSigusr1StopSignal, "use-sigusr1", false, "use SIGUSR1 as the stop signal, instead of the default SIGTERM") + flag.DurationVar(&stopAfter, "stop-after", 0, "stop the process after duration (overrides all other flags if set)") + flag.IntVar(&exitCode, "exit-code", 0, "exit code to return when this script exits") + flag.BoolVar(&logToStdout, "log-to-stdout", false, "log to stdout instead of stderr") +} + +type Response struct { + EnvironmentVariables map[string]string `json:"environment_variables"` + ProcessID int `json:"process_id"` +} + +func newResponse() Response { + respEnv := make(map[string]string, len(os.Environ())) + for _, envVar := range os.Environ() { + tokens := strings.Split(envVar, "=") + respEnv[tokens[0]] = tokens[1] + } + + return Response{ + EnvironmentVariables: respEnv, + ProcessID: os.Getpid(), + } +} + +func handler(w http.ResponseWriter, r *http.Request) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + if r.URL.Query().Get("pretty") == "1" { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(newResponse()); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(buf.Bytes()) +} + +func main() { + flag.Parse() + + logOut := os.Stderr + if logToStdout { + logOut = os.Stdout + } + logger := log.New(logOut, "test-app: ", log.LstdFlags) + + logger.Printf("running on port %d", port) + if err := run(logger); err != nil { + log.Fatalf("error: %v\n", err) + } + + logger.Printf("exit code: %d\n", 
exitCode) + + os.Exit(exitCode) +} + +func run(logger *log.Logger) error { + /* */ logger.Println("run: started") + defer logger.Println("run: done") + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 60*time.Second) + defer cancelContextFunc() + + server := http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: http.HandlerFunc(handler), + ReadTimeout: 20 * time.Second, + WriteTimeout: 20 * time.Second, + IdleTimeout: 20 * time.Second, + } + + doneCh := make(chan struct{}) + + go func() { + defer close(doneCh) + + stopSignal := make(chan os.Signal, 1) + if useSigusr1StopSignal { + signal.Notify(stopSignal, syscall.SIGUSR1) + } else { + signal.Notify(stopSignal, syscall.SIGTERM) + } + + // a --stop-after of zero would make time.After fire immediately; leave + // the channel nil in that case so this branch can never be selected + var stopAfterCh <-chan time.Time + if stopAfter > 0 { + stopAfterCh = time.After(stopAfter) + } + + select { + case <-ctx.Done(): + logger.Println("context done: exiting") + + case s := <-stopSignal: + logger.Printf("signal %q: received\n", s) + + if sleepAfterStopSignal > 0 { + logger.Printf("signal %q: sleeping for %v to simulate cleanup\n", s, sleepAfterStopSignal) + time.Sleep(sleepAfterStopSignal) + } + + case <-stopAfterCh: + logger.Printf("stopping after: %v\n", stopAfter) + } + + if err := server.Shutdown(context.Background()); err != nil { + logger.Printf("server shutdown error: %v", err) + } + }() + + logger.Printf("server %s: started\n", server.Addr) + + if err := server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + return fmt.Errorf("could not start the server: %v", err) + } + + logger.Printf("server %s: done\n", server.Addr) + + <-doneCh + + return nil +} diff --git a/command/agent/internal/ctmanager/runner_config.go b/command/agent/internal/ctmanager/runner_config.go new file mode 100644 index 000000000000..03b134dbcbd3 --- /dev/null +++ b/command/agent/internal/ctmanager/runner_config.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package ctmanager + +import ( + "fmt" + "io" + "strings" + + ctconfig "github.com/hashicorp/consul-template/config" + ctlogging "github.com/hashicorp/consul-template/logging" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +type ManagerConfig struct { + AgentConfig *config.Config + Namespace string + LogLevel hclog.Level + LogWriter io.Writer +} + +// NewConfig returns a consul-template runner configuration, setting the +// Vault and Consul configurations based on the client's configs. 
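+// The returned config starts from consul-template's defaults and then layers +// on the agent's Vault settings (address, namespace, TLS, retry) and, when +// the agent cache is enabled, its in-process dialer.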
+func NewConfig(mc ManagerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { + conf := ctconfig.DefaultConfig() + conf.Templates = templates.Copy() + + // Set up the Vault config + // Always set these to ensure nothing is picked up from the environment + conf.Vault.RenewToken = pointerutil.BoolPtr(false) + conf.Vault.Token = pointerutil.StringPtr("") + conf.Vault.Address = &mc.AgentConfig.Vault.Address + + if mc.Namespace != "" { + conf.Vault.Namespace = &mc.Namespace + } + + if mc.AgentConfig.TemplateConfig != nil { + conf.Vault.LeaseRenewalThreshold = mc.AgentConfig.TemplateConfig.LeaseRenewalThreshold + + if mc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { + conf.Vault.DefaultLeaseDuration = &mc.AgentConfig.TemplateConfig.StaticSecretRenderInt + } + } + + if mc.AgentConfig.DisableIdleConnsTemplating { + idleConns := -1 + conf.Vault.Transport.MaxIdleConns = &idleConns + } + + if mc.AgentConfig.DisableKeepAlivesTemplating { + conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) + } + + if mc.AgentConfig.TemplateConfig != nil && mc.AgentConfig.TemplateConfig.MaxConnectionsPerHost != 0 { + conf.Vault.Transport.MaxConnsPerHost = &mc.AgentConfig.TemplateConfig.MaxConnectionsPerHost + } + + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(false), + Verify: pointerutil.BoolPtr(false), + Cert: pointerutil.StringPtr(""), + Key: pointerutil.StringPtr(""), + CaCert: pointerutil.StringPtr(""), + CaPath: pointerutil.StringPtr(""), + ServerName: pointerutil.StringPtr(""), + } + + // If Vault.Retry isn't specified, use the default of 12 retries. + // This retry value will be respected regardless of whether we use the cache. + attempts := ctconfig.DefaultRetryAttempts + if mc.AgentConfig.Vault != nil && mc.AgentConfig.Vault.Retry != nil { + attempts = mc.AgentConfig.Vault.Retry.NumRetries + } + + // Use the cache if available or fall back to the Vault server values. + if mc.AgentConfig.Cache != nil { + if mc.AgentConfig.Cache.InProcDialer == nil { + return nil, fmt.Errorf("missing in-process dialer configuration") + } + if conf.Vault.Transport == nil { + conf.Vault.Transport = &ctconfig.TransportConfig{} + } + conf.Vault.Transport.CustomDialer = mc.AgentConfig.Cache.InProcDialer + // The in-process dialer ignores the address passed in, but we're still + // setting it here to override the setting at the top of this function, + // and to prevent the vault/http client from defaulting to https. + conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") + } else if strings.HasPrefix(mc.AgentConfig.Vault.Address, "https") || mc.AgentConfig.Vault.CACert != "" { + skipVerify := mc.AgentConfig.Vault.TLSSkipVerify + verify := !skipVerify + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(true), + Verify: &verify, + Cert: &mc.AgentConfig.Vault.ClientCert, + Key: &mc.AgentConfig.Vault.ClientKey, + CaCert: &mc.AgentConfig.Vault.CACert, + CaPath: &mc.AgentConfig.Vault.CAPath, + ServerName: &mc.AgentConfig.Vault.TLSServerName, + } + } + enabled := attempts > 0 + conf.Vault.Retry = &ctconfig.RetryConfig{ + Attempts: &attempts, + Enabled: &enabled, + } + + // Sync Consul Template's retry with the user-set auto-auth initial backoff value. + // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch + // secrets. 
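+ // For example, a method block with min_backoff = "5s" and max_backoff = "30s" + // (illustrative values) gives consul-template retries that start at 5s and + // are capped at 30s.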
+ if mc.AgentConfig.AutoAuth != nil && mc.AgentConfig.AutoAuth.Method != nil { + if mc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { + conf.Vault.Retry.Backoff = &mc.AgentConfig.AutoAuth.Method.MinBackoff + } + + if mc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { + conf.Vault.Retry.MaxBackoff = &mc.AgentConfig.AutoAuth.Method.MaxBackoff + } + } + + conf.Finalize() + + // set up the log level from the TemplateServer config + conf.LogLevel = logLevelToStringPtr(mc.LogLevel) + + if err := ctlogging.Setup(&ctlogging.Config{ + Level: *conf.LogLevel, + Writer: mc.LogWriter, + }); err != nil { + return nil, err + } + return conf, nil +} + +// logLevelToStringPtr converts a go-hclog level to a matching, uppercase string +// value. It's used to convert Vault Agent's hclog level to a string version +// suitable for use in Consul Template's runner configuration input. +func logLevelToStringPtr(level hclog.Level) *string { + // consul template's default level is WARN, but Vault Agent's default is INFO, + // so we use that for the Runner's default. + var levelStr string + + switch level { + case hclog.Trace: + levelStr = "TRACE" + case hclog.Debug: + levelStr = "DEBUG" + case hclog.Warn: + levelStr = "WARN" + case hclog.Error: + levelStr = "ERR" + default: + levelStr = "INFO" + } + return pointerutil.StringPtr(levelStr) +} diff --git a/command/agent/jwt_end_to_end_test.go b/command/agent/jwt_end_to_end_test.go index 4136ac1eb64f..3d1f962cdaaa 100644 --- a/command/agent/jwt_end_to_end_test.go +++ b/command/agent/jwt_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -11,13 +11,13 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentjwt "github.com/hashicorp/vault/command/agent/auth/jwt" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentjwt "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/dhutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -55,7 +55,6 @@ func TestJWTEndToEnd(t *testing.T) { func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "jwt": vaultjwt.Factory, }, @@ -224,7 +223,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/oci_end_to_end_test.go b/command/agent/oci_end_to_end_test.go index 878239d431e5..de9e86fb22ce 100644 --- a/command/agent/oci_end_to_end_test.go +++ b/command/agent/oci_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -10,13 +10,13 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" vaultoci "github.com/hashicorp/vault-plugin-auth-oci" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentoci "github.com/hashicorp/vault/command/agent/auth/oci" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentoci "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" @@ -59,7 +59,6 @@ func TestOCIEndToEnd(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "oci": vaultoci.Factory, }, @@ -166,7 +165,7 @@ func TestOCIEndToEnd(t *testing.T) { Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent/sink/file/file_sink_test.go b/command/agent/sink/file/file_sink_test.go deleted file mode 100644 index 1b9f3bd05585..000000000000 --- a/command/agent/sink/file/file_sink_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package file - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - hclog "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -const ( - fileServerTestDir = "vault-agent-file-test" -) - -func testFileSink(t *testing.T, log hclog.Logger) (*sink.SinkConfig, string) { - tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%s.", fileServerTestDir)) - if err != nil { - t.Fatal(err) - } - - path := filepath.Join(tmpDir, "token") - - config := &sink.SinkConfig{ - Logger: log.Named("sink.file"), - Config: map[string]interface{}{ - "path": path, - }, - } - - s, err := NewFileSink(config) - if err != nil { - t.Fatal(err) - } - config.Sink = s - - return config, tmpDir -} - -func TestFileSink(t *testing.T) { - log := logging.NewVaultLogger(hclog.Trace) - - fs, tmpDir := testFileSink(t, log) - defer os.RemoveAll(tmpDir) - - path := filepath.Join(tmpDir, "token") - - uuidStr, _ := uuid.GenerateUUID() - if err := fs.WriteToken(uuidStr); err != nil { - t.Fatal(err) - } - - file, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - - fi, err := file.Stat() - if err != nil { - t.Fatal(err) - } - if fi.Mode() != os.FileMode(0o640) { - t.Fatalf("wrong file mode was detected at %s", path) - } - err = file.Close() - if err != nil { - t.Fatal(err) - } - - fileBytes, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - if string(fileBytes) != uuidStr { - t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) - } -} - -func testFileSinkMode(t *testing.T, log hclog.Logger) (*sink.SinkConfig, string) { - tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%s.", fileServerTestDir)) - if err != nil { - t.Fatal(err) - } - - path := filepath.Join(tmpDir, "token") - - config := 
&sink.SinkConfig{ - Logger: log.Named("sink.file"), - Config: map[string]interface{}{ - "path": path, - "mode": 0o644, - }, - } - - s, err := NewFileSink(config) - if err != nil { - t.Fatal(err) - } - config.Sink = s - - return config, tmpDir -} - -func TestFileSinkMode(t *testing.T) { - log := logging.NewVaultLogger(hclog.Trace) - - fs, tmpDir := testFileSinkMode(t, log) - defer os.RemoveAll(tmpDir) - - path := filepath.Join(tmpDir, "token") - - uuidStr, _ := uuid.GenerateUUID() - if err := fs.WriteToken(uuidStr); err != nil { - t.Fatal(err) - } - - file, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer file.Close() - - fi, err := file.Stat() - if err != nil { - t.Fatal(err) - } - if fi.Mode() != os.FileMode(0o644) { - t.Fatalf("wrong file mode was detected at %s", path) - } - - fileBytes, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - if string(fileBytes) != uuidStr { - t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) - } -} diff --git a/command/agent/template/template.go b/command/agent/template/template.go index 6de083d0a86f..f0619694a597 100644 --- a/command/agent/template/template.go +++ b/command/agent/template/template.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Package template is responsible for rendering user supplied templates to // disk. The Server type accepts configuration to communicate to a Vault server @@ -13,16 +13,23 @@ import ( "errors" "fmt" "io" + "math" "strings" - - "go.uber.org/atomic" + sync "sync/atomic" + "time" ctconfig "github.com/hashicorp/consul-template/config" - ctlogging "github.com/hashicorp/consul-template/logging" "github.com/hashicorp/consul-template/manager" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/internal/ctmanager" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/backoff" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/pointerutil" + "github.com/hashicorp/vault/sdk/logical" + "go.uber.org/atomic" ) // ServerConfig is a config struct for setting up the basic parts of the @@ -87,7 +94,7 @@ func NewServer(conf *ServerConfig) *Server { // Run kicks off the internal Consul Template runner, and listens for changes to // the token from the AuthHandler. 
If Done() is called on the context, shut down // the Runner and return -func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ctconfig.TemplateConfig) error { +func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ctconfig.TemplateConfig, tokenRenewalInProgress *sync.Bool, invalidTokenCh chan error) error { if incoming == nil { return errors.New("template server: incoming channel is nil") } @@ -110,8 +117,14 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct // configuration var runnerConfig *ctconfig.Config var runnerConfigErr error - - if runnerConfig, runnerConfigErr = newRunnerConfig(ts.config, templates); runnerConfigErr != nil { + managerConfig := ctmanager.ManagerConfig{ + AgentConfig: ts.config.AgentConfig, + Namespace: ts.config.Namespace, + LogLevel: ts.config.LogLevel, + LogWriter: ts.config.LogWriter, + } + runnerConfig, runnerConfigErr = ctmanager.NewConfig(managerConfig, templates) + if runnerConfigErr != nil { return fmt.Errorf("template server failed to runner generate config: %w", runnerConfigErr) } @@ -138,12 +151,15 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct } ts.lookupMap = lookupMap + // Create backoff object to calculate backoff time before restarting a failed + // consul template server + restartBackoff := backoff.NewBackoff(math.MaxInt, consts.DefaultMinBackoff, consts.DefaultMaxBackoff) + for { select { case <-ctx.Done(): ts.runner.Stop() return nil - case token := <-incoming: if token != *latestToken { ts.logger.Info("template server received new token") @@ -160,7 +176,8 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct *latestToken = token ctv := ctconfig.Config{ Vault: &ctconfig.VaultConfig{ - Token: latestToken, + Token: latestToken, + ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), }, } @@ -185,6 +202,17 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct return fmt.Errorf("template server: %w", err) } + // Calculate the amount of time to backoff using exponential backoff + sleep, err := restartBackoff.Next() + if err != nil { + ts.logger.Error("template server: reached maximum number of restart attempts") + restartBackoff.Reset() + } + + // Sleep for the calculated backoff time then attempt to create a new runner + ts.logger.Warn(fmt.Sprintf("template server restart: retry attempt after %s", sleep)) + time.Sleep(sleep) + ts.runner, err = manager.NewRunner(runnerConfig, false) if err != nil { return fmt.Errorf("template server failed to create: %w", err) @@ -218,6 +246,24 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct ts.runner.Stop() return nil } + case err := <-ts.runner.ServerErrCh: + var responseError *api.ResponseError + ok := errors.As(err, &responseError) + if !ok { + ts.logger.Error("template server: could not extract error response") + continue + } + if responseError.StatusCode == 403 && strings.Contains(responseError.Error(), logical.ErrInvalidToken.Error()) && !tokenRenewalInProgress.Load() { + ts.logger.Info("template server: received invalid token error") + + // Drain the error channel and incoming channel before sending a new error + select { + case <-invalidTokenCh: + case <-incoming: + default: + } + invalidTokenCh <- err + } } } } @@ -227,131 +273,3 @@ func (ts *Server) Stop() { close(ts.DoneCh) } } - -// newRunnerConfig returns a consul-template runner configuration, setting the -// Vault and 
Consul configurations based on the clients configs. -func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { - conf := ctconfig.DefaultConfig() - conf.Templates = templates.Copy() - - // Setup the Vault config - // Always set these to ensure nothing is picked up from the environment - conf.Vault.RenewToken = pointerutil.BoolPtr(false) - conf.Vault.Token = pointerutil.StringPtr("") - conf.Vault.Address = &sc.AgentConfig.Vault.Address - - if sc.Namespace != "" { - conf.Vault.Namespace = &sc.Namespace - } - - if sc.AgentConfig.TemplateConfig != nil && sc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { - conf.Vault.DefaultLeaseDuration = &sc.AgentConfig.TemplateConfig.StaticSecretRenderInt - } - - if sc.AgentConfig.DisableIdleConnsTemplating { - idleConns := -1 - conf.Vault.Transport.MaxIdleConns = &idleConns - } - - if sc.AgentConfig.DisableKeepAlivesTemplating { - conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) - } - - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(false), - Verify: pointerutil.BoolPtr(false), - Cert: pointerutil.StringPtr(""), - Key: pointerutil.StringPtr(""), - CaCert: pointerutil.StringPtr(""), - CaPath: pointerutil.StringPtr(""), - ServerName: pointerutil.StringPtr(""), - } - - // If Vault.Retry isn't specified, use the default of 12 retries. - // This retry value will be respected regardless of if we use the cache. - attempts := ctconfig.DefaultRetryAttempts - if sc.AgentConfig.Vault != nil && sc.AgentConfig.Vault.Retry != nil { - attempts = sc.AgentConfig.Vault.Retry.NumRetries - } - - // Use the cache if available or fallback to the Vault server values. - if sc.AgentConfig.Cache != nil { - if sc.AgentConfig.Cache.InProcDialer == nil { - return nil, fmt.Errorf("missing in-process dialer configuration") - } - if conf.Vault.Transport == nil { - conf.Vault.Transport = &ctconfig.TransportConfig{} - } - conf.Vault.Transport.CustomDialer = sc.AgentConfig.Cache.InProcDialer - // The in-process dialer ignores the address passed in, but we're still - // setting it here to override the setting at the top of this function, - // and to prevent the vault/http client from defaulting to https. - conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") - } else if strings.HasPrefix(sc.AgentConfig.Vault.Address, "https") || sc.AgentConfig.Vault.CACert != "" { - skipVerify := sc.AgentConfig.Vault.TLSSkipVerify - verify := !skipVerify - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(true), - Verify: &verify, - Cert: &sc.AgentConfig.Vault.ClientCert, - Key: &sc.AgentConfig.Vault.ClientKey, - CaCert: &sc.AgentConfig.Vault.CACert, - CaPath: &sc.AgentConfig.Vault.CAPath, - ServerName: &sc.AgentConfig.Vault.TLSServerName, - } - } - enabled := attempts > 0 - conf.Vault.Retry = &ctconfig.RetryConfig{ - Attempts: &attempts, - Enabled: &enabled, - } - - // Sync Consul Template's retry with user set auto-auth initial backoff value. - // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch - // secrets. 
- if sc.AgentConfig.AutoAuth != nil && sc.AgentConfig.AutoAuth.Method != nil { - if sc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { - conf.Vault.Retry.Backoff = &sc.AgentConfig.AutoAuth.Method.MinBackoff - } - - if sc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { - conf.Vault.Retry.MaxBackoff = &sc.AgentConfig.AutoAuth.Method.MaxBackoff - } - } - - conf.Finalize() - - // setup log level from TemplateServer config - conf.LogLevel = logLevelToStringPtr(sc.LogLevel) - - if err := ctlogging.Setup(&ctlogging.Config{ - Level: *conf.LogLevel, - Writer: sc.LogWriter, - }); err != nil { - return nil, err - } - return conf, nil -} - -// logLevelToString converts a go-hclog level to a matching, uppercase string -// value. It's used to convert Vault Agent's hclog level to a string version -// suitable for use in Consul Template's runner configuration input. -func logLevelToStringPtr(level hclog.Level) *string { - // consul template's default level is WARN, but Vault Agent's default is INFO, - // so we use that for the Runner's default. - var levelStr string - - switch level { - case hclog.Trace: - levelStr = "TRACE" - case hclog.Debug: - levelStr = "DEBUG" - case hclog.Warn: - levelStr = "WARN" - case hclog.Error: - levelStr = "ERR" - default: - levelStr = "INFO" - } - return pointerutil.StringPtr(levelStr) -} diff --git a/command/agent/template/template_test.go b/command/agent/template/template_test.go index 14c4b3bdc242..0f6648c32572 100644 --- a/command/agent/template/template_test.go +++ b/command/agent/template/template_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package template @@ -11,12 +11,15 @@ import ( "net/http/httptest" "os" "strings" + sync "sync/atomic" "testing" "time" ctconfig "github.com/hashicorp/consul-template/config" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/internal/ctmanager" + "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" "github.com/hashicorp/vault/sdk/helper/logging" @@ -26,6 +29,14 @@ import ( "google.golang.org/grpc/test/bufconn" ) +func newRunnerConfig(s *ServerConfig, configs ctconfig.TemplateConfigs) (*ctconfig.Config, error) { + managerCfg := ctmanager.ManagerConfig{ + AgentConfig: s.AgentConfig, + } + cfg, err := ctmanager.NewConfig(managerCfg, configs) + return cfg, err +} + // TestNewServer is a simple test to make sure NewServer returns a Server and // channel func TestNewServer(t *testing.T) { @@ -78,7 +89,7 @@ func newAgentConfig(listeners []*configutil.Listener, enableCache, enablePersise } if enablePersisentCache { - agentConfig.Cache.Persist = &config.Persist{Type: "kubernetes"} + agentConfig.Cache.Persist = &agentproxyshared.PersistConfig{Type: "kubernetes"} } return agentConfig @@ -377,8 +388,9 @@ func TestServerRun(t *testing.T) { } errCh := make(chan error) + serverErrCh := make(chan error, 1) go func() { - errCh <- server.Run(ctx, templateTokenCh, templatesToRender) + errCh <- server.Run(ctx, templateTokenCh, templatesToRender, &sync.Bool{}, serverErrCh) }() // send a dummy value to trigger the internal Runner to query for secret @@ -482,8 +494,9 @@ func TestNewServerLogLevels(t *testing.T) { defer cancel() errCh := make(chan error) + serverErrCh := make(chan error, 1) go func() { - errCh <- server.Run(ctx, templateTokenCh, templatesToRender) + errCh <- server.Run(ctx, 
templateTokenCh, templatesToRender, &sync.Bool{}, serverErrCh) }() // send a dummy value to trigger auth so the server will exit diff --git a/command/agent/testing.go b/command/agent/testing.go index 1eaa0aa352eb..d8c405490f47 100644 --- a/command/agent/testing.go +++ b/command/agent/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -13,9 +13,9 @@ import ( "testing" "time" + "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v3/jwt" "github.com/hashicorp/vault/sdk/logical" - jose "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" ) const envVarRunAccTests = "VAULT_ACC" diff --git a/command/agent/token_file_end_to_end_test.go b/command/agent/token_file_end_to_end_test.go index f774fc09748e..22a2dcfd2c8f 100644 --- a/command/agent/token_file_end_to_end_test.go +++ b/command/agent/token_file_end_to_end_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package agent @@ -11,25 +11,18 @@ import ( "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" - token_file "github.com/hashicorp/vault/command/agent/auth/token-file" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault" ) func TestTokenFileEndToEnd(t *testing.T) { - var err error logger := logging.NewVaultLogger(log.Trace) - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) @@ -118,7 +111,7 @@ func TestTokenFileEndToEnd(t *testing.T) { Client: client, }) go func() { - errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}, ah.AuthInProgress) }() defer func() { select { diff --git a/command/agent_generate_config.go b/command/agent_generate_config.go new file mode 100644 index 000000000000..cc394490961d --- /dev/null +++ b/command/agent_generate_config.go @@ -0,0 +1,444 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "io" + "os" + paths "path" + "sort" + "strings" + "unicode" + + "github.com/hashicorp/cli" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/go-homedir" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AgentGenerateConfigCommand)(nil) + _ cli.CommandAutocomplete = (*AgentGenerateConfigCommand)(nil) +) + +type AgentGenerateConfigCommand struct { + *BaseCommand + + flagType string + flagPaths []string + flagExec string +} + +func (c *AgentGenerateConfigCommand) Synopsis() string { + return "Generate a Vault Agent configuration file." 
+} + +func (c *AgentGenerateConfigCommand) Help() string { + helpText := ` +Usage: vault agent generate-config [options] [path/to/config.hcl] + + Generates a simple Vault Agent configuration file from the given parameters. + + Currently, the only supported configuration type is 'env-template', which + helps you generate a configuration file with environment variable templates + for running Vault Agent in process supervisor mode. + + For every specified secret -path, the command will attempt to generate one or + more 'env_template' entries based on the JSON key(s) stored in the + specified secret. If the secret -path ends with '/*', the command will + attempt to recurse through the secrets tree rooted at the given path, + generating 'env_template' entries for each encountered secret. Currently, + only kv-v1 and kv-v2 paths are supported. + + The command specified in the '-exec' option will be used to generate an + 'exec' entry, which will tell Vault Agent which child process to run. + + In addition to env_template entries, the command generates an 'auto_auth' + section with the 'token_file' authentication method. While this method is very + convenient for local testing, it should NOT be used in production. Please + see https://developer.hashicorp.com/vault/docs/agent-and-proxy/autoauth/methods + for a list of production-ready auto_auth methods that you can use instead. + + By default, the file will be generated in the current directory as 'agent.hcl' + unless a path is specified as an argument. + + Generate a simple environment variable template configuration: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" + + Generate an environment variable template configuration for multiple secrets: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" \ + -path="secret/bar" \ + -path="secret/my-app/*" + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AgentGenerateConfigCommand) Flags() *FlagSets { + // Include client-modifying flags (-address, -namespace, etc.) + set := c.flagSet(FlagSetHTTP) + + // Common Options + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Usage: "Type of configuration file to generate; currently, only 'env-template' is supported.", + Completion: complete.PredictSet( + "env-template", + ), + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "path", + Target: &c.flagPaths, + Usage: "Path to a kv-v1 or kv-v2 secret (e.g. 
secret/data/foo, kv-v2/prefix/*); multiple secrets and tail '*' wildcards are allowed.", + Completion: c.PredictVaultFolders(), + }) + + f.StringVar(&StringVar{ + Name: "exec", + Target: &c.flagExec, + Default: "env", + Usage: "The command to execute in agent process supervisor mode.", + }) + + return set +} + +func (c *AgentGenerateConfigCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AgentGenerateConfigCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AgentGenerateConfigCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = flags.Args() + + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected at most 1, got %d)", len(args))) + return 1 + } + + if c.flagType == "" { + c.UI.Error(`Please specify a -type flag; currently only -type="env-template" is supported.`) + return 1 + } + + if c.flagType != "env-template" { + c.UI.Error(fmt.Sprintf(`%q is not a supported configuration type; currently only -type="env-template" is supported.`, c.flagType)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + config, err := generateConfiguration(context.Background(), client, c.flagExec, c.flagPaths) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %v", err)) + return 2 + } + + var configPath string + if len(args) == 1 { + configPath = args[0] + } else { + configPath = "agent.hcl" + } + + f, err := os.Create(configPath) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not create configuration file %q: %v", configPath, err)) + return 3 + } + defer func() { + if err := f.Close(); err != nil { + c.UI.Error(fmt.Sprintf("Could not close configuration file %q: %v", configPath, err)) + } + }() + + if _, err := config.WriteTo(f); err != nil { + c.UI.Error(fmt.Sprintf("Could not write to configuration file %q: %v", configPath, err)) + return 3 + } + + c.UI.Info(fmt.Sprintf("Successfully generated %q configuration file!", configPath)) + + c.UI.Warn("Warning: the generated file uses the 'token_file' authentication method, which is not suitable for production environments.") + + return 0 +} + +func generateConfiguration(ctx context.Context, client *api.Client, flagExec string, flagPaths []string) (io.WriterTo, error) { + var execCommand []string + if flagExec != "" { + execCommand = strings.Split(flagExec, " ") + } else { + execCommand = []string{"env"} + } + + tokenPath, err := homedir.Expand("~/.vault-token") + if err != nil { + return nil, fmt.Errorf("could not expand home directory: %w", err) + } + + templates, err := constructTemplates(ctx, client, flagPaths) + if err != nil { + return nil, fmt.Errorf("could not generate templates: %w", err) + } + + config := generatedConfig{ + AutoAuth: generatedConfigAutoAuth{ + Method: generatedConfigAutoAuthMethod{ + Type: "token_file", + Config: generatedConfigAutoAuthMethodConfig{ + TokenFilePath: tokenPath, + }, + }, + }, + TemplateConfig: generatedConfigTemplateConfig{ + StaticSecretRenderInterval: "5m", + ExitOnRetryFailure: true, + MaxConnectionsPerHost: 10, + }, + Vault: generatedConfigVault{ + Address: client.Address(), + }, + Exec: generatedConfigExec{ + Command: execCommand, + RestartOnSecretChanges: "always", + RestartStopSignal: "SIGTERM", + }, + EnvTemplates: templates, + } + + contents := hclwrite.NewEmptyFile() + + gohcl.EncodeIntoBody(&config, contents.Body()) + + return contents, nil +} + +func 
constructTemplates(ctx context.Context, client *api.Client, paths []string) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + for _, path := range paths { + path = sanitizePath(path) + + mountPath, v2, err := isKVv2(path, client) + if err != nil { + return nil, fmt.Errorf("could not validate secret path %q: %w", path, err) + } + + switch { + case strings.HasSuffix(path, "/*"): + // this path contains a tail wildcard, attempt to walk the tree + t, err := constructTemplatesFromTree(ctx, client, path[:len(path)-2], mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not traverse secret at %q: %w", path, err) + } + templates = append(templates, t...) + + case strings.Contains(path, "*"): + // don't allow any other wildcards + return nil, fmt.Errorf("the path %q cannot contain '*' wildcard characters except as the last element of the path", path) + + default: + // regular secret path + t, err := constructTemplatesFromSecret(ctx, client, path, mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not read secret at %q: %w", path, err) + } + templates = append(templates, t...) + } + } + + return templates, nil +} + +func constructTemplatesFromTree(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + metadataPath := strings.Replace( + path, + paths.Join(mountPath, "data"), + paths.Join(mountPath, "metadata"), + 1, + ) + if path != metadataPath { + path = metadataPath + } else { + path = addPrefixToKVPath(path, mountPath, "metadata", true) + } + } + + err := walkSecretsTree(ctx, client, path, func(child string, directory bool) error { + if directory { + return nil + } + + dataPath := strings.Replace( + child, + paths.Join(mountPath, "metadata"), + paths.Join(mountPath, "data"), + 1, + ) + + t, err := constructTemplatesFromSecret(ctx, client, dataPath, mountPath, v2) + if err != nil { + return err + } + templates = append(templates, t...) 
+ + return nil + }) + if err != nil { + return nil, err + } + + return templates, nil +} + +func constructTemplatesFromSecret(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + path = addPrefixToKVPath(path, mountPath, "data", true) + } + + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil { + return nil, fmt.Errorf("error querying: %w", err) + } + if resp == nil { + return nil, fmt.Errorf("secret not found") + } + + var data map[string]interface{} + if v2 { + internal, ok := resp.Data["data"] + if !ok { + return nil, fmt.Errorf("secret.Data not found") + } + data, ok = internal.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected secret data type %T at %q", internal, path) + } + } else { + data = resp.Data + } + + fields := make([]string, 0, len(data)) + + for field := range data { + fields = append(fields, field) + } + + // sort for a deterministic output + sort.Strings(fields) + + var dataContents string + if v2 { + dataContents = ".Data.data" + } else { + dataContents = ".Data" + } + + for _, field := range fields { + templates = append(templates, generatedConfigEnvTemplate{ + Name: constructDefaultEnvironmentKey(path, field), + Contents: fmt.Sprintf(`{{ with secret "%s" }}{{ %s.%s }}{{ end }}`, path, dataContents, field), + ErrorOnMissingKey: true, + }) + } + + return templates, nil +} + +func constructDefaultEnvironmentKey(path string, field string) string { + pathParts := strings.Split(path, "/") + pathPartsLast := pathParts[len(pathParts)-1] + + notLetterOrNumber := func(r rune) bool { + return !unicode.IsLetter(r) && !unicode.IsNumber(r) + } + + p1 := strings.FieldsFunc(pathPartsLast, notLetterOrNumber) + p2 := strings.FieldsFunc(field, notLetterOrNumber) + + keyParts := append(p1, p2...) + + return strings.ToUpper(strings.Join(keyParts, "_")) +} + +// Below, we are redefining a subset of the configuration-related structures +// defined under command/agent/config. Using these structures, we can tailor the +// output of the generated config; using the original structures would have +// produced an HCL document with many empty fields. The structures below +// should not be used for anything other than generation.
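+//
+// For reference, a config generated for a single KV-v1 secret (e.g. via
+// 'vault agent generate-config -type="env-template" -path="kv-v1/foo",
+// assuming the path flag defined above) looks roughly like the sketch below;
+// the exact token_file_path and vault address depend on the local environment
+// (see the expected output in the tests):
+//
+//	auto_auth {
+//	  method {
+//	    type = "token_file"
+//	    config {
+//	      token_file_path = "/home/user/.vault-token"
+//	    }
+//	  }
+//	}
+//
+//	template_config {
+//	  static_secret_render_interval = "5m"
+//	  exit_on_retry_failure         = true
+//	  max_connections_per_host      = 10
+//	}
+//
+//	vault {
+//	  address = "https://127.0.0.1:8200"
+//	}
+//
+//	env_template "FOO_PASSWORD" {
+//	  contents             = "{{ with secret \"kv-v1/foo\" }}{{ .Data.password }}{{ end }}"
+//	  error_on_missing_key = true
+//	}
+//
+//	exec {
+//	  command                   = ["env"]
+//	  restart_on_secret_changes = "always"
+//	  restart_stop_signal       = "SIGTERM"
+//	}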
+ +type generatedConfig struct { + AutoAuth generatedConfigAutoAuth `hcl:"auto_auth,block"` + TemplateConfig generatedConfigTemplateConfig `hcl:"template_config,block"` + Vault generatedConfigVault `hcl:"vault,block"` + EnvTemplates []generatedConfigEnvTemplate `hcl:"env_template,block"` + Exec generatedConfigExec `hcl:"exec,block"` +} + +type generatedConfigTemplateConfig struct { + StaticSecretRenderInterval string `hcl:"static_secret_render_interval"` + ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` + MaxConnectionsPerHost int `hcl:"max_connections_per_host"` +} + +type generatedConfigExec struct { + Command []string `hcl:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes"` + RestartStopSignal string `hcl:"restart_stop_signal"` +} + +type generatedConfigEnvTemplate struct { + Name string `hcl:"name,label"` + Contents string `hcl:"contents,attr"` + ErrorOnMissingKey bool `hcl:"error_on_missing_key"` +} + +type generatedConfigVault struct { + Address string `hcl:"address"` +} + +type generatedConfigAutoAuth struct { + Method generatedConfigAutoAuthMethod `hcl:"method,block"` +} + +type generatedConfigAutoAuthMethod struct { + Type string `hcl:"type"` + Config generatedConfigAutoAuthMethodConfig `hcl:"config,block"` +} + +type generatedConfigAutoAuthMethodConfig struct { + TokenFilePath string `hcl:"token_file_path"` +} diff --git a/command/agent_generate_config_test.go b/command/agent_generate_config_test.go new file mode 100644 index 000000000000..cbe341f8f363 --- /dev/null +++ b/command/agent_generate_config_test.go @@ -0,0 +1,276 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "bytes" + "context" + "reflect" + "regexp" + "testing" + "time" +) + +// TestConstructTemplates tests the constructTemplates helper function +func TestConstructTemplates(t *testing.T) { + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + client, closer := testVaultServerWithSecrets(ctx, t) + defer closer() + + cases := map[string]struct { + paths []string + expected []generatedConfigEnvTemplate + expectedError bool + }{ + "kv-v1-simple": { + paths: []string{"kv-v1/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v2-simple": { + paths: []string{"kv-v2/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v2-data-in-path": { + paths: []string{"kv-v2/data/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v1-nested": { + paths: []string{"kv-v1/app-1/*"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name:
"BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, + }, + expectedError: false, + }, + + "kv-v2-nested": { + paths: []string{"kv-v2/app-1/*"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, + }, + expectedError: false, + }, + + "kv-v1-multi-path": { + paths: []string{"kv-v1/foo", "kv-v1/app-1/bar"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + }, + expectedError: false, + }, + + "kv-v2-multi-path": { + paths: []string{"kv-v2/foo", "kv-v2/app-1/bar"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + }, + expectedError: false, + }, + + "kv-v1-path-not-found": { + paths: []string{"kv-v1/does/not/exist"}, + expected: nil, + expectedError: true, + }, + + "kv-v2-path-not-found": { + paths: []string{"kv-v2/does/not/exist"}, + expected: nil, + expectedError: true, + }, + + "kv-v1-early-wildcard": { + paths: []string{"kv-v1/*/foo"}, + expected: nil, + expectedError: true, + }, + + "kv-v2-early-wildcard": { + paths: []string{"kv-v2/*/foo"}, + expected: nil, + expectedError: true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + + t.Run(name, func(t *testing.T) { + templates, err := 
constructTemplates(ctx, client, tc.paths) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, templates) { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, templates) + } + } + }) + } +} + +// TestGenerateConfiguration tests the generateConfiguration helper function +func TestGenerateConfiguration(t *testing.T) { + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + client, closer := testVaultServerWithSecrets(ctx, t) + defer closer() + + cases := map[string]struct { + flagExec string + flagPaths []string + expected *regexp.Regexp + expectedError bool + }{ + "kv-v1-simple": { + flagExec: "./my-app arg1 arg2", + flagPaths: []string{"kv-v1/foo"}, + expected: regexp.MustCompile(` +auto_auth \{ + + method \{ + type = "token_file" + + config \{ + token_file_path = ".*/.vault-token" + } + } +} + +template_config \{ + static_secret_render_interval = "5m" + exit_on_retry_failure = true + max_connections_per_host = 10 +} + +vault \{ + address = "https://127.0.0.1:[0-9]{5}" +} + +env_template "FOO_PASSWORD" \{ + contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.password }}\{\{ end }}" + error_on_missing_key = true +} +env_template "FOO_USER" \{ + contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.user }}\{\{ end }}" + error_on_missing_key = true +} + +exec \{ + command = \["./my-app", "arg1", "arg2"\] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} +`), + expectedError: false, + }, + + "kv-v2-default-exec": { + flagExec: "", + flagPaths: []string{"kv-v2/foo"}, + expected: regexp.MustCompile(` +auto_auth \{ + + method \{ + type = "token_file" + + config \{ + token_file_path = ".*/.vault-token" + } + } +} + +template_config \{ + static_secret_render_interval = "5m" + exit_on_retry_failure = true + max_connections_per_host = 10 +} + +vault \{ + address = "https://127.0.0.1:[0-9]{5}" +} + +env_template "FOO_PASSWORD" \{ + contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.password }}\{\{ end }}" + error_on_missing_key = true +} +env_template "FOO_USER" \{ + contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.user }}\{\{ end }}" + error_on_missing_key = true +} + +exec \{ + command = \["env"\] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} +`), + expectedError: false, + }, + } + + for name, tc := range cases { + name, tc := name, tc + + t.Run(name, func(t *testing.T) { + var config bytes.Buffer + + c, err := generateConfiguration(ctx, client, tc.flagExec, tc.flagPaths) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + // render the config only after the error check: on failure, + // generateConfiguration returns a nil io.WriterTo + if _, err := c.WriteTo(&config); err != nil { + t.Fatal(err) + } + + if !tc.expected.MatchString(config.String()) { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected.String(), config.String()) + } + } + }) + } +} diff --git a/command/agent_test.go b/command/agent_test.go index 2be2d163b64d..17c74fc316cc 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -1,12 +1,15 @@ // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "bufio" + "context" "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" "net" @@ -19,6 +22,7 @@ import ( "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-hclog" vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" @@ -26,13 +30,14 @@ import ( credAppRole "github.com/hashicorp/vault/builtin/credential/approle" "github.com/hashicorp/vault/command/agent" agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/pointerutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -86,6 +91,7 @@ func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCo }, ShutdownCh: MakeShutdownCh(), SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), logger: logger, startedCh: make(chan struct{}, 5), reloadedCh: make(chan struct{}, 5), @@ -105,7 +111,6 @@ func TestAgent_ExitAfterAuth(t *testing.T) { func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "jwt": vaultjwt.Factory, }, @@ -158,23 +163,9 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(in) t.Logf("input: %s", in) - sink1f, err := os.CreateTemp("", "sink1.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink1 := sink1f.Name() - sink1f.Close() - os.Remove(sink1) - t.Logf("sink1: %s", sink1) + sinkFileName1 := makeTempFile(t, "sink-file", "") - sink2f, err := os.CreateTemp("", "sink2.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink2 := sink2f.Name() - sink2f.Close() - os.Remove(sink2) - t.Logf("sink2: %s", sink2) + sinkFileName2 := makeTempFile(t, "sink-file", "") conff, err := os.CreateTemp("", "conf.jwt.test.") if err != nil { @@ -224,7 +215,7 @@ auto_auth { } ` - config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) + config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sinkFileName1, sinkFileName2) if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { t.Fatal(err) } else { @@ -257,7 +248,7 @@ auto_auth { t.Fatal("timeout reached while waiting for agent to exit") } - sink1Bytes, err := os.ReadFile(sink1) + sink1Bytes, err := os.ReadFile(sinkFileName1) if err != nil { t.Fatal(err) } @@ -265,7 +256,7 @@ auto_auth { t.Fatal("got no output from sink 1") } - sink2Bytes, err := os.ReadFile(sink2) + sink2Bytes, err := os.ReadFile(sinkFileName2) if err != nil { t.Fatal(err) } @@ -309,7 +300,6 @@ func TestAgent_RequireRequestHeader(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -323,40 +313,7 @@ func TestAgent_RequireRequestHeader(t *testing.T) { serverClient := cluster.Cores[0].Client // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, 
serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "secret_id_num_uses": "10", - "secret_id_ttl": "1m", - "token_max_ttl": "1m", - "token_num_uses": "10", - "token_ttl": "1m" - }`) - request(t, serverClient, req, 204) - - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) + roleIDPath, secretIDPath := setupAppRole(t, serverClient) // Create a config file config := ` @@ -382,11 +339,13 @@ listener "tcp" { address = "%s" tls_disable = true require_request_header = false + disable_request_limiter = false } listener "tcp" { address = "%s" tls_disable = true require_request_header = true + disable_request_limiter = true } ` listenAddr1 := generateListenerAddress(t) @@ -401,21 +360,20 @@ listener "tcp" { listenAddr3, ) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent ui, cmd := testAgentCommand(t, logger) cmd.client = serverClient cmd.startedCh = make(chan struct{}) + var output string + var code int wg := &sync.WaitGroup{} wg.Add(1) go func() { - code := cmd.Run([]string{"-config", configPath}) + code = cmd.Run([]string{"-config", configPath}) if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + output = ui.ErrorWriter.String() + ui.OutputWriter.String() } wg.Done() }() @@ -423,13 +381,16 @@ listener "tcp" { select { case <-cmd.startedCh: case <-time.After(5 * time.Second): - t.Errorf("timeout") + t.Fatalf("timeout") } // defer agent shutdown defer func() { cmd.ShutdownCh <- struct{}{} wg.Wait() + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } }() //---------------------------------------------------- @@ -439,7 +400,7 @@ listener "tcp" { // Test against a listener configuration that omits // 'require_request_header', with the header missing from the request. agentClient := newApiClient("http://"+listenAddr1, false) - req = agentClient.NewRequest("GET", "/v1/sys/health") + req := agentClient.NewRequest("GET", "/v1/sys/health") request(t, agentClient, req, 200) // Test against a listener configuration that sets 'require_request_header' @@ -500,7 +461,6 @@ listener "tcp" { `, generateListenerAddress(t)) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent ui, cmd := testAgentCommand(t, logger) @@ -514,15 +474,124 @@ listener "tcp" { } } -// TestAgent_Template tests rendering templates -func TestAgent_Template_Basic(t *testing.T) { +// TestAgent_NoAutoAuthTokenIfNotConfigured tests that API proxy will not use the auto-auth token +// unless configured to. 
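+// The api_proxy stanza below explicitly sets use_auto_auth_token = false, so the
+// proxied client, which carries no token of its own, must be rejected by Vault:
+// the token that auto-auth wrote to the file sink must not be injected.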
+func TestAgent_NoAutoAuthTokenIfNotConfigured(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := minimal.NewTestSoloCluster(t, nil) + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Create token file + tokenFileName := makeTempFile(t, "token-file", serverClient.Token()) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sinkFileName) + + apiProxyConfig := ` +api_proxy { + use_auto_auth_token = false +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +%s +`, serverClient.Address(), apiProxyConfig, listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start proxy + ui, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // Wait for the sink to be populated. + // Realistically won't be this long, but keeping it long just in case, for CI. + time.Sleep(10 * time.Second) + + secret, err := proxyClient.Auth().Token().CreateOrphan(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if secret != nil || err == nil { + t.Fatal("expected this to fail, since without a token you should not be able to make a token") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestAgent_Template_UserAgent Validates that the User-Agent sent to Vault +// as part of Templating requests is correct. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. 
+func TestAgent_Template_UserAgent(t *testing.T) { //---------------------------------------------------- // Start the server and agent //---------------------------------------------------- logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -531,7 +600,16 @@ func TestAgent_Template_Basic(t *testing.T) { }, }, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentTemplatingString() + h.pathToCheck = "/v1/secret/data" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), }) cluster.Start() defer cluster.Cleanup() @@ -544,74 +622,7 @@ func TestAgent_Template_Basic(t *testing.T) { defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) os.Setenv(api.EnvVaultAddress, serverClient.Address()) - // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // give test-role permissions to read the kv secret - req = serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") - req.BodyBytes = []byte(`{ - "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" - }`) - request(t, serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "token_ttl": "5m", - "token_policies":"default,myapp-read", - "policies":"default,myapp-read" - }`) - request(t, serverClient, req, 204) - - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) - - // setup the kv secrets - req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") - req.BodyBytes = []byte(`{ - "options": {"version": "2"} - }`) - request(t, serverClient, req, 200) - - // populate a secret - req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "bar", - "password": "zap" - } - }`) - request(t, serverClient, req, 200) - - // populate another secret - req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "barstuff", - "password": "zap", - "cert": "something" - } - }`) - request(t, serverClient, req, 200) + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) // make a temp directory to hold renders. 
Each test will create a temp dir // inside this one @@ -620,56 +631,30 @@ func TestAgent_Template_Basic(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(tmpDirRoot) - - // start test cases here - testCases := map[string]struct { - templateCount int - exitAfterAuth bool - }{ - "one": { - templateCount: 1, - }, - "one_with_exit": { - templateCount: 1, - exitAfterAuth: true, - }, - "many": { - templateCount: 15, - }, - "many_with_exit": { - templateCount: 13, - exitAfterAuth: true, - }, + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, "TestAgent_Template_UserAgent") + if err != nil { + t.Fatal(err) } - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) - if err != nil { - t.Fatal(err) - } - - // make some template files - var templatePaths []string - for i := 0; i < tc.templateCount; i++ { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { - t.Fatal(err) - } - templatePaths = append(templatePaths, fileName) - } + // make some template files + var templatePaths []string + fileName := filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) - // build up the template config to be added to the Agent config.hcl file - var templateConfigStrings []string - for i, t := range templatePaths { - index := fmt.Sprintf("render_%d.json", i) - s := fmt.Sprintf(templateConfigString, t, tmpDir, index) - templateConfigStrings = append(templateConfigStrings, s) - } + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } - // Create a config file - config := ` + // Create a config file + config := ` vault { address = "%s" tls_skip_verify = true @@ -681,28 +666,223 @@ auto_auth { config = { role_id_file_path = "%s" secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false + remove_secret_id_file_after_reading = false } } } -%s - %s ` - // conditionally set the exit_after_auth flag - exitAfterAuth := "" - if tc.exitAfterAuth { - exitAfterAuth = "exit_after_auth = true" - } + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") - // flatten the template configs - templateConfig := strings.Join(templateConfigStrings, " ") + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) - config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + 
// We need to shut down the Agent command + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + verify := func(suffix string) { + t.Helper() + // We need to poll for a bit to give Agent time to render the + // templates. Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + var err error + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { + err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) + continue + } + + for i := range templatePaths { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(i)+suffix { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + continue + } + } + return + } + } + + verify("") + + fileName = filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)+"{}"), 0o600); err != nil { + t.Fatal(err) + } + + verify("{}") +} + +// TestAgent_Template tests rendering templates +func TestAgent_Template_Basic(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // start test cases here + testCases := map[string]struct { + templateCount int + exitAfterAuth bool + }{ + "one": { + templateCount: 1, + }, + "one_with_exit": { + templateCount: 1, + exitAfterAuth: true, + }, + "many": { + templateCount: 15, + }, + "many_with_exit": { + templateCount: 13, + exitAfterAuth: true, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) + if err != nil { + t.Fatal(err) + } + + // make some template files + var templatePaths []string + for i := 0; i < tc.templateCount; i++ { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) + if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) + } + + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s + +%s +` + + // conditionally set the exit_after_auth flag + exitAfterAuth := "" + if tc.exitAfterAuth { + exitAfterAuth = "exit_after_auth = true" + } + + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") + + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent ui, cmd := testAgentCommand(t, logger) @@ -740,7 +920,7 @@ auto_auth { verify := func(suffix string) { t.Helper() // We need to poll for a bit to give Agent time to render the - // templates. Without this this, the test will attempt to read + // templates. Without this, the test will attempt to read // the temp dir before Agent has had time to render and will // likely fail the test tick := time.Tick(1 * time.Second) @@ -795,58 +975,8 @@ auto_auth { } } -func testListFiles(t *testing.T, dir, extension string) int { +func setupAppRole(t *testing.T, serverClient *api.Client) (string, string) { t.Helper() - - files, err := os.ReadDir(dir) - if err != nil { - t.Fatal(err) - } - var count int - for _, f := range files { - if filepath.Ext(f.Name()) == extension { - count++ - } - } - - return count -} - -// TestAgent_Template_ExitCounter tests that Vault Agent correctly renders all -// templates before exiting when the configuration uses exit_after_auth. This is -// similar to TestAgent_Template_Basic, but differs by using a consistent number -// of secrets from multiple sources, where as the basic test could possibly -// generate a random number of secrets, but all using the same source. 
This test -// reproduces https://github.com/hashicorp/vault/issues/7883 -func TestAgent_Template_ExitCounter(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, serverClient.Address()) - // Enable the approle auth method req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") req.BodyBytes = []byte(`{ @@ -854,13 +984,6 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 204) - // give test-role permissions to read the kv secret - req = serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") - req.BodyBytes = []byte(`{ - "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" - }`) - request(t, serverClient, req, 204) - // Create a named role req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") req.BodyBytes = []byte(`{ @@ -885,8 +1008,19 @@ func TestAgent_Template_ExitCounter(t *testing.T) { // Write the RoleID and SecretID to temp files roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) + + return roleIDPath, secretIDPath +} + +func setupAppRoleAndKVMounts(t *testing.T, serverClient *api.Client) (string, string) { + roleIDPath, secretIDPath := setupAppRole(t, serverClient) + + // give test-role permissions to read the kv secret + req := serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") + req.BodyBytes = []byte(`{ + "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" + }`) + request(t, serverClient, req, 204) // setup the kv secrets req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") @@ -895,7 +1029,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) - // populate a secret + // Secret: myapp req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") req.BodyBytes = []byte(`{ "data": { @@ -905,7 +1039,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) - // populate another secret + // Secret: myapp2 req = serverClient.NewRequest("POST", "/v1/secret/data/myapp2") req.BodyBytes = []byte(`{ "data": { @@ -915,7 +1049,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) - // populate another, another secret + // Secret: otherapp req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") req.BodyBytes = []byte(`{ "data": { @@ -926,6 +1060,218 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) + return roleIDPath, secretIDPath +} + +// TestAgent_Template_VaultClientFromEnv tests that Vault Agent can read in its +// required `vault` client details from environment 
variables instead of config. +func TestAgent_Template_VaultClientFromEnv(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + vaultAddr := "https://" + cluster.Cores[0].Listeners[0].Address.String() + testCases := map[string]struct { + env map[string]string + }{ + "VAULT_ADDR and VAULT_CACERT": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACert: cluster.CACertPEMFile, + }, + }, + "VAULT_ADDR and VAULT_CACERT_BYTES": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACertBytes: string(cluster.CACertPEM), + }, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + for k, v := range tc.env { + t.Setenv(k, v) + } + tmpDir := t.TempDir() + + // Make a template. + templateFile := filepath.Join(tmpDir, "render.tmpl") + if err := os.WriteFile(templateFile, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + + // build up the template config to be added to the Agent config.hcl file + targetFile := filepath.Join(tmpDir, "render.json") + templateConfig := fmt.Sprintf(` +template { + source = "%s" + destination = "%s" +} + `, templateFile, targetFile) + + // Create a config file + config := ` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + + config = fmt.Sprintf(config, roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + // We need to poll for a bit to give Agent time to render the + // templates. 
Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + + contents, err := os.ReadFile(targetFile) + if err != nil { + // If the file simply doesn't exist, continue waiting for + // the template rendering to complete. + if os.IsNotExist(err) { + continue + } + t.Fatal(err) + } + + if string(contents) != templateRendered(0) { + t.Fatalf("expected=%q, got=%q", templateRendered(0), string(contents)) + } + + // Success! Break out of the retry loop. + break + } + }) + } +} + +func testListFiles(t *testing.T, dir, extension string) int { + t.Helper() + + files, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + var count int + for _, f := range files { + if filepath.Ext(f.Name()) == extension { + count++ + } + } + + return count +} + +// TestAgent_Template_ExitCounter tests that Vault Agent correctly renders all +// templates before exiting when the configuration uses exit_after_auth. This is +// similar to TestAgent_Template_Basic, but differs by using a consistent number +// of secrets from multiple sources, whereas the basic test could possibly +// generate a random number of secrets, but all using the same source. This test +// reproduces https://github.com/hashicorp/vault/issues/7883 +func TestAgent_Template_ExitCounter(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + // make a temp directory to hold renders. Each test will create a temp dir // inside this one tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") @@ -944,7 +1290,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { config := ` vault { address = "%s" - tls_skip_verify = true + tls_skip_verify = true } auto_auth { @@ -953,7 +1299,7 @@ auto_auth { config = { role_id_file_path = "%s" secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false + remove_secret_id_file_after_reading = false } } } @@ -979,14 +1325,13 @@ template { {{ end }} EOF destination = "%s/render-other.txt" - } +} exit_after_auth = true ` config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, tmpDir, tmpDir, tmpDir) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent ui, cmd := testAgentCommand(t, logger) @@ -1096,19 +1441,33 @@ func request(t *testing.T, client *api.Client, req *api.Request, expectedStatusC return body } -// makeTempFile creates a temp file and populates it.
+// makeTempFile creates a temp file with the specified name, populates it with the +// supplied contents and closes it. The path to the file is returned; the file is +// automatically removed when the test that created it finishes. func makeTempFile(t *testing.T, name, contents string) string { t.Helper() - f, err := os.CreateTemp("", name) + + f, err := os.Create(filepath.Join(t.TempDir(), name)) if err != nil { t.Fatal(err) } path := f.Name() - f.WriteString(contents) - f.Close() + + _, err = f.WriteString(contents) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + return path } +// populateTempFile creates a temp file with the specified name, populates it with the +// supplied contents and closes it. The file pointer is returned. func populateTempFile(t *testing.T, name, contents string) *os.File { t.Helper() @@ -1151,6 +1510,27 @@ func (h *handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { vaulthttp.Handler.Handler(h.props).ServeHTTP(resp, req) } +// userAgentHandler makes it easy to test the User-Agent header received +// by Vault +type userAgentHandler struct { + props *vault.HandlerProperties + failCount int + userAgentToCheckFor string + pathToCheck string + requestMethodToCheck string + t *testing.T +} + +func (h *userAgentHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.Method == h.requestMethodToCheck && strings.Contains(req.RequestURI, h.pathToCheck) { + userAgent := req.UserAgent() + if userAgent != h.userAgentToCheckFor { + h.t.Fatalf("User-Agent string not as expected. Expected to find %s, got %s", h.userAgentToCheckFor, userAgent) + } + } + vaulthttp.Handler.Handler(h.props).ServeHTTP(w, req) +} + // TestAgent_Template_Retry verifies that the template server retries requests // based on retry configuration. func TestAgent_Template_Retry(t *testing.T) { @@ -1161,7 +1541,6 @@ func TestAgent_Template_Retry(t *testing.T) { var h handler cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -1189,8 +1568,7 @@ defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) os.Unsetenv(api.EnvVaultAddress) - methodConf, cleanup := prepAgentApproleKV(t, serverClient) - defer cleanup() + methodConf := prepAgentApproleKV(t, serverClient) err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ Options: map[string]string{ @@ -1293,7 +1671,6 @@ template_config { `, methodConf, serverClient.Address(), retryConf, templateConfig) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent _, cmd := testAgentCommand(t, logger) @@ -1373,7 +1750,7 @@ template_config { // such that the resulting token will have global permissions across /kv // and /secret mounts. Returns the auto_auth config stanza to setup an Agent // to connect using approle.
-func prepAgentApproleKV(t *testing.T, client *api.Client) (string, func()) { +func prepAgentApproleKV(t *testing.T, client *api.Client) string { t.Helper() policyAutoAuthAppRole := ` @@ -1428,17 +1805,308 @@ auto_auth { config = { role_id_file_path = "%s" secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false + remove_secret_id_file_after_reading = false } } } `, roleIDFile, secretIDFile) - cleanup := func() { - _ = os.Remove(roleIDFile) - _ = os.Remove(secretIDFile) + return config +} + +// TestAgent_AutoAuth_UserAgent tests that the User-Agent sent +// to Vault by Vault Agent is correct when performing Auto-Auth. +// Uses the custom handler userAgentHandler (defined above) so +// that Vault validates the User-Agent on requests sent by Agent. +func TestAgent_AutoAuth_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentAutoAuthString() + h.requestMethodToCheck = "PUT" + h.pathToCheck = "auth/approle/login" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + roleIDPath, secretIDPath := setupAppRole(t, serverClient) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = true +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly obtained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + agentClient, err := api.NewClient(conf) + if err != nil { + t.Fatalf("err: %s", err) + } + + agentClient.SetToken("") + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // Wait for the token to be sent to sinks and be available to be used + time.Sleep(5 * time.Second) + + req := agentClient.NewRequest("GET", "/v1/auth/token/lookup-self") + request(t, agentClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestAgent_APIProxyWithoutCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Agent is correct
using the API proxy without +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. +func TestAgent_APIProxyWithoutCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.AddHeader("User-Agent", userAgentForProxiedClient) + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = agentClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestAgent_APIProxyWithCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Agent is correct using the API proxy with +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. 
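+// The only difference from TestAgent_APIProxyWithoutCache_UserAgent above is the
+// empty cache {} stanza, which sends the proxied request through the caching
+// layer; the expected User-Agent header is the same.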
+func TestAgent_APIProxyWithCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + cacheConfig := ` +cache { +}` + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), listenConfig, cacheConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.AddHeader("User-Agent", userAgentForProxiedClient) + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = agentClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) } - return config, cleanup + + close(cmd.ShutdownCh) + wg.Wait() } func TestAgent_Cache_DynamicSecret(t *testing.T) { @@ -1477,7 +2145,6 @@ vault { %s `, serverClient.Address(), cacheConfig, listenConfig) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent _, cmd := testAgentCommand(t, logger) @@ -1554,7 +2221,6 @@ func TestAgent_ApiProxy_Retry(t *testing.T) { var h handler cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -1649,7 +2315,6 @@ vault { %s `, serverClient.Address(), retryConf, cacheConfig, listenConfig) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent _, cmd := testAgentCommand(t, logger) @@ -1706,7 +2371,6 @@ func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - // Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -1729,8 +2393,7 @@ func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) os.Unsetenv(api.EnvVaultAddress) - autoAuthConfig, cleanup := prepAgentApproleKV(t, serverClient) - defer cleanup() + autoAuthConfig := prepAgentApproleKV(t, serverClient) err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ Options: map[string]string{ @@ -1914,7 
+2577,6 @@ vault { `, autoAuthConfig, serverClient.Address(), listenConfig, templateConfig, template) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent ui, cmd := testAgentCommand(t, logger) @@ -2009,11 +2671,7 @@ func TestAgent_Metrics(t *testing.T) { //---------------------------------------------------- // Start a vault server - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - }, + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) @@ -2033,21 +2691,20 @@ listener "tcp" { } `, listenAddr) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent - ui, cmd := testAgentCommand(t, logger) + ui, cmd := testAgentCommand(t, logging.NewVaultLogger(hclog.Trace)) cmd.client = serverClient cmd.startedCh = make(chan struct{}) + var output string + var code int wg := &sync.WaitGroup{} wg.Add(1) go func() { - code := cmd.Run([]string{"-config", configPath}) + code = cmd.Run([]string{"-config", configPath}) if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + output = ui.ErrorWriter.String() + ui.OutputWriter.String() } wg.Done() }() @@ -2062,6 +2719,9 @@ listener "tcp" { defer func() { cmd.ShutdownCh <- struct{}{} wg.Wait() + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } }() conf := api.DefaultConfig() @@ -2090,24 +2750,7 @@ func TestAgent_Quit(t *testing.T) { //---------------------------------------------------- // Start the server and agent //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Error) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - NumCores: 1, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) + cluster := minimal.NewTestSoloCluster(t, nil) serverClient := cluster.Cores[0].Client // Unset the environment variable so that agent picks up the right test @@ -2143,10 +2786,9 @@ cache {} `, serverClient.Address(), listenAddr, listenAddr2) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent - _, cmd := testAgentCommand(t, logger) + _, cmd := testAgentCommand(t, nil) cmd.startedCh = make(chan struct{}) wg := &sync.WaitGroup{} @@ -2258,6 +2900,36 @@ func TestAgent_LogFile_Config(t *testing.T) { assert.Equal(t, 1048576, cfg.LogRotateBytes) } +// TestAgent_EnvVar_Overrides tests that environment variables are properly +// parsed and override defaults. 
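+//
+// For illustration only (hypothetical operator usage, not exercised verbatim by
+// this test): a config file that sets tls_skip_verify = false can be overridden
+// at startup with
+//
+//	VAULT_SKIP_VERIFY=true vault agent -config=agent-config.hcl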
+func TestAgent_EnvVar_Overrides(t *testing.T) { + configFile := populateTempFile(t, "agent-config.hcl", BasicHclConfig) + + cfg, err := agentConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + assert.Equal(t, false, cfg.Vault.TLSSkipVerify) + + t.Setenv("VAULT_SKIP_VERIFY", "true") + // Parse the cli flags (but we pass in an empty slice) + cmd := &AgentCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + err = f.Parse([]string{}) + if err != nil { + t.Fatal(err) + } + + cmd.applyConfigOverrides(f, cfg) + assert.Equal(t, true, cfg.Vault.TLSSkipVerify) + + t.Setenv("VAULT_SKIP_VERIFY", "false") + + cmd.applyConfigOverrides(f, cfg) + assert.Equal(t, false, cfg.Vault.TLSSkipVerify) +} + func TestAgent_Config_NewLogger_Default(t *testing.T) { cmd := &AgentCommand{BaseCommand: &BaseCommand{}} cmd.config = agentConfig.NewConfig() @@ -2355,12 +3027,13 @@ func TestAgent_Config_ReloadTls(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) ui, cmd := testAgentCommand(t, logger) + var output string + var code int wg.Add(1) args := []string{"-config", configFile.Name()} go func() { - if code := cmd.Run(args); code != 0 { - output := ui.ErrorWriter.String() + ui.OutputWriter.String() - t.Errorf("got a non-zero exit status: %s", output) + if code = cmd.Run(args); code != 0 { + output = ui.ErrorWriter.String() + ui.OutputWriter.String() } wg.Done() }() @@ -2427,8 +3100,11 @@ func TestAgent_Config_ReloadTls(t *testing.T) { // Shut down cmd.ShutdownCh <- struct{}{} - wg.Wait() + + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } } // TestAgent_NonTLSListener_SIGHUP tests giving a SIGHUP signal to a listener @@ -2465,19 +3141,19 @@ vault { %s `, serverClient.Address(), listenConfig) configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) // Start the agent ui, cmd := testAgentCommand(t, logger) cmd.startedCh = make(chan struct{}) + var output string + var code int wg := &sync.WaitGroup{} wg.Add(1) go func() { - if code := cmd.Run([]string{"-config", configPath}); code != 0 { - output := ui.ErrorWriter.String() + ui.OutputWriter.String() - t.Errorf("got a non-zero exit status: %s", output) + if code = cmd.Run([]string{"-config", configPath}); code != 0 { + output = ui.ErrorWriter.String() + ui.OutputWriter.String() } wg.Done() }() @@ -2498,6 +3174,399 @@ vault { close(cmd.ShutdownCh) wg.Wait() + + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } +} + +// TestAgent_TokenRenewal tests that LifeTimeWatcher does not make +// many renewal attempts if the token's policy does not allow for it to renew +// itself. Prior to a bug fix in the PR that added this test, this would have resulted +// in hundreds of token renewal requests with no backoff. 
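+//
+// Background for the numbers checked below: the default policy grants
+// "auth/token/renew-self", but the test policy deliberately omits it, so every
+// renewal attempt fails with a 403 and only the LifetimeWatcher's backoff keeps
+// the retry volume low.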
+func TestAgent_TokenRenewal(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + auditLogFileName := makeTempFile(t, "audit-log", "") + err := serverClient.Sys().EnableAuditWithOptions("file-audit-for-TestAgent_TokenRenewal", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFileName, + }, + }) + require.NoError(t, err) + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + policyName := "less-than-default" + // Has a subset of the default policy's permissions + // Specifically removing renew-self. + err = serverClient.Sys().PutPolicy(policyName, ` +path "auth/token/lookup-self" { + capabilities = ["read"] +} + +# Allow tokens to revoke themselves +path "auth/token/revoke-self" { + capabilities = ["update"] +} + +# Allow a token to look up its own capabilities on a path +path "sys/capabilities-self" { + capabilities = ["update"] +} +`) + require.NoError(t, err) + + renewable := true + // Make the token renewable but give it no permissions + // (e.g. the permission to renew itself) + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{policyName}, + TTL: "10s", + Renewable: &renewable, + NoDefaultPolicy: true, + } + + secret, err := serverClient.Auth().Token().CreateOrphan(tokenCreateRequest) + require.NoError(t, err) + lowPermissionToken := secret.Auth.ClientToken + + tokenFileName := makeTempFile(t, "token-file", lowPermissionToken) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sinkFileName) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} + +log_level = "trace" + +%s +`, serverClient.Address(), autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + t.Errorf("stdout: %s", ui.OutputWriter.String()) + t.Errorf("stderr: %s", ui.ErrorWriter.String()) + } + + // Sleep, to allow the renewal/auth process to work and ensure that it doesn't + // go crazy with renewals. + time.Sleep(30 * time.Second) + + fileBytes, err := os.ReadFile(auditLogFileName) + require.NoError(t, err) + stringAudit := string(fileBytes) + + // This is a bit of an imperfect way to test things, but we want to make sure + // that a token like this doesn't keep triggering retries. + // Due to the fact this is an auto-auth specific thing, unit tests for the + // LifetimeWatcher wouldn't be sufficient here. + // Prior to the fix made in the same PR this test was added, it would trigger many, many + // retries (hundreds to thousands in less than a minute). + // We really want to make sure that doesn't happen. 
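+	// Counting "auth/token/renew-self" entries in the file audit device's log
+	// is a rough but serviceable proxy for how many renewal requests Vault saw.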
+ numberOfRenewSelves := strings.Count(stringAudit, "auth/token/renew-self") + // We actually expect ~6, but I added some buffer for CI weirdness. It can also vary + // due to the grace added/removed from the sleep in LifetimeWatcher too. + if numberOfRenewSelves > 10 { + t.Fatalf("did too many renews -- Vault received %d renew-self requests", numberOfRenewSelves) + } +} + +// TestAgent_Logging_ConsulTemplate attempts to ensure two things about Vault Agent logs: +// 1. When -log-format command line arg is set to JSON, it is honored as the output format +// for messages generated from within the consul-template library. +// 2. When -log-file command line arg is supplied, a file receives all log messages +// generated by the consul-template library (they don't just go to stdout/stderr). +// Should prevent a regression of: https://github.com/hashicorp/vault/issues/21109 +func TestAgent_Logging_ConsulTemplate(t *testing.T) { + const ( + runnerLogMessage = "(runner) creating new runner (dry: false, once: false)" + ) + + // Configure a Vault server so Agent can successfully communicate and render its templates + cluster := minimal.NewTestSoloCluster(t, nil) + apiClient := cluster.Cores[0].Client + t.Setenv(api.EnvVaultAddress, apiClient.Address()) + tempDir := t.TempDir() + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, apiClient) + + // Create relevant configs for Vault Agent (config, template config) + templateSrc := filepath.Join(tempDir, "render_1.tmpl") + err := os.WriteFile(templateSrc, []byte(templateContents(1)), 0o600) + require.NoError(t, err) + templateConfig := fmt.Sprintf(templateConfigString, templateSrc, tempDir, "render_1.json") + + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + config = fmt.Sprintf(config, apiClient.Address(), roleIDPath, secretIDPath, templateConfig) + configFileName := filepath.Join(tempDir, "config.hcl") + err = os.WriteFile(configFileName, []byte(config), 0o600) + require.NoError(t, err) + _, cmd := testAgentCommand(t, nil) + logFilePath := filepath.Join(tempDir, "agent") + + // Start Vault Agent + go func() { + code := cmd.Run([]string{"-config", configFileName, "-log-format", "json", "-log-file", logFilePath, "-log-level", "trace"}) + require.Equalf(t, 0, code, "Vault Agent returned a non-zero exit code") + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatal("timeout starting agent") + } + + // Give Vault Agent some time to render our template. + time.Sleep(3 * time.Second) + + // This flag will be used to capture whether we saw a consul-template log + // message in the log file (the presence of the log file is also part of the test) + found := false + + // Vault Agent file logs will match agent-{timestamp}.log based on the + // cmd line argument we supplied, e.g. 
agent-1701258869573205000.log + m, err := filepath.Glob(logFilePath + "*") + require.NoError(t, err) + require.Truef(t, len(m) > 0, "no files were found") + + for _, p := range m { + f, err := os.Open(p) + require.NoError(t, err) + + fs := bufio.NewScanner(f) + fs.Split(bufio.ScanLines) + + for fs.Scan() { + s := fs.Text() + entry := make(map[string]string) + err := json.Unmarshal([]byte(s), &entry) + require.NoError(t, err) + v, ok := entry["@message"] + if !ok { + continue + } + if v == runnerLogMessage { + found = true + break + } + } + } + + require.Truef(t, found, "unable to find consul-template partial message %q in logs", runnerLogMessage) +} + +// TestAgent_DeleteAfterVersion_Rendering validates that Vault Agent +// can correctly render a secret with delete_after_version set. +func TestAgent_DeleteAfterVersion_Rendering(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Set up KVv2 + err := serverClient.Sys().Mount("kv-v2", &api.MountInput{ + Type: "kv-v2", + }) + require.NoError(t, err) + + // Configure the mount to set delete_version_after on all of its secrets + _, err = serverClient.Logical().Write("kv-v2/config", map[string]interface{}{ + "delete_version_after": "1h", + }) + require.NoError(t, err) + + // Set up the secret (which will have delete_version_after set to 1h) + data, err := serverClient.KVv2("kv-v2").Put(context.Background(), "foo", map[string]interface{}{ + "bar": "baz", + }) + require.NoError(t, err) + + // Ensure Deletion Time was correctly set + require.NotZero(t, data.VersionMetadata.DeletionTime) + require.True(t, data.VersionMetadata.DeletionTime.After(time.Now())) + require.NotNil(t, data.VersionMetadata.CreatedTime) + require.True(t, data.VersionMetadata.DeletionTime.After(data.VersionMetadata.CreatedTime)) + + // Set the environment variable so that Agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + // create temp dir for this test run + tmpDir, err := os.MkdirTemp("", "TestAgent_DeleteAfterVersion_Rendering") + require.NoError(t, err) + + tokenFileName := makeTempFile(t, "token-file", serverClient.Token()) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } +}`, tokenFileName) + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +%s + +%s +` + + fileName := "secret.txt" + templateConfig := fmt.Sprintf(` +template { + destination = "%s/%s" + contents = "{{ with secret \"kv-v2/foo\" }}{{ .Data.data.bar }}{{ end }}" +} +`, tmpDir, fileName) + + config = fmt.Sprintf(config, serverClient.Address(), autoAuthConfig, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s",
ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // We need to shut down the Agent command + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + filePath := fmt.Sprintf("%s/%s", tmpDir, fileName) + + waitForFiles := func() error { + tick := time.Tick(100 * time.Millisecond) + timeout := time.After(10 * time.Second) + // We need to wait for the templates to render... + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + + _, err := os.Stat(filePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } + return err + } + + return nil + } + } + + err = waitForFiles() + require.NoError(t, err) + + // Ensure the file has the expected contents + fileData, err := os.ReadFile(filePath) + require.NoError(t, err) + if string(fileData) != "baz" { + t.Fatalf("Unexpected file contents. Expected 'baz', got %s", string(fileData)) + } } // Get a randomly assigned port and then free it again before returning it. @@ -2507,9 +3576,7 @@ func generateListenerAddress(t *testing.T) string { t.Helper() ln1, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) listenAddr := ln1.Addr().String() ln1.Close() return listenAddr diff --git a/command/agent/auth/alicloud/alicloud.go b/command/agentproxyshared/auth/alicloud/alicloud.go similarity index 97% rename from command/agent/auth/alicloud/alicloud.go rename to command/agentproxyshared/auth/alicloud/alicloud.go index 494dedb261a3..d700bc02fa2e 100644 --- a/command/agent/auth/alicloud/alicloud.go +++ b/command/agentproxyshared/auth/alicloud/alicloud.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package alicloud @@ -17,7 +17,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault-plugin-auth-alicloud/tools" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) /* @@ -63,10 +63,10 @@ func NewAliCloudAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { // Check for an optional custom frequency at which we should poll for creds. credCheckFreqSec := defaultCredCheckFreqSeconds if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok { - if credFreq, ok := checkFreqRaw.(int); ok { + if credFreq, ok := checkFreqRaw.(int); ok && credFreq > 0 { credCheckFreqSec = credFreq } else { - return nil, errors.New("could not convert 'credential_poll_interval' config value to int") + return nil, errors.New("could not convert 'credential_poll_interval' config value to positive int") } } diff --git a/command/agent/auth/approle/approle.go b/command/agentproxyshared/auth/approle/approle.go similarity index 98% rename from command/agent/auth/approle/approle.go rename to command/agentproxyshared/auth/approle/approle.go index 889e7bd413a4..ef32d493cd0a 100644 --- a/command/agent/auth/approle/approle.go +++ b/command/agentproxyshared/auth/approle/approle.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package approle @@ -15,7 +15,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) type approleMethod struct { diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go new file mode 100644 index 000000000000..91e189ed2604 --- /dev/null +++ b/command/agentproxyshared/auth/auth.go @@ -0,0 +1,624 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "net/http" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/backoff" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// AuthMethod is the interface that auto-auth methods implement for the agent/proxy +// to use. +type AuthMethod interface { + // Authenticate returns a mount path, header, request body, and error. + // The header may be nil if no special header is needed. + Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) + NewCreds() chan struct{} + CredSuccess() + Shutdown() +} + +// AuthMethodWithClient is an extended interface that can return an API client +// for use during the authentication call. +type AuthMethodWithClient interface { + AuthMethod + AuthClient(client *api.Client) (*api.Client, error) +} + +type AuthConfig struct { + Logger hclog.Logger + MountPath string + WrapTTL time.Duration + Config map[string]interface{} +} + +// AuthHandler is responsible for keeping a token alive and renewed and passing +// new tokens to the sink server +type AuthHandler struct { + OutputCh chan string + TemplateTokenCh chan string + ExecTokenCh chan string + AuthInProgress *atomic.Bool + InvalidToken chan error + token string + userAgent string + metricsSignifier string + logger hclog.Logger + client *api.Client + random *rand.Rand + wrapTTL time.Duration + maxBackoff time.Duration + minBackoff time.Duration + enableReauthOnNewCredentials bool + enableTemplateTokenCh bool + enableExecTokenCh bool + exitOnError bool +} + +type AuthHandlerConfig struct { + Logger hclog.Logger + Client *api.Client + WrapTTL time.Duration + MaxBackoff time.Duration + MinBackoff time.Duration + Token string + // UserAgent is the HTTP UserAgent header auto-auth will use when + // communicating with Vault. 
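+	// Callers such as Vault Agent and Vault Proxy are expected to build this
+	// value with the helper/useragent package rather than hard-coding it.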
+ UserAgent string + // MetricsSignifier is the first argument we will give to + // metrics.IncrCounter, signifying what the name of the application is + MetricsSignifier string + EnableReauthOnNewCredentials bool + EnableTemplateTokenCh bool + EnableExecTokenCh bool + ExitOnError bool +} + +func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { + ah := &AuthHandler{ + // This is buffered so that if we try to output after the sink server + // has been shut down, during agent/proxy shutdown, we won't block + OutputCh: make(chan string, 1), + TemplateTokenCh: make(chan string, 1), + ExecTokenCh: make(chan string, 1), + InvalidToken: make(chan error, 1), + AuthInProgress: &atomic.Bool{}, + token: conf.Token, + logger: conf.Logger, + client: conf.Client, + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + wrapTTL: conf.WrapTTL, + minBackoff: conf.MinBackoff, + maxBackoff: conf.MaxBackoff, + enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, + enableTemplateTokenCh: conf.EnableTemplateTokenCh, + enableExecTokenCh: conf.EnableExecTokenCh, + exitOnError: conf.ExitOnError, + userAgent: conf.UserAgent, + metricsSignifier: conf.MetricsSignifier, + } + + return ah +} + +func backoffSleep(ctx context.Context, backoff *autoAuthBackoff) bool { + nextSleep, err := backoff.backoff.Next() + if err != nil { + return false + } + select { + case <-time.After(nextSleep): + case <-ctx.Done(): + } + return true +} + +func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { + if am == nil { + return errors.New("auth handler: nil auth method") + } + + if ah.minBackoff <= 0 { + ah.minBackoff = consts.DefaultMinBackoff + } + if ah.maxBackoff <= 0 { + ah.maxBackoff = consts.DefaultMaxBackoff + } + if ah.minBackoff > ah.maxBackoff { + return errors.New("auth handler: min_backoff cannot be greater than max_backoff") + } + backoffCfg := newAutoAuthBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError) + + ah.logger.Info("starting auth handler") + + // Set unauthenticated when starting up + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + defer func() { + am.Shutdown() + close(ah.OutputCh) + close(ah.TemplateTokenCh) + close(ah.ExecTokenCh) + ah.logger.Info("auth handler stopped") + // Set unauthenticated when shutting down + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + }() + + credCh := am.NewCreds() + if !ah.enableReauthOnNewCredentials { + realCredCh := credCh + credCh = nil + if realCredCh != nil { + go func() { + for { + select { + case <-ctx.Done(): + return + case <-realCredCh: + } + } + }() + } + } + if credCh == nil { + credCh = make(chan struct{}) + } + + if ah.client != nil { + headers := ah.client.Headers() + if headers == nil { + headers = make(http.Header) + } + headers.Set("User-Agent", ah.userAgent) + ah.client.SetHeaders(headers) + } + + var watcher *api.LifetimeWatcher + first := true + + for { + // We will unset this bool in sink.go once the token has been written to + // any sinks, or the sink server stops + ah.AuthInProgress.Store(true) + // Drain any Invalid Token errors from the channel that could have been sent before AuthInProgress + // was set to true + select { + case <-ah.InvalidToken: + ah.logger.Info("renewal already in progress, draining extra auth renewal triggers") + default: + // Do nothing, keep going + } + select { + case <-ctx.Done(): + return nil + + default: + } + + var clientToUse *api.Client + var err error + var path string + var data map[string]interface{} + var header http.Header + 
var isTokenFileMethod bool + + switch am.(type) { + case AuthMethodWithClient: + clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client) + if err != nil { + ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + + return err + } + default: + clientToUse = ah.client + } + + // Disable retry on the client to ensure our backoffSleep function is + // the only source of retry/backoff. + clientToUse.SetMaxRetries(0) + + var secret *api.Secret = new(api.Secret) + if first && ah.token != "" { + ah.logger.Debug("using preloaded token") + + first = false + ah.logger.Debug("lookup-self with preloaded token") + clientToUse.SetToken(ah.token) + + secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) + if err != nil { + ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + secret.Auth = &api.SecretAuth{ + ClientToken: secret.Data["id"].(string), + LeaseDuration: int(duration), + Renewable: secret.Data["renewable"].(bool), + } + } else { + ah.logger.Info("authenticating") + + path, header, data, err = am.Authenticate(ctx, ah.client) + if err != nil { + ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + } + + if ah.wrapTTL > 0 { + wrapClient, err := clientToUse.CloneWithHeaders() + if err != nil { + ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + wrapClient.SetWrappingLookupFunc(func(string, string) string { + return ah.wrapTTL.String() + }) + clientToUse = wrapClient + } + for key, values := range header { + for _, value := range values { + clientToUse.AddHeader(key, value) + } + } + + // This should only happen if there's no preloaded token (regular auto-auth login) + // or if a preloaded token has expired and is now switching to auto-auth. + if secret.Auth == nil { + isTokenFileMethod = path == "auth/token/lookup-self" + if isTokenFileMethod { + token, _ := data["token"].(string) + // The error is called clientErr as to not shadow the other err above it.
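+				// CloneWithHeaders copies the configured headers (including the
+				// User-Agent set above) so we can set the file-sourced token on
+				// the clone without mutating the shared client.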
+ lookupSelfClient, clientErr := clientToUse.CloneWithHeaders() + if clientErr != nil { + ah.logger.Error("failed to clone client to perform token lookup") + return clientErr + } + lookupSelfClient.SetToken(token) + secret, err = lookupSelfClient.Auth().Token().LookupSelf() + } else { + secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) + } + + // Check errors/sanity + if err != nil { + ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + } + + var leaseDuration int + + switch { + case ah.wrapTTL > 0: + if secret.WrapInfo == nil { + ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + if secret.WrapInfo.Token == "" { + ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) + if err != nil { + ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") + ah.OutputCh <- string(wrappedResp) + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- string(wrappedResp) + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- string(wrappedResp) + } + + am.CredSuccess() + backoffCfg.backoff.Reset() + + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered") + continue + + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + continue + } + + default: + // We handle the token_file method specially, as it's the only + // auth method that isn't actually authenticating, i.e. the secret + // returned does not have an Auth struct attached + isTokenFileMethod := path == "auth/token/lookup-self" + if isTokenFileMethod { + // We still check the response of the request to ensure the token is valid + // i.e. 
if the token is invalid, we will fail in the authentication step + if secret == nil || secret.Data == nil { + ah.logger.Error("token file validation failed, token may be invalid", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + token, ok := secret.Data["id"].(string) + if !ok || token == "" { + ah.logger.Error("token file validation returned empty client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + leaseDuration = int(duration) + renewable, _ := secret.Data["renewable"].(bool) + secret.Auth = &api.SecretAuth{ + ClientToken: token, + LeaseDuration: int(duration), + Renewable: renewable, + } + ah.logger.Info("authentication successful, sending token to sinks") + + ah.OutputCh <- token + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- token + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- token + } + + tokenType := secret.Data["type"].(string) + if tokenType == "batch" { + ah.logger.Info("note that this token type is batch, and batch tokens cannot be renewed", "ttl", leaseDuration) + } + } else { + if secret == nil || secret.Auth == nil { + ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + if secret.Auth.ClientToken == "" { + ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + leaseDuration = secret.LeaseDuration + ah.logger.Info("authentication successful, sending token to sinks") + ah.OutputCh <- secret.Auth.ClientToken + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- secret.Auth.ClientToken + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- secret.Auth.ClientToken + } + } + + am.CredSuccess() + backoffCfg.backoff.Reset() + } + + if watcher != nil { + watcher.Stop() + } + + watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: secret, + }) + if err != nil { + ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 1) + // We don't want to trigger the renewal process for the root token + if isRootToken(leaseDuration, isTokenFileMethod, secret) { + ah.logger.Info("not 
starting token renewal process, as token is root token") + } else { + ah.logger.Info("starting renewal process") + go watcher.Renew() + } + + LifetimeWatcherLoop: + for { + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered, stopping lifetime watcher") + watcher.Stop() + break LifetimeWatcherLoop + + case err := <-watcher.DoneCh(): + ah.logger.Info("lifetime watcher done channel triggered, re-authenticating") + if err != nil { + ah.logger.Error("error renewing token", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + // Set unauthenticated when authentication fails + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 0) + + // Add some exponential backoff so that if auth is successful + // but the watcher errors, we won't go into an immediate + // aggressive retry loop. + // This might be quite a small sleep, since if we have a successful + // auth, we reset the backoff. Still, some backoff is important, and + // ensuring we follow the normal flow is important: + // auth -> try to renew + if !backoffSleep(ctx, backoffCfg) { + // We're at max retries. Return an error. + return fmt.Errorf("exceeded max retries failing to renew auth token") + } + } + + // If the lease duration is 0, wait a second before re-authenticating + // so that we don't go into a loop, as the LifetimeWatcher will immediately + // return for tokens like this. + if leaseDuration == 0 { + time.Sleep(1 * time.Second) + } + + break LifetimeWatcherLoop + + case <-watcher.RenewCh(): + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) + // Set authenticated when authentication succeeds + metrics.SetGauge([]string{ah.metricsSignifier, "authenticated"}, 1) + ah.logger.Info("renewed auth token") + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + break LifetimeWatcherLoop + case <-ah.InvalidToken: + ah.logger.Info("invalid token found, re-authenticating") + break LifetimeWatcherLoop + } + } + } +} + +// isRootToken checks if the secret in the argument is the root token. +// This is determinable without leaseDuration and isTokenFileMethod, +// but those make it easier to rule out other tokens cheaply. +func isRootToken(leaseDuration int, isTokenFileMethod bool, secret *api.Secret) bool { + // The leaseDuration and isTokenFileMethod checks are cheap ways to rule out + // other tokens, but the nil check must come before any use of secret. + if secret != nil && leaseDuration == 0 && isTokenFileMethod && !secret.Renewable { + policies, err := secret.TokenPolicies() + if err == nil { + if len(policies) == 1 && policies[0] == "root" { + return true + } + } + } + return false +} + +// autoAuthBackoff tracks exponential backoff state. +type autoAuthBackoff struct { + backoff *backoff.Backoff +} + +func newAutoAuthBackoff(min, max time.Duration, exitErr bool) *autoAuthBackoff { + if max <= 0 { + max = consts.DefaultMaxBackoff + } + + if min <= 0 { + min = consts.DefaultMinBackoff + } + + retries := math.MaxInt + if exitErr { + retries = 0 + } + + b := backoff.NewBackoff(retries, min, max) + + return &autoAuthBackoff{ + backoff: b, + } +} + +func (b autoAuthBackoff) String() string { + return b.backoff.Current().Truncate(10 * time.Millisecond).String() +} diff --git a/command/agentproxyshared/auth/auth_test.go b/command/agentproxyshared/auth/auth_test.go new file mode 100644 index 000000000000..b866b317dcd5 --- /dev/null +++ b/command/agentproxyshared/auth/auth_test.go @@ -0,0 +1,200 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package auth + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/userpass" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +type userpassTestMethod struct{} + +func newUserpassTestMethod(t *testing.T, client *api.Client) AuthMethod { + err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + Type: "userpass", + Config: api.AuthConfigInput{ + DefaultLeaseTTL: "1s", + MaxLeaseTTL: "3s", + }, + }) + if err != nil { + t.Fatal(err) + } + + return &userpassTestMethod{} +} + +func (u *userpassTestMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + _, err := client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ + "password": "bar", + }) + if err != nil { + return "", nil, nil, err + } + return "auth/userpass/login/foo", nil, map[string]interface{}{ + "password": "bar", + }, nil +} + +func (u *userpassTestMethod) NewCreds() chan struct{} { + return nil +} + +func (u *userpassTestMethod) CredSuccess() { +} + +func (u *userpassTestMethod) Shutdown() { +} + +func TestAuthHandler(t *testing.T) { + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + ctx, cancelFunc := context.WithCancel(context.Background()) + + ah := NewAuthHandler(&AuthHandlerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace).Named("auth.handler"), + Client: client, + }) + + am := newUserpassTestMethod(t, client) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + + // Consume tokens so we don't block + stopTime := time.Now().Add(5 * time.Second) + closed := false +consumption: + for { + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + break consumption + case <-ah.OutputCh: + case <-ah.TemplateTokenCh: + // Nothing + case <-time.After(stopTime.Sub(time.Now())): + if !closed { + cancelFunc() + closed = true + } + } + } +} + +func TestAgentBackoff(t *testing.T) { + max := 1024 * time.Second + backoff := newAutoAuthBackoff(consts.DefaultMinBackoff, max, false) + + // Test initial value + if backoff.backoff.Current() > consts.DefaultMinBackoff || backoff.backoff.Current() < consts.DefaultMinBackoff*3/4 { + t.Fatalf("expected 1s initial backoff, got: %v", backoff.backoff.Current()) + } + + // Test that backoffSleep values are in expected range (75-100% of 2*previous) + next, _ := backoff.backoff.Next() + for i := 0; i < 9; i++ { + old := next + next, _ = backoff.backoff.Next() + + expMax := 2 * old + expMin := 3 * expMax / 4 + + if next < expMin || next > expMax { + t.Fatalf("expected backoffSleep in range %v to %v, got: %v", expMin, expMax, backoff) + } + } + + // Test that backoffSleep is capped + for i := 0; i < 100; i++ { + _, _ = backoff.backoff.Next() + if backoff.backoff.Current() > max { + t.Fatalf("backoff exceeded max of 1024s: %v", backoff) + } + } + + // Test reset + backoff.backoff.Reset() + if
backoff.backoff.Current() > consts.DefaultMinBackoff || backoff.backoff.Current() < consts.DefaultMinBackoff*3/4 { + t.Fatalf("expected 1s backoff after reset, got: %v", backoff.backoff.Current()) + } +} + +func TestAgentMinBackoffCustom(t *testing.T) { + type test struct { + minBackoff time.Duration + want time.Duration + } + + tests := []test{ + {minBackoff: 0 * time.Second, want: 1 * time.Second}, + {minBackoff: 1 * time.Second, want: 1 * time.Second}, + {minBackoff: 5 * time.Second, want: 5 * time.Second}, + {minBackoff: 10 * time.Second, want: 10 * time.Second}, + } + + for _, test := range tests { + max := 1024 * time.Second + backoff := newAutoAuthBackoff(test.minBackoff, max, false) + + // Test initial value + if backoff.backoff.Current() > test.want || backoff.backoff.Current() < test.want*3/4 { + t.Fatalf("expected %v initial backoffSleep, got: %v", test.want, backoff.backoff.Current()) + } + + // Test that backoffSleep values are in expected range (75-100% of 2*previous) + next, _ := backoff.backoff.Next() + for i := 0; i < 5; i++ { + old := next + next, _ = backoff.backoff.Next() + + expMax := 2 * old + expMin := 3 * expMax / 4 + + if next < expMin || next > expMax { + t.Fatalf("expected backoffSleep in range %v to %v, got: %v", expMin, expMax, backoff) + } + } + + // Test that backoffSleep is capped + for i := 0; i < 100; i++ { + next, _ = backoff.backoff.Next() + if next > max { + t.Fatalf("backoffSleep exceeded max of 1024s: %v", backoff) + } + } + + // Test reset + backoff.backoff.Reset() + if backoff.backoff.Current() > test.want || backoff.backoff.Current() < test.want*3/4 { + t.Fatalf("expected %v backoffSleep after reset, got: %v", test.want, backoff.backoff.Current()) + } + } +} diff --git a/command/agent/auth/aws/aws.go b/command/agentproxyshared/auth/aws/aws.go similarity index 97% rename from command/agent/auth/aws/aws.go rename to command/agentproxyshared/auth/aws/aws.go index b45192d8b33b..13ab7e483389 100644 --- a/command/agent/auth/aws/aws.go +++ b/command/agentproxyshared/auth/aws/aws.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aws @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/awsutil" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) const ( @@ -158,10 +158,10 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { // Check for an optional custom frequency at which we should poll for creds. credentialPollIntervalSec := defaultCredentialPollInterval if credentialPollIntervalRaw, ok := conf.Config["credential_poll_interval"]; ok { - if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok { + if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok && credentialPollInterval > 0 { credentialPollIntervalSec = credentialPollInterval } else { - return nil, errors.New("could not convert 'credential_poll_interval' into int") + return nil, errors.New("could not convert 'credential_poll_interval' into positive int") } } diff --git a/command/agentproxyshared/auth/azure/azure.go b/command/agentproxyshared/auth/azure/azure.go new file mode 100644 index 000000000000..00e8fea6e297 --- /dev/null +++ b/command/agentproxyshared/auth/azure/azure.go @@ -0,0 +1,279 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package azure + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + + policy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + az "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + cleanhttp "github.com/hashicorp/go-cleanhttp" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + instanceEndpoint = "http://169.254.169.254/metadata/instance" + identityEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // minimum version 2018-02-01 needed for identity metadata + // regional availability: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + apiVersion = "2018-02-01" +) + +type azureMethod struct { + logger hclog.Logger + mountPath string + + authenticateFromEnvironment bool + role string + scope string + resource string + objectID string + clientID string +} + +func NewAzureAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &azureMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + a.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + resourceRaw, ok := conf.Config["resource"] + if !ok { + return nil, errors.New("missing 'resource' value") + } + a.resource, ok = resourceRaw.(string) + if !ok { + return nil, errors.New("could not convert 'resource' config value to string") + } + + objectIDRaw, ok := conf.Config["object_id"] + if ok { + a.objectID, ok = objectIDRaw.(string) + if !ok { + return nil, errors.New("could not convert 'object_id' config value to string") + } + } + + clientIDRaw, ok := conf.Config["client_id"] + if ok { + a.clientID, ok = clientIDRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_id' config value to string") + } + } + + scopeRaw, ok := conf.Config["scope"] + if ok { + a.scope, ok = scopeRaw.(string) + if !ok { + return nil, errors.New("could not convert 'scope' config value to string") + } + } + if a.scope == "" { + a.scope = fmt.Sprintf("%s/.default", a.resource) + } + + authenticateFromEnvironmentRaw, ok := conf.Config["authenticate_from_environment"] + if ok { + authenticateFromEnvironment, err := parseutil.ParseBool(authenticateFromEnvironmentRaw) + if err != nil { + return nil, fmt.Errorf("could not convert 'authenticate_from_environment' config value to bool: %w", err) + } + a.authenticateFromEnvironment = authenticateFromEnvironment + } + + switch { + case a.role == "": + return nil, errors.New("'role' value is empty") + case a.resource == "": + return nil, errors.New("'resource' value is empty") + case a.objectID != "" && a.clientID != "": + return nil, errors.New("only one of 'object_id' or 'client_id' may be provided") + } + + return a, nil +} + +func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (retPath string, header http.Header, retData map[string]interface{}, retErr error) { + a.logger.Trace("beginning authentication") + + // Fetch instance data + var instance struct { + Compute struct { + Name string + 
ResourceGroupName string + SubscriptionID string + VMScaleSetName string + ResourceID string + } + } + + body, err := getInstanceMetadataInfo(ctx) + if err != nil { + retErr = err + return + } + + err = jsonutil.DecodeJSON(body, &instance) + if err != nil { + retErr = fmt.Errorf("error parsing instance metadata response: %w", err) + return + } + + token := "" + if a.authenticateFromEnvironment { + token, err = getAzureTokenFromEnvironment(ctx, a.scope) + if err != nil { + retErr = err + return + } + } else { + token, err = getTokenFromIdentityEndpoint(ctx, a.resource, a.objectID, a.clientID) + if err != nil { + retErr = err + return + } + } + + // Attempt login + data := map[string]interface{}{ + "role": a.role, + "vm_name": instance.Compute.Name, + "vmss_name": instance.Compute.VMScaleSetName, + "resource_group_name": instance.Compute.ResourceGroupName, + "subscription_id": instance.Compute.SubscriptionID, + "jwt": token, + } + + return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil +} + +func (a *azureMethod) NewCreds() chan struct{} { + return nil +} + +func (a *azureMethod) CredSuccess() { +} + +func (a *azureMethod) Shutdown() { +} + +// getAzureTokenFromEnvironment is Azure's preferred way for authentication, and takes values +// from environment variables to form a credential. +// It uses a DefaultAzureCredential: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-defaultazurecredential +// Environment variables are taken into account in the following order: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-environment-variables +func getAzureTokenFromEnvironment(ctx context.Context, scope string) (string, error) { + cred, err := az.NewDefaultAzureCredential(nil) + if err != nil { + return "", err + } + + tokenOpts := policy.TokenRequestOptions{Scopes: []string{scope}} + tk, err := cred.GetToken(ctx, tokenOpts) + if err != nil { + return "", err + } + return tk.Token, nil +} + +// getInstanceMetadataInfo calls the Azure Instance Metadata endpoint to get +// information about the Azure environment it's running in. +func getInstanceMetadataInfo(ctx context.Context) ([]byte, error) { + return getMetadataInfo(ctx, instanceEndpoint, "", "", "") +} + +// getTokenFromIdentityEndpoint is kept for backwards compatibility purposes. Using the +// newer APIs and the Azure SDK should be preferred over this mechanism. +func getTokenFromIdentityEndpoint(ctx context.Context, resource, objectID, clientID string) (string, error) { + var identity struct { + AccessToken string `json:"access_token"` + } + + body, err := getMetadataInfo(ctx, identityEndpoint, resource, objectID, clientID) + if err != nil { + return "", err + } + + err = jsonutil.DecodeJSON(body, &identity) + if err != nil { + return "", fmt.Errorf("error parsing identity metadata response: %w", err) + } + + return identity.AccessToken, nil +} + +// getMetadataInfo calls the Azure metadata endpoint with the given parameters. +// An empty resource, objectID and clientID will return metadata information.
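+// For example, a caller fetching a token through the identity endpoint might
+// use (values are illustrative only):
+//
+//	body, err := getMetadataInfo(ctx, identityEndpoint,
+//		"https://management.azure.com/", "", "")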
+func getMetadataInfo(ctx context.Context, endpoint, resource, objectID, clientID string) ([]byte, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + q := req.URL.Query() + q.Add("api-version", apiVersion) + if resource != "" { + q.Add("resource", resource) + } + if objectID != "" { + q.Add("object_id", objectID) + } + if clientID != "" { + q.Add("client_id", clientID) + } + req.URL.RawQuery = q.Encode() + req.Header.Set("Metadata", "true") + req.Header.Set("User-Agent", useragent.String()) + req = req.WithContext(ctx) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("error fetching metadata from %s: %w", endpoint, err) + } + + if resp == nil { + return nil, fmt.Errorf("empty response fetching metadata from %s", endpoint) + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading metadata from %s: %w", endpoint, err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error response in metadata from %s: %s", endpoint, body) + } + + return body, nil +} diff --git a/command/agentproxyshared/auth/azure/azure_test.go b/command/agentproxyshared/auth/azure/azure_test.go new file mode 100644 index 000000000000..0c9b9985d02f --- /dev/null +++ b/command/agentproxyshared/auth/azure/azure_test.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package azure + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +// TestAzureAuthMethod tests that NewAzureAuthMethod succeeds +// with valid config. +func TestAzureAuthMethod(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "resource": "test", + "client_id": "test", + "role": "test", + "scope": "test", + "authenticate_from_environment": true, + }, + } + + _, err := NewAzureAuthMethod(config) + if err != nil { + t.Fatal(err) + } +} + +// TestAzureAuthMethod_StringAuthFromEnvironment tests that NewAzureAuthMethod succeeds +// with valid config, where authenticate_from_environment is a string literal. +func TestAzureAuthMethod_StringAuthFromEnvironment(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "resource": "test", + "client_id": "test", + "role": "test", + "scope": "test", + "authenticate_from_environment": "true", + }, + } + + _, err := NewAzureAuthMethod(config) + if err != nil { + t.Fatal(err) + } +} + +// TestAzureAuthMethod_BadConfig tests that NewAzureAuthMethod fails with +// an invalid config. +func TestAzureAuthMethod_BadConfig(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "bad_value": "abc", + }, + } + + _, err := NewAzureAuthMethod(config) + if err == nil { + t.Fatal("Expected error, got none.") + } +} + +// TestAzureAuthMethod_BadAuthFromEnvironment tests that NewAzureAuthMethod fails +// with otherwise valid config, but where authenticate_from_environment is +// an invalid string literal. 
+func TestAzureAuthMethod_BadAuthFromEnvironment(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "resource": "test", + "client_id": "test", + "role": "test", + "scope": "test", + "authenticate_from_environment": "bad_value", + }, + } + + _, err := NewAzureAuthMethod(config) + if err == nil { + t.Fatal("Expected error, got none.") + } +} diff --git a/command/agentproxyshared/auth/cert/cert.go b/command/agentproxyshared/auth/cert/cert.go new file mode 100644 index 000000000000..fabe9a6365fb --- /dev/null +++ b/command/agentproxyshared/auth/cert/cert.go @@ -0,0 +1,158 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cert + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +type certMethod struct { + logger hclog.Logger + mountPath string + name string + + caCert string + clientCert string + clientKey string + reload bool + + // Client is the cached client to use if cert info was provided. + client *api.Client +} + +var _ auth.AuthMethodWithClient = &certMethod{} + +func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + + // Not concerned if the conf.Config is empty as the 'name' + // parameter is optional when using TLS Auth + + c := &certMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + if conf.Config != nil { + nameRaw, ok := conf.Config["name"] + if !ok { + nameRaw = "" + } + c.name, ok = nameRaw.(string) + if !ok { + return nil, errors.New("could not convert 'name' config value to string") + } + + caCertRaw, ok := conf.Config["ca_cert"] + if ok { + c.caCert, ok = caCertRaw.(string) + if !ok { + return nil, errors.New("could not convert 'ca_cert' config value to string") + } + } + + clientCertRaw, ok := conf.Config["client_cert"] + if ok { + c.clientCert, ok = clientCertRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_cert' config value to string") + } + } + + clientKeyRaw, ok := conf.Config["client_key"] + if ok { + c.clientKey, ok = clientKeyRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_key' config value to string") + } + } + + reload, ok := conf.Config["reload"] + if ok { + c.reload, ok = reload.(bool) + if !ok { + return nil, errors.New("could not convert 'reload' config value to bool") + } + } + } + + return c, nil +} + +func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + c.logger.Trace("beginning authentication") + + authMap := map[string]interface{}{} + + if c.name != "" { + authMap["name"] = c.name + } + + return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil +} + +func (c *certMethod) NewCreds() chan struct{} { + return nil +} + +func (c *certMethod) CredSuccess() {} + +func (c *certMethod) Shutdown() {} + +// AuthClient uses the existing client's address and returns a new client with +// the auto-auth method's certificate information if that's provided in its +// config map.
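+// With reload = true a fresh client is built on every call, so certificates
+// renewed on disk are picked up; otherwise the first derived client is cached
+// and reused.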
+func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { + c.logger.Trace("deriving auth client to use") + + clientToAuth := client + + if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { + // Return cached client if present + if c.client != nil && !c.reload { + return c.client, nil + } + + config := api.DefaultConfig() + if config.Error != nil { + return nil, config.Error + } + config.Address = client.Address() + + t := &api.TLSConfig{ + CACert: c.caCert, + ClientCert: c.clientCert, + ClientKey: c.clientKey, + } + + // Setup TLS config + if err := config.ConfigureTLS(t); err != nil { + return nil, err + } + + var err error + clientToAuth, err = api.NewClient(config) + if err != nil { + return nil, err + } + if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" { + clientToAuth.SetNamespace(ns) + } + + // Cache the client for future use + c.client = clientToAuth + } + + return clientToAuth, nil +} diff --git a/command/agentproxyshared/auth/cert/cert_test.go b/command/agentproxyshared/auth/cert/cert_test.go new file mode 100644 index 000000000000..6a7e4f779e9c --- /dev/null +++ b/command/agentproxyshared/auth/cert/cert_test.go @@ -0,0 +1,191 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cert + +import ( + "context" + "os" + "path" + "reflect" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +func TestCertAuthMethod_Authenticate(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "foo", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + loginPath, _, authMap, err := method.Authenticate(context.Background(), client) + if err != nil { + t.Fatal(err) + } + + expectedLoginPath := path.Join(config.MountPath, "/login") + if loginPath != expectedLoginPath { + t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath) + } + + expectedAuthMap := map[string]interface{}{ + "name": config.Config["name"], + } + if !reflect.DeepEqual(authMap, expectedAuthMap) { + t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) + } +} + +func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "without-certs", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client != clientToUse { + t.Fatal("error: expected AuthClient to return back original client") + } +} + +func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { + clientCert, err := os.Open("./test-fixtures/keys/cert.pem") + if err != nil { + t.Fatal(err) + } + defer clientCert.Close() + + clientKey, err := os.Open("./test-fixtures/keys/key.pem") + if err != nil { + t.Fatal(err) + } + defer clientKey.Close() + + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "with-certs", + "client_cert": clientCert.Name(), + "client_key": 
clientKey.Name(), + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back the cached client + cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if cachedClient != clientToUse { + t.Fatal("expected client from AuthClient to return back a cached client") + } +} + +func TestCertAuthMethod_AuthClient_withCertsReload(t *testing.T) { + clientCert, err := os.Open("./test-fixtures/keys/cert.pem") + if err != nil { + t.Fatal(err) + } + + defer clientCert.Close() + + clientKey, err := os.Open("./test-fixtures/keys/key.pem") + if err != nil { + t.Fatal(err) + } + + defer clientKey.Close() + + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "with-certs-reloaded", + "client_cert": clientCert.Name(), + "client_key": clientKey.Name(), + "reload": true, + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back a new client with reloaded certificates + reloadedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if reloadedClient == clientToUse { + t.Fatal("expected client from AuthClient to return back a new client") + } +} diff --git a/command/agent/auth/cert/test-fixtures/keys/cert.pem b/command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/keys/cert.pem rename to command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem diff --git a/command/agent/auth/cert/test-fixtures/keys/key.pem b/command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/keys/key.pem rename to command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem diff --git a/command/agent/auth/cert/test-fixtures/keys/pkioutput b/command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput similarity index 100% rename from command/agent/auth/cert/test-fixtures/keys/pkioutput rename to command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput diff --git a/command/agent/auth/cert/test-fixtures/root/pkioutput b/command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/pkioutput rename to command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput diff --git a/command/agent/auth/cert/test-fixtures/root/root.crl b/command/agentproxyshared/auth/cert/test-fixtures/root/root.crl similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/root.crl rename to command/agentproxyshared/auth/cert/test-fixtures/root/root.crl diff --git a/command/agent/auth/cert/test-fixtures/root/rootcacert.pem 
b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/rootcacert.pem rename to command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem diff --git a/command/agent/auth/cert/test-fixtures/root/rootcakey.pem b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/rootcakey.pem rename to command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem diff --git a/command/agent/auth/cf/cf.go b/command/agentproxyshared/auth/cf/cf.go similarity index 95% rename from command/agent/auth/cf/cf.go rename to command/agentproxyshared/auth/cf/cf.go index 90ae802e3994..27396be20af7 100644 --- a/command/agent/auth/cf/cf.go +++ b/command/agentproxyshared/auth/cf/cf.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cf @@ -15,7 +15,7 @@ import ( cf "github.com/hashicorp/vault-plugin-auth-cf" "github.com/hashicorp/vault-plugin-auth-cf/signatures" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) type cfMethod struct { diff --git a/command/agent/auth/gcp/gcp.go b/command/agentproxyshared/auth/gcp/gcp.go similarity index 97% rename from command/agent/auth/gcp/gcp.go rename to command/agentproxyshared/auth/gcp/gcp.go index 145589b78198..aaaf21d16af0 100644 --- a/command/agent/auth/gcp/gcp.go +++ b/command/agentproxyshared/auth/gcp/gcp.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package gcp @@ -8,7 +8,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -17,7 +17,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "golang.org/x/oauth2" "google.golang.org/api/iamcredentials/v1" ) @@ -151,7 +151,7 @@ func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPa return } defer resp.Body.Close() - jwtBytes, err := ioutil.ReadAll(resp.Body) + jwtBytes, err := io.ReadAll(resp.Body) if err != nil { retErr = fmt.Errorf("error reading instance token response body: %w", err) return diff --git a/command/agent/auth/jwt/jwt.go b/command/agentproxyshared/auth/jwt/jwt.go similarity index 98% rename from command/agent/auth/jwt/jwt.go rename to command/agentproxyshared/auth/jwt/jwt.go index 50ffe105574d..fce03ad7d76d 100644 --- a/command/agent/auth/jwt/jwt.go +++ b/command/agentproxyshared/auth/jwt/jwt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package jwt @@ -17,7 +17,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/hashicorp/vault/sdk/helper/parseutil" ) diff --git a/command/agent/auth/jwt/jwt_test.go b/command/agentproxyshared/auth/jwt/jwt_test.go similarity index 98% rename from command/agent/auth/jwt/jwt_test.go rename to command/agentproxyshared/auth/jwt/jwt_test.go index 2fa21f0ab128..62fbc24e8110 100644 --- a/command/agent/auth/jwt/jwt_test.go +++ b/command/agentproxyshared/auth/jwt/jwt_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package jwt @@ -12,7 +12,7 @@ import ( "testing" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) func TestIngressToken(t *testing.T) { diff --git a/command/agent/auth/kerberos/integtest/integrationtest.sh b/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh similarity index 99% rename from command/agent/auth/kerberos/integtest/integrationtest.sh rename to command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh index b3d9edf65db9..6b8a6925de95 100755 --- a/command/agent/auth/kerberos/integtest/integrationtest.sh +++ b/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # Instructions # This integration test is for the Vault Kerberos agent. diff --git a/command/agent/auth/kerberos/kerberos.go b/command/agentproxyshared/auth/kerberos/kerberos.go similarity index 96% rename from command/agent/auth/kerberos/kerberos.go rename to command/agentproxyshared/auth/kerberos/kerberos.go index 31ab6c67dba0..566fa222a47f 100644 --- a/command/agent/auth/kerberos/kerberos.go +++ b/command/agentproxyshared/auth/kerberos/kerberos.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kerberos @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" kerberos "github.com/hashicorp/vault-plugin-auth-kerberos" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/jcmturner/gokrb5/v8/spnego" ) diff --git a/command/agent/auth/kerberos/kerberos_test.go b/command/agentproxyshared/auth/kerberos/kerberos_test.go similarity index 96% rename from command/agent/auth/kerberos/kerberos_test.go rename to command/agentproxyshared/auth/kerberos/kerberos_test.go index 25ccccdfdc4b..819cb7dff4ec 100644 --- a/command/agent/auth/kerberos/kerberos_test.go +++ b/command/agentproxyshared/auth/kerberos/kerberos_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kerberos @@ -7,7 +7,7 @@ import ( "testing" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) func TestNewKerberosAuthMethod(t *testing.T) { diff --git a/command/agent/auth/kubernetes/kubernetes.go b/command/agentproxyshared/auth/kubernetes/kubernetes.go similarity index 95% rename from command/agent/auth/kubernetes/kubernetes.go rename to command/agentproxyshared/auth/kubernetes/kubernetes.go index 80bacd3a608b..6f16a2b9b414 100644 --- a/command/agent/auth/kubernetes/kubernetes.go +++ b/command/agentproxyshared/auth/kubernetes/kubernetes.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kubernetes @@ -8,14 +8,13 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "os" "strings" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) const ( @@ -123,7 +122,7 @@ func (k *kubernetesMethod) readJWT() (string, error) { } defer data.Close() - contentBytes, err := ioutil.ReadAll(data) + contentBytes, err := io.ReadAll(data) if err != nil { return "", err } diff --git a/command/agent/auth/kubernetes/kubernetes_test.go b/command/agentproxyshared/auth/kubernetes/kubernetes_test.go similarity index 97% rename from command/agent/auth/kubernetes/kubernetes_test.go rename to command/agentproxyshared/auth/kubernetes/kubernetes_test.go index d95c71bf7381..93b348c7f521 100644 --- a/command/agent/auth/kubernetes/kubernetes_test.go +++ b/command/agentproxyshared/auth/kubernetes/kubernetes_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kubernetes @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/errwrap" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agentproxyshared/auth/ldap/ldap.go b/command/agentproxyshared/auth/ldap/ldap.go new file mode 100644 index 000000000000..d654f21898e4 --- /dev/null +++ b/command/agentproxyshared/auth/ldap/ldap.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package ldap
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "io/fs"
+    "net/http"
+    "os"
+    "path/filepath"
+    "sync"
+    "sync/atomic"
+    "time"
+
+    hclog "github.com/hashicorp/go-hclog"
+    "github.com/hashicorp/vault/api"
+    "github.com/hashicorp/vault/command/agentproxyshared/auth"
+    "github.com/hashicorp/vault/sdk/helper/parseutil"
+)
+
+type ldapMethod struct {
+    logger    hclog.Logger
+    mountPath string
+
+    username                      string
+    passwordFilePath              string
+    removePasswordAfterReading    bool
+    removePasswordFollowsSymlinks bool
+    credsFound                    chan struct{}
+    watchCh                       chan string
+    stopCh                        chan struct{}
+    doneCh                        chan struct{}
+    credSuccessGate               chan struct{}
+    ticker                        *time.Ticker
+    once                          *sync.Once
+    latestPass                    *atomic.Value
+}
+
+// NewLdapAuthMethod reads the user configuration and returns a configured
+// LDAP AuthMethod.
+func NewLdapAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
+    if conf == nil {
+        return nil, errors.New("empty config")
+    }
+    if conf.Config == nil {
+        return nil, errors.New("empty config data")
+    }
+
+    k := &ldapMethod{
+        logger:                     conf.Logger,
+        mountPath:                  conf.MountPath,
+        removePasswordAfterReading: true,
+        credsFound:                 make(chan struct{}),
+        watchCh:                    make(chan string),
+        stopCh:                     make(chan struct{}),
+        doneCh:                     make(chan struct{}),
+        credSuccessGate:            make(chan struct{}),
+        once:                       new(sync.Once),
+        latestPass:                 new(atomic.Value),
+    }
+
+    k.latestPass.Store("")
+    usernameRaw, ok := conf.Config["username"]
+    if !ok {
+        return nil, errors.New("missing 'username' value")
+    }
+    k.username, ok = usernameRaw.(string)
+    if !ok {
+        return nil, errors.New("could not convert 'username' config value to string")
+    }
+
+    passFilePathRaw, ok := conf.Config["password_file_path"]
+    if !ok {
+        return nil, errors.New("missing 'password_file_path' value")
+    }
+    k.passwordFilePath, ok = passFilePathRaw.(string)
+    if !ok {
+        return nil, errors.New("could not convert 'password_file_path' config value to string")
+    }
+    if removePassAfterReadingRaw, ok := conf.Config["remove_password_after_reading"]; ok {
+        removePassAfterReading, err := parseutil.ParseBool(removePassAfterReadingRaw)
+        if err != nil {
+            return nil, fmt.Errorf("error parsing 'remove_password_after_reading' value: %w", err)
+        }
+        k.removePasswordAfterReading = removePassAfterReading
+    }
+
+    if removePassFollowsSymlinksRaw, ok := conf.Config["remove_password_follows_symlinks"]; ok {
+        removePassFollowsSymlinks, err := parseutil.ParseBool(removePassFollowsSymlinksRaw)
+        if err != nil {
+            return nil, fmt.Errorf("error parsing 'remove_password_follows_symlinks' value: %w", err)
+        }
+        k.removePasswordFollowsSymlinks = removePassFollowsSymlinks
+    }
+    switch {
+    case k.passwordFilePath == "":
+        return nil, errors.New("'password_file_path' value is empty")
+    case k.username == "":
+        return nil, errors.New("'username' value is empty")
+    }
+
+    // Default readPeriod
+    readPeriod := 1 * time.Minute
+
+    if passReadPeriodRaw, ok := conf.Config["password_read_period"]; ok {
+        passReadPeriod, err := parseutil.ParseDurationSecond(passReadPeriodRaw)
+        if err != nil || passReadPeriod <= 0 {
+            return nil, fmt.Errorf("error parsing 'password_read_period' into a positive duration: %w", err)
+        }
+        readPeriod = passReadPeriod
+    } else {
+        // If the password is deleted after reading, polling every 500ms is
+        // cheap: once the file has been read and removed, each tick is just a
+        // stat of a missing file. If the password is kept on disk, stay at the
+        // slower default so we don't re-read the whole file every 500ms.
+        if k.removePasswordAfterReading {
+            readPeriod = 500 * time.Millisecond
+        }
+    }
+
+    k.ticker = time.NewTicker(readPeriod)
+
+    go k.runWatcher()
+
+    k.logger.Info("ldap auth method created", "password_file_path", k.passwordFilePath)
+
+    return k, nil
+}
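Given the parsing above, a minimal configuration for this method might look like the sketch below. Note that parseutil deliberately accepts flexible types: the booleans may arrive as real bools or strings like "true", and the read period as a duration string, integer seconds, or time.Duration.

```go
// Illustrative LDAP auto-auth configuration (placeholder values).
conf := &auth.AuthConfig{
    Logger:    hclog.NewNullLogger(),
    MountPath: "auth/ldap",
    Config: map[string]interface{}{
        "username":                      "app-user",
        "password_file_path":            "/etc/vault/ldap-password",
        "remove_password_after_reading": "false", // string form parses via parseutil.ParseBool
        "password_read_period":          "30s",   // parses via parseutil.ParseDurationSecond
    },
}
method, err := NewLdapAuthMethod(conf)
if err != nil {
    // handle invalid configuration
}
_ = method
```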
+
+func (k *ldapMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) {
+    k.logger.Trace("beginning authentication")
+
+    k.ingressPass()
+
+    latestPass := k.latestPass.Load().(string)
+
+    if latestPass == "" {
+        return "", nil, nil, errors.New("latest known password is empty, cannot authenticate")
+    }
+    k.logger.Debug("authenticating with latest known password")
+    return fmt.Sprintf("%s/login/%s", k.mountPath, k.username), nil, map[string]interface{}{
+        "password": latestPass,
+    }, nil
+}
+
+func (k *ldapMethod) NewCreds() chan struct{} {
+    return k.credsFound
+}
+
+func (k *ldapMethod) CredSuccess() {
+    k.once.Do(func() {
+        close(k.credSuccessGate)
+    })
+}
+
+func (k *ldapMethod) Shutdown() {
+    k.ticker.Stop()
+    close(k.stopCh)
+    <-k.doneCh
+}
+
+func (k *ldapMethod) runWatcher() {
+    defer close(k.doneCh)
+
+    select {
+    case <-k.stopCh:
+        return
+
+    case <-k.credSuccessGate:
+        // We only start the next loop once we're initially successful,
+        // since at startup Authenticate will be called, and we don't want
+        // to end up immediately re-authenticating by having found a new
+        // value
+    }
+
+    for {
+        select {
+        case <-k.stopCh:
+            return
+
+        case <-k.ticker.C:
+            latestPass := k.latestPass.Load().(string)
+            k.ingressPass()
+            newPass := k.latestPass.Load().(string)
+            if newPass != latestPass {
+                k.logger.Debug("new password file found")
+                k.credsFound <- struct{}{}
+            }
+        }
+    }
+}
+
+func (k *ldapMethod) ingressPass() {
+    fi, err := os.Lstat(k.passwordFilePath)
+    if err != nil {
+        if os.IsNotExist(err) {
+            return
+        }
+        k.logger.Error("error encountered stat'ing password file", "error", err)
+        return
+    }
+
+    // Check that the path refers to a file.
+    // If it's a symlink, it could still be a symlink to a directory,
+    // but os.ReadFile below will return a descriptive error.
+    evalSymlinkPath := k.passwordFilePath
+    switch mode := fi.Mode(); {
+    case mode.IsRegular():
+        // regular file
+    case mode&fs.ModeSymlink != 0:
+        // If our file path is a symlink, we should also return early (like above) without error
+        // if the file that is linked to is not present, otherwise we will error when trying
+        // to read that file by following the link in the os.ReadFile call.
+        evalSymlinkPath, err = filepath.EvalSymlinks(k.passwordFilePath)
+        if err != nil {
+            k.logger.Error("error encountered evaluating symlinks", "error", err)
+            return
+        }
+        _, err := os.Stat(evalSymlinkPath)
+        if err != nil {
+            if os.IsNotExist(err) {
+                return
+            }
+            k.logger.Error("error encountered stat'ing password file after evaluating symlinks", "error", err)
+            return
+        }
+    default:
+        k.logger.Error("password file is not a regular file or symlink")
+        return
+    }
+
+    pass, err := os.ReadFile(k.passwordFilePath)
+    if err != nil {
+        k.logger.Error("failed to read password file", "error", err)
+        return
+    }
+
+    switch len(pass) {
+    case 0:
+        k.logger.Warn("empty password file read")
+
+    default:
+        k.latestPass.Store(string(pass))
+    }
+
+    if k.removePasswordAfterReading {
+        pathToRemove := k.passwordFilePath
+        if k.removePasswordFollowsSymlinks {
+            // If removePassFollowsSymlinks is set, we follow the symlink and delete the password,
+            // not just the symlink that links to the password file
+            pathToRemove = evalSymlinkPath
+        }
+        if err := os.Remove(pathToRemove); err != nil {
+            k.logger.Error("error removing password file", "error", err)
+        }
+    }
+}
diff --git a/command/agentproxyshared/auth/ldap/ldap_test.go b/command/agentproxyshared/auth/ldap/ldap_test.go
new file mode 100644
index 000000000000..8f612db8c572
--- /dev/null
+++ b/command/agentproxyshared/auth/ldap/ldap_test.go
@@ -0,0 +1,262 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package ldap
+
+import (
+    "bytes"
+    "os"
+    "path"
+    "strings"
+    "sync/atomic"
+    "testing"
+
+    hclog "github.com/hashicorp/go-hclog"
+    "github.com/hashicorp/vault/command/agentproxyshared/auth"
+)
+
+func TestIngressPass(t *testing.T) {
+    const (
+        dir       = "dir"
+        file      = "file"
+        empty     = "empty"
+        missing   = "missing"
+        symlinked = "symlinked"
+    )
+
+    rootDir, err := os.MkdirTemp("", "vault-agent-ldap-auth-test")
+    if err != nil {
+        t.Fatalf("failed to create temp dir: %s", err)
+    }
+    defer os.RemoveAll(rootDir)
+
+    setupTestDir := func() string {
+        testDir, err := os.MkdirTemp(rootDir, "")
+        if err != nil {
+            t.Fatal(err)
+        }
+        err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644)
+        if err != nil {
+            t.Fatal(err)
+        }
+        _, err = os.Create(path.Join(testDir, empty))
+        if err != nil {
+            t.Fatal(err)
+        }
+        err = os.Mkdir(path.Join(testDir, dir), 0o755)
+        if err != nil {
+            t.Fatal(err)
+        }
+        err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked))
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        return testDir
+    }
+
+    for _, tc := range []struct {
+        name      string
+        path      string
+        errString string
+    }{
+        {
+            "happy path",
+            file,
+            "",
+        },
+        {
+            "path is directory",
+            dir,
+            "[ERROR] password file is not a regular file or symlink",
+        },
+        {
+            "password file path is symlink",
+            symlinked,
+            "",
+        },
+        {
+            "password file path is missing (implies nothing for ingressPass to do)",
+            missing,
+            "",
+        },
+        {
+            "password file path is empty file",
+            empty,
+            "[WARN] empty password file read",
+        },
+    } {
+        testDir := setupTestDir()
+        logBuffer := bytes.Buffer{}
+        ldapAuth := &ldapMethod{
+            logger: hclog.New(&hclog.LoggerOptions{
+                Output: &logBuffer,
+            }),
+            latestPass:       new(atomic.Value),
+            passwordFilePath: path.Join(testDir, tc.path),
+        }
+
+        ldapAuth.ingressPass()
+
+        if tc.errString != "" {
+            if !strings.Contains(logBuffer.String(), tc.errString) {
+                t.Fatal("logs did not contain expected error", tc.errString, logBuffer.String())
+            }
+        } else {
+            if strings.Contains(logBuffer.String(), "[ERROR]") ||
strings.Contains(logBuffer.String(), "[WARN]") { + t.Fatal("logs contained unexpected error", logBuffer.String()) + } + } + } +} + +func TestDeleteAfterReading(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + }{ + "default": { + "", + true, + }, + "explicit true": { + "true", + true, + }, + "false": { + "false", + false, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-ldap-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + passPath := path.Join(rootDir, "pass") + err = os.WriteFile(passPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "password_file_path": passPath, + "username": "testuser", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_password_after_reading"] = tc.configValue + } + + ldapAuth, err := NewLdapAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + ldapAuth.(*ldapMethod).ingressPass() + + if _, err := os.Lstat(passPath); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestDeleteAfterReadingSymlink(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + removePassFollowsSymlinks bool + }{ + "default": { + "", + true, + false, + }, + "explicit true": { + "true", + true, + false, + }, + "false": { + "false", + false, + false, + }, + "default + removePassFollowsSymlinks": { + "", + true, + true, + }, + "explicit true + removePassFollowsSymlinks": { + "true", + true, + true, + }, + "false + removePassFollowsSymlinks": { + "false", + false, + true, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-ldap-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + passPath := path.Join(rootDir, "pass") + err = os.WriteFile(passPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + symlink, err := os.CreateTemp("", "auth.ldap.symlink.test.") + if err != nil { + t.Fatal(err) + } + symlinkName := symlink.Name() + symlink.Close() + os.Remove(symlinkName) + os.Symlink(passPath, symlinkName) + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "password_file_path": symlinkName, + "username": "testuser", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_password_after_reading"] = tc.configValue + } + config.Config["remove_password_follows_symlinks"] = tc.removePassFollowsSymlinks + + ldapAuth, err := NewLdapAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + ldapAuth.(*ldapMethod).ingressPass() + + pathToCheck := symlinkName + if tc.removePassFollowsSymlinks { + pathToCheck = passPath + } + if _, err := os.Lstat(pathToCheck); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} diff --git a/command/agent/auth/oci/oci.go b/command/agentproxyshared/auth/oci/oci.go similarity index 98% rename from command/agent/auth/oci/oci.go rename to command/agentproxyshared/auth/oci/oci.go index 4ce62ade334d..40294065366e 100644 --- a/command/agent/auth/oci/oci.go +++ b/command/agentproxyshared/auth/oci/oci.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package oci @@ -18,7 +18,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/oracle/oci-go-sdk/common" ociAuth "github.com/oracle/oci-go-sdk/common/auth" ) @@ -165,7 +165,6 @@ func (a *ociMethod) Authenticate(context.Context, *api.Client) (string, http.Hea signer := common.DefaultRequestSigner(a.configurationProvider) err = signer.Sign(request) - if err != nil { return "", nil, nil, fmt.Errorf("error signing authentication request: %w", err) } diff --git a/command/agent/auth/token-file/token_file.go b/command/agentproxyshared/auth/token-file/token_file.go similarity index 95% rename from command/agent/auth/token-file/token_file.go rename to command/agentproxyshared/auth/token-file/token_file.go index c37a0866e80c..c2154f7ab960 100644 --- a/command/agent/auth/token-file/token_file.go +++ b/command/agentproxyshared/auth/token-file/token_file.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package token_file @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) type tokenFileMethod struct { diff --git a/command/agent/auth/token-file/token_file_test.go b/command/agentproxyshared/auth/token-file/token_file_test.go similarity index 95% rename from command/agent/auth/token-file/token_file_test.go rename to command/agentproxyshared/auth/token-file/token_file_test.go index 8932beb75d96..7e6e8982b245 100644 --- a/command/agent/auth/token-file/token_file_test.go +++ b/command/agentproxyshared/auth/token-file/token_file_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package token_file @@ -9,7 +9,7 @@ import ( "testing" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agentproxyshared/cache/api_proxy.go b/command/agentproxyshared/cache/api_proxy.go new file mode 100644 index 000000000000..cd4efd9f989f --- /dev/null +++ b/command/agentproxyshared/cache/api_proxy.go @@ -0,0 +1,191 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "fmt" + gohttp "net/http" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/http" +) + +//go:generate enumer -type=EnforceConsistency -trimprefix=EnforceConsistency +type EnforceConsistency int + +const ( + EnforceConsistencyNever EnforceConsistency = iota + EnforceConsistencyAlways +) + +//go:generate enumer -type=WhenInconsistentAction -trimprefix=WhenInconsistent +type WhenInconsistentAction int + +const ( + WhenInconsistentFail WhenInconsistentAction = iota + WhenInconsistentRetry + WhenInconsistentForward +) + +// APIProxy is an implementation of the proxier interface that is used to +// forward the request to Vault and get the response. 
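The two //go:generate enumer directives above generate string helpers for these enums. A rough sketch of what that provides, going by enumer's usual conventions (the generated names should be verified against the generated *_enumer.go file):

```go
// Illustrative use of enumer-generated helpers (names per enumer conventions).
s := EnforceConsistencyAlways.String() // "Always", thanks to -trimprefix

v, err := EnforceConsistencyString("Never") // generated reverse lookup
if err == nil && v == EnforceConsistencyNever {
    fmt.Println("round-trip ok:", s)
}
```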
+type APIProxy struct { + client *api.Client + logger hclog.Logger + enforceConsistency EnforceConsistency + whenInconsistentAction WhenInconsistentAction + l sync.RWMutex + lastIndexStates []string + userAgentString string + userAgentStringFunction func(string) string + // clientNamespace is a one-time set representation of the namespace of the client + // (i.e. client.Namespace()) to avoid repeated calls and lock usage. + clientNamespace string + prependConfiguredNamespace bool +} + +var _ Proxier = &APIProxy{} + +type APIProxyConfig struct { + Client *api.Client + Logger hclog.Logger + EnforceConsistency EnforceConsistency + WhenInconsistentAction WhenInconsistentAction + // UserAgentString is used as the User Agent when the proxied client + // does not have a user agent of its own. + UserAgentString string + // UserAgentStringFunction is the function to transform the proxied client's + // user agent into one that includes Vault-specific information. + UserAgentStringFunction func(string) string + // PrependConfiguredNamespace configures whether the client's namespace + // should be prepended to proxied requests + PrependConfiguredNamespace bool +} + +func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { + if config.Client == nil { + return nil, fmt.Errorf("nil API client") + } + return &APIProxy{ + client: config.Client, + logger: config.Logger, + enforceConsistency: config.EnforceConsistency, + whenInconsistentAction: config.WhenInconsistentAction, + userAgentString: config.UserAgentString, + userAgentStringFunction: config.UserAgentStringFunction, + prependConfiguredNamespace: config.PrependConfiguredNamespace, + clientNamespace: namespace.Canonicalize(config.Client.Namespace()), + }, nil +} + +func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + client, err := ap.client.Clone() + if err != nil { + return nil, err + } + client.SetToken(req.Token) + + // Derive and set a logger for the client + clientLogger := ap.logger.Named("client") + client.SetLogger(clientLogger) + + // http.Transport will transparently request gzip and decompress the response, but only if + // the client doesn't manually set the header. Removing any Accept-Encoding header allows the + // transparent compression to occur. + req.Request.Header.Del("Accept-Encoding") + + if req.Request.Header == nil { + req.Request.Header = make(gohttp.Header) + } + + // Set our User-Agent to be one indicating we are Vault Agent's API proxy. + // If the sending client had one, preserve it. 
+ if req.Request.Header.Get("User-Agent") != "" { + initialUserAgent := req.Request.Header.Get("User-Agent") + req.Request.Header.Set("User-Agent", ap.userAgentStringFunction(initialUserAgent)) + } else { + req.Request.Header.Set("User-Agent", ap.userAgentString) + } + + client.SetHeaders(req.Request.Header) + if ap.prependConfiguredNamespace && ap.clientNamespace != "" { + currentNamespace := namespace.Canonicalize(client.Namespace()) + newNamespace := namespace.Canonicalize(ap.clientNamespace + currentNamespace) + client.SetNamespace(newNamespace) + } + + fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) + fwReq.BodyBytes = req.RequestBody + + query := req.Request.URL.Query() + if len(query) != 0 { + fwReq.Params = query + } + + var newState string + manageState := ap.enforceConsistency == EnforceConsistencyAlways && + req.Request.Header.Get(http.VaultIndexHeaderName) == "" && + req.Request.Header.Get(http.VaultForwardHeaderName) == "" && + req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" + + if manageState { + client = client.WithResponseCallbacks(api.RecordState(&newState)) + ap.l.RLock() + lastStates := ap.lastIndexStates + ap.l.RUnlock() + if len(lastStates) != 0 { + client = client.WithRequestCallbacks(api.RequireState(lastStates...)) + switch ap.whenInconsistentAction { + case WhenInconsistentFail: + // In this mode we want to delegate handling of inconsistency + // failures to the external client talking to Agent. + client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) + case WhenInconsistentRetry: + // In this mode we want to handle retries due to inconsistency + // internally. This is the default api.Client behaviour so + // we needn't do anything. + case WhenInconsistentForward: + fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) + } + } + } + + // Make the request to Vault and get the response + ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) + + resp, err := client.RawRequestWithContext(ctx, fwReq) + if resp == nil && err != nil { + // We don't want to cache nil responses, so we simply return the error + return nil, err + } + + if newState != "" { + ap.l.Lock() + // We want to be using the "newest" states seen, but newer isn't well + // defined here. There can be two states S1 and S2 which aren't strictly ordered: + // S1 could have a newer localindex and S2 could have a newer replicatedindex. So + // we need to merge them. But we can't merge them because we wouldn't be able to + // "sign" the resulting header because we don't have access to the HMAC key that + // Vault uses to do so. So instead we compare any of the 0-2 saved states + // we have to the new header, keeping the newest 1-2 of these, and sending + // them to Vault to evaluate. + ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) + ap.l.Unlock() + } + + // Before error checking from the request call, we'd want to initialize a SendResponse to + // potentially return + sendResponse, newErr := NewSendResponse(resp, nil) + if newErr != nil { + return nil, newErr + } + + // Bubble back the api.Response as well for error checking/handling at the handler layer. 
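The state handling above is the client-side read-your-writes pattern from the vault/api package: record the replication state of each response, then require at least that state on later requests. A condensed sketch of the same flow in isolation, against an assumed pre-configured client:

```go
// Illustrative only: how api.RecordState / api.RequireState cooperate.
// 'client' is an assumed, already-configured *api.Client.
var state string

// Record the replication state returned by a write.
wc := client.WithResponseCallbacks(api.RecordState(&state))
if _, err := wc.Logical().Write("secret/data/app", map[string]interface{}{
    "data": map[string]interface{}{"k": "v"},
}); err != nil {
    // handle write error
}

// Require at least that state on a follow-up read; a node that has not
// caught up yet responds with 412, which the client surfaces or retries.
rc := client.WithRequestCallbacks(api.RequireState(state))
if _, err := rc.Logical().Read("secret/data/app"); err != nil {
    // handle read error
}

// Keep only the newest states across responses, as the proxy does above.
states := api.MergeReplicationStates(nil, state)
_ = states
```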
+ return sendResponse, err +} diff --git a/command/agent/cache/api_proxy_test.go b/command/agentproxyshared/cache/api_proxy_test.go similarity index 86% rename from command/agent/cache/api_proxy_test.go rename to command/agentproxyshared/cache/api_proxy_test.go index 4efc21a7642a..680b23eeb8f4 100644 --- a/command/agent/cache/api_proxy_test.go +++ b/command/agentproxyshared/cache/api_proxy_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cache @@ -12,16 +12,16 @@ import ( "testing" "time" - "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/userpass" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" ) const policyAdmin = ` @@ -35,8 +35,10 @@ func TestAPIProxy(t *testing.T) { defer cleanup() proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -71,8 +73,10 @@ func TestAPIProxyNoCache(t *testing.T) { defer cleanup() proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -109,8 +113,10 @@ func TestAPIProxy_queryParams(t *testing.T) { defer cleanup() proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -180,15 +186,9 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v ctx = context.Background() } - // Handle sane defaults if coreConfig == nil { - coreConfig = &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: logging.NewVaultLogger(hclog.Trace), - } + coreConfig = &vault.CoreConfig{} } - // Always set up the userpass backend since we use that to generate an admin // token for the client that will make proxied requests to through the agent. 
if coreConfig.CredentialBackends == nil || coreConfig.CredentialBackends["userpass"] == nil { @@ -249,12 +249,14 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v t.Fatal(err) } - apiProxyLogger := logging.NewVaultLogger(hclog.Trace).Named("apiproxy") + apiProxyLogger := cluster.Logger.Named("apiproxy") // Create the API proxier apiProxy, err := NewAPIProxy(&APIProxyConfig{ - Client: clienToUse, - Logger: apiProxyLogger, + Client: clienToUse, + Logger: apiProxyLogger, + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -265,15 +267,17 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v var leaseCache *LeaseCache if useCache { - cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + cacheLogger := cluster.Logger.Named("cache") // Create the lease cache proxier and set its underlying proxier to // the API proxier. leaseCache, err = NewLeaseCache(&LeaseCacheConfig{ - Client: clienToUse, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), + Client: clienToUse, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: "test", }) if err != nil { t.Fatal(err) @@ -281,9 +285,9 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v mux.Handle("/agent/v1/cache-clear", leaseCache.HandleCacheClear(ctx)) - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true)) + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, false, false, nil, nil)) } else { - mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true)) + mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, false, false, nil, nil)) } server := &http.Server{ diff --git a/command/agentproxyshared/cache/cache_test.go b/command/agentproxyshared/cache/cache_test.go new file mode 100644 index 000000000000..1ab5821f69b0 --- /dev/null +++ b/command/agentproxyshared/cache/cache_test.go @@ -0,0 +1,1207 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/sink/mock" + "github.com/hashicorp/vault/helper/namespace" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { + t.Helper() + for val, valType := range sampleSpace { + index, err := leaseCache.db.Get(valType, val) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + t.Fatal(err) + } + if expected[val] == "" && index != nil { + t.Fatalf("failed to evict index from the cache: type: %q, value: %q", valType, val) + } + if expected[val] != "" && index == nil { + t.Fatalf("evicted an undesired index from cache: type: %q, value: %q", valType, val) + } + } +} + +func TestCache_AutoAuthTokenStripping(t *testing.T) { + response1 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup-self"}}` + response2 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup"}}` + response3 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` + response4 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, response1), + newTestSendResponse(http.StatusOK, response2), + newTestSendResponse(http.StatusOK, response3), + newTestSendResponse(http.StatusOK, response4), + } + + leaseCache := testNewLeaseCache(t, responses) + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ctx := namespace.RootContext(nil) + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), false, true, nil, nil)) + server := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + testClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Empty the token in the client. Auto-auth token should be put to use. 
+ testClient.SetToken("") + secret, err := testClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup-self" { + t.Fatalf("failed to strip off auto-auth token on lookup-self") + } + + secret, err = testClient.Auth().Token().Lookup("") + if err != nil { + t.Fatal(err) + } + if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup" { + t.Fatalf("failed to strip off auto-auth token on lookup") + } + + secret, err = testClient.Auth().Token().RenewSelf(1) + if err != nil { + t.Fatal(err) + } + if secret.Auth == nil { + secretJson, _ := json.Marshal(secret) + t.Fatalf("Expected secret to have Auth but was %s", secretJson) + } + if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" { + t.Fatalf("failed to strip off auto-auth token on renew-self") + } + + secret, err = testClient.Auth().Token().Renew("testid", 1) + if err != nil { + t.Fatal(err) + } + if secret.Auth == nil { + secretJson, _ := json.Marshal(secret) + t.Fatalf("Expected secret to have Auth but was %s", secretJson) + } + if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" { + t.Fatalf("failed to strip off auto-auth token on renew") + } +} + +func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) { + leaseCache := &mockTokenVerifierProxier{} + dummyToken := "DUMMY" + realToken := "testid" + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ctx := namespace.RootContext(nil) + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), true, true, nil, nil)) + server := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + testClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Empty the token in the client. Auto-auth token should be put to use. 
+    testClient.SetToken(dummyToken)
+    _, err = testClient.Auth().Token().LookupSelf()
+    if err != nil {
+        t.Fatal(err)
+    }
+    if leaseCache.currentToken != realToken {
+        t.Fatalf("failed to use real token from auto-auth")
+    }
+}
+
+func TestCache_ConcurrentRequests(t *testing.T) {
+    coreConfig := &vault.CoreConfig{
+        LogicalBackends: map[string]logical.Factory{
+            "kv": vault.LeasedPassthroughBackendFactory,
+        },
+    }
+
+    cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+    defer cleanup()
+
+    err := testClient.Sys().Mount("kv", &api.MountInput{
+        Type: "kv",
+    })
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    wg := &sync.WaitGroup{}
+    for i := 0; i < 100; i++ {
+        wg.Add(1)
+        go func(i int) {
+            defer wg.Done()
+            key := fmt.Sprintf("kv/foo/%d_%d", i, rand.Int())
+            _, err := testClient.Logical().Write(key, map[string]interface{}{
+                "key": key,
+            })
+            if err != nil {
+                // t.Fatal must not be called from a non-test goroutine;
+                // report the failure and bail out of this goroutine instead.
+                t.Error(err)
+                return
+            }
+            secret, err := testClient.Logical().Read(key)
+            if err != nil {
+                t.Error(err)
+                return
+            }
+            if secret == nil || secret.Data["key"].(string) != key {
+                t.Errorf("failed to read value for key: %q", key)
+            }
+        }(i)
+    }
+    wg.Wait()
+}
+
+func TestCache_TokenRevocations_RevokeOrphan(t *testing.T) {
+    coreConfig := &vault.CoreConfig{
+        LogicalBackends: map[string]logical.Factory{
+            "kv": vault.LeasedPassthroughBackendFactory,
+        },
+    }
+
+    sampleSpace := make(map[string]string)
+
+    cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+    defer cleanup()
+
+    token1 := testClient.Token()
+    sampleSpace[token1] = "token"
+
+    // Mount the kv backend
+    err := testClient.Sys().Mount("kv", &api.MountInput{
+        Type: "kv",
+    })
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Create a secret in the backend
+    _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+        "value": "bar",
+        "ttl":   "1h",
+    })
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Read the secret and create a lease
+    leaseResp, err := testClient.Logical().Read("kv/foo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    lease1 := leaseResp.LeaseID
+    sampleSpace[lease1] = "lease"
+
+    resp, err := testClient.Logical().Write("auth/token/create", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+    token2 := resp.Auth.ClientToken
+    sampleSpace[token2] = "token"
+
+    testClient.SetToken(token2)
+
+    leaseResp, err = testClient.Logical().Read("kv/foo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    lease2 := leaseResp.LeaseID
+    sampleSpace[lease2] = "lease"
+
+    resp, err = testClient.Logical().Write("auth/token/create", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+    token3 := resp.Auth.ClientToken
+    sampleSpace[token3] = "token"
+
+    testClient.SetToken(token3)
+
+    leaseResp, err = testClient.Logical().Read("kv/foo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    lease3 := leaseResp.LeaseID
+    sampleSpace[lease3] = "lease"
+
+    expected := make(map[string]string)
+    for k, v := range sampleSpace {
+        expected[k] = v
+    }
+    tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+    // Revoke-orphan the intermediate token. This should result in its own
+    // eviction and evictions of the revoked token's leases. All other things
+    // including the child tokens and leases of the child tokens should be
+    // untouched.
+    testClient.SetToken(token2)
+    err = testClient.Auth().Token().RevokeOrphan(token2)
+    if err != nil {
+        t.Fatal(err)
+    }
+    time.Sleep(1 * time.Second)
+
+    expected = map[string]string{
+        token1: "token",
+        lease1: "lease",
+        token3: "token",
+        lease3: "lease",
+    }
+    tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_LeafLevelToken(t *testing.T) {
+    coreConfig := &vault.CoreConfig{
+        LogicalBackends: map[string]logical.Factory{
+            "kv": vault.LeasedPassthroughBackendFactory,
+        },
+    }
+
+    sampleSpace := make(map[string]string)
+
+    cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+    defer cleanup()
+
+    token1 := testClient.Token()
+    sampleSpace[token1] = "token"
+
+    // Mount the kv backend
+    err := testClient.Sys().Mount("kv", &api.MountInput{
+        Type: "kv",
+    })
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Create a secret in the backend
+    _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+        "value": "bar",
+        "ttl":   "1h",
+    })
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Read the secret and create a lease
+    leaseResp, err := testClient.Logical().Read("kv/foo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    lease1 := leaseResp.LeaseID
+    sampleSpace[lease1] = "lease"
+
+    resp, err := testClient.Logical().Write("auth/token/create", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+    token2 := resp.Auth.ClientToken
+    sampleSpace[token2] = "token"
+
+    testClient.SetToken(token2)
+
+    leaseResp, err = testClient.Logical().Read("kv/foo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    lease2 := leaseResp.LeaseID
+    sampleSpace[lease2] = "lease"
+
+    resp, err = testClient.Logical().Write("auth/token/create", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+    token3 := resp.Auth.ClientToken
+    sampleSpace[token3] = "token"
+
+    testClient.SetToken(token3)
+
+    leaseResp, err = testClient.Logical().Read("kv/foo")
+    if err != nil {
+        t.Fatal(err)
+    }
+    lease3 := leaseResp.LeaseID
+    sampleSpace[lease3] = "lease"
+
+    expected := make(map[string]string)
+    for k, v := range sampleSpace {
+        expected[k] = v
+    }
+    tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+    // Revoke the leaf token. This should evict the leaf token's own entry and
+    // its leases; the parent tokens and their respective leases should be
+    // untouched.
+ testClient.SetToken(token3) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + token2: "token", + lease2: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_IntermediateLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the second level token. This should evict all the leases + // belonging to this token, evict entries for all the child tokens and + // their respective leases. 
+ testClient.SetToken(token2) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_TopLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the top level token. This should evict all the leases belonging + // to this token, evict entries for all the child tokens and their + // respective leases. 
+ testClient.SetToken(token1) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_Shutdown(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + ctx, rootCancelFunc := context.WithCancel(namespace.RootContext(nil)) + cleanup, _, testClient, leaseCache := setupClusterAndAgent(ctx, t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + rootCancelFunc() + time.Sleep(1 * time.Second) + + // Ensure that all the entries are now gone + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_BaseContextCancellation(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = 
testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Cancel the base context of the lease cache. This should trigger + // evictions of all the entries from the cache. + leaseCache.baseCtxInfo.CancelFunc() + time.Sleep(1 * time.Second) + + // Ensure that all the entries are now gone + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_NonCacheable(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.Factory, + }, + } + + cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + // Query mounts first + origMounts, err := testClient.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + // Mount a kv backend + if err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + Options: map[string]string{ + "version": "2", + }, + }); err != nil { + t.Fatal(err) + } + + // Query mounts again + newMounts, err := testClient.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(origMounts, newMounts); diff == nil { + t.Logf("response #1: %#v", origMounts) + t.Logf("response #2: %#v", newMounts) + t.Fatal("expected requests to not be cached") + } + + // Query a non-existing mount, expect an error from api.Response + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + r := testClient.NewRequest("GET", "/v1/kv-invalid") + + apiResp, err := testClient.RawRequestWithContext(ctx, r) + if apiResp != nil { + defer apiResp.Body.Close() + } + if apiResp == nil || apiResp.Error() == nil || apiResp.StatusCode != 404 { + t.Fatalf("expected an error response and a 404 from requesting an invalid path, got: %#v", apiResp) + } + if err == nil { + t.Fatal("expected an error from requesting an invalid path") + } +} + +func TestCache_Caching_AuthResponse(t *testing.T) { + cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + testClient.SetToken(token) + + authTokenCreateReq := func(t *testing.T, policies map[string]interface{}) *api.Secret { + resp, err := testClient.Logical().Write("auth/token/create", policies) + if err != nil { + t.Fatal(err) + } + if resp.Auth == nil || resp.Auth.ClientToken == "" { + t.Fatalf("expected a valid client token in the response, got = %#v", resp) + } + + return resp + } + + // Test on auth response by creating a child token + { + proxiedResp := authTokenCreateReq(t, map[string]interface{}{ + "policies": "default", + }) + + cachedResp := authTokenCreateReq(t, map[string]interface{}{ + "policies": "default", + }) + + if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { + t.Fatal(diff) + } + } + + // Test on
*non-renewable* auth response by creating a child root token + { + proxiedResp := authTokenCreateReq(t, nil) + + cachedResp := authTokenCreateReq(t, nil) + + if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { + t.Fatal(diff) + } + } +} + +func TestCache_Caching_LeaseResponse(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + cleanup, client, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Test proxy by issuing two different requests + { + // Write data to the lease-kv backend + _, err := testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + _, err = testClient.Logical().Write("kv/foobar", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + firstResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + + secondResp, err := testClient.Logical().Read("kv/foobar") + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(firstResp, secondResp); diff == nil { + t.Logf("response: %#v", firstResp) + t.Fatal("expected proxied responses, got cached response on second request") + } + } + + // Test caching behavior by issuing the same request twice + { + _, err := testClient.Logical().Write("kv/baz", map[string]interface{}{ + "value": "foo", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + proxiedResp, err := testClient.Logical().Read("kv/baz") + if err != nil { + t.Fatal(err) + } + + cachedResp, err := testClient.Logical().Read("kv/baz") + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(proxiedResp, cachedResp); diff != nil { + t.Fatal(diff) + } + } +} + +func TestCache_Caching_CacheClear(t *testing.T) { + t.Run("request_path", func(t *testing.T) { + testCachingCacheClearCommon(t, "request_path") + }) + + t.Run("lease", func(t *testing.T) { + testCachingCacheClearCommon(t, "lease") + }) + + t.Run("token", func(t *testing.T) { + testCachingCacheClearCommon(t, "token") + }) + + t.Run("token_accessor", func(t *testing.T) { + testCachingCacheClearCommon(t, "token_accessor") + }) + + t.Run("all", func(t *testing.T) { + testCachingCacheClearCommon(t, "all") + }) +} + +func testCachingCacheClearCommon(t *testing.T, clearType string) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + cleanup, client, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Write data to the lease-kv backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Proxy this request, agent should cache the response + resp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + gotLeaseID := resp.LeaseID + + // Verify the entry exists + idx, err := leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID) + if err != nil { + t.Fatal(err) + } + + if idx == nil { + t.Fatalf("expected cached entry, got: %v", idx) + } + + data := map[string]interface{}{
+ "type": clearType, + } + + // We need to set the value here depending on what we're trying to test. + // Some values are be static, but others are dynamically generated at runtime. + switch clearType { + case "request_path": + data["value"] = "/v1/kv/foo" + case "lease": + data["value"] = resp.LeaseID + case "token": + data["value"] = testClient.Token() + case "token_accessor": + lookupResp, err := client.Auth().Token().Lookup(testClient.Token()) + if err != nil { + t.Fatal(err) + } + data["value"] = lookupResp.Data["accessor"] + case "all": + default: + t.Fatalf("invalid type provided: %v", clearType) + } + + r := testClient.NewRequest("PUT", consts.AgentPathCacheClear) + if err := r.SetJSONBody(data); err != nil { + t.Fatal(err) + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + apiResp, err := testClient.RawRequestWithContext(ctx, r) + if apiResp != nil { + defer apiResp.Body.Close() + } + if apiResp != nil && apiResp.StatusCode == 404 { + _, parseErr := api.ParseSecret(apiResp.Body) + switch parseErr { + case nil: + case io.EOF: + default: + t.Fatal(err) + } + } + if err != nil { + t.Fatal(err) + } + + time.Sleep(100 * time.Millisecond) + + // Verify the entry is cleared + idx, err = leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID) + if err != cachememdb.ErrCacheItemNotFound { + t.Fatal("expected entry to be nil, got", err) + } +} + +func TestCache_AuthTokenCreateOrphan(t *testing.T) { + t.Run("create", func(t *testing.T) { + t.Run("managed", func(t *testing.T) { + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + NoParent: true, + } + resp, err := testClient.Auth().Token().Create(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + + t.Run("non-managed", func(t *testing.T) { + cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + NoParent: true, + } + + // Use the test client but set the token to one that's not managed by agent + testClient.SetToken(clusterClient.Token()) + + resp, err := testClient.Auth().Token().Create(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + }) + + t.Run("create-orphan", func(t *testing.T) { + t.Run("managed", func(t *testing.T) { + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + } + resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + + t.Run("non-managed", func(t *testing.T) { + cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer 
cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + } + + // Use the test client but set the token to one that's not managed by agent + testClient.SetToken(clusterClient.Token()) + + resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + }) +} diff --git a/command/agent/cache/cacheboltdb/bolt.go b/command/agentproxyshared/cache/cacheboltdb/bolt.go similarity index 95% rename from command/agent/cache/cacheboltdb/bolt.go rename to command/agentproxyshared/cache/cacheboltdb/bolt.go index 434b4116542a..05d5ad93637a 100644 --- a/command/agent/cache/cacheboltdb/bolt.go +++ b/command/agentproxyshared/cache/cacheboltdb/bolt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cacheboltdb @@ -12,10 +12,10 @@ import ( "time" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping/v2" "github.com/hashicorp/go-multierror" - bolt "go.etcd.io/bbolt" ) const ( @@ -39,6 +39,14 @@ const ( // TokenType - Bucket/type for auto-auth tokens TokenType = "token" + // StaticSecretType - Bucket/type for static secrets + StaticSecretType = "static-secret" + + // TokenCapabilitiesType - Bucket/type for the token capabilities that + // are used to govern access to static secrets. These will be updated + // periodically to ensure that access to the cached secret remains. + TokenCapabilitiesType = "token-capabilities" + // LeaseType - v2 Bucket/type for auth AND secret leases. // // This bucket stores keys in the same order they were created using @@ -157,7 +165,7 @@ func createV1BoltSchema(tx *bolt.Tx) error { func createV2BoltSchema(tx *bolt.Tx) error { // Create the buckets for tokens and leases. 
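The bucket creation just below follows the standard bbolt idiom: create each bucket idempotently inside a single update transaction. A self-contained sketch of that pattern follows; note that this sketch imports the upstream `go.etcd.io/bbolt` for simplicity, whereas the patch itself switches to the `hashicorp-forge/bbolt` fork with the same API, and the file path and bucket names here are illustrative.

```go
// Sketch: idempotent bucket creation inside a bbolt update
// transaction, the same pattern createV2BoltSchema relies on.
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("cache.db", 0o600, nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		for _, bucket := range []string{"token", "lease", "static-secret"} {
			// CreateBucketIfNotExists makes re-running the schema setup safe.
			if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil {
				return fmt.Errorf("failed to create %s bucket: %w", bucket, err)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```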
- for _, bucket := range []string{TokenType, LeaseType, lookupType} { + for _, bucket := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { return fmt.Errorf("failed to create %s bucket: %w", bucket, err) } @@ -259,6 +267,10 @@ func (b *BoltStorage) Set(ctx context.Context, id string, plaintext []byte, inde if err := meta.Put([]byte(AutoAuthToken), protoBlob); err != nil { return fmt.Errorf("failed to set latest auto-auth token: %w", err) } + case StaticSecretType: + key = []byte(id) + case TokenCapabilitiesType: + key = []byte(id) default: return fmt.Errorf("called Set for unsupported type %q", indexType) } @@ -411,7 +423,7 @@ func (b *BoltStorage) Close() error { // the schema/layout func (b *BoltStorage) Clear() error { return b.db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{TokenType, LeaseType, lookupType} { + for _, name := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { b.logger.Trace("deleting bolt bucket", "name", name) if err := tx.DeleteBucket([]byte(name)); err != nil { return err diff --git a/command/agent/cache/cacheboltdb/bolt_test.go b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go similarity index 87% rename from command/agent/cache/cacheboltdb/bolt_test.go rename to command/agentproxyshared/cache/cacheboltdb/bolt_test.go index c5c057d4187f..06a31780b5ad 100644 --- a/command/agent/cache/cacheboltdb/bolt_test.go +++ b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go @@ -1,12 +1,11 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cacheboltdb import ( "context" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -15,11 +14,11 @@ import ( "time" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/cache/keymanager" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - bolt "go.etcd.io/bbolt" ) func getTestKeyManager(t *testing.T) keymanager.KeyManager { @@ -34,7 +33,7 @@ func getTestKeyManager(t *testing.T) keymanager.KeyManager { func TestBolt_SetGet(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -60,7 +59,7 @@ func TestBolt_SetGet(t *testing.T) { func TestBoltDelete(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -92,7 +91,7 @@ func TestBoltDelete(t *testing.T) { func TestBoltClear(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -126,6 +125,20 @@ func TestBoltClear(t *testing.T) { require.Len(t, tokens, 1) assert.Equal(t, []byte("hello"), tokens[0]) + err = b.Set(ctx, "static-secret", []byte("hello"), StaticSecretType) + require.NoError(t, err) + staticSecrets, err := b.GetByType(ctx, StaticSecretType) + require.NoError(t, err) + require.Len(t, staticSecrets, 1) + assert.Equal(t, []byte("hello"), staticSecrets[0]) + + err = b.Set(ctx, "capabilities-index", []byte("hello"), TokenCapabilitiesType) + 
require.NoError(t, err) + capabilities, err := b.GetByType(ctx, TokenCapabilitiesType) + require.NoError(t, err) + require.Len(t, capabilities, 1) + assert.Equal(t, []byte("hello"), capabilities[0]) + // Clear the bolt db, and check that it's indeed clear err = b.Clear() require.NoError(t, err) @@ -135,12 +148,18 @@ func TestBoltClear(t *testing.T) { tokens, err = b.GetByType(ctx, TokenType) require.NoError(t, err) assert.Len(t, tokens, 0) + staticSecrets, err = b.GetByType(ctx, StaticSecretType) + require.NoError(t, err) + require.Len(t, staticSecrets, 0) + capabilities, err = b.GetByType(ctx, TokenCapabilitiesType) + require.NoError(t, err) + require.Len(t, capabilities, 0) } func TestBoltSetAutoAuthToken(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -210,11 +229,11 @@ func TestDBFileExists(t *testing.T) { var tmpPath string var err error if tc.mkDir { - tmpPath, err = ioutil.TempDir("", "test-db-path") + tmpPath, err = os.MkdirTemp("", "test-db-path") require.NoError(t, err) } if tc.createFile { - err = ioutil.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) + err = os.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) require.NoError(t, err) } exists, err := DBFileExists(tmpPath) @@ -244,7 +263,7 @@ func Test_SetGetRetrievalToken(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -270,7 +289,7 @@ func Test_SetGetRetrievalToken(t *testing.T) { func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -342,7 +361,7 @@ func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { func TestBolt_MigrateFromInvalidToV2Schema(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb.go b/command/agentproxyshared/cache/cachememdb/cache_memdb.go new file mode 100644 index 000000000000..9746374593ec --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb.go @@ -0,0 +1,328 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cachememdb + +import ( + "errors" + "fmt" + "sync/atomic" + + memdb "github.com/hashicorp/go-memdb" +) + +const ( + tableNameIndexer = "indexer" + tableNameCapabilitiesIndexer = "capabilities-indexer" +) + +// ErrCacheItemNotFound is returned on Get and GetCapabilitiesIndex calls +// when the entry is not found in the cache. +var ErrCacheItemNotFound = errors.New("cache item not found") + +// CacheMemDB is the underlying cache database for storing indexes. +type CacheMemDB struct { + db *atomic.Value +} + +// New creates a new instance of CacheMemDB. 
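Before the implementation, a hedged sketch of how a caller typically uses this package, mirroring the lease cache's own usage; the import path is the one introduced by this diff, and all field values are invented for illustration.

```go
// Sketch: storing and retrieving an Index with CacheMemDB.
package main

import (
	"errors"
	"log"

	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
)

func main() {
	cache, err := cachememdb.New()
	if err != nil {
		log.Fatal(err)
	}

	// Store an entry; field values here are illustrative.
	err = cache.Set(&cachememdb.Index{
		ID:            "example-id",
		Token:         "example-token",
		TokenAccessor: "example-accessor",
		Namespace:     "root/",
		RequestPath:   "/v1/kv/foo",
		Response:      []byte("cached response"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Look the entry up by ID; a miss is reported as ErrCacheItemNotFound.
	idx, err := cache.Get(cachememdb.IndexNameID, "example-id")
	if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
		log.Println("cache miss")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("hit: %s", idx.Response)
}
```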
+func New() (*CacheMemDB, error) { + db, err := newDB() + if err != nil { + return nil, err + } + + c := &CacheMemDB{ + db: new(atomic.Value), + } + c.db.Store(db) + + return c, nil +} + +func newDB() (*memdb.MemDB, error) { + cacheSchema := &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + tableNameIndexer: { + Name: tableNameIndexer, + Indexes: map[string]*memdb.IndexSchema{ + // This index enables fetching the cached item based on the + // identifier of the index. + IndexNameID: { + Name: IndexNameID, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + // This index enables fetching all the entries in cache for + // a given request path, in a given namespace. + IndexNameRequestPath: { + Name: IndexNameRequestPath, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "RequestPath", + }, + }, + }, + }, + // This index enables fetching all the entries in cache + // belonging to the leases of a given token. + IndexNameLeaseToken: { + Name: IndexNameLeaseToken, + Unique: false, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "LeaseToken", + }, + }, + // This index enables fetching all the entries in cache + // that are tied to the given token, regardless of the + // entries belonging to the token or belonging to the + // lease. + IndexNameToken: { + Name: IndexNameToken, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "Token", + }, + }, + // This index enables fetching all the entries in cache for + // the given parent token. + IndexNameTokenParent: { + Name: IndexNameTokenParent, + Unique: false, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "TokenParent", + }, + }, + // This index enables fetching all the entries in cache for + // the given accessor. + IndexNameTokenAccessor: { + Name: IndexNameTokenAccessor, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "TokenAccessor", + }, + }, + // This index enables fetching all the entries in cache for + // the given lease identifier. + IndexNameLease: { + Name: IndexNameLease, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "Lease", + }, + }, + }, + }, + tableNameCapabilitiesIndexer: { + Name: tableNameCapabilitiesIndexer, + Indexes: map[string]*memdb.IndexSchema{ + // This index enables fetching the cached item based on the + // identifier of the index. + CapabilitiesIndexNameID: { + Name: CapabilitiesIndexNameID, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + }, + }, + }, + } + + db, err := memdb.NewMemDB(cacheSchema) + if err != nil { + return nil, err + } + return db, nil +} + +// Get returns the index based on the indexer and the index values provided. +// If the index isn't present, it will return nil, ErrCacheItemNotFound +func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, error) { + if !validIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + raw, err := txn.First(tableNameIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + if raw == nil { + return nil, ErrCacheItemNotFound + } + + index, ok := raw.(*Index) + if !ok { + return nil, errors.New("unable to parse index value from the cache") + } + + return index, nil +} + +// Set stores the index into the cache.
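Set (below) uses the standard go-memdb write-transaction pattern: open a write transaction, insert, and commit, with a deferred Abort as the failure path. A standalone sketch of that pattern under a minimal, illustrative schema:

```go
// Sketch: the go-memdb write-transaction pattern used by Set.
package main

import (
	"log"

	memdb "github.com/hashicorp/go-memdb"
)

type entry struct {
	ID string
}

func main() {
	// Minimal schema: one table with the mandatory "id" index.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"indexer": {
				Name: "indexer",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		log.Fatal(err)
	}

	txn := db.Txn(true) // write transaction
	defer txn.Abort()   // becomes a no-op once Commit has run

	if err := txn.Insert("indexer", &entry{ID: "example"}); err != nil {
		log.Fatal(err)
	}
	txn.Commit()
}
```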
+func (c *CacheMemDB) Set(index *Index) error { + if index == nil { + return errors.New("nil index provided") + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Insert(tableNameIndexer, index); err != nil { + return fmt.Errorf("unable to insert index into cache: %v", err) + } + + txn.Commit() + + return nil +} + +// GetCapabilitiesIndex returns the CapabilitiesIndex from the cache. +// If the capabilities index isn't present, it will return nil, ErrCacheItemNotFound +func (c *CacheMemDB) GetCapabilitiesIndex(indexName string, indexValues ...interface{}) (*CapabilitiesIndex, error) { + if !validCapabilitiesIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + raw, err := txn.First(tableNameCapabilitiesIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + if raw == nil { + return nil, ErrCacheItemNotFound + } + + index, ok := raw.(*CapabilitiesIndex) + if !ok { + return nil, errors.New("unable to parse capabilities index value from the cache") + } + + return index, nil +} + +// SetCapabilitiesIndex stores the CapabilitiesIndex into the cache. +func (c *CacheMemDB) SetCapabilitiesIndex(index *CapabilitiesIndex) error { + if index == nil { + return errors.New("nil capabilities index provided") + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Insert(tableNameCapabilitiesIndexer, index); err != nil { + return fmt.Errorf("unable to insert index into cache: %v", err) + } + + txn.Commit() + + return nil +} + +// EvictCapabilitiesIndex removes a capabilities index from the cache based on index name and value. +func (c *CacheMemDB) EvictCapabilitiesIndex(indexName string, indexValues ...interface{}) error { + index, err := c.GetCapabilitiesIndex(indexName, indexValues...) + if errors.Is(err, ErrCacheItemNotFound) { + return nil + } + if err != nil { + return fmt.Errorf("unable to fetch index on cache deletion: %v", err) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Delete(tableNameCapabilitiesIndexer, index); err != nil { + return fmt.Errorf("unable to delete index from cache: %v", err) + } + + txn.Commit() + + return nil +} + +// GetByPrefix returns all the cached indexes based on the index name and the +// value prefix. +func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ([]*Index, error) { + if !validIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + indexName = indexName + "_prefix" + + // Get all the objects + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + iter, err := txn.Get(tableNameIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + var indexes []*Index + for { + obj := iter.Next() + if obj == nil { + break + } + index, ok := obj.(*Index) + if !ok { + return nil, fmt.Errorf("failed to cast cached index") + } + + indexes = append(indexes, index) + } + + return indexes, nil +} + +// Evict removes an index from the cache based on index name and value. +func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error { + index, err := c.Get(indexName, indexValues...)
+ if errors.Is(err, ErrCacheItemNotFound) { + return nil + } + if err != nil { + return fmt.Errorf("unable to fetch index on cache deletion: %v", err) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Delete(tableNameIndexer, index); err != nil { + return fmt.Errorf("unable to delete index from cache: %v", err) + } + + txn.Commit() + + return nil +} + +// Flush resets the underlying cache object. +func (c *CacheMemDB) Flush() error { + newDB, err := newDB() + if err != nil { + return err + } + + c.db.Store(newDB) + + return nil +} diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go b/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go new file mode 100644 index 000000000000..245f066281eb --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go @@ -0,0 +1,485 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cachememdb + +import ( + "context" + "testing" + + "github.com/go-test/deep" + "github.com/stretchr/testify/require" +) + +func testContextInfo() *ContextInfo { + ctx, cancelFunc := context.WithCancel(context.Background()) + + return &ContextInfo{ + Ctx: ctx, + CancelFunc: cancelFunc, + } +} + +func TestNew(t *testing.T) { + _, err := New() + if err != nil { + t.Fatal(err) + } +} + +func TestCacheMemDB_Get(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test invalid index name + _, err = cache.Get("foo", "bar") + if err == nil { + t.Fatal("expected error") + } + + // Test on empty cache + index, err := cache.Get(IndexNameID, "foo") + if err != ErrCacheItemNotFound { + t.Fatal("expected cache item to be not found", err) + } + if index != nil { + t.Fatalf("expected nil index, got: %v", index) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_accessor", + Lease: "test_lease", + Response: []byte("hello world"), + Tokens: map[string]struct{}{}, + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + }{ + { + "by_index_id", + "id", + []interface{}{in.ID}, + }, + { + "by_request_path", + "request_path", + []interface{}{in.Namespace, in.RequestPath}, + }, + { + "by_lease", + "lease", + []interface{}{in.Lease}, + }, + { + "by_token", + "token", + []interface{}{in.Token}, + }, + { + "by_token_accessor", + "token_accessor", + []interface{}{in.TokenAccessor}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out, err := cache.Get(tc.indexName, tc.indexValues...) 
+ if err != nil && err != ErrCacheItemNotFound { + t.Fatal(err) + } + if diff := deep.Equal(in, out); diff != nil { + t.Fatal(diff) + } + }) + } +} + +func TestCacheMemDB_GetByPrefix(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test invalid index name + _, err = cache.GetByPrefix("foo", "bar", "baz") + if err == nil { + t.Fatal("expected error") + } + + // Test on empty cache + index, err := cache.GetByPrefix(IndexNameRequestPath, "foo", "bar") + if err != nil { + t.Fatal(err) + } + if index != nil { + t.Fatalf("expected nil index, got: %v", index) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path/1", + Token: "test_token", + TokenParent: "test_token_parent", + TokenAccessor: "test_accessor", + Lease: "path/to/test_lease/1", + LeaseToken: "test_lease_token", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + // Populate cache + in2 := &Index{ + ID: "test_id_2", + Namespace: "test_ns/", + RequestPath: "/v1/request/path/2", + Token: "test_token2", + TokenParent: "test_token_parent", + TokenAccessor: "test_accessor2", + Lease: "path/to/test_lease/2", + LeaseToken: "test_lease_token", + Response: []byte("hello world"), + } + + if err := cache.Set(in2); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + }{ + { + "by_request_path", + IndexNameRequestPath, + []interface{}{"test_ns/", "/v1/request/path"}, + }, + { + "by_lease", + IndexNameLease, + []interface{}{"path/to/test_lease"}, + }, + { + "by_token_parent", + IndexNameTokenParent, + []interface{}{"test_token_parent"}, + }, + { + "by_lease_token", + IndexNameLeaseToken, + []interface{}{"test_lease_token"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out, err := cache.GetByPrefix(tc.indexName, tc.indexValues...) 
+ if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal([]*Index{in, in2}, out); diff != nil { + t.Fatal(diff) + } + }) + } +} + +func TestCacheMemDB_Set(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + index *Index + wantErr bool + }{ + { + "nil", + nil, + true, + }, + { + "empty_fields", + &Index{}, + true, + }, + { + "missing_required_fields", + &Index{ + Lease: "foo", + }, + true, + }, + { + "all_fields", + &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_accessor", + Lease: "test_lease", + RenewCtxInfo: testContextInfo(), + }, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if err := cache.Set(tc.index); (err != nil) != tc.wantErr { + t.Fatalf("CacheMemDB.Set() error = %v, wantErr = %v", err, tc.wantErr) + } + }) + } +} + +func TestCacheMemDB_Evict(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test on empty cache + if err := cache.Evict(IndexNameID, "foo"); err != nil { + t.Fatal(err) + } + + testIndex := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_token_accessor", + Lease: "test_lease", + RenewCtxInfo: testContextInfo(), + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + insertIndex *Index + wantErr bool + }{ + { + "empty_params", + "", + []interface{}{""}, + nil, + true, + }, + { + "invalid_params", + "foo", + []interface{}{"bar"}, + nil, + true, + }, + { + "by_id", + "id", + []interface{}{"test_id"}, + testIndex, + false, + }, + { + "by_request_path", + "request_path", + []interface{}{"test_ns/", "/v1/request/path"}, + testIndex, + false, + }, + { + "by_token", + "token", + []interface{}{"test_token"}, + testIndex, + false, + }, + { + "by_token_accessor", + "token_accessor", + []interface{}{"test_token_accessor"}, + testIndex, + false, + }, + { + "by_lease", + "lease", + []interface{}{"test_lease"}, + testIndex, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.insertIndex != nil { + if err := cache.Set(tc.insertIndex); err != nil { + t.Fatal(err) + } + } + + if err := cache.Evict(tc.indexName, tc.indexValues...); (err != nil) != tc.wantErr { + t.Fatal(err) + } + + // Verify that the cache doesn't contain the entry any more + index, err := cache.Get(tc.indexName, tc.indexValues...) + if err != ErrCacheItemNotFound && !tc.wantErr { + t.Fatal("expected cache item to be not found", err) + } + if index != nil { + t.Fatalf("expected nil entry, got = %#v", index) + } + }) + } +} + +func TestCacheMemDB_Flush(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Token: "test_token", + Lease: "test_lease", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + // Reset the cache + if err := cache.Flush(); err != nil { + t.Fatal(err) + } + + // Check the cache doesn't contain inserted index + out, err := cache.Get(IndexNameID, "test_id") + if err != ErrCacheItemNotFound { + t.Fatal("expected cache item to be not found", err) + } + if out != nil { + t.Fatalf("expected cache to be empty, got = %v", out) + } +} + +// TestCacheMemDB_EvictCapabilitiesIndex tests EvictCapabilitiesIndex works as expected.
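The tests below exercise the capabilities-index lifecycle; as a compact, hedged sketch of that set/get/evict flow (the import path comes from this diff, and all values are invented):

```go
// Sketch: CapabilitiesIndex lifecycle, as exercised by the tests below.
package main

import (
	"errors"
	"log"

	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
)

func main() {
	cache, err := cachememdb.New()
	if err != nil {
		log.Fatal(err)
	}

	idx := &cachememdb.CapabilitiesIndex{
		ID:    "example-id", // in practice derived from the token
		Token: "example-token",
		ReadablePaths: map[string]struct{}{
			"kv/data/foo": {}, // illustrative readable path
		},
	}
	if err := cache.SetCapabilitiesIndex(idx); err != nil {
		log.Fatal(err)
	}

	if err := cache.EvictCapabilitiesIndex(cachememdb.CapabilitiesIndexNameID, "example-id"); err != nil {
		log.Fatal(err)
	}

	// After eviction the lookup reports a miss.
	_, err = cache.GetCapabilitiesIndex(cachememdb.CapabilitiesIndexNameID, "example-id")
	if !errors.Is(err, cachememdb.ErrCacheItemNotFound) {
		log.Fatalf("expected a miss, got: %v", err)
	}
}
```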
+func TestCacheMemDB_EvictCapabilitiesIndex(t *testing.T) { + cache, err := New() + require.Nil(t, err) + + // Test on empty cache + err = cache.EvictCapabilitiesIndex(IndexNameID, "foo") + require.Nil(t, err) + + capabilitiesIndex := &CapabilitiesIndex{ + ID: "id", + Token: "token", + } + + err = cache.SetCapabilitiesIndex(capabilitiesIndex) + require.Nil(t, err) + + err = cache.EvictCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID) + require.Nil(t, err) + + // Verify that the cache doesn't contain the entry anymore + index, err := cache.GetCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID) + require.Equal(t, ErrCacheItemNotFound, err) + require.Nil(t, index) +} + +// TestCacheMemDB_GetCapabilitiesIndex tests GetCapabilitiesIndex works as expected. +func TestCacheMemDB_GetCapabilitiesIndex(t *testing.T) { + cache, err := New() + require.Nil(t, err) + + capabilitiesIndex := &CapabilitiesIndex{ + ID: "id", + Token: "token", + } + + err = cache.SetCapabilitiesIndex(capabilitiesIndex) + require.Nil(t, err) + + // Verify that we can retrieve the index + index, err := cache.GetCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID) + require.Nil(t, err) + require.Equal(t, capabilitiesIndex, index) + + // Verify behaviour on a non-existing ID + index, err = cache.GetCapabilitiesIndex(IndexNameID, "not a real id") + require.Equal(t, ErrCacheItemNotFound, err) + require.Nil(t, index) + + // Verify behaviour with a non-existing index name + index, err = cache.GetCapabilitiesIndex("not a real name", capabilitiesIndex.ID) + require.NotNil(t, err) +} + +// TestCacheMemDB_SetCapabilitiesIndex tests SetCapabilitiesIndex works as expected. +func TestCacheMemDB_SetCapabilitiesIndex(t *testing.T) { + cache, err := New() + require.Nil(t, err) + + capabilitiesIndex := &CapabilitiesIndex{ + ID: "id", + Token: "token", + } + + err = cache.SetCapabilitiesIndex(capabilitiesIndex) + require.Nil(t, err) + + // Verify we can retrieve the index + index, err := cache.GetCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID) + require.Nil(t, err) + require.Equal(t, capabilitiesIndex, index) + + // Verify behaviour on a nil index + err = cache.SetCapabilitiesIndex(nil) + require.NotNil(t, err) + + // Verify behaviour on an index without id + err = cache.SetCapabilitiesIndex(&CapabilitiesIndex{ + Token: "token", + }) + require.NotNil(t, err) + + // Verify behaviour on an index with only ID + err = cache.SetCapabilitiesIndex(&CapabilitiesIndex{ + ID: "id", + }) + require.Nil(t, err) +} diff --git a/command/agentproxyshared/cache/cachememdb/index.go b/command/agentproxyshared/cache/cachememdb/index.go new file mode 100644 index 000000000000..6297b3df5f2a --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/index.go @@ -0,0 +1,234 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cachememdb + +import ( + "context" + "encoding/json" + "net/http" + "sync" + "time" +) + +// Index holds the response to be cached along with multiple other values that +// serve as pointers to refer back to this index. +type Index struct { + // ID is a value that uniquely represents the request held by this + // index. This is computed by serializing and hashing the request object.
+ // Required: true, Unique: true + ID string + + // Token is the token that fetched the response held by this index + // Required: true, Unique: true + Token string + + // Tokens is a set of tokens that can access this cached response, + // which is used for static secret caching, and enabling multiple + // tokens to be able to access the same cache entry for static secrets. + // Implemented as a map so that all values are unique. + // Required: false, Unique: false + Tokens map[string]struct{} + + // TokenParent is the parent token of the token held by this index + // Required: false, Unique: false + TokenParent string + + // TokenAccessor is the accessor of the token being cached in this index + // Required: true, Unique: true + TokenAccessor string + + // Namespace is the namespace that was provided in the request path as the + // Vault namespace to query + Namespace string + + // RequestPath is the path of the request that resulted in the response + // held by this index. + // For dynamic secrets, this will be the actual path sent to the request, + // e.g. /v1/foo/bar (which will not include the namespace if it was included + // in the headers). + // For static secrets, this will be the canonical path to the secret (i.e. + // after calling getStaticSecretPathFromRequest--see its godocs for more + // information). + // Required: true, Unique: false + RequestPath string + + // Versions are the versions of the secret for KVv2 static secrets only. This is + // a map of version to response, where version is the version number and response is the + // serialized cached response for that secret version. + // We could have chosen to put index.Response as Versions[0], but opted not to for consistency, + // and also to elevate the fact that the current version/representation of the path being + // cached here is stored there, not here. + // Required: false, Unique: false + Versions map[int][]byte + + // Lease is the identifier of the lease in Vault, that belongs to the + // response held by this index. + // Required: false, Unique: true + Lease string + + // LeaseToken is the identifier of the token that created the lease held by + // this index. + // Required: false, Unique: false + LeaseToken string + + // Response is the serialized response object that the agent is caching. + Response []byte + + // RenewCtxInfo holds the context and the corresponding cancel func for the + // goroutine that manages the renewal of the secret belonging to the + // response in this index. + RenewCtxInfo *ContextInfo + + // RequestMethod is the HTTP method of the request + RequestMethod string + + // RequestToken is the token used in the request + RequestToken string + + // RequestHeader is the header used in the request + RequestHeader http.Header + + // LastRenewed is the timestamp of last renewal + LastRenewed time.Time + + // Type is the index type (token, auth-lease, secret-lease, static-secret) + Type string + + // IndexLock is a lock held for some indexes to prevent data + // races upon update. + IndexLock sync.RWMutex +} + +// CapabilitiesIndex holds the capabilities for cached static secrets. +// This type of index does not represent a response. +type CapabilitiesIndex struct { + // ID is a value that uniquely represents the request held by this + // index. This is computed by hashing the token that this capabilities + // index represents the capabilities of. 
+ // Required: true, Unique: true + ID string + + // Token is the token that fetched the response held by this index + // Required: true, Unique: true + Token string + + // ReadablePaths is a set of paths with read capabilities for the given token. + // Implemented as a map for uniqueness. The key to the map is a path (such as + // `foo/bar`) that we've demonstrated we can read. + ReadablePaths map[string]struct{} + + // IndexLock is a lock held for some indexes to prevent data + // races upon update. + IndexLock sync.RWMutex +} + +type IndexName uint32 + +const ( + // IndexNameID is the ID of the index constructed from the serialized request. + IndexNameID = "id" + + // IndexNameLease is the lease of the index. + IndexNameLease = "lease" + + // IndexNameRequestPath is the request path of the index. + IndexNameRequestPath = "request_path" + + // IndexNameToken is the token of the index. + IndexNameToken = "token" + + // IndexNameTokenAccessor is the token accessor of the index. + IndexNameTokenAccessor = "token_accessor" + + // IndexNameTokenParent is the token parent of the index. + IndexNameTokenParent = "token_parent" + + // IndexNameLeaseToken is the token that created the lease. + IndexNameLeaseToken = "lease_token" + + // CapabilitiesIndexNameID is the ID of the capabilities index. + CapabilitiesIndexNameID = "id" +) + +func validIndexName(indexName string) bool { + switch indexName { + case IndexNameID: + case IndexNameLease: + case IndexNameRequestPath: + case IndexNameToken: + case IndexNameTokenAccessor: + case IndexNameTokenParent: + case IndexNameLeaseToken: + default: + return false + } + return true +} + +func validCapabilitiesIndexName(indexName string) bool { + switch indexName { + case CapabilitiesIndexNameID: + default: + return false + } + return true +} + +type ContextInfo struct { + Ctx context.Context + CancelFunc context.CancelFunc + DoneCh chan struct{} +} + +func NewContextInfo(ctx context.Context) *ContextInfo { + if ctx == nil { + return nil + } + + ctxInfo := new(ContextInfo) + ctxInfo.Ctx, ctxInfo.CancelFunc = context.WithCancel(ctx) + ctxInfo.DoneCh = make(chan struct{}) + return ctxInfo +} + +// Serialize returns a json marshal'ed Index object, without the RenewCtxInfo +func (i Index) Serialize() ([]byte, error) { + i.RenewCtxInfo = nil + + indexBytes, err := json.Marshal(i) + if err != nil { + return nil, err + } + + return indexBytes, nil +} + +// Deserialize converts json bytes to an Index object +// Note: RenewCtxInfo will need to be reconstructed elsewhere.
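A roundtrip through Serialize and Deserialize, as used when indexes are persisted to bolt storage; this is a hedged sketch with invented values, and it highlights that RenewCtxInfo is intentionally dropped on serialization and must be rebuilt by the caller.

```go
// Sketch: persisting an Index as JSON and restoring it.
package main

import (
	"log"

	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
)

func main() {
	in := cachememdb.Index{
		ID:          "example-id",
		Token:       "example-token",
		RequestPath: "/v1/kv/foo",
		Response:    []byte("cached response"),
	}

	// Serialize strips RenewCtxInfo before marshaling.
	blob, err := in.Serialize()
	if err != nil {
		log.Fatal(err)
	}

	out, err := cachememdb.Deserialize(blob)
	if err != nil {
		log.Fatal(err)
	}
	// out.RenewCtxInfo is nil here; the lease cache reconstructs it
	// when restoring persisted entries.
	log.Printf("restored index %s for %s", out.ID, out.RequestPath)
}
```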
+func Deserialize(indexBytes []byte) (*Index, error) { + index := new(Index) + if err := json.Unmarshal(indexBytes, index); err != nil { + return nil, err + } + return index, nil +} + +// SerializeCapabilitiesIndex returns a json marshal'ed CapabilitiesIndex object +func (i CapabilitiesIndex) SerializeCapabilitiesIndex() ([]byte, error) { + indexBytes, err := json.Marshal(i) + if err != nil { + return nil, err + } + + return indexBytes, nil +} + +// DeserializeCapabilitiesIndex converts json bytes to a CapabilitiesIndex object +func DeserializeCapabilitiesIndex(indexBytes []byte) (*CapabilitiesIndex, error) { + index := new(CapabilitiesIndex) + if err := json.Unmarshal(indexBytes, index); err != nil { + return nil, err + } + return index, nil +} diff --git a/command/agent/cache/cachememdb/index_test.go b/command/agentproxyshared/cache/cachememdb/index_test.go similarity index 92% rename from command/agent/cache/cachememdb/index_test.go rename to command/agentproxyshared/cache/cachememdb/index_test.go index c59ec5cba334..7b348e3402bc 100644 --- a/command/agent/cache/cachememdb/index_test.go +++ b/command/agentproxyshared/cache/cachememdb/index_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cachememdb @@ -17,6 +17,7 @@ func TestSerializeDeserialize(t *testing.T) { testIndex := &Index{ ID: "testid", Token: "testtoken", + Tokens: map[string]struct{}{"token1": {}, "token2": {}}, TokenParent: "parent token", TokenAccessor: "test accessor", Namespace: "test namespace", diff --git a/command/agentproxyshared/cache/enforceconsistency_enumer.go b/command/agentproxyshared/cache/enforceconsistency_enumer.go new file mode 100644 index 000000000000..e2354111df3d --- /dev/null +++ b/command/agentproxyshared/cache/enforceconsistency_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer -type=EnforceConsistency -trimprefix=EnforceConsistency"; DO NOT EDIT. + +package cache + +import ( + "fmt" +) + +const _EnforceConsistencyName = "NeverAlways" + +var _EnforceConsistencyIndex = [...]uint8{0, 5, 11} + +func (i EnforceConsistency) String() string { + if i < 0 || i >= EnforceConsistency(len(_EnforceConsistencyIndex)-1) { + return fmt.Sprintf("EnforceConsistency(%d)", i) + } + return _EnforceConsistencyName[_EnforceConsistencyIndex[i]:_EnforceConsistencyIndex[i+1]] +} + +var _EnforceConsistencyValues = []EnforceConsistency{0, 1} + +var _EnforceConsistencyNameToValueMap = map[string]EnforceConsistency{ + _EnforceConsistencyName[0:5]: 0, + _EnforceConsistencyName[5:11]: 1, +} + +// EnforceConsistencyString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func EnforceConsistencyString(s string) (EnforceConsistency, error) { + if val, ok := _EnforceConsistencyNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to EnforceConsistency values", s) +} + +// EnforceConsistencyValues returns all values of the enum +func EnforceConsistencyValues() []EnforceConsistency { + return _EnforceConsistencyValues +} + +// IsAEnforceConsistency returns "true" if the value is listed in the enum definition.
"false" otherwise +func (i EnforceConsistency) IsAEnforceConsistency() bool { + for _, v := range _EnforceConsistencyValues { + if i == v { + return true + } + } + return false +} diff --git a/command/agent/cache/handler.go b/command/agentproxyshared/cache/handler.go similarity index 81% rename from command/agent/cache/handler.go rename to command/agentproxyshared/cache/handler.go index a1f0eda04cce..d1369831787f 100644 --- a/command/agent/cache/handler.go +++ b/command/agentproxyshared/cache/handler.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cache @@ -11,31 +11,36 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" + "strings" + "sync/atomic" "time" "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) -func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool) http.Handler { +func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, forceAutoAuthToken bool, useAutoAuthToken bool, authInProgress *atomic.Bool, invalidTokenErrCh chan error) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logger.Info("received request", "method", r.Method, "path", r.URL.Path) - if !proxyVaultToken { + if forceAutoAuthToken { r.Header.Del(consts.AuthHeaderName) } token := r.Header.Get(consts.AuthHeaderName) - if token == "" && inmemSink != nil { - logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path) - token = inmemSink.(sink.SinkReader).Token() + var autoAuthToken string + if inmemSink != nil { + autoAuthToken = inmemSink.(sink.SinkReader).Token() + if token == "" && useAutoAuthToken { + logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path) + token = autoAuthToken + } } // Parse and reset body. @@ -59,15 +64,31 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm if err != nil { // If this is an api.Response error, don't wrap the response. 
if resp != nil && resp.Response.Error() != nil { + responseErrMessage := resp.Response.Error() copyHeader(w.Header(), resp.Response.Header) w.WriteHeader(resp.Response.StatusCode) io.Copy(w, resp.Response.Body) metrics.IncrCounter([]string{"agent", "proxy", "client_error"}, 1) + // Re-trigger auto auth if the token is the same as the auto auth token + if resp.Response.StatusCode == 403 && strings.Contains(responseErrMessage.Error(), logical.ErrInvalidToken.Error()) && + autoAuthToken == token && !authInProgress.Load() { + // Drain the error channel first + logger.Info("proxy received an invalid token error") + select { + case <-invalidTokenErrCh: + default: + } + invalidTokenErrCh <- resp.Response.Error() + } } else { metrics.IncrCounter([]string{"agent", "proxy", "error"}, 1) logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get the response: %w", err)) } return + } else if resp == nil { + metrics.IncrCounter([]string{"agent", "proxy", "error"}, 1) + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get the response: %w", err)) + return } err = sanitizeAutoAuthTokenResponse(ctx, logger, inmemSink, req, resp) @@ -200,7 +221,7 @@ func sanitizeAutoAuthTokenResponse(ctx context.Context, logger hclog.Logger, inm if resp.Response.Body != nil { resp.Response.Body.Close() } - resp.Response.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes)) + resp.Response.Body = io.NopCloser(bytes.NewReader(bodyBytes)) resp.Response.ContentLength = int64(len(bodyBytes)) // Serialize and re-read the response diff --git a/command/agentproxyshared/cache/keymanager/manager.go b/command/agentproxyshared/cache/keymanager/manager.go new file mode 100644 index 000000000000..46fc499d2e14 --- /dev/null +++ b/command/agentproxyshared/cache/keymanager/manager.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package keymanager + +import ( + "context" + + wrapping "github.com/hashicorp/go-kms-wrapping/v2" +) + +const ( + KeyID = "root" +) + +type KeyManager interface { + // Returns a wrapping.Wrapper which can be used to perform key-related operations. + Wrapper() wrapping.Wrapper + // RetrievalToken is the material returned which can be used to source back the + // encryption key. Depending on the implementation, the token can be the + // encryption key itself or a token/identifier used to exchange the token. + RetrievalToken(ctx context.Context) ([]byte, error) +} diff --git a/command/agent/cache/keymanager/passthrough.go b/command/agentproxyshared/cache/keymanager/passthrough.go similarity index 97% rename from command/agent/cache/keymanager/passthrough.go rename to command/agentproxyshared/cache/keymanager/passthrough.go index cda6b6e5db34..f88d2787a725 100644 --- a/command/agent/cache/keymanager/passthrough.go +++ b/command/agentproxyshared/cache/keymanager/passthrough.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package keymanager diff --git a/command/agent/cache/keymanager/passthrough_test.go b/command/agentproxyshared/cache/keymanager/passthrough_test.go similarity index 96% rename from command/agent/cache/keymanager/passthrough_test.go rename to command/agentproxyshared/cache/keymanager/passthrough_test.go index 9327ee3f0ec6..b3dc9b72525c 100644 --- a/command/agent/cache/keymanager/passthrough_test.go +++ b/command/agentproxyshared/cache/keymanager/passthrough_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package keymanager diff --git a/command/agentproxyshared/cache/lease_cache.go b/command/agentproxyshared/cache/lease_cache.go new file mode 100644 index 000000000000..b29a57461320 --- /dev/null +++ b/command/agentproxyshared/cache/lease_cache.go @@ -0,0 +1,1886 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "bufio" + "bytes" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/helper/namespace" + nshelper "github.com/hashicorp/vault/helper/namespace" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/cryptoutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" + gocache "github.com/patrickmn/go-cache" + "go.uber.org/atomic" +) + +const ( + vaultPathTokenCreate = "/v1/auth/token/create" + vaultPathTokenRevoke = "/v1/auth/token/revoke" + vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self" + vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor" + vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan" + vaultPathTokenLookup = "/v1/auth/token/lookup" + vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self" + vaultPathTokenRenew = "/v1/auth/token/renew" + vaultPathTokenRenewSelf = "/v1/auth/token/renew-self" + vaultPathLeaseRevoke = "/v1/sys/leases/revoke" + vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force" + vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix" +) + +var ( + contextIndexID = contextIndex{} + errInvalidType = errors.New("invalid type provided") + revocationPaths = []string{ + strings.TrimPrefix(vaultPathTokenRevoke, "/v1"), + strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"), + strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"), + strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"), + strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"), + strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"), + strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"), + } +) + +type contextIndex struct{} + +type cacheClearRequest struct { + Type string `json:"type"` + Value string `json:"value"` + Namespace string `json:"namespace"` +} + +// LeaseCache is an implementation of Proxier that handles +// the caching of responses. It passes the incoming request +// to an underlying Proxier implementation. +type LeaseCache struct { + client *api.Client + proxier Proxier + logger hclog.Logger + db *cachememdb.CacheMemDB + baseCtxInfo *cachememdb.ContextInfo + l *sync.RWMutex + + // userAgentToUse is the user agent to use when making independent requests + // to Vault. + userAgentToUse string + + // idLocks is used during cache lookup to ensure that identical requests made + // in parallel won't trigger multiple renewal goroutines. 
+ idLocks []*locksutil.LockEntry + + // inflightCache keeps track of inflight requests + inflightCache *gocache.Cache + + // ps is the persistent storage for tokens and leases + ps *cacheboltdb.BoltStorage + + // shuttingDown is used to determine if the cache needs to be evicted or not + // when the context is cancelled + shuttingDown atomic.Bool + + // cacheStaticSecrets is used to determine if the cache should also + // cache static secrets, as well as dynamic secrets. + cacheStaticSecrets bool + + // cacheDynamicSecrets is used to determine if the cache should + // cache dynamic secrets + cacheDynamicSecrets bool + + // capabilityManager is used when static secrets are enabled to + // manage the capabilities of cached tokens. + capabilityManager *StaticSecretCapabilityManager +} + +// LeaseCacheConfig is the configuration for initializing a new +// LeaseCache. +type LeaseCacheConfig struct { + Client *api.Client + BaseContext context.Context + Proxier Proxier + Logger hclog.Logger + UserAgentToUse string + Storage *cacheboltdb.BoltStorage + CacheStaticSecrets bool + CacheDynamicSecrets bool +} + +type inflightRequest struct { + // ch is closed by the request that ends up processing the set of + // parallel requests + ch chan struct{} + + // remaining is the number of remaining inflight requests that need to + // be processed before this object can be cleaned up + remaining *atomic.Uint64 +} + +func newInflightRequest() *inflightRequest { + return &inflightRequest{ + ch: make(chan struct{}), + remaining: atomic.NewUint64(0), + } +} + +// NewLeaseCache creates a new instance of a LeaseCache. +func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) { + if conf == nil { + return nil, errors.New("nil configuration provided") + } + + if conf.Proxier == nil || conf.Logger == nil { + return nil, fmt.Errorf("missing required configuration params: %v", conf) + } + + if conf.Client == nil { + return nil, fmt.Errorf("nil API client") + } + + if conf.UserAgentToUse == "" { + return nil, fmt.Errorf("no user agent specified -- see useragent.go") + } + + db, err := cachememdb.New() + if err != nil { + return nil, err + } + + // Create a base context for the lease cache layer + baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext) + + return &LeaseCache{ + client: conf.Client, + proxier: conf.Proxier, + logger: conf.Logger, + userAgentToUse: conf.UserAgentToUse, + db: db, + baseCtxInfo: baseCtxInfo, + l: &sync.RWMutex{}, + idLocks: locksutil.CreateLocks(), + inflightCache: gocache.New(gocache.NoExpiration, gocache.NoExpiration), + ps: conf.Storage, + cacheStaticSecrets: conf.CacheStaticSecrets, + cacheDynamicSecrets: conf.CacheDynamicSecrets, + }, nil +} + +// SetCapabilityManager is a setter for CapabilityManager. If set, it will manage +// capabilities for capability indexes. +func (c *LeaseCache) SetCapabilityManager(capabilityManager *StaticSecretCapabilityManager) { + c.capabilityManager = capabilityManager +} + +// SetShuttingDown is a setter for the shuttingDown field +func (c *LeaseCache) SetShuttingDown(in bool) { + c.shuttingDown.Store(in) + + // Since we're shutting down, also stop the capability manager's jobs. + // We can do this forcibly since there's no reason to update + // the cache when we're shutting down.
+ if c.capabilityManager != nil { + c.capabilityManager.Stop() + } +} + +// SetPersistentStorage is a setter for the persistent storage field in +// LeaseCache +func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) { + c.ps = storageIn +} + +// PersistentStorage is a getter for the persistent storage field in +// LeaseCache +func (c *LeaseCache) PersistentStorage() *cacheboltdb.BoltStorage { + return c.ps +} + +// checkCacheForDynamicSecretRequest checks the cache for a particular request based on its +// computed ID. It returns a non-nil *SendResponse if an entry is found. +func (c *LeaseCache) checkCacheForDynamicSecretRequest(id string) (*SendResponse, error) { + c.logger.Trace("checking cache for dynamic secret request", "id", id) + return c.checkCacheForRequest(id, nil) +} + +// checkCacheForStaticSecretRequest checks the cache for a particular request based on its +// computed ID. It returns a non-nil *SendResponse if an entry is found. +// If a request is provided, it will validate that the token is allowed to retrieve this +// cache entry, and return nil if it isn't. It will also evict the cache if this is a non-GET +// request. +func (c *LeaseCache) checkCacheForStaticSecretRequest(id string, req *SendRequest) (*SendResponse, error) { + c.logger.Trace("checking cache for static secret request", "id", id) + return c.checkCacheForRequest(id, req) +} + +// checkCacheForRequest checks the cache for a particular request based on its +// computed ID. It returns a non-nil *SendResponse if an entry is found. +// If a token is provided, it will validate that the token is allowed to retrieve this +// cache entry, and return nil if it isn't. +func (c *LeaseCache) checkCacheForRequest(id string, req *SendRequest) (*SendResponse, error) { + index, err := c.db.Get(cachememdb.IndexNameID, id) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return nil, nil + } + if err != nil { + return nil, err + } + + index.IndexLock.RLock() + defer index.IndexLock.RUnlock() + + var token string + if req != nil { + // Req will be non-nil if we're checking for a static secret. + // Token might still be "" if it's going to an unauthenticated + // endpoint, or similar. For static secrets, we only care about + // requests with tokens attached, as KV is authenticated. + token = req.Token + } + + if token != "" { + // We are checking for a static secret. We need to ensure that this token + // has previously demonstrated access to this static secret. + // We could check the capabilities cache here, but since these + // indexes should be in sync, this saves us an extra cache get. + if _, ok := index.Tokens[token]; !ok { + // We don't have access to this static secret, so + // we do not return the cached response. + return nil, nil + } + } + + var response []byte + version := getStaticSecretVersionFromRequest(req) + if version == 0 { + response = index.Response + } else { + response = index.Versions[version] + } + + // We don't have this response as either a current or older version. 
+ if response == nil { + return nil, nil + } + + // Cached request is found, deserialize the response + reader := bufio.NewReader(bytes.NewReader(response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + c.logger.Error("failed to deserialize response", "error", err) + return nil, err + } + + sendResp, err := NewSendResponse(&api.Response{Response: resp}, response) + if err != nil { + c.logger.Error("failed to create new send response", "error", err) + return nil, err + } + sendResp.CacheMeta.Hit = true + + respTime, err := http.ParseTime(resp.Header.Get("Date")) + if err != nil { + c.logger.Error("failed to parse cached response date", "error", err) + return nil, err + } + sendResp.CacheMeta.Age = time.Now().Sub(respTime) + + return sendResp, nil +} + +// Send performs a cache lookup on the incoming request. If it's a cache hit, +// it will return the cached response, otherwise it will delegate to the +// underlying Proxier and cache the received response. +func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + // Compute the index ID for both static and dynamic secrets. + // The primary difference is that for dynamic secrets, the + // Vault token forms part of the index. + dynamicSecretCacheId, err := computeIndexID(req) + if err != nil { + c.logger.Error("failed to compute cache key", "error", err) + return nil, err + } + staticSecretCacheId := computeStaticSecretCacheIndex(req) + + // Check the inflight cache to see if there are other inflight requests + // of the same kind, based on the computed ID. If so, we increment a counter + + // Note: we lock both the dynamic secret cache ID and the static secret cache ID + // as at this stage, we don't know what kind of secret it is. + var inflight *inflightRequest + + defer func() { + // Cleanup on the cache if there are no remaining inflight requests. + // This is the last step, so we defer the call first + if inflight != nil && inflight.remaining.Load() == 0 { + c.inflightCache.Delete(dynamicSecretCacheId) + if staticSecretCacheId != "" { + c.inflightCache.Delete(staticSecretCacheId) + } + } + }() + + idLockDynamicSecret := locksutil.LockForKey(c.idLocks, dynamicSecretCacheId) + + // Briefly grab an ID-based lock in here to emulate a load-or-store behavior + // and prevent concurrent cacheable requests from being proxied twice if + // they both miss the cache due to it being clean when peeking the cache + // entry. + idLockDynamicSecret.Lock() + inflightRaw, found := c.inflightCache.Get(dynamicSecretCacheId) + if found { + idLockDynamicSecret.Unlock() + inflight = inflightRaw.(*inflightRequest) + inflight.remaining.Inc() + defer inflight.remaining.Dec() + + // If found it means that there's an inflight request being processed. + // We wait until that's finished before proceeding further. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.ch: + } + } else { + if inflight == nil { + inflight = newInflightRequest() + inflight.remaining.Inc() + defer inflight.remaining.Dec() + defer close(inflight.ch) + } + + c.inflightCache.Set(dynamicSecretCacheId, inflight, gocache.NoExpiration) + idLockDynamicSecret.Unlock() + } + + if staticSecretCacheId != "" { + idLockStaticSecret := locksutil.LockForKey(c.idLocks, staticSecretCacheId) + + // Briefly grab an ID-based lock in here to emulate a load-or-store behavior + // and prevent concurrent cacheable requests from being proxied twice if + // they both miss the cache due to it being clean when peeking the cache + // entry. 
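+ // Illustrative effect of the inflight bookkeeping: if several identical + // requests arrive concurrently, only the first is proxied; the rest block + // on inflight.ch and then re-check the (now warm) cache.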
+ idLockStaticSecret.Lock() + inflightRaw, found = c.inflightCache.Get(staticSecretCacheId) + if found { + idLockStaticSecret.Unlock() + inflight = inflightRaw.(*inflightRequest) + inflight.remaining.Inc() + defer inflight.remaining.Dec() + + // If found it means that there's an inflight request being processed. + // We wait until that's finished before proceeding further. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.ch: + } + } else { + if inflight == nil { + inflight = newInflightRequest() + inflight.remaining.Inc() + defer inflight.remaining.Dec() + defer close(inflight.ch) + } + + c.inflightCache.Set(staticSecretCacheId, inflight, gocache.NoExpiration) + idLockStaticSecret.Unlock() + } + } + + // Check if the response for this request is already in the dynamic secret cache + cachedResp, err := c.checkCacheForDynamicSecretRequest(dynamicSecretCacheId) + if err != nil { + return nil, err + } + if cachedResp != nil { + c.logger.Debug("returning cached dynamic secret response", "path", req.Request.URL.Path) + return cachedResp, nil + } + + // Check if the response for this request is already in the static secret cache + if staticSecretCacheId != "" && req.Request.Method == http.MethodGet && req.Token != "" { + cachedResp, err = c.checkCacheForStaticSecretRequest(staticSecretCacheId, req) + if err != nil { + return nil, err + } + if cachedResp != nil { + c.logger.Debug("returning cached static secret response", "id", staticSecretCacheId, "path", getStaticSecretPathFromRequest(req)) + return cachedResp, nil + } + } + + c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) + + // Pass the request down and get a response + resp, err := c.proxier.Send(ctx, req) + if err != nil { + return resp, err + } + + // If this is a non-2xx or if the returned response does not contain JSON payload, + // we skip caching + if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" { + return resp, err + } + + // Get the namespace from the request header + namespace := req.Request.Header.Get(consts.NamespaceHeaderName) + // We need to populate an empty value since go-memdb will skip over indexes + // that contain empty values. + if namespace == "" { + namespace = "root/" + } + + // Build the index to cache based on the response received + index := &cachememdb.Index{ + Namespace: namespace, + RequestPath: req.Request.URL.Path, + LastRenewed: time.Now().UTC(), + } + + secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) + if err != nil { + c.logger.Error("failed to parse response as secret", "error", err) + return nil, err + } + + isRevocation, err := c.handleRevocationRequest(ctx, req, resp) + if err != nil { + c.logger.Error("failed to process the response", "error", err) + return nil, err + } + + // If this is a revocation request, do not go through cache logic. + if isRevocation { + return resp, nil + } + + // Fast path for responses with no secrets + if secret == nil { + c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // There shouldn't be a situation where secret.MountType == "kv" and + // staticSecretCacheId == "", but just in case. + // We restrict this to GETs as those are all we want to cache. 
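+ // e.g. (illustrative): a GET to /v1/secret/data/foo on a KVv2 ("kv") mount + // is cached under the canonical path "secret/data/foo"; anything else falls + // through to the dynamic secret handling below.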
+ if c.cacheStaticSecrets && secret.MountType == "kv" && + staticSecretCacheId != "" && req.Request.Method == http.MethodGet { + index.Type = cacheboltdb.StaticSecretType + index.ID = staticSecretCacheId + // We set the request path to be the canonical static secret path, so that + // two differently shaped (but equivalent) requests to the same path + // will be the same. + // This differs slightly from dynamic secrets, where the /v1/ will be + // included in the request path. + index.RequestPath = getStaticSecretPathFromRequest(req) + + c.logger.Trace("attempting to cache static secret with following request path", "request path", index.RequestPath, "version", getStaticSecretVersionFromRequest(req)) + err := c.cacheStaticSecret(ctx, req, resp, index, secret) + if err != nil { + return nil, err + } + return resp, nil + } else { + // Since it's not a static secret, set the ID to be the dynamic id + index.ID = dynamicSecretCacheId + } + + // Short-circuit if we've been configured to not cache dynamic secrets + if !c.cacheDynamicSecrets { + return resp, nil + } + + // Short-circuit if the secret is not renewable + tokenRenewable, err := secret.TokenIsRenewable() + if err != nil { + c.logger.Error("failed to parse renewable param", "error", err) + return nil, err + } + if !secret.Renewable && !tokenRenewable { + c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + var renewCtxInfo *cachememdb.ContextInfo + switch { + case secret.LeaseID != "": + c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) + entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // If the lease belongs to a token that is not managed by the lease cache, + // return the response without caching it. + c.logger.Debug("pass-through lease response; token not managed by lease cache", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + if err != nil { + return nil, err + } + + // Derive a context for renewal using the token's context + renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) + + index.Lease = secret.LeaseID + index.LeaseToken = req.Token + + index.Type = cacheboltdb.LeaseType + + case secret.Auth != nil: + c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) + + // Check if this token creation request resulted in a non-orphan token, and if so + // correctly set the parentCtx to the request's token context. + var parentCtx context.Context + if !secret.Auth.Orphan { + entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // If the lease belongs to a token that is not managed by the lease cache, + // return the response without caching it. 
+ c.logger.Debug("pass-through lease response; parent token not managed by lease cache", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + if err != nil { + return nil, err + } + + c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) + parentCtx = entry.RenewCtxInfo.Ctx + + index.TokenParent = req.Token + } + + renewCtxInfo = c.createCtxInfo(parentCtx) + index.Token = secret.Auth.ClientToken + index.TokenAccessor = secret.Auth.Accessor + + index.Type = cacheboltdb.LeaseType + + default: + // We shouldn't be hitting this, but will err on the side of caution and + // simply proxy. + c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // Serialize the response to store it in the cached index + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return nil, err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Set the index's Response + index.Response = respBytes.Bytes() + + // Store the index ID in the lifetimewatcher context + renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) + + // Store the lifetime watcher context in the index + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: renewCtx, + CancelFunc: renewCtxInfo.CancelFunc, + DoneCh: renewCtxInfo.DoneCh, + } + + // Add extra information necessary for restoring from persisted cache + index.RequestMethod = req.Request.Method + index.RequestToken = req.Token + index.RequestHeader = req.Request.Header + + if index.Type != cacheboltdb.StaticSecretType { + // Store the index in the cache + c.logger.Debug("storing dynamic secret response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path, "id", index.ID) + err = c.Set(ctx, index) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return nil, err + } + + // Start renewing the secret in the response + go c.startRenewing(renewCtx, index, req, secret) + } + + return resp, nil +} + +func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, resp *SendResponse, index *cachememdb.Index, secret *api.Secret) error { + // If a cached version of this secret exists, we now have access, so + // we don't need to re-cache, just update index.Tokens + indexFromCache, err := c.db.Get(cachememdb.IndexNameID, index.ID) + if err != nil && !errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return err + } + + version := getStaticSecretVersionFromRequest(req) + + // The index already exists, so all we need to do is add our token + // to the index's allowed token list, and if necessary, the new version, + // then re-store it. + if indexFromCache != nil { + // We must hold a lock for the index while it's being updated. + // We keep the two locking mechanisms distinct, so that it's only writes + // that have to be serial. + indexFromCache.IndexLock.Lock() + defer indexFromCache.IndexLock.Unlock() + indexFromCache.Tokens[req.Token] = struct{}{} + + // Are we looking for a version that's already cached? 
+ haveVersion := false + if version != 0 { + _, ok := indexFromCache.Versions[version] + if ok { + haveVersion = true + } + } else { + if indexFromCache.Response != nil { + haveVersion = true + } + } + + if !haveVersion { + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Set the index's Response + if version == 0 { + indexFromCache.Response = respBytes.Bytes() + // For current KVv2 secrets, see if we can also store the response under the + // version that the secret is at. E.g. if we fetched the latest version and + // it is version 2, also update Versions[2] + c.addToVersionListForCurrentVersionKVv2Secret(indexFromCache, secret) + } else { + indexFromCache.Versions[version] = respBytes.Bytes() + } + } + + return c.storeStaticSecretIndex(ctx, req, indexFromCache) + } + + // Serialize the response to store it in the cached index + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Initialize the versions + index.Versions = map[int][]byte{} + + // Set the index's Response + if version == 0 { + index.Response = respBytes.Bytes() + // For current KVv2 secrets, see if we can also store the response under the + // version that the secret is at. E.g. if we fetched the latest version and + // it is version 2, also update Versions[2] + c.addToVersionListForCurrentVersionKVv2Secret(index, secret) + } else { + index.Versions[version] = respBytes.Bytes() + } + + // Initialize the token map and add this token to it. + index.Tokens = map[string]struct{}{req.Token: {}} + + // Set the index type + index.Type = cacheboltdb.StaticSecretType + + // Store the index: + return c.storeStaticSecretIndex(ctx, req, index) +} + +// addToVersionListForCurrentVersionKVv2Secret takes a secret index and, if it's +// a KVv2 secret, adds the given response to the corresponding version for it. +// This function fails silently, as we could be parsing arbitrary JSON. +// This function can store a version for a KVv1 secret iff: +// - It has 'data' in the path +// - It has a numerical 'metadata.version' field +// However, this risk seems very small, and the negatives of such a secret being +// stored in the cache aren't worth additional mitigations to check if it's a KVv1 +// or KVv2 mount (such as doing a 'preflight' request like the CLI). +// There's no way to access it and it's just a couple of extra bytes, in the +// case that this does happen to a KVv1 secret. +func (c *LeaseCache) addToVersionListForCurrentVersionKVv2Secret(index *cachememdb.Index, secret *api.Secret) { + if secret != nil { + // First do an imperfect but lightweight check. This saves parsing the secret in the case that the secret isn't KVv2. + // KVv2 secrets always contain /data/, but KVv1 secrets can too, so we can't rely on this.
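+ // Illustrative shape this unpacks, assuming a standard KVv2 read response: + // secret.Data["metadata"] decodes to + //   map[string]interface{}{"version": json.Number("2"), ...}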
+ if strings.Contains(index.RequestPath, "/data/") { + metadata, ok := secret.Data["metadata"] + if ok { + metaDataAsMap, ok := metadata.(map[string]interface{}) + if ok { + versionJson, ok := metaDataAsMap["version"].(json.Number) + if ok { + versionInt64, err := versionJson.Int64() + if err == nil { + version := int(versionInt64) + c.logger.Trace("adding response for current KVv2 secret to index's Versions map", "path", index.RequestPath, "version", version) + + if index.Versions == nil { + index.Versions = map[int][]byte{} + } + + index.Versions[version] = index.Response + } + } + } + } + } + } +} + +func (c *LeaseCache) storeStaticSecretIndex(ctx context.Context, req *SendRequest, index *cachememdb.Index) error { + // Store the index in the cache + c.logger.Debug("storing static secret response into the cache", "path", index.RequestPath, "id", index.ID) + err := c.Set(ctx, index) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return err + } + + capabilitiesIndex, created, err := c.retrieveOrCreateTokenCapabilitiesEntry(req.Token) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return err + } + + path := getStaticSecretPathFromRequest(req) + + capabilitiesIndex.IndexLock.Lock() + // Extra caution -- avoid potential nil + if capabilitiesIndex.ReadablePaths == nil { + capabilitiesIndex.ReadablePaths = make(map[string]struct{}) + } + + // update the index with the new capability: + capabilitiesIndex.ReadablePaths[path] = struct{}{} + capabilitiesIndex.IndexLock.Unlock() + + err = c.SetCapabilitiesIndex(ctx, capabilitiesIndex) + if err != nil { + c.logger.Error("failed to cache token capabilities as part of caching the proxied response", "error", err) + return err + } + + // Lastly, ensure that we start renewing this index, if it's new. + // We require the 'created' check so that we don't renew the same + // index multiple times. + if c.capabilityManager != nil && created { + c.capabilityManager.StartRenewingCapabilities(capabilitiesIndex) + } + + return nil +} + +// retrieveOrCreateTokenCapabilitiesEntry will either retrieve the token +// capabilities entry from the cache, or create a new, empty one. +// The bool represents if a new token capability has been created. +func (c *LeaseCache) retrieveOrCreateTokenCapabilitiesEntry(token string) (*cachememdb.CapabilitiesIndex, bool, error) { + // The index ID is a hash of the token. 
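+ // i.e. (illustrative): indexId == hex(Blake2b-256(token)); see + // hashStaticSecretIndex below.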
+ indexId := hashStaticSecretIndex(token) + indexFromCache, err := c.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + if err != nil && !errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return nil, false, err + } + + if indexFromCache != nil { + return indexFromCache, false, nil + } + + // Build the index to cache based on the response received + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + ReadablePaths: make(map[string]struct{}), + } + + return index, true, nil +} + +func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo { + if ctx == nil { + c.l.RLock() + ctx = c.baseCtxInfo.Ctx + c.l.RUnlock() + } + return cachememdb.NewContextInfo(ctx) +} + +func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { + defer func() { + id := ctx.Value(contextIndexID).(string) + if c.shuttingDown.Load() { + c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) + return + } + c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) + err := c.Evict(index) + if err != nil { + c.logger.Error("failed to evict index", "id", id, "error", err) + return + } + }() + + client, err := c.client.Clone() + if err != nil { + c.logger.Error("failed to create API client in the lifetime watcher", "error", err) + return + } + client.SetToken(req.Token) + + headers := client.Headers() + if headers == nil { + headers = make(http.Header) + } + + // We do not preserve any initial User-Agent here since these requests are from + // the proxy subsystem, but are made by the lease cache's lifetime watcher, + // not triggered by a specific request. + headers.Set("User-Agent", c.userAgentToUse) + client.SetHeaders(headers) + + watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: secret, + }) + if err != nil { + c.logger.Error("failed to create secret lifetime watcher", "error", err) + return + } + + c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path) + go watcher.Start() + defer watcher.Stop() + + for { + select { + case <-ctx.Done(): + // This is the case which captures context cancellations from token + // and leases. Since all the contexts are derived from the agent's + // context, this will also cover the shutdown scenario. + c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path) + return + case err := <-watcher.DoneCh(): + // This case covers renewal completion and renewal errors + if err != nil { + c.logger.Error("failed to renew secret", "error", err) + return + } + c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path) + return + case <-watcher.RenewCh(): + c.logger.Debug("secret renewed", "path", req.Request.URL.Path) + if c.ps != nil { + if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil { + c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID) + } + } + case <-index.RenewCtxInfo.DoneCh: + // This case signals the renewal process to shut down and evict + // the cache entry. This is triggered when a specific secret + // renewal needs to be killed without affecting any of the derived + // context renewals.
+ c.logger.Debug("done channel closed") + return + } + } +} + +func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error { + idLock := locksutil.LockForKey(c.idLocks, index.ID) + idLock.Lock() + defer idLock.Unlock() + + getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + return err + } + index.LastRenewed = t + if err := c.Set(ctx, getIndex); err != nil { + return err + } + return nil +} + +// computeIndexID results in a value that uniquely identifies a request +// received by the agent. It does so by SHA256 hashing the serialized request +// object containing the request path, query parameters and body parameters. +func computeIndexID(req *SendRequest) (string, error) { + var b bytes.Buffer + + cloned := req.Request.Clone(context.Background()) + cloned.Header.Del(vaulthttp.VaultIndexHeaderName) + cloned.Header.Del(vaulthttp.VaultForwardHeaderName) + cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName) + // Serialize the request + if err := cloned.Write(&b); err != nil { + return "", fmt.Errorf("failed to serialize request: %v", err) + } + + // Reset the request body after it has been closed by Write + req.Request.Body = io.NopCloser(bytes.NewReader(req.RequestBody)) + + // Append req.Token into the byte slice. This is needed since auto-auth'ed + // requests sets the token directly into SendRequest.Token + if _, err := b.WriteString(req.Token); err != nil { + return "", fmt.Errorf("failed to write token to hash input: %w", err) + } + + return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil +} + +// canonicalizeStaticSecretPath takes an API request path such as +// /v1/foo/bar and a namespace, and turns it into a canonical representation +// of the secret's path in Vault. +// We opt for this form as namespace.Canonicalize returns a namespace in the +// form of "ns1/", so we keep consistent with path canonicalization. +func canonicalizeStaticSecretPath(requestPath string, ns string) string { + // /sys/capabilities accepts both requests that look like foo/bar + // and /foo/bar but not /v1/foo/bar. + // We trim the /v1/ from the start of the URL to get the foo/bar form. + // This means that we can use the paths we retrieve from the + // /sys/capabilities endpoint to access this index + // without having to re-add the /v1/ + path := strings.TrimPrefix(requestPath, "/v1/") + // Trim any leading slashes, as we never want those. + // This ensures /foo/bar gets turned to foo/bar + path = strings.TrimPrefix(path, "/") + + // If a namespace was provided in a way that wasn't directly in the path, + // it must be added to the path. + path = namespace.Canonicalize(ns) + path + + return path +} + +// getStaticSecretVersionFromRequest gets the version of a secret +// from a request. For the latest secret and for KVv1 secrets, +// this will return 0. +func getStaticSecretVersionFromRequest(req *SendRequest) int { + if req == nil || req.Request == nil { + return 0 + } + version := req.Request.FormValue("version") + if version == "" { + return 0 + } + versionInt, err := strconv.Atoi(version) + if err != nil { + // It's not a valid version. + return 0 + } + return versionInt +} + +// getStaticSecretPathFromRequest gets the canonical path for a +// request, taking into account intricacies relating to /v1/ and namespaces +// in the header. +// Returns a path like foo/bar or ns1/foo/bar. 
+// We opt for this form as namespace.Canonicalize returns a namespace in the +// form of "ns1/", so we keep consistent with path canonicalization. +func getStaticSecretPathFromRequest(req *SendRequest) string { + path := req.Request.URL.Path + // Static secrets always have /v1 as a prefix. This allows us to + // pass through and never attempt to cache or view-from-cache + // any request without the /v1 prefix. + if !strings.HasPrefix(path, "/v1") { + return "" + } + var namespace string + if header := req.Request.Header; header != nil { + namespace = header.Get(api.NamespaceHeaderName) + } + return canonicalizeStaticSecretPath(path, namespace) +} + +// hashStaticSecretIndex is a simple function that hashes the path into +// an index ID. This is kept as a helper function for ease of use by downstream functions. +func hashStaticSecretIndex(unhashedIndex string) string { + return hex.EncodeToString(cryptoutil.Blake2b256Hash(unhashedIndex)) +} + +// computeStaticSecretCacheIndex results in a value that uniquely identifies a static +// secret's cached ID. Notably, we intentionally ignore headers (for example, +// the X-Vault-Token header) to remain agnostic to which token is being +// used in the request. We care only about the path. +// This will return "" if the index does not have a /v1 prefix, and therefore +// cannot be a static secret. +func computeStaticSecretCacheIndex(req *SendRequest) string { + path := getStaticSecretPathFromRequest(req) + if path == "" { + return path + } + + return hashStaticSecretIndex(path) +} + +// HandleCacheClear returns a handlerFunc that can perform cache clearing operations. +func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // If the cache is not enabled, return a 200 + if c == nil { + return + } + + // Only handle POST/PUT requests + switch r.Method { + case http.MethodPost: + case http.MethodPut: + default: + return + } + + req := new(cacheClearRequest) + if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil { + if err == io.EOF { + err = errors.New("empty JSON provided") + } + logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err)) + return + } + + c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value) + + in, err := parseCacheClearInput(req) + if err != nil { + c.logger.Error("unable to parse clear input", "error", err) + logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err)) + return + } + + if err := c.handleCacheClear(ctx, in); err != nil { + // Default to 500 on error, unless the user provided an invalid type, + // which would then be a 400. + httpStatus := http.StatusInternalServerError + if errors.Is(err, errInvalidType) { + httpStatus = http.StatusBadRequest + } + logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err)) + return + } + + return + }) +} + +func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error { + if in == nil { + return errors.New("no value(s) provided to clear corresponding cache entries") + } + + switch in.Type { + case "request_path": + // For this particular case, we need to ensure that there are 2 provided + // indexers for the proper lookup.
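+ // Illustrative body for this case (shape per cacheClearRequest above): + //   {"type": "request_path", "namespace": "ns1/", "value": "/v1/secret/data/foo"}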
+ if in.RequestPath == "" { + return errors.New("request path not provided") + } + + // The first value provided for this case will be the namespace, but if it's + // an empty value we need to overwrite it with "root/" to ensure proper + // cache lookup. + if in.Namespace == "" { + in.Namespace = "root/" + } + + // Find all the cached entries which has the given request path and + // cancel the contexts of all the respective lifetime watchers + indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath) + if err != nil { + return err + } + for _, index := range indexes { + // If it's a static secret, we must remove directly, as there + // is no renew func to cancel. + if index.Type == cacheboltdb.StaticSecretType { + err = c.db.Evict(cachememdb.IndexNameID, index.ID) + if err != nil { + return err + } + } else { + if index.RenewCtxInfo != nil { + if index.RenewCtxInfo.CancelFunc != nil { + index.RenewCtxInfo.CancelFunc() + } + } + } + } + + case "token": + if in.Token == "" { + return errors.New("token not provided") + } + + // Get the context for the given token and cancel its context + index, err := c.db.Get(cachememdb.IndexNameToken, in.Token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return nil + } + if err != nil { + return err + } + + c.logger.Debug("canceling context of index attached to token") + + index.RenewCtxInfo.CancelFunc() + + case "token_accessor": + if in.TokenAccessor == "" && in.Type != cacheboltdb.StaticSecretType { + return errors.New("token accessor not provided") + } + + // Get the cached index and cancel the corresponding lifetime watcher + // context + index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return nil + } + if err != nil { + return err + } + + c.logger.Debug("canceling context of index attached to accessor") + + index.RenewCtxInfo.CancelFunc() + + case "lease": + if in.Lease == "" { + return errors.New("lease not provided") + } + + // Get the cached index and cancel the corresponding lifetime watcher + // context + index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return nil + } + if err != nil { + return err + } + + c.logger.Debug("canceling context of index attached to accessor") + + index.RenewCtxInfo.CancelFunc() + + case "all": + // Cancel the base context which triggers all the goroutines to + // stop and evict entries from cache. + c.logger.Debug("canceling base context") + c.l.Lock() + c.baseCtxInfo.CancelFunc() + // Reset the base context + baseCtx, baseCancel := context.WithCancel(ctx) + c.baseCtxInfo = &cachememdb.ContextInfo{ + Ctx: baseCtx, + CancelFunc: baseCancel, + } + c.l.Unlock() + + // Reset the memdb instance (and persistent storage if enabled) + if err := c.Flush(); err != nil { + return err + } + + default: + return errInvalidType + } + + c.logger.Debug("successfully cleared matching cache entries") + + return nil +} + +// handleRevocationRequest checks whether the originating request is a +// revocation request, and if so perform applicable cache cleanups. +// Returns true is this is a revocation request. +func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) { + // Lease and token revocations return 204's on success. Fast-path if that's + // not the case. 
+ if resp.Response.StatusCode != http.StatusNoContent { + return false, nil + } + + _, path := deriveNamespaceAndRevocationPath(req) + + switch { + case path == vaultPathTokenRevoke: + // Get the token from the request body + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + tokenRaw, ok := jsonBody["token"] + if !ok { + return false, fmt.Errorf("failed to get token from request body") + } + token, ok := tokenRaw.(string) + if !ok { + return false, fmt.Errorf("expected token in the request body to be string") + } + + // Clear the cache entry associated with the token and all the other + // entries belonging to the leases derived from this token. + in := &cacheClearInput{ + Type: "token", + Token: token, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeSelf: + // Clear the cache entry associated with the token and all the other + // entries belonging to the leases derived from this token. + in := &cacheClearInput{ + Type: "token", + Token: req.Token, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeAccessor: + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + accessorRaw, ok := jsonBody["accessor"] + if !ok { + return false, fmt.Errorf("failed to get accessor from request body") + } + accessor, ok := accessorRaw.(string) + if !ok { + return false, fmt.Errorf("expected accessor in the request body to be string") + } + + in := &cacheClearInput{ + Type: "token_accessor", + TokenAccessor: accessor, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeOrphan: + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + tokenRaw, ok := jsonBody["token"] + if !ok { + return false, fmt.Errorf("failed to get token from request body") + } + token, ok := tokenRaw.(string) + if !ok { + return false, fmt.Errorf("expected token in the request body to be string") + } + + // Kill the lifetime watchers of all the leases attached to the revoked + // token + indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token) + if err != nil { + return false, err + } + for _, index := range indexes { + index.RenewCtxInfo.CancelFunc() + } + + // Kill the lifetime watchers of the revoked token + index, err := c.db.Get(cachememdb.IndexNameToken, token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return true, nil + } + if err != nil { + return false, err + } + + // Indicate the lifetime watcher goroutine for this index to return. + // This will not affect the child tokens because the context is not + // getting cancelled. + close(index.RenewCtxInfo.DoneCh) + + // Clear the parent references of the revoked token in the entries + // belonging to the child tokens of the revoked token. + indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token) + if err != nil { + return false, err + } + for _, index := range indexes { + index.TokenParent = "" + err = c.db.Set(index) + if err != nil { + c.logger.Error("failed to persist index", "error", err) + return false, err + } + } + + case path == vaultPathLeaseRevoke: + // TODO: Should lease present in the URL itself be considered here? 
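+ // Illustrative body, assuming the standard sys/leases/revoke payload: + //   {"lease_id": "database/creds/readonly/abc123"}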
+ // Get the lease from the request body + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + leaseIDRaw, ok := jsonBody["lease_id"] + if !ok { + return false, fmt.Errorf("failed to get lease_id from request body") + } + leaseID, ok := leaseIDRaw.(string) + if !ok { + return false, fmt.Errorf("expected lease_id in the request body to be string") + } + in := &cacheClearInput{ + Type: "lease", + Lease: leaseID, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case strings.HasPrefix(path, vaultPathLeaseRevokeForce): + // Trim the URL path to get the request path prefix + prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce) + // Get all the cache indexes that use the request path containing the + // prefix and cancel the lifetime watcher context of each. + indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix) + if err != nil { + return false, err + } + + _, tokenNSID := namespace.SplitIDFromString(req.Token) + for _, index := range indexes { + _, leaseNSID := namespace.SplitIDFromString(index.Lease) + // Only evict leases that match the token's namespace + if tokenNSID == leaseNSID { + index.RenewCtxInfo.CancelFunc() + } + } + + case strings.HasPrefix(path, vaultPathLeaseRevokePrefix): + // Trim the URL path to get the request path prefix + prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix) + // Get all the cache indexes that use the request path containing the + // prefix and cancel the lifetime watcher context of each. + indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix) + if err != nil { + return false, err + } + + _, tokenNSID := namespace.SplitIDFromString(req.Token) + for _, index := range indexes { + _, leaseNSID := namespace.SplitIDFromString(index.Lease) + // Only evict leases that match the token's namespace + if tokenNSID == leaseNSID { + index.RenewCtxInfo.CancelFunc() + } + } + + default: + return false, nil + } + + c.logger.Debug("triggered cache eviction from revocation request") + + return true, nil +} + +// Set stores the index in the cachememdb, and also stores it in the persistent +// cache (if enabled) +func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error { + if err := c.db.Set(index); err != nil { + return err + } + + if c.ps != nil { + plaintext, err := index.Serialize() + if err != nil { + return err + } + + if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil { + return err + } + c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID) + } + + return nil +} + +// SetCapabilitiesIndex stores the capabilities index in the cachememdb, and also stores it in the persistent +// cache (if enabled) +func (c *LeaseCache) SetCapabilitiesIndex(ctx context.Context, index *cachememdb.CapabilitiesIndex) error { + if err := c.db.SetCapabilitiesIndex(index); err != nil { + return err + } + + if c.ps != nil { + plaintext, err := index.SerializeCapabilitiesIndex() + if err != nil { + return err + } + + if err := c.ps.Set(ctx, index.ID, plaintext, cacheboltdb.TokenCapabilitiesType); err != nil { + return err + } + c.logger.Trace("set entry in persistent storage", "type", cacheboltdb.TokenCapabilitiesType, "id", index.ID) + } + + return nil +} + +// Evict removes an Index from the cachememdb, and also removes it from the +// persistent cache (if enabled) +func (c *LeaseCache) Evict(index *cachememdb.Index) error { + if err :=
c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil { + return err + } + + if c.ps != nil { + if err := c.ps.Delete(index.ID, index.Type); err != nil { + return err + } + c.logger.Trace("deleted item from persistent storage", "id", index.ID) + } + + return nil +} + +// Flush the cachememdb and persistent cache (if enabled) +func (c *LeaseCache) Flush() error { + if err := c.db.Flush(); err != nil { + return err + } + + if c.ps != nil { + c.logger.Trace("clearing persistent storage") + return c.ps.Clear() + } + + return nil +} + +// Restore loads the cachememdb from the persistent storage passed in. Loads +// tokens first, since restoring a lease's renewal context and watcher requires +// looking up the token in the cachememdb. +// Restore also restarts any capability management for managed static secret +// tokens. +func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error { + var errs *multierror.Error + + // Process tokens first + tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + if err := c.restoreTokens(tokens); err != nil { + errs = multierror.Append(errs, err) + } + } + + // Then process leases + leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, lease := range leases { + newIndex, err := cachememdb.Deserialize(lease) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath) + + // Check if this lease has already expired + expired, err := c.hasExpired(time.Now().UTC(), newIndex) + if err != nil { + c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err) + } + if expired { + continue + } + + if err := c.restoreLeaseRenewCtx(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + if err := c.db.Set(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath) + } + } + + // Then process static secrets and their capabilities + if c.cacheStaticSecrets { + staticSecrets, err := storage.GetByType(ctx, cacheboltdb.StaticSecretType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, staticSecret := range staticSecrets { + newIndex, err := cachememdb.Deserialize(staticSecret) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring static secret index", "id", newIndex.ID, "path", newIndex.RequestPath) + if err := c.db.Set(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + } + } + + capabilityIndexes, err := storage.GetByType(ctx, cacheboltdb.TokenCapabilitiesType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, capabilityIndex := range capabilityIndexes { + newIndex, err := cachememdb.DeserializeCapabilitiesIndex(capabilityIndex) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring capability index", "id", newIndex.ID) + if err := c.db.SetCapabilitiesIndex(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + + if c.capabilityManager != nil { + c.capabilityManager.StartRenewingCapabilities(newIndex) + } + } + } + } + + return errs.ErrorOrNil() +} + +func (c *LeaseCache) restoreTokens(tokens [][]byte) error { + var errors *multierror.Error + + for _, 
token := range tokens { + newIndex, err := cachememdb.Deserialize(token) + if err != nil { + errors = multierror.Append(errors, err) + continue + } + newIndex.RenewCtxInfo = c.createCtxInfo(nil) + if err := c.db.Set(newIndex); err != nil { + errors = multierror.Append(errors, err) + continue + } + c.logger.Trace("restored token", "id", newIndex.ID) + } + + return errors.ErrorOrNil() +} + +// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts +// the watcher go routine +func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error { + if index.Response == nil { + return fmt.Errorf("cached response was nil for %s", index.ID) + } + + // Parse the secret to determine which type it is + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + c.logger.Error("failed to deserialize response", "error", err) + return err + } + secret, err := api.ParseSecret(resp.Body) + if err != nil { + c.logger.Error("failed to parse response as secret", "error", err) + return err + } + + var renewCtxInfo *cachememdb.ContextInfo + switch { + case secret.LeaseID != "": + entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) + } + if err != nil { + return err + } + + // Derive a context for renewal using the token's context + renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) + + case secret.Auth != nil: + var parentCtx context.Context + if !secret.Auth.Orphan { + entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // If parent token is not managed by the cache, child shouldn't be + // either. + if entry == nil { + return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) + } + } + if err != nil { + return err + } + + c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath) + parentCtx = entry.RenewCtxInfo.Ctx + } + renewCtxInfo = c.createCtxInfo(parentCtx) + default: + // This isn't a renewable cache entry, i.e. a static secret cache entry. + // We return, because there's nothing to do. + return nil + } + + renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: renewCtx, + CancelFunc: renewCtxInfo.CancelFunc, + DoneCh: renewCtxInfo.DoneCh, + } + + sendReq := &SendRequest{ + Token: index.RequestToken, + Request: &http.Request{ + Header: index.RequestHeader, + Method: index.RequestMethod, + URL: &url.URL{ + Path: index.RequestPath, + }, + }, + } + go c.startRenewing(renewCtx, index, sendReq, secret) + + return nil +} + +// deriveNamespaceAndRevocationPath returns the namespace and relative path for +// revocation paths. +// +// If the path contains a namespace, but it's not a revocation path, it will be +// returned as-is, since there's no way to tell where the namespace ends and +// where the request path begins purely based off a string. 
+// +// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke +// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke +// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar +// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar +func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) { + namespace := "root/" + nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName) + if nsHeader != "" { + namespace = nsHeader + } + + fullPath := req.Request.URL.Path + nonVersionedPath := strings.TrimPrefix(fullPath, "/v1") + + for _, pathToCheck := range revocationPaths { + // We use strings.Index here for paths that can contain + // vars in the path, e.g. /v1/lease/revoke-prefix/:prefix + i := strings.Index(nonVersionedPath, pathToCheck) + // If there's no match, move on to the next check + if i == -1 { + continue + } + + // If the index is 0, this is a relative path with no namespace prepended, + // so we can break early + if i == 0 { + break + } + + // We need to turn /ns1 into ns1/; Canonicalize does this for us + namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i]) + + // If it's root, we replace, otherwise we join + if namespace == "root/" { + namespace = namespaceInPath + } else { + namespace = namespace + namespaceInPath + } + + return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:]) + } + + return namespace, fmt.Sprintf("/v1%s", nonVersionedPath) +} + +// RegisterAutoAuthToken adds the provided auto-auth token into the cache. This is +// primarily used to register the auto-auth token and should only be called +// within a sink's WriteToken func. +func (c *LeaseCache) RegisterAutoAuthToken(token string) error { + // Get the token from the cache + oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + return err + } + + // If the index is found, just keep it in the cache and ignore the incoming + // token (since they're the same) + if oldIndex != nil { + c.logger.Trace("auto-auth token already exists in cache; no need to store it again") + return nil + } + + // The following randomly generated values are required for the index stored by + // the cache, but are not actually used. We use random values to prevent + // accidental access.
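+ // e.g. (illustrative): each of id, namespace and requestPath becomes a short + // random base62 string such as "Xy3Qr"; only Token carries real meaning.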
+ id, err := base62.Random(5) + if err != nil { + return err + } + namespace, err := base62.Random(5) + if err != nil { + return err + } + requestPath, err := base62.Random(5) + if err != nil { + return err + } + + index := &cachememdb.Index{ + ID: id, + Token: token, + Namespace: namespace, + RequestPath: requestPath, + Type: cacheboltdb.TokenType, + } + + // Derive a context off of the lease cache's base context + ctxInfo := c.createCtxInfo(nil) + + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: ctxInfo.Ctx, + CancelFunc: ctxInfo.CancelFunc, + DoneCh: ctxInfo.DoneCh, + } + + // Store the index in the cache + c.logger.Debug("storing auto-auth token into the cache") + err = c.Set(c.baseCtxInfo.Ctx, index) + if err != nil { + c.logger.Error("failed to cache the auto-auth token", "error", err) + return err + } + + return nil +} + +type cacheClearInput struct { + Type string + + RequestPath string + Namespace string + Token string + TokenAccessor string + Lease string +} + +func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) { + if req == nil { + return nil, errors.New("nil request options provided") + } + + if req.Type == "" { + return nil, errors.New("no type provided") + } + + in := &cacheClearInput{ + Type: req.Type, + Namespace: req.Namespace, + } + + switch req.Type { + case "request_path": + in.RequestPath = req.Value + case "token": + in.Token = req.Value + case "token_accessor": + in.TokenAccessor = req.Value + case "lease": + in.Lease = req.Value + } + + return in, nil +} + +func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) { + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + return false, fmt.Errorf("failed to deserialize response: %w", err) + } + secret, err := api.ParseSecret(resp.Body) + if err != nil { + return false, fmt.Errorf("failed to parse response as secret: %w", err) + } + + elapsed := currentTime.Sub(index.LastRenewed) + var leaseDuration int + switch { + case secret.LeaseID != "": + leaseDuration = secret.LeaseDuration + case secret.Auth != nil: + leaseDuration = secret.Auth.LeaseDuration + default: + return false, errors.New("secret without lease encountered in expiration check") + } + + if int(elapsed.Seconds()) > leaseDuration { + c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration) + return true, nil + } + return false, nil +} diff --git a/command/agentproxyshared/cache/lease_cache_test.go b/command/agentproxyshared/cache/lease_cache_test.go new file mode 100644 index 000000000000..d88171150631 --- /dev/null +++ b/command/agentproxyshared/cache/lease_cache_test.go @@ -0,0 +1,1676 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/cryptoutil" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +func testNewLeaseCache(t *testing.T, responses []*SendResponse) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + CacheStaticSecrets: true, + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + if err != nil { + t.Fatal(err) + } + return lc +} + +func testNewLeaseCacheWithDelay(t *testing.T, cacheable bool, delay int) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: &mockDelayProxier{cacheable, delay}, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + CacheStaticSecrets: true, + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + if err != nil { + t.Fatal(err) + } + + return lc +} + +func testNewLeaseCacheWithPersistence(t *testing.T, responses []*SendResponse, storage *cacheboltdb.BoltStorage) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + require.NoError(t, err) + + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + Storage: storage, + CacheStaticSecrets: true, + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + require.NoError(t, err) + + return lc +} + +func TestCache_ComputeIndexID(t *testing.T) { + tests := []struct { + name string + req *SendRequest + want string + wantErr bool + }{ + { + "basic", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "test", + }, + }, + }, + "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", + false, + }, + { + "ignore consistency headers", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "test", + }, + Header: http.Header{ + vaulthttp.VaultIndexHeaderName: []string{"foo"}, + vaulthttp.VaultInconsistentHeaderName: []string{"foo"}, + vaulthttp.VaultForwardHeaderName: []string{"foo"}, + }, + }, + }, + "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := computeIndexID(tt.req) + if (err != nil) != tt.wantErr { + t.Errorf("actual_error: %v, expected_error: %v", err, 
tt.wantErr) + return + } + if !reflect.DeepEqual(got, string(tt.want)) { + t.Errorf("bad: index id; actual: %q, expected: %q", got, string(tt.want)) + } + }) + } +} + +// TestCache_ComputeStaticSecretIndexID ensures that +// computeStaticSecretCacheIndex works correctly. If this test breaks, then our +// hashing algorithm has changed, and we risk breaking backwards compatibility. +func TestCache_ComputeStaticSecretIndexID(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/foo/bar", + }, + }, + } + + index := computeStaticSecretCacheIndex(req) + // We expect this to be "", as it doesn't start with /v1 + expectedIndex := "" + require.Equal(t, expectedIndex, index) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + }, + } + + expectedIndex = "b117a962f19f17fa372c8681cadcd6fd370d28ee6e0a7012196b780bef601b53" + index2 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index2) +} + +// Test_GetStaticSecretPathFromRequestNoNamespaces tests that getStaticSecretPathFromRequest +// behaves as expected when no namespaces are involved. +func Test_GetStaticSecretPathFromRequestNoNamespaces(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + }, + } + + path := getStaticSecretPathFromRequest(req) + require.Equal(t, "foo/bar", path) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + // Paths like this are not static secrets, so we should return "" + Path: "foo/bar", + }, + }, + } + + path = getStaticSecretPathFromRequest(req) + require.Equal(t, "", path) +} + +// Test_GetStaticSecretPathFromRequestNamespaces tests that getStaticSecretPathFromRequest +// behaves as expected when namespaces are involved. +func Test_GetStaticSecretPathFromRequestNamespaces(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1"}}, + }, + } + + path := getStaticSecretPathFromRequest(req) + require.Equal(t, "ns1/foo/bar", path) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/foo/bar", + }, + }, + } + + path = getStaticSecretPathFromRequest(req) + require.Equal(t, "ns1/foo/bar", path) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + // Paths like this are not static secrets, so we should return "" + Path: "ns1/foo/bar", + }, + }, + } + + path = getStaticSecretPathFromRequest(req) + require.Equal(t, "", path) +} + +// TestCache_CanonicalizeStaticSecretPath ensures that +// canonicalizeStaticSecretPath works as expected with all kinds of inputs. 
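+// As a quick orientation, drawn from the cases below: the canonical form is
+// "<namespace><path>" with no leading slash and no "/v1" prefix, so both
+// canonicalizeStaticSecretPath("/v1/foo/bar", "ns1/") and
+// canonicalizeStaticSecretPath("foo/bar", "/ns1/") yield "ns1/foo/bar".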
+func TestCache_CanonicalizeStaticSecretPath(t *testing.T) { + expected := "foo/bar" + actual := canonicalizeStaticSecretPath("/v1/foo/bar", "") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("foo/bar", "") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("/foo/bar", "") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/ns1/foo/bar", "") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("ns1/foo/bar", "") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("/ns1/foo/bar", "") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/foo/bar", "ns1") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("/foo/bar", "ns1") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("foo/bar", "ns1") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/foo/bar", "ns1/") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("/foo/bar", "ns1/") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("foo/bar", "ns1/") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/foo/bar", "/ns1/") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("/foo/bar", "/ns1/") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("foo/bar", "/ns1/") + require.Equal(t, expected, actual) +} + +// TestCache_ComputeStaticSecretIndexIDNamespaces ensures that +// computeStaticSecretCacheIndex correctly identifies that a request +// with a namespace header and a request specifying the namespace in the path +// are equivalent. 
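+// Concretely: a request for /v1/foo/bar sent with the namespace header set to
+// "ns1" and a request for /v1/ns1/foo/bar sent with no header both
+// canonicalize to "ns1/foo/bar", so they must hash to the same index ID.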
+func TestCache_ComputeStaticSecretIndexIDNamespaces(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1"}}, + }, + } + + index := computeStaticSecretCacheIndex(req) + // Paths like this are not static secrets, so we should expect "" + require.Equal(t, "", index) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "ns1/foo/bar", + }, + }, + } + + // Paths like this are not static secrets, so we should expect "" + index2 := computeStaticSecretCacheIndex(req) + require.Equal(t, "", index2) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/foo/bar", + }, + }, + } + + expectedIndex := "a4605679d269aa1bebac7079a471a33403413f388f63bf0da3c771b225857932" + // We expect that computeStaticSecretCacheIndex will compute the same index + index3 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index3) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1"}}, + }, + } + + index4 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index4) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1/"}}, + }, + } + + // Paths like this are not static secrets, so we should expect "" + index5 := computeStaticSecretCacheIndex(req) + require.Equal(t, "", index5) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1/"}}, + }, + } + + index6 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index6) +} + +func TestLeaseCache_EmptyToken(t *testing.T) { + responses := []*SendResponse{ + newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), + } + lc := testNewLeaseCache(t, responses) + + // Even if the send request doesn't have a token on it, a successful + // cacheable response should result in the index properly getting populated + // with a token and memdb shouldn't complain while inserting the index. + urlPath := "http://example.com/v1/sample/api" + sendReq := &SendRequest{ + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected a non empty response") + } +} + +func TestLeaseCache_SendCacheable(t *testing.T) { + // Emulate 2 responses from the api proxy. One returns a new token and the + // other returns a lease. + responses := []*SendResponse{ + newTestSendResponse(http.StatusCreated, `{"auth": {"client_token": "testtoken", "renewable": true}}`), + newTestSendResponse(http.StatusOK, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), + } + + lc := testNewLeaseCache(t, responses) + // Register a token so that the token and lease requests are cached + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + // Make a request. A response with a new token is returned to the lease + // cache and that will be cached. 
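+	// (The mock proxier serves its canned responses in order, so a cache hit
+	// is observable below as the status code not advancing to the next canned
+	// response.)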
+ urlPath := "http://example.com/v1/sample/api" + sendReq := &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Send the same request again to get the cached response + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Check TokenParent + cachedItem, err := lc.db.Get(cachememdb.IndexNameToken, "testtoken") + if err != nil { + t.Fatal(err) + } + if cachedItem == nil { + t.Fatalf("expected token entry from cache") + } + if cachedItem.TokenParent != "autoauthtoken" { + t.Fatalf("unexpected value for tokenparent: %s", cachedItem.TokenParent) + } + + // Modify the request a little bit to ensure the second response is + // returned to the lease cache. + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Make the same request again and ensure that the same response is returned + // again. + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +// TestLeaseCache_StoreCacheableStaticSecret tests that cacheStaticSecret works +// as expected, creating the two expected cache entries, and also ensures +// that we can evict the cache entry with the cache clear API afterwards. +func TestLeaseCache_StoreCacheableStaticSecret(t *testing.T) { + request := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/secrets/foo/bar", + }, + }, + Token: "token", + } + response := newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`) + responses := []*SendResponse{ + response, + } + index := &cachememdb.Index{ + Type: cacheboltdb.StaticSecretType, + RequestPath: request.Request.URL.Path, + Namespace: "root/", + Token: "token", + ID: computeStaticSecretCacheIndex(request), + } + + lc := testNewLeaseCache(t, responses) + + // We expect two entries to be stored by this: + // 1. The actual static secret + // 2. 
The capabilities index
+	err := lc.cacheStaticSecret(context.Background(), request, response, index, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	indexFromDB, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	require.NotNil(t, indexFromDB)
+	require.Equal(t, "token", indexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"token": {}}, indexFromDB.Tokens)
+	require.Equal(t, cacheboltdb.StaticSecretType, indexFromDB.Type)
+	require.Equal(t, request.Request.URL.Path, indexFromDB.RequestPath)
+	require.Equal(t, "root/", indexFromDB.Namespace)
+
+	capabilitiesIndexFromDB, err := lc.db.GetCapabilitiesIndex(cachememdb.IndexNameID, hex.EncodeToString(cryptoutil.Blake2b256Hash(index.Token)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	require.NotNil(t, capabilitiesIndexFromDB)
+	require.Equal(t, "token", capabilitiesIndexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"secrets/foo/bar": {}}, capabilitiesIndexFromDB.ReadablePaths)
+
+	err = lc.handleCacheClear(context.Background(), &cacheClearInput{
+		Type:        "request_path",
+		RequestPath: request.Request.URL.Path,
+	})
+	require.NoError(t, err)
+
+	expectedClearedIndex, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, expectedClearedIndex)
+}
+
+// TestLeaseCache_StaticSecret_CacheClear_All tests that static secrets are
+// stored correctly, as well as removed from the cache by a cache clear with
+// "all" specified as the type.
+func TestLeaseCache_StaticSecret_CacheClear_All(t *testing.T) {
+	request := &SendRequest{
+		Request: &http.Request{
+			URL: &url.URL{
+				Path: "/v1/secrets/foo/bar",
+			},
+		},
+		Token: "token",
+	}
+	response := newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`)
+	responses := []*SendResponse{
+		response,
+	}
+	index := &cachememdb.Index{
+		Type:        cacheboltdb.StaticSecretType,
+		RequestPath: request.Request.URL.Path,
+		Namespace:   "root/",
+		Token:       "token",
+		ID:          computeStaticSecretCacheIndex(request),
+	}
+
+	lc := testNewLeaseCache(t, responses)
+
+	// We expect two entries to be stored by this:
+	// 1. The actual static secret
+	// 2.
The capabilities index
+	err := lc.cacheStaticSecret(context.Background(), request, response, index, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	indexFromDB, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	require.NotNil(t, indexFromDB)
+	require.Equal(t, "token", indexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"token": {}}, indexFromDB.Tokens)
+	require.Equal(t, cacheboltdb.StaticSecretType, indexFromDB.Type)
+	require.Equal(t, request.Request.URL.Path, indexFromDB.RequestPath)
+	require.Equal(t, "root/", indexFromDB.Namespace)
+
+	capabilitiesIndexFromDB, err := lc.db.GetCapabilitiesIndex(cachememdb.IndexNameID, hex.EncodeToString(cryptoutil.Blake2b256Hash(index.Token)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	require.NotNil(t, capabilitiesIndexFromDB)
+	require.Equal(t, "token", capabilitiesIndexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"secrets/foo/bar": {}}, capabilitiesIndexFromDB.ReadablePaths)
+
+	err = lc.handleCacheClear(context.Background(), &cacheClearInput{
+		Type: "all",
+	})
+	require.NoError(t, err)
+
+	expectedClearedIndex, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, expectedClearedIndex)
+
+	expectedClearedCapabilitiesIndex, err := lc.db.GetCapabilitiesIndex(cachememdb.IndexNameID, capabilitiesIndexFromDB.ID)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, expectedClearedCapabilitiesIndex)
+}
+
+// TestLeaseCache_SendCacheableStaticSecret tests that the cache has no issue returning
+// static secret style responses. It's similar to TestLeaseCache_SendCacheable in that it
+// only tests the surface level of the functionality; other tests cover the rest.
+func TestLeaseCache_SendCacheableStaticSecret(t *testing.T) {
+	response := newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`)
+	responses := []*SendResponse{
+		response,
+		response,
+		response,
+		response,
+	}
+
+	lc := testNewLeaseCache(t, responses)
+
+	// Register a token
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	// Make a request. The static secret response is returned to the lease
+	// cache and will be cached.
+	urlPath := "http://example.com/v1/sample/api"
+	sendReq := &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err := lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, response.Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Send the same request again to get the cached response
+	sendReq = &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Modify the request a little so the cache index changes and the next
+	// (identical) canned response is proxied rather than served from the cache.
+ sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, response.Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Make the same request again and ensure that the same response is returned + // again. + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, response.Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +func TestLeaseCache_SendNonCacheable(t *testing.T) { + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, `{"value": "output"}`), + newTestSendResponse(http.StatusNotFound, `{"value": "invalid"}`), + newTestSendResponse(http.StatusOK, `Hello`), + newTestSendResponse(http.StatusTemporaryRedirect, ""), + } + + lc := testNewLeaseCache(t, responses) + + // Send a request through the lease cache which is not cacheable (there is + // no lease information or auth information in the response) + sendReq := &SendRequest{ + Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the second response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the third response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", nil), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[2].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the fourth response will be + // returned. 
+	sendReq = &SendRequest{
+		Token:   "foo",
+		Request: httptest.NewRequest("GET", "http://example.com", nil),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response, responses[3].Response); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+}
+
+func TestLeaseCache_SendNonCacheableNonTokenLease(t *testing.T) {
+	// Create the cache
+	responses := []*SendResponse{
+		newTestSendResponse(http.StatusOK, `{"value": "output", "lease_id": "foo"}`),
+		newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`),
+	}
+	lc := testNewLeaseCache(t, responses)
+
+	// Send a request through the lease cache that returns a response containing
+	// a lease_id. The response will not be cached because it doesn't belong to a
+	// token that is managed by the lease cache.
+	urlPath := "http://example.com/v1/sample/api"
+	sendReq := &SendRequest{
+		Token:   "foo",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err := lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	_, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath)
+	if err != cachememdb.ErrCacheItemNotFound {
+		t.Fatal("expected ErrCacheItemNotFound, got", err)
+	}
+
+	// Verify that the response is not cached by sending the same request and
+	// by expecting a different response.
+	sendReq = &SendRequest{
+		Token:   "foo",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	_, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath)
+	if err != cachememdb.ErrCacheItemNotFound {
+		t.Fatal("expected ErrCacheItemNotFound, got", err)
+	}
+}
+
+func TestLeaseCache_HandleCacheClear(t *testing.T) {
+	lc := testNewLeaseCache(t, nil)
+
+	handler := lc.HandleCacheClear(context.Background())
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	// Test missing body, should return 400
+	resp, err := http.Post(ts.URL, "application/json", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("status code mismatch: expected = %v, got = %v", http.StatusBadRequest, resp.StatusCode)
+	}
+
+	testCases := []struct {
+		name               string
+		reqType            string
+		reqValue           string
+		expectedStatusCode int
+	}{
+		{
+			"invalid_type",
+			"foo",
+			"",
+			http.StatusBadRequest,
+		},
+		{
+			"invalid_value",
+			"",
+			"bar",
+			http.StatusBadRequest,
+		},
+		{
+			"all",
+			"all",
+			"",
+			http.StatusOK,
+		},
+		{
+			"by_request_path",
+			"request_path",
+			"foo",
+			http.StatusOK,
+		},
+		{
+			"by_token",
+			"token",
+			"foo",
+			http.StatusOK,
+		},
+		{
+			"by_lease",
+			"lease",
+			"foo",
+			http.StatusOK,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			reqBody := fmt.Sprintf("{\"type\": \"%s\", \"value\": \"%s\"}", tc.reqType, tc.reqValue)
+			resp, err := http.Post(ts.URL, "application/json", strings.NewReader(reqBody))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if tc.expectedStatusCode != resp.StatusCode {
+				t.Fatalf("status code mismatch: expected = %v, got = %v", tc.expectedStatusCode,
resp.StatusCode)
+			}
+		})
+	}
+}
+
+func TestCache_DeriveNamespaceAndRevocationPath(t *testing.T) {
+	tests := []struct {
+		name             string
+		req              *SendRequest
+		wantNamespace    string
+		wantRelativePath string
+	}{
+		{
+			"non_revocation_full_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns1/sys/mounts",
+					},
+				},
+			},
+			"root/",
+			"/v1/ns1/sys/mounts",
+		},
+		{
+			"non_revocation_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/sys/mounts",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/mounts",
+		},
+		{
+			"non_revocation_relative_path_with_ns_in_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns2/sys/mounts",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/ns2/sys/mounts",
+		},
+		{
+			"revocation_full_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns1/sys/leases/revoke",
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke",
+		},
+		{
+			"revocation_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/sys/leases/revoke",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke",
+		},
+		{
+			"revocation_relative_partial_ns",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns2/sys/leases/revoke",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/ns2/",
+			"/v1/sys/leases/revoke",
+		},
+		{
+			"revocation_prefix_full_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns1/sys/leases/revoke-prefix/foo",
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke-prefix/foo",
+		},
+		{
+			"revocation_prefix_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/sys/leases/revoke-prefix/foo",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke-prefix/foo",
+		},
+		{
+			"revocation_prefix_partial_ns",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns2/sys/leases/revoke-prefix/foo",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/ns2/",
+			"/v1/sys/leases/revoke-prefix/foo",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotNamespace, gotRelativePath := deriveNamespaceAndRevocationPath(tt.req)
+			if gotNamespace != tt.wantNamespace {
+				t.Errorf("deriveNamespaceAndRevocationPath() gotNamespace = %v, want %v", gotNamespace, tt.wantNamespace)
+			}
+			if gotRelativePath != tt.wantRelativePath {
+				t.Errorf("deriveNamespaceAndRevocationPath() gotRelativePath = %v, want %v", gotRelativePath, tt.wantRelativePath)
+			}
+		})
+	}
+}
+
+func TestLeaseCache_Concurrent_NonCacheable(t *testing.T) {
+	lc := testNewLeaseCacheWithDelay(t, false, 50)
+
+	// We are going to send 100 requests, each taking 50ms to process. If these
+	// requests are processed serially, it will take ~5 seconds to finish. We
+	// use a context with a timeout to tell us if this is the case: it gives
+	// ample time for the requests to be processed concurrently, but times out
+	// if they get processed serially.
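+	// (Back-of-the-envelope: 100 requests x 50ms is ~5s when serialized,
+	// comfortably over the 3s deadline below, while concurrent processing
+	// should finish in roughly a single 50ms round.)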
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
+
+	wgDoneCh := make(chan struct{})
+	errCh := make(chan error)
+
+	go func() {
+		var wg sync.WaitGroup
+		// 100 concurrent requests
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+
+			go func() {
+				defer wg.Done()
+
+				// Send a request through the lease cache which is not cacheable (there is
+				// no lease information or auth information in the response)
+				sendReq := &SendRequest{
+					Request: httptest.NewRequest("GET", "http://example.com", nil),
+				}
+
+				_, err := lc.Send(ctx, sendReq)
+				if err != nil {
+					errCh <- err
+				}
+			}()
+		}
+
+		wg.Wait()
+		close(wgDoneCh)
+	}()
+
+	select {
+	case <-ctx.Done():
+		t.Fatalf("request timed out: %s", ctx.Err())
+	case <-wgDoneCh:
+	case err := <-errCh:
+		t.Fatal(err)
+	}
+}
+
+func TestLeaseCache_Concurrent_Cacheable(t *testing.T) {
+	lc := testNewLeaseCacheWithDelay(t, true, 50)
+
+	if err := lc.RegisterAutoAuthToken("autoauthtoken"); err != nil {
+		t.Fatal(err)
+	}
+
+	// We are going to send 100 requests, each taking 50ms to process. If these
+	// requests are processed serially, it will take ~5 seconds to finish, so we
+	// use a context with a timeout to tell us if this is the case.
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
+
+	var cacheCount atomic.Uint32
+	wgDoneCh := make(chan struct{})
+	errCh := make(chan error)
+
+	go func() {
+		var wg sync.WaitGroup
+		// Start 100 concurrent requests
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+
+			go func() {
+				defer wg.Done()
+
+				sendReq := &SendRequest{
+					Token:   "autoauthtoken",
+					Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", nil),
+				}
+
+				resp, err := lc.Send(ctx, sendReq)
+				if err != nil {
+					errCh <- err
+					// Return early so we don't dereference a nil response below.
+					return
+				}
+
+				if resp.CacheMeta != nil && resp.CacheMeta.Hit {
+					cacheCount.Inc()
+				}
+			}()
+		}
+
+		wg.Wait()
+		close(wgDoneCh)
+	}()
+
+	select {
+	case <-ctx.Done():
+		t.Fatalf("request timed out: %s", ctx.Err())
+	case <-wgDoneCh:
+	case err := <-errCh:
+		t.Fatal(err)
+	}
+
+	// Ensure that all but one request got proxied. The other 99 should be
+	// returned from the cache.
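+	// (CacheMeta.Hit is only set on responses that the lease cache served
+	// from memory, which is what cacheCount tallied above.)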
+ if cacheCount.Load() != 99 { + t.Fatalf("Should have returned a cached response 99 times, got %d", cacheCount.Load()) + } +} + +func setupBoltStorage(t *testing.T) (tempCacheDir string, boltStorage *cacheboltdb.BoltStorage) { + t.Helper() + + km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) + require.NoError(t, err) + + tempCacheDir, err = ioutil.TempDir("", "agent-cache-test") + require.NoError(t, err) + boltStorage, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: tempCacheDir, + Logger: hclog.Default(), + Wrapper: km.Wrapper(), + }) + require.NoError(t, err) + require.NotNil(t, boltStorage) + // The calling function should `defer boltStorage.Close()` and `defer os.RemoveAll(tempCacheDir)` + return tempCacheDir, boltStorage +} + +func compareBeforeAndAfter(t *testing.T, before, after *LeaseCache, beforeLen, afterLen int) { + beforeDB, err := before.db.GetByPrefix(cachememdb.IndexNameID) + require.NoError(t, err) + assert.Len(t, beforeDB, beforeLen) + afterDB, err := after.db.GetByPrefix(cachememdb.IndexNameID) + require.NoError(t, err) + assert.Len(t, afterDB, afterLen) + for _, cachedItem := range beforeDB { + if strings.Contains(cachedItem.RequestPath, "expect-missing") { + continue + } + restoredItem, err := after.db.Get(cachememdb.IndexNameID, cachedItem.ID) + require.NoError(t, err) + + assert.NoError(t, err) + assert.Equal(t, cachedItem.ID, restoredItem.ID) + assert.Equal(t, cachedItem.Lease, restoredItem.Lease) + assert.Equal(t, cachedItem.LeaseToken, restoredItem.LeaseToken) + assert.Equal(t, cachedItem.Namespace, restoredItem.Namespace) + assert.EqualValues(t, cachedItem.RequestHeader, restoredItem.RequestHeader) + assert.Equal(t, cachedItem.RequestMethod, restoredItem.RequestMethod) + assert.Equal(t, cachedItem.RequestPath, restoredItem.RequestPath) + assert.Equal(t, cachedItem.RequestToken, restoredItem.RequestToken) + assert.Equal(t, cachedItem.Response, restoredItem.Response) + assert.Equal(t, cachedItem.Token, restoredItem.Token) + assert.Equal(t, cachedItem.TokenAccessor, restoredItem.TokenAccessor) + assert.Equal(t, cachedItem.TokenParent, restoredItem.TokenParent) + + // check what we can in the renewal context + assert.NotEmpty(t, restoredItem.RenewCtxInfo.CancelFunc) + assert.NotZero(t, restoredItem.RenewCtxInfo.DoneCh) + require.NotEmpty(t, restoredItem.RenewCtxInfo.Ctx) + assert.Equal(t, + cachedItem.RenewCtxInfo.Ctx.Value(contextIndexID), + restoredItem.RenewCtxInfo.Ctx.Value(contextIndexID), + ) + } +} + +func TestLeaseCache_PersistAndRestore(t *testing.T) { + // Emulate responses from the api proxy. The first two use the auto-auth + // token, and the others use another token. + // The test re-sends each request to ensure that the response is cached + // so the number of responses and cacheTests specified should always be equal. + responses := []*SendResponse{ + newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 600}}`), + newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 600}`), + // The auth token will get manually deleted from the bolt DB storage, causing both of the following two responses + // to be missing from the cache after a restore, because the lease is a child of the auth token. 
+ newTestSendResponse(202, `{"auth": {"client_token": "testtoken2", "renewable": true, "orphan": true, "lease_duration": 600}}`), + newTestSendResponse(203, `{"lease_id": "secret2-lease", "renewable": true, "data": {"number": "two"}, "lease_duration": 600}`), + // 204 No content gets special handling - avoid. + newTestSendResponse(250, `{"auth": {"client_token": "testtoken3", "renewable": true, "orphan": true, "lease_duration": 600}}`), + newTestSendResponse(251, `{"lease_id": "secret3-lease", "renewable": true, "data": {"number": "three"}, "lease_duration": 600}`), + newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`), + } + + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + // Register an auto-auth token so that the token and lease requests are cached + err := lc.RegisterAutoAuthToken("autoauthtoken") + require.NoError(t, err) + + cacheTests := []struct { + token string + method string + urlPath string + body string + deleteFromPersistentStore bool // If true, will be deleted from bolt DB to induce an error on restore + expectMissingAfterRestore bool // If true, the response is not expected to be present in the restored cache + }{ + { + // Make a request. A response with a new token is returned to the + // lease cache and that will be cached. + token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input"}`, + }, + { + // Modify the request a little bit to ensure the second response is + // returned to the lease cache. + token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input_changed"}`, + }, + { + // Simulate an approle login to get another token + method: "PUT", + urlPath: "http://example.com/v1/auth/approle-expect-missing/login", + body: `{"role_id": "my role", "secret_id": "my secret"}`, + deleteFromPersistentStore: true, + expectMissingAfterRestore: true, + }, + { + // Test caching with the token acquired from the approle login + token: "testtoken2", + method: "GET", + urlPath: "http://example.com/v1/sample-expect-missing/api", + body: `{"second": "input"}`, + // This will be missing from the restored cache because its parent token was deleted + expectMissingAfterRestore: true, + }, + { + // Simulate another approle login to get another token + method: "PUT", + urlPath: "http://example.com/v1/auth/approle/login", + body: `{"role_id": "my role", "secret_id": "my secret"}`, + }, + { + // Test caching with the token acquired from the latest approle login + token: "testtoken3", + method: "GET", + urlPath: "http://example.com/v1/sample3/api", + body: `{"third": "input"}`, + }, + } + + var deleteIDs []string + for i, ct := range cacheTests { + // Send once to cache + req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + + sendReq := &SendRequest{ + Token: ct.token, + Request: req, + } + if ct.deleteFromPersistentStore { + deleteID, err := computeIndexID(sendReq) + require.NoError(t, err) + deleteIDs = append(deleteIDs, deleteID) + // Now reset the body after calculating the index + req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendReq.Request = req + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + 
assert.Equal(t, responses[i].Response.StatusCode, resp.Response.StatusCode, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + + // Send again to test cache. If this isn't cached, the response returned + // will be the next in the list and the status code will not match. + req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendCacheReq := &SendRequest{ + Token: ct.token, + Request: req, + } + respCached, err := lc.Send(context.Background(), sendCacheReq) + require.NoError(t, err, "failed to send request %+v", ct) + assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") + require.NotNil(t, respCached.CacheMeta) + assert.True(t, respCached.CacheMeta.Hit) + } + + require.NotEmpty(t, deleteIDs) + for _, deleteID := range deleteIDs { + err = boltStorage.Delete(deleteID, cacheboltdb.LeaseType) + require.NoError(t, err) + } + + // Now we know the cache is working, so try restoring from the persisted + // cache's storage. Responses 3 and 4 have been cleared from the cache, so + // re-send those. + restoredCache := testNewLeaseCache(t, responses[2:4]) + + err = restoredCache.Restore(context.Background(), boltStorage) + errors, ok := err.(*multierror.Error) + require.True(t, ok) + assert.Len(t, errors.Errors, 1) + assert.Contains(t, errors.Error(), "could not find parent Token testtoken2") + + // Now compare the cache contents before and after + compareBeforeAndAfter(t, lc, restoredCache, 7, 5) + + // And finally send the cache requests once to make sure they're all being + // served from the restoredCache unless they were intended to be missing after restore. + for i, ct := range cacheTests { + req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendCacheReq := &SendRequest{ + Token: ct.token, + Request: req, + } + respCached, err := restoredCache.Send(context.Background(), sendCacheReq) + require.NoError(t, err, "failed to send request %+v", ct) + assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") + if ct.expectMissingAfterRestore { + require.Nil(t, respCached.CacheMeta) + } else { + require.NotNil(t, respCached.CacheMeta) + assert.True(t, respCached.CacheMeta.Hit) + } + } +} + +func TestLeaseCache_PersistAndRestore_WithManyDependencies(t *testing.T) { + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + + var requests []*SendRequest + var responses []*SendResponse + var orderedRequestPaths []string + + // helper func to generate new auth leases with a child secret lease attached + authAndSecretLease := func(id int, parentToken, newToken string) { + t.Helper() + path := fmt.Sprintf("/v1/auth/approle-%d/login", id) + orderedRequestPaths = append(orderedRequestPaths, path) + requests = append(requests, &SendRequest{ + Token: parentToken, + Request: httptest.NewRequest("PUT", "http://example.com"+path, strings.NewReader("")), + }) + responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"auth": {"client_token": "%s", "renewable": true, "lease_duration": 600}}`, newToken))) + + // Fetch a leased secret using the new token + path = fmt.Sprintf("/v1/kv/%d", id) + orderedRequestPaths = append(orderedRequestPaths, path) + requests = append(requests, &SendRequest{ + Token: newToken, + Request: httptest.NewRequest("GET", 
"http://example.com"+path, strings.NewReader("")), + }) + responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"lease_id": "secret-%d-lease", "renewable": true, "data": {"number": %d}, "lease_duration": 600}`, id, id))) + } + + // Pathological case: a long chain of child tokens + authAndSecretLease(0, "autoauthtoken", "many-ancestors-token;0") + for i := 1; i <= 50; i++ { + // Create a new generation of child token + authAndSecretLease(i, fmt.Sprintf("many-ancestors-token;%d", i-1), fmt.Sprintf("many-ancestors-token;%d", i)) + } + + // Lots of sibling tokens with auto auth token as their parent + for i := 51; i <= 100; i++ { + authAndSecretLease(i, "autoauthtoken", fmt.Sprintf("many-siblings-token;%d", i)) + } + + // Also create some extra siblings for an auth token further down the chain + for i := 101; i <= 110; i++ { + authAndSecretLease(i, "many-ancestors-token;25", fmt.Sprintf("many-siblings-for-ancestor-token;%d", i)) + } + + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + // Register an auto-auth token so that the token and lease requests are cached + err := lc.RegisterAutoAuthToken("autoauthtoken") + require.NoError(t, err) + + for _, req := range requests { + // Send once to cache + resp, err := lc.Send(context.Background(), req) + require.NoError(t, err) + assert.Equal(t, 200, resp.Response.StatusCode, "expected success") + assert.Nil(t, resp.CacheMeta) + } + + // Ensure leases are retrieved in the correct order + var processed int + + leases, err := boltStorage.GetByType(context.Background(), cacheboltdb.LeaseType) + require.NoError(t, err) + for _, lease := range leases { + index, err := cachememdb.Deserialize(lease) + require.NoError(t, err) + require.Equal(t, orderedRequestPaths[processed], index.RequestPath) + processed++ + } + + assert.Equal(t, len(orderedRequestPaths), processed) + + restoredCache := testNewLeaseCache(t, nil) + err = restoredCache.Restore(context.Background(), boltStorage) + require.NoError(t, err) + + // Now compare the cache contents before and after + compareBeforeAndAfter(t, lc, restoredCache, 223, 223) +} + +func TestEvictPersistent(t *testing.T) { + ctx := context.Background() + + responses := []*SendResponse{ + newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), + } + + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + // populate cache by sending request through + sendReq := &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", strings.NewReader(`{"value": "some_input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + assert.Equal(t, resp.Response.StatusCode, 201, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + + // Check bolt for the cached lease + secrets, err := lc.ps.GetByType(ctx, cacheboltdb.LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 1) + + // Call clear for the request path + err = lc.handleCacheClear(context.Background(), &cacheClearInput{ + Type: "request_path", + RequestPath: "/v1/sample/api", + }) + require.NoError(t, err) + + time.Sleep(2 * time.Second) + + // Check that cached item is gone + secrets, err = lc.ps.GetByType(ctx, cacheboltdb.LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 0) +} + +func 
TestRegisterAutoAuth_sameToken(t *testing.T) {
+	// If the auto-auth token already exists in the cache, it should not be
+	// stored again in a new index.
+	lc := testNewLeaseCache(t, nil)
+	err := lc.RegisterAutoAuthToken("autoauthtoken")
+	assert.NoError(t, err)
+
+	oldTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken")
+	assert.NoError(t, err)
+	oldTokenID := oldTokenIndex.ID
+
+	// register the same token again
+	err = lc.RegisterAutoAuthToken("autoauthtoken")
+	assert.NoError(t, err)
+
+	// check that there's only one index for autoauthtoken
+	entries, err := lc.db.GetByPrefix(cachememdb.IndexNameToken, "autoauthtoken")
+	assert.NoError(t, err)
+	assert.Len(t, entries, 1)
+
+	newTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken")
+	assert.NoError(t, err)
+
+	// compare the IDs, since those are randomly generated when an index for a
+	// token is added to the cache; if a new token was added, the IDs will
+	// not match.
+	assert.Equal(t, oldTokenID, newTokenIndex.ID)
+}
+
+func Test_hasExpired(t *testing.T) {
+	responses := []*SendResponse{
+		newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 60}}`),
+		newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 60}`),
+	}
+	lc := testNewLeaseCache(t, responses)
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	cacheTests := []struct {
+		token          string
+		urlPath        string
+		leaseType      string
+		wantStatusCode int
+	}{
+		{
+			// auth lease
+			token:          "autoauthtoken",
+			urlPath:        "/v1/sample/auth",
+			leaseType:      cacheboltdb.LeaseType,
+			wantStatusCode: responses[0].Response.StatusCode,
+		},
+		{
+			// secret lease
+			token:          "autoauthtoken",
+			urlPath:        "/v1/sample/secret",
+			leaseType:      cacheboltdb.LeaseType,
+			wantStatusCode: responses[1].Response.StatusCode,
+		},
+	}
+
+	for _, ct := range cacheTests {
+		// Send once to cache
+		urlPath := "http://example.com" + ct.urlPath
+		sendReq := &SendRequest{
+			Token:   ct.token,
+			Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+		}
+		resp, err := lc.Send(context.Background(), sendReq)
+		require.NoError(t, err)
+		assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response")
+		assert.Nil(t, resp.CacheMeta)
+
+		// get the Index out of the mem cache
+		index, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", ct.urlPath)
+		require.NoError(t, err)
+		assert.Equal(t, ct.leaseType, index.Type)
+
+		// The lease duration is 60 seconds, so time.Now() should be within that
+		expired, err := lc.hasExpired(time.Now().UTC(), index)
+		require.NoError(t, err)
+		assert.False(t, expired)
+
+		// In 90 seconds the index should be "expired"
+		futureTime := time.Now().UTC().Add(time.Second * 90)
+		expired, err = lc.hasExpired(futureTime, index)
+		require.NoError(t, err)
+		assert.True(t, expired)
+	}
+}
+
+func TestLeaseCache_hasExpired_wrong_type(t *testing.T) {
+	index := &cachememdb.Index{
+		Type: cacheboltdb.TokenType,
+		Response: []byte(`HTTP/0.0 200 OK
+Content-Type: application/json
+Date: Tue, 02 Mar 2021 17:54:16 GMT
+
+{}`),
+	}
+
+	lc := testNewLeaseCache(t, nil)
+	expired, err := lc.hasExpired(time.Now().UTC(), index)
+	assert.False(t, expired)
+	assert.EqualError(t, err, `secret without lease encountered in expiration check`)
+}
+
+func TestLeaseCacheRestore_expired(t *testing.T) {
+	// Emulate 2 responses from the api proxy, both expired
+	responses := []*SendResponse{
+
newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": -600}}`),
+		newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": -600}`),
+	}
+
+	tempDir, boltStorage := setupBoltStorage(t)
+	defer os.RemoveAll(tempDir)
+	defer boltStorage.Close()
+	lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage)
+
+	// Register an auto-auth token so that the token and lease requests are cached in mem
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	cacheTests := []struct {
+		token          string
+		method         string
+		urlPath        string
+		body           string
+		wantStatusCode int
+	}{
+		{
+			// Make a request. A response with a new token is returned to the
+			// lease cache and that will be cached.
+			token:          "autoauthtoken",
+			method:         "GET",
+			urlPath:        "http://example.com/v1/sample/api",
+			body:           `{"value": "input"}`,
+			wantStatusCode: responses[0].Response.StatusCode,
+		},
+		{
+			// Modify the request a little bit to ensure the second response is
+			// returned to the lease cache.
+			token:          "autoauthtoken",
+			method:         "GET",
+			urlPath:        "http://example.com/v1/sample/api",
+			body:           `{"value": "input_changed"}`,
+			wantStatusCode: responses[1].Response.StatusCode,
+		},
+	}
+
+	for _, ct := range cacheTests {
+		// Send once to cache
+		sendReq := &SendRequest{
+			Token:   ct.token,
+			Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)),
+		}
+		resp, err := lc.Send(context.Background(), sendReq)
+		require.NoError(t, err)
+		assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response")
+		assert.Nil(t, resp.CacheMeta)
+	}
+
+	// Restore from the persisted cache's storage
+	restoredCache := testNewLeaseCache(t, nil)
+
+	err := restoredCache.Restore(context.Background(), boltStorage)
+	assert.NoError(t, err)
+
+	// The original mem cache should have between one and three items.
+	// This will usually be three, but could be fewer if any renewals
+	// happen before this check, which will evict the expired cache entries.
+	// E.g. if you add a time.Sleep before this, it will be 1. We accept
+	// the whole range to reduce flakiness.
+	beforeDB, err := lc.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.LessOrEqual(t, len(beforeDB), 3)
+	assert.LessOrEqual(t, 1, len(beforeDB))
+
+	// There should only be one item in the restored cache: the autoauth token
+	afterDB, err := restoredCache.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.Len(t, afterDB, 1)
+
+	// Just verify that the one item in the restored mem cache matches one in the original mem cache, and that it's the auto-auth token
+	beforeItem, err := lc.db.Get(cachememdb.IndexNameID, afterDB[0].ID)
+	require.NoError(t, err)
+	assert.NotNil(t, beforeItem)
+
+	assert.Equal(t, "autoauthtoken", afterDB[0].Token)
+	assert.Equal(t, cacheboltdb.TokenType, afterDB[0].Type)
+}
diff --git a/command/agentproxyshared/cache/listener.go b/command/agentproxyshared/cache/listener.go
new file mode 100644
index 000000000000..c962a2c8c370
--- /dev/null
+++ b/command/agentproxyshared/cache/listener.go
@@ -0,0 +1,84 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" +) + +type ListenerBundle struct { + Listener net.Listener + TLSConfig *tls.Config + TLSReloadFunc reloadutil.ReloadFunc +} + +func StartListener(lnConfig *configutil.Listener) (*ListenerBundle, error) { + addr := lnConfig.Address + + var ln net.Listener + var err error + switch lnConfig.Type { + case "tcp": + if addr == "" { + addr = "127.0.0.1:8200" + } + + bindProto := "tcp" + // If they've passed 0.0.0.0, we only want to bind on IPv4 + // rather than golang's dual stack default + if strings.HasPrefix(addr, "0.0.0.0:") { + bindProto = "tcp4" + } + + ln, err = net.Listen(bindProto, addr) + if err != nil { + return nil, err + } + ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)} + + case "unix": + var uConfig *listenerutil.UnixSocketsConfig + if lnConfig.SocketMode != "" && + lnConfig.SocketUser != "" && + lnConfig.SocketGroup != "" { + uConfig = &listenerutil.UnixSocketsConfig{ + Mode: lnConfig.SocketMode, + User: lnConfig.SocketUser, + Group: lnConfig.SocketGroup, + } + } + ln, err = listenerutil.UnixSocketListener(addr, uConfig) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type) + } + + props := map[string]string{"addr": ln.Addr().String()} + tlsConf, reloadFunc, err := listenerutil.TLSConfig(lnConfig, props, nil) + if err != nil { + return nil, err + } + if tlsConf != nil { + ln = tls.NewListener(ln, tlsConf) + } + + cfg := &ListenerBundle{ + Listener: ln, + TLSConfig: tlsConf, + TLSReloadFunc: reloadFunc, + } + + return cfg, nil +} diff --git a/command/agentproxyshared/cache/proxy.go b/command/agentproxyshared/cache/proxy.go new file mode 100644 index 000000000000..231c5d9d203b --- /dev/null +++ b/command/agentproxyshared/cache/proxy.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "bytes" + "context" + "io" + "net/http" + "time" + + "github.com/hashicorp/vault/api" +) + +// SendRequest is the input for Proxier.Send. +type SendRequest struct { + Token string + Request *http.Request + + // RequestBody is the stored body bytes from Request.Body. It is set here to + // avoid reading and re-setting the stream multiple times. + RequestBody []byte +} + +// SendResponse is the output from Proxier.Send. +type SendResponse struct { + Response *api.Response + + // ResponseBody is the stored body bytes from Response.Body. It is set here to + // avoid reading and re-setting the stream multiple times. + ResponseBody []byte + CacheMeta *CacheMeta +} + +// CacheMeta contains metadata information about the response, +// such as whether it was a cache hit or miss, and the age of the +// cached entry. +type CacheMeta struct { + Hit bool + Age time.Duration +} + +// Proxier is the interface implemented by different components that are +// responsible for performing specific tasks, such as caching and proxying. All +// these tasks combined together would serve the request received by the agent. +type Proxier interface { + Send(ctx context.Context, req *SendRequest) (*SendResponse, error) +} + +// NewSendResponse creates a new SendResponse and takes care of initializing its +// fields properly. 
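+//
+// A typical call site might look like the following sketch, where apiResp is
+// assumed to be a live *api.Response:
+//
+//	resp, err := NewSendResponse(apiResp, nil)
+//
+// Passing a nil body makes the constructor drain apiResponse.Body and re-set
+// it, so downstream consumers can still read the stream.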
+func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) { + resp := &SendResponse{ + Response: apiResponse, + CacheMeta: &CacheMeta{}, + } + + // If a response body is separately provided we set that as the SendResponse.ResponseBody, + // otherwise we will do an io.ReadAll to extract the response body from apiResponse. + switch { + case len(responseBody) > 0: + resp.ResponseBody = responseBody + case apiResponse.Body != nil: + respBody, err := io.ReadAll(apiResponse.Body) + if err != nil { + return nil, err + } + // Close the old body + apiResponse.Body.Close() + + // Re-set the response body after reading from the Reader + apiResponse.Body = io.NopCloser(bytes.NewReader(respBody)) + + resp.ResponseBody = respBody + } + + return resp, nil +} diff --git a/command/agentproxyshared/cache/static_secret_cache_updater.go b/command/agentproxyshared/cache/static_secret_cache_updater.go new file mode 100644 index 000000000000..9d316deee88d --- /dev/null +++ b/command/agentproxyshared/cache/static_secret_cache_updater.go @@ -0,0 +1,690 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync/atomic" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/exp/maps" + "nhooyr.io/websocket" +) + +// Example write event (this does not contain all possible fields): +//{ +// "id": "a3be9fb1-b514-519f-5b25-b6f144a8c1ce", +// "source": "https://vaultproject.io/", +// "specversion": "1.0", +// "type": "*", +// "data": { +// "event": { +// "id": "a3be9fb1-b514-519f-5b25-b6f144a8c1ce", +// "metadata": { +// "current_version": "1", +// "data_path": "secret/data/foo", +// "modified": "true", +// "oldest_version": "0", +// "operation": "data-write", +// "path": "secret/data/foo" +// } +// }, +// "event_type": "kv-v2/data-write", +// "plugin_info": { +// "mount_class": "secret", +// "mount_accessor": "kv_5dc4d18e", +// "mount_path": "secret/", +// "plugin": "kv" +// } +// }, +// "datacontentype": "application/cloudevents", +// "time": "2023-09-12T15:19:49.394915-07:00" +//} + +// Example event with namespaces for an undelete (this does not contain all possible fields): +// { +// "id": "6c6b13fd-f133-f351-3cf0-b09ae6a417b1", +// "source": "vault://hostname", +// "specversion": "1.0", +// "type": "*", +// "data": { +// "event": { +// "id": "6c6b13fd-f133-f351-3cf0-b09ae6a417b1", +// "metadata": { +// "current_version": "3", +// "destroyed_versions": "[2,3]", +// "modified": "true", +// "oldest_version": "0", +// "operation": "destroy", +// "path": "secret-v2/destroy/my-secret" +// } +// }, +// "event_type": "kv-v2/destroy", +// "plugin_info": { +// "mount_class": "secret", +// "mount_accessor": "kv_b27b3cad", +// "mount_path": "secret-v2/", +// "plugin": "kv", +// "version": "2" +// } +// }, +// "datacontentype": "application/cloudevents", +// "time": "2024-08-27T12:46:01.373097-04:00" +//} + +// StaticSecretCacheUpdater is a struct that utilizes +// the event system to keep the static secret cache up to date. 
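+//
+// Rough lifecycle, sketched from the methods below rather than any stated
+// contract: open a websocket to Vault's event system using the auto-auth
+// token, perform one pre-event-stream update of cached secrets, then
+// re-fetch and re-cache the secret at the path of every modified kv-v2 event.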
+type StaticSecretCacheUpdater struct {
+	client     *api.Client
+	leaseCache *LeaseCache
+	logger     hclog.Logger
+	tokenSink  sink.Sink
+
+	// allowForwardingViaHeaderDisabled is a bool that tracks if
+	// allow_forwarding_via_header is disabled on the cluster we're talking to.
+	// If we get an error back saying that it's disabled, we'll set this to true
+	// and never try to forward again.
+	allowForwardingViaHeaderDisabled bool
+}
+
+// StaticSecretCacheUpdaterConfig is the configuration for initializing a new
+// StaticSecretCacheUpdater.
+type StaticSecretCacheUpdaterConfig struct {
+	Client     *api.Client
+	LeaseCache *LeaseCache
+	Logger     hclog.Logger
+	// TokenSink is a token sink that will have the latest
+	// token from auto-auth in it, to be used in event system
+	// connections.
+	TokenSink sink.Sink
+}
+
+// NewStaticSecretCacheUpdater creates a new instance of a StaticSecretCacheUpdater.
+func NewStaticSecretCacheUpdater(conf *StaticSecretCacheUpdaterConfig) (*StaticSecretCacheUpdater, error) {
+	if conf == nil {
+		return nil, errors.New("nil configuration provided")
+	}
+
+	if conf.LeaseCache == nil {
+		return nil, fmt.Errorf("nil Lease Cache (a required parameter): %v", conf)
+	}
+
+	if conf.Logger == nil {
+		return nil, fmt.Errorf("nil Logger (a required parameter): %v", conf)
+	}
+
+	if conf.Client == nil {
+		return nil, fmt.Errorf("nil API client (a required parameter): %v", conf)
+	}
+
+	if conf.TokenSink == nil {
+		return nil, fmt.Errorf("nil token sink (a required parameter): %v", conf)
+	}
+
+	return &StaticSecretCacheUpdater{
+		client:     conf.Client,
+		leaseCache: conf.LeaseCache,
+		logger:     conf.Logger,
+		tokenSink:  conf.TokenSink,
+	}, nil
+}
+
+// streamStaticSecretEvents streams static secret events and updates
+// the cache as update notifications arrive. This method will return errors in
+// cases of failed updates, malformed events, and other failures.
+// For best results, the caller of this function should retry on error with backoff,
+// if it is desired for the cache to always remain up to date.
+func (updater *StaticSecretCacheUpdater) streamStaticSecretEvents(ctx context.Context) error {
+	// First, ensure our token is up-to-date:
+	updater.client.SetToken(updater.tokenSink.(sink.SinkReader).Token())
+	conn, err := updater.openWebSocketConnection(ctx)
+	if err != nil {
+		return err
+	}
+	defer conn.Close(websocket.StatusNormalClosure, "")
+
+	err = updater.preEventStreamUpdate(ctx)
+	if err != nil {
+		return fmt.Errorf("error when performing pre-event stream secret update: %w", err)
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+			_, message, err := conn.Read(ctx)
+			if err != nil {
+				// The caller of this function should make the decision on whether to retry. If it does, then
+				// the websocket connection will be retried, and we will check for missed events.
+				return fmt.Errorf("error when attempting to read from event stream, reopening websocket: %w", err)
+			}
+			updater.logger.Trace("received event", "message", string(message))
+			messageMap := make(map[string]interface{})
+			err = json.Unmarshal(message, &messageMap)
+			if err != nil {
+				return fmt.Errorf("error when unmarshaling event, message: %s\nerror: %w", string(message), err)
+			}
+			data, ok := messageMap["data"].(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("unexpected event format when decoding 'data' element, message: %s", string(message))
+			}
+			event, ok := data["event"].(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("unexpected event format when decoding 'event' element, message: %s", string(message))
+			}
+			metadata, ok := event["metadata"].(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("unexpected event format when decoding 'metadata' element, message: %s", string(message))
+			}
+			modified, ok := metadata["modified"].(string)
+			if ok && modified == "true" {
+				// If data_path were in every event, we'd get that instead, but unfortunately it isn't.
+				path, ok := metadata["path"].(string)
+				if !ok {
+					return fmt.Errorf("unexpected event format when decoding 'path' element, message: %s", string(message))
+				}
+				namespace, ok := data["namespace"].(string)
+				if ok {
+					path = namespace + path
+				}
+
+				deletedOrDestroyedVersions, newPath := checkForDeleteOrDestroyEvent(messageMap)
+				if len(deletedOrDestroyedVersions) > 0 {
+					path = newPath
+					err = updater.handleDeleteDestroyVersions(path, deletedOrDestroyedVersions)
+					if err != nil {
+						// While we are kind of 'missing' an event this way, re-calling this function will
+						// result in the secret remaining up to date.
+						return fmt.Errorf("error handling delete/destroy versions for static secret: path: %q, message: %s error: %w", path, message, err)
+					}
+				}
+
+				// Note: for delete/destroy events, we continue through to updating the secret itself, too.
+				// This means that if the latest version of the secret gets deleted, the cache keeps
+				// knowledge of which version is the latest.
+				// One intricacy of e.g. destroyed events is that if the latest secret is destroyed, continuing
+				// to update the secret will 404. This is consistent with other behaviour. For Proxy, this means
+				// the secret may be evicted. That's okay.
+
+				err = updater.updateStaticSecret(ctx, path)
+				if err != nil {
+					// While we are kind of 'missing' an event this way, re-calling this function will
+					// result in the secret remaining up to date.
+					return fmt.Errorf("error updating static secret: path: %q, message: %s error: %w", path, message, err)
+				}
+			} else {
+				// This is an event we're not interested in, ignore it and
+				// carry on.
+				continue
+			}
+		}
+	}
+
+	return nil
+}
+
+// checkForDeleteOrDestroyEvent checks an event message for delete/destroy/undelete
+// events and, if there are any, returns the versions affected, as well as the path
+// to the secret. If none can be found, it returns an empty array and an empty string.
+// We have to do this since events do not always return data_path for all events. If they did,
+// we could rely on that instead of doing string manipulation.
+// Example return value: [1, 2, 3], "secrets/data/my-secret".
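+//
+// For instance, a trimmed delete event like the following (shape taken from
+// the tests in this change; values illustrative) yields
+// ([1, 3, 5], "secret-v2/data/my-secret"):
+//
+//	"metadata": {
+//		"deleted_versions": "[1,3,5]",
+//		"path": "secret-v2/delete/my-secret"
+//	},
+//	"plugin_info": {
+//		"mount_path": "secret-v2/"
+//	}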
+func checkForDeleteOrDestroyEvent(eventMap map[string]interface{}) ([]int, string) {
+	var versions []int
+
+	data, ok := eventMap["data"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	event, ok := data["event"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	metadata, ok := event["metadata"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	// We should have only one of these:
+	deletedVersions, ok := metadata["deleted_versions"].(string)
+	if ok {
+		err := json.Unmarshal([]byte(deletedVersions), &versions)
+		if err != nil {
+			return versions, ""
+		}
+	}
+
+	destroyedVersions, ok := metadata["destroyed_versions"].(string)
+	if ok {
+		err := json.Unmarshal([]byte(destroyedVersions), &versions)
+		if err != nil {
+			return versions, ""
+		}
+	}
+
+	undeletedVersions, ok := metadata["undeleted_versions"].(string)
+	if ok {
+		err := json.Unmarshal([]byte(undeletedVersions), &versions)
+		if err != nil {
+			return versions, ""
+		}
+	}
+
+	// We found none of deleted_versions, destroyed_versions, or
+	// undeleted_versions, so return early.
+	if len(versions) == 0 {
+		return versions, ""
+	}
+
+	path, ok := metadata["path"].(string)
+	if !ok {
+		return versions, ""
+	}
+
+	namespace, ok := data["namespace"].(string)
+	if ok {
+		path = namespace + path
+	}
+
+	pluginInfo, ok := data["plugin_info"].(map[string]interface{})
+	if !ok {
+		return versions, ""
+	}
+
+	mountPath, ok := pluginInfo["mount_path"].(string)
+	if !ok {
+		return versions, ""
+	}
+
+	// We get the path without the mount path for safety, just in case the namespace or mount path
+	// have 'data' inside.
+	namespaceMountPathOnly := namespace + mountPath
+	pathWithoutMountPath := strings.TrimPrefix(path, namespaceMountPathOnly)
+
+	// We need to trim delete, destroy, or undelete to form the correct path for where the secret
+	// is stored.
+	trimmedPath := strings.TrimPrefix(pathWithoutMountPath, "delete")
+	trimmedPath = strings.TrimPrefix(trimmedPath, "destroy")
+	trimmedPath = strings.TrimPrefix(trimmedPath, "undelete")
+
+	// This is how we form the ID of the cached secrets
+	fixedPath := namespaceMountPathOnly + "data" + trimmedPath
+
+	return versions, fixedPath
+}
+
+// preEventStreamUpdate is called after successful connection to the event system, but before
+// we process any events, to ensure we don't miss any updates.
+// In some cases, this will result in multiple processing of the same updates, but
+// this ensures that we don't lose any updates to secrets that might have been sent
+// while the connection was forming.
+func (updater *StaticSecretCacheUpdater) preEventStreamUpdate(ctx context.Context) error {
+	indexes, err := updater.leaseCache.db.GetByPrefix(cachememdb.IndexNameID)
+	if err != nil {
+		return err
+	}
+
+	updater.logger.Debug("starting pre-event stream update of static secrets")
+
+	var errs *multierror.Error
+	for _, index := range indexes {
+		if index.Type != cacheboltdb.StaticSecretType {
+			continue
+		}
+		err = updater.updateStaticSecret(ctx, index.RequestPath)
+		if err != nil {
+			errs = multierror.Append(errs, err)
+		}
+	}
+
+	updater.logger.Debug("finished pre-event stream update of static secrets")
+
+	return errs.ErrorOrNil()
+}
+
+// handleDeleteDestroyVersions will handle calls to deleteVersions and destroyVersions for a given cached
+// secret. The handling is simple: remove them from the cache. We do the same for undeletes, as this will
+// also affect the cache, but we don't re-grab the secret for undeletes.
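+//
+// For example (a sketch of the expected effect, assuming a cached entry with
+// versions 1 through 3): handling versions [2, 3] for that entry deletes
+// index.Versions[2] and index.Versions[3], leaving only version 1 cached.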
+func (updater *StaticSecretCacheUpdater) handleDeleteDestroyVersions(path string, versions []int) error {
+	indexId := hashStaticSecretIndex(path)
+	updater.logger.Debug("received delete/undelete/destroy versions request", "path", path, "indexId", indexId, "versions", versions)
+
+	index, err := updater.leaseCache.db.Get(cachememdb.IndexNameID, indexId)
+	if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+		// This event doesn't correspond to a secret in our cache
+		// so this is a no-op.
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// Hold the lock as we're modifying the secret
+	index.IndexLock.Lock()
+	defer index.IndexLock.Unlock()
+
+	for _, version := range versions {
+		delete(index.Versions, version)
+	}
+
+	// Lastly, store the secret
+	updater.logger.Debug("storing updated secret as result of delete/undelete/destroy", "path", path, "deletedVersions", versions)
+	err = updater.leaseCache.db.Set(index)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// updateStaticSecret checks for updates for a static secret on the path given,
+// and updates the cache if appropriate. For KVv2 secrets, we will also update
+// the version at index.Versions[currentVersion] with the same data.
+func (updater *StaticSecretCacheUpdater) updateStaticSecret(ctx context.Context, path string) error {
+	// We clone the client, as we won't be using the same token.
+	client, err := updater.client.Clone()
+	if err != nil {
+		return err
+	}
+
+	// Clear the client's header namespace since we'll be including the
+	// namespace as part of the path.
+	client.ClearNamespace()
+
+	indexId := hashStaticSecretIndex(path)
+
+	updater.logger.Debug("received update static secret request", "path", path, "indexId", indexId)
+
+	index, err := updater.leaseCache.db.Get(cachememdb.IndexNameID, indexId)
+	if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+		// This event doesn't correspond to a secret in our cache
+		// so this is a no-op.
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// We use a raw request so that we can store all the
+	// request information, just like we do in the Proxier Send methods.
+	request := client.NewRequest(http.MethodGet, "/v1/"+path)
+	if request.Headers == nil {
+		request.Headers = make(http.Header)
+	}
+	request.Headers.Set("User-Agent", useragent.ProxyString())
+
+	var resp *api.Response
+	var tokensToRemove []string
+	var successfulAttempt bool
+	for _, token := range maps.Keys(index.Tokens) {
+		client.SetToken(token)
+		request.Headers.Set(api.AuthHeaderName, token)
+
+		if !updater.allowForwardingViaHeaderDisabled {
+			// Set this to always forward to active, since events could come before
+			// replication, and if we're connected to the standby, then we will be
+			// receiving events from the primary but otherwise getting old values from
+			// the standby here. This makes sure that Proxy functions properly
+			// even when its Vault address is set to a standby, since we cannot
+			// currently receive events from a standby.
+			// We only try this if updater.allowForwardingViaHeaderDisabled is false;
+			// if we receive an error indicating that the config is set to false,
+			// we will never set this header again.
+			request.Headers.Set(api.HeaderForward, "active-node")
+		}
+
+		resp, err = client.RawRequestWithContext(ctx, request)
+		if err != nil {
+			if strings.Contains(err.Error(), "forwarding via header X-Vault-Forward disabled") {
+				updater.logger.Info("allow_forwarding_via_header disabled, re-attempting update and no longer attempting to forward")
+				updater.allowForwardingViaHeaderDisabled = true
+
+				// Try again without the header
+				request.Headers.Del(api.HeaderForward)
+				resp, err = client.RawRequestWithContext(ctx, request)
+			}
+		}
+
+		if err != nil {
+			updater.logger.Trace("received error when trying to update cache", "path", path, "err", err, "token", token, "namespace", index.Namespace)
+			// We cannot access this secret with this token for whatever reason,
+			// so mark the token for removal.
+			tokensToRemove = append(tokensToRemove, token)
+			continue
+		} else {
+			// We got our updated secret!
+			successfulAttempt = true
+			break
+		}
+	}
+
+	if successfulAttempt {
+		// We need to update the index, so first, hold the lock.
+		index.IndexLock.Lock()
+		defer index.IndexLock.Unlock()
+
+		// First, remove the tokens we noted couldn't access the secret from the token index
+		for _, token := range tokensToRemove {
+			delete(index.Tokens, token)
+		}
+
+		sendResponse, err := NewSendResponse(resp, nil)
+		if err != nil {
+			return err
+		}
+
+		// Serialize the response to store it in the cached index
+		var respBytes bytes.Buffer
+		err = sendResponse.Response.Write(&respBytes)
+		if err != nil {
+			updater.logger.Error("failed to serialize response", "error", err)
+			return err
+		}
+
+		index.Response = respBytes.Bytes()
+		index.LastRenewed = time.Now().UTC()
+
+		// For KVv2 secrets, let's also update index.Versions[version_of_secret]
+		// with the response we received from the current version.
+		// Instead of relying on current_version in the event, we should
+		// check the message we received, since it's possible the secret
+		// got updated between receipt of the event and when we received
+		// the request for the secret.
+		// First, re-read the secret into a response so that we can parse it again:
+		reader := bufio.NewReader(bytes.NewReader(index.Response))
+		resp, err := http.ReadResponse(reader, nil)
+		if err != nil {
+			// This shouldn't happen, but log just in case it does. The
+			// version-list update below is best-effort anyway.
+			updater.logger.Warn("failed to deserialize response", "error", err)
+		} else {
+			secret, err := api.ParseSecret(resp.Body)
+			if err != nil {
+				// This shouldn't happen, but log just in case it does. There's
+				// no real negative consequence if the following call is a no-op.
+				updater.logger.Warn("failed to parse response body", "error", err)
+			}
+
+			// In case of failures or KVv1 secrets, this function will simply fail silently,
+			// which is fine (and expected) since this could be arbitrary JSON.
+			updater.leaseCache.addToVersionListForCurrentVersionKVv2Secret(index, secret)
+		}
+
+		// Lastly, store the secret
+		updater.logger.Debug("storing response into the cache due to update", "path", path)
+		err = updater.leaseCache.db.Set(index)
+		if err != nil {
+			return err
+		}
+	} else {
+		// No token could successfully update the secret, or the secret was deleted.
+		// We should evict the cache entry instead of re-storing the secret.
+		updater.logger.Debug("evicting response from cache", "path", path)
+		err = updater.leaseCache.db.Evict(cachememdb.IndexNameID, indexId)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// openWebSocketConnection opens a websocket connection to the event system for
+// the events that the static secret cache updater is interested in.
+func (updater *StaticSecretCacheUpdater) openWebSocketConnection(ctx context.Context) (*websocket.Conn, error) {
+	// We parse this into a URL object to get the specific host and scheme
+	// information without nasty string parsing.
+	vaultURL, err := url.Parse(updater.client.Address())
+	if err != nil {
+		return nil, err
+	}
+	vaultHost := vaultURL.Host
+	// If we're using https, use wss, otherwise ws
+	scheme := "wss"
+	if vaultURL.Scheme == "http" {
+		scheme = "ws"
+	}
+
+	webSocketURL := url.URL{
+		Path:   "/v1/sys/events/subscribe/kv*",
+		Host:   vaultHost,
+		Scheme: scheme,
+	}
+	query := webSocketURL.Query()
+	query.Set("json", "true")
+	query.Set("namespaces", "*")
+	webSocketURL.RawQuery = query.Encode()
+
+	updater.client.AddHeader(api.AuthHeaderName, updater.client.Token())
+	updater.client.AddHeader(api.NamespaceHeaderName, updater.client.Namespace())
+
+	// Populate these now to avoid recreating them in the upcoming for loop.
+	headers := updater.client.Headers()
+	wsURL := webSocketURL.String()
+	httpClient := updater.client.CloneConfig().HttpClient
+
+	// We do up to ten attempts, to ensure we follow any redirects that
+	// forward us to the leader.
+	var conn *websocket.Conn
+	var resp *http.Response
+	for attempt := 0; attempt < 10; attempt++ {
+		conn, resp, err = websocket.Dial(ctx, wsURL, &websocket.DialOptions{
+			HTTPClient: httpClient,
+			HTTPHeader: headers,
+		})
+		if err == nil {
+			break
+		}
+
+		// Retry only on temporary redirects, against the new location;
+		// any other failure is terminal.
+		if resp == nil || resp.StatusCode != http.StatusTemporaryRedirect {
+			break
+		}
+		wsURL = resp.Header.Get("Location")
+	}
+
+	if err != nil {
+		errMessage := err.Error()
+		if resp != nil {
+			if resp.StatusCode == http.StatusNotFound {
+				return nil, fmt.Errorf("received 404 when opening web socket to %s, ensure Vault is Enterprise version 1.16 or above", wsURL)
+			}
+			if resp.StatusCode == http.StatusForbidden {
+				var errBytes []byte
+				errBytes, err = io.ReadAll(resp.Body)
+				resp.Body.Close()
+				if err != nil {
+					return nil, fmt.Errorf("error occurred when attempting to read error response from Vault server")
+				}
+				errMessage = string(errBytes)
+			}
+		}
+		return nil, fmt.Errorf("error returned when opening event stream web socket to %s, ensure auto-auth token"+
+			" has correct permissions and Vault is Enterprise version 1.16 or above: %s", wsURL, errMessage)
+	}
+
+	if conn == nil {
+		return nil, fmt.Errorf("too many redirects as part of establishing web socket connection to %s", wsURL)
+	}
+
+	return conn, nil
+}
+
+// Run is intended to be the method called by Vault Proxy to run this subsystem.
+// Once a token is provided to the sink, we will start the websocket and start consuming
+// events and updating secrets.
+// Run will shut down gracefully when the context is cancelled.
+func (updater *StaticSecretCacheUpdater) Run(ctx context.Context, authRenewalInProgress *atomic.Bool, invalidTokenErrCh chan error) error {
+	updater.logger.Info("starting static secret cache updater subsystem")
+	defer func() {
+		updater.logger.Info("static secret cache updater subsystem stopped")
+	}()
+
+tokenLoop:
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+			// Wait for the auto-auth token to be populated...
+ if updater.tokenSink.(sink.SinkReader).Token() != "" { + break tokenLoop + } + time.Sleep(100 * time.Millisecond) + } + } + + shouldBackoff := false + for { + select { + case <-ctx.Done(): + return nil + default: + // If we're erroring and the context isn't done, we should add + // a little backoff to make sure we don't accidentally overload + // Vault or similar. + if shouldBackoff { + time.Sleep(10 * time.Second) + } + err := updater.streamStaticSecretEvents(ctx) + if err != nil { + updater.logger.Error("error occurred during streaming static secret cache update events", "err", err) + shouldBackoff = true + if strings.Contains(err.Error(), logical.ErrInvalidToken.Error()) && !authRenewalInProgress.Load() { + // Drain the channel in case there is an error that has already been sent but not received + select { + case <-invalidTokenErrCh: + default: + } + updater.logger.Error("received invalid token error while opening websocket") + invalidTokenErrCh <- err + } + continue + } + } + } +} diff --git a/command/agentproxyshared/cache/static_secret_cache_updater_test.go b/command/agentproxyshared/cache/static_secret_cache_updater_test.go new file mode 100644 index 000000000000..a115fe608ba5 --- /dev/null +++ b/command/agentproxyshared/cache/static_secret_cache_updater_test.go @@ -0,0 +1,1054 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "fmt" + "os" + "sync" + syncatomic "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "nhooyr.io/websocket" +) + +// Avoiding a circular dependency in the test. +type mockSink struct { + token *atomic.String +} + +func (m *mockSink) Token() string { + return m.token.Load() +} + +func (m *mockSink) WriteToken(token string) error { + m.token.Store(token) + return nil +} + +func newMockSink(t *testing.T) sink.Sink { + t.Helper() + + return &mockSink{ + token: atomic.NewString(""), + } +} + +// testNewStaticSecretCacheUpdater returns a new StaticSecretCacheUpdater +// for use in tests. +func testNewStaticSecretCacheUpdater(t *testing.T, client *api.Client) *StaticSecretCacheUpdater { + t.Helper() + + lc := testNewLeaseCache(t, []*SendResponse{}) + tokenSink := newMockSink(t) + tokenSink.WriteToken(client.Token()) + + updater, err := NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.updater"), + TokenSink: tokenSink, + }) + require.NoError(t, err) + return updater +} + +// TestNewStaticSecretCacheUpdater tests the NewStaticSecretCacheUpdater method, +// to ensure it errors out when appropriate. 
+func TestNewStaticSecretCacheUpdater(t *testing.T) { + t.Parallel() + + lc := testNewLeaseCache(t, []*SendResponse{}) + config := api.DefaultConfig() + logger := logging.NewVaultLogger(hclog.Trace).Named("cache.updater") + client, err := api.NewClient(config) + require.NoError(t, err) + tokenSink := newMockSink(t) + + // Expect an error if any of the arguments are nil: + updater, err := NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: nil, + LeaseCache: lc, + Logger: logger, + TokenSink: tokenSink, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: nil, + Logger: logger, + TokenSink: tokenSink, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: nil, + TokenSink: tokenSink, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.updater"), + TokenSink: nil, + }) + require.Error(t, err) + require.Nil(t, updater) + + // Don't expect an error if the arguments are as expected + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.updater"), + TokenSink: tokenSink, + }) + require.NoError(t, err) + require.NotNil(t, updater) +} + +// TestOpenWebSocketConnection tests that the openWebSocketConnection function +// works as expected (fails on CE, succeeds on ent). +// This uses a TLS enabled (wss) WebSocket connection. +func TestOpenWebSocketConnection(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + updater.tokenSink.WriteToken(client.Token()) + + conn, err := updater.openWebSocketConnection(context.Background()) + if constants.IsEnterprise { + require.NoError(t, err) + require.NotNil(t, conn) + } else { + require.Nil(t, conn) + require.Errorf(t, err, "ensure Vault is Enterprise version 1.16 or above") + } +} + +// TestOpenWebSocketConnection_BadPolicyToken tests attempting to open a websocket +// connection to the events system using a token that has incorrect policy access +// will not trigger auto auth +func TestOpenWebSocketConnection_BadPolicyToken(t *testing.T) { + // We need a valid cluster for the connection to succeed. 
+ cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + eventPolicy := `path "sys/events/subscribe/*" { + capabilities = ["deny"] + }` + client.Sys().PutPolicy("no_events_access", eventPolicy) + + // Create a new token with a bad policy + token, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"no_events_access"}, + }) + require.NoError(t, err) + + // Set the client token to one with an invalid policy + updater.tokenSink.WriteToken(token.Auth.ClientToken) + client.SetToken(token.Auth.ClientToken) + + ctx, cancelFunc := context.WithCancel(context.Background()) + + authInProgress := &syncatomic.Bool{} + renewalChannel := make(chan error) + errCh := make(chan error) + go func() { + errCh <- updater.Run(ctx, authInProgress, renewalChannel) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + require.NoError(t, err) + } + }() + + defer cancelFunc() + + // Verify that the token has been written to the sink before checking auto auth + // is not re-triggered + err = updater.streamStaticSecretEvents(ctx) + require.ErrorContains(t, err, logical.ErrPermissionDenied.Error()) + + // Auto auth should not be retriggered + timeout := time.After(2 * time.Second) + select { + case <-renewalChannel: + t.Fatal("incorrectly triggered auto auth") + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + } +} + +// TestOpenWebSocketConnection_AutoAuthSelfHeal tests attempting to open a websocket +// connection to the events system using an invalid token will re-trigger +// auto auth. +func TestOpenWebSocketConnection_AutoAuthSelfHeal(t *testing.T) { + // We need a valid cluster for the connection to succeed. 
+ cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + // Revoke the token before it can be used to open a connection to the events system + client.Auth().Token().RevokeOrphan(client.Token()) + updater.tokenSink.WriteToken(client.Token()) + time.Sleep(100 * time.Millisecond) + + ctx, cancelFunc := context.WithCancel(context.Background()) + + authInProgress := &syncatomic.Bool{} + renewalChannel := make(chan error) + errCh := make(chan error) + go func() { + errCh <- updater.Run(ctx, authInProgress, renewalChannel) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + require.NoError(t, err) + } + }() + + defer cancelFunc() + + // Wait for static secret updater to begin + timeout := time.After(10 * time.Second) + + select { + case <-renewalChannel: + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + t.Fatal("timed out before auto auth could be re-triggered") + } + authInProgress.Store(false) + + // Verify that auto auth is re-triggered again because another auth is "not in progress" + timeout = time.After(15 * time.Second) + select { + case <-renewalChannel: + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + t.Fatal("timed out before auto auth could be re-triggered") + } + authInProgress.Store(true) + + // Verify that auto auth is NOT re-triggered again because another auth is in progress + timeout = time.After(2 * time.Second) + select { + case <-renewalChannel: + t.Fatal("auto auth was incorrectly re-triggered") + case <-ctx.Done(): + t.Fatal("context was closed before auto auth could be re-triggered") + case <-timeout: + } +} + +// TestOpenWebSocketConnectionReceivesEventsDefaultMount tests that the openWebSocketConnection function +// works as expected with the default KVV1 mount, and then the connection can be used to receive an event. +// This acts as more of an event system sanity check than a test of the updater +// logic. It's still important coverage, though. +// It also adds a client timeout of 1 second and checks that the connection does not timeout as this is a +// streaming request. +func TestOpenWebSocketConnectionReceivesEventsDefaultMount(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. 
+ cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + oldClientTimeout := os.Getenv("VAULT_CLIENT_TIMEOUT") + os.Setenv("VAULT_CLIENT_TIMEOUT", "1") + defer os.Setenv("VAULT_CLIENT_TIMEOUT", oldClientTimeout) + + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + require.NoError(t, err) + require.NotNil(t, conn) + + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + makeData := func(i int) map[string]interface{} { + return map[string]interface{}{ + "foo": fmt.Sprintf("bar%d", i), + } + } + // Put a secret, which should trigger an event + err = client.KVv1("secret").Put(context.Background(), "foo", makeData(100)) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + // Do a fresh PUT just to refresh the secret and send a new message + err = client.KVv1("secret").Put(context.Background(), "foo", makeData(i)) + require.NoError(t, err) + + // This method blocks until it gets a secret, so this test + // will only pass if we're receiving events correctly. + // It will fail here if the connection times out. + _, _, err = conn.Read(context.Background()) + require.NoError(t, err) + } +} + +// TestOpenWebSocketConnectionReceivesEventsKVV1 tests that the openWebSocketConnection function +// works as expected with KVV1, and then the connection can be used to receive an event. +// This acts as more of an event system sanity check than a test of the updater +// logic. It's still important coverage, though. +func TestOpenWebSocketConnectionReceivesEventsKVV1(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.Factory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + require.NoError(t, err) + require.NotNil(t, conn) + + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + err = client.Sys().Mount("secret-v1", &api.MountInput{ + Type: "kv", + }) + require.NoError(t, err) + + makeData := func(i int) map[string]interface{} { + return map[string]interface{}{ + "foo": fmt.Sprintf("bar%d", i), + } + } + // Put a secret, which should trigger an event + err = client.KVv1("secret-v1").Put(context.Background(), "foo", makeData(100)) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + // Do a fresh PUT just to refresh the secret and send a new message + err = client.KVv1("secret-v1").Put(context.Background(), "foo", makeData(i)) + require.NoError(t, err) + + // This method blocks until it gets a secret, so this test + // will only pass if we're receiving events correctly. + _, _, err := conn.Read(context.Background()) + require.NoError(t, err) + } +} + +// TestOpenWebSocketConnectionReceivesEventsKVV2 tests that the openWebSocketConnection function +// works as expected with KVV2, and then the connection can be used to receive an event. +// This acts as more of an event system sanity check than a test of the updater +// logic. It's still important coverage, though. 
+func TestOpenWebSocketConnectionReceivesEventsKVV2(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + require.NoError(t, err) + require.NotNil(t, conn) + + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + makeData := func(i int) map[string]interface{} { + return map[string]interface{}{ + "foo": fmt.Sprintf("bar%d", i), + } + } + + err = client.Sys().Mount("secret-v2", &api.MountInput{ + Type: "kv-v2", + }) + require.NoError(t, err) + + // Put a secret, which should trigger an event + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", makeData(100)) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + // Do a fresh PUT just to refresh the secret and send a new message + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", makeData(i)) + require.NoError(t, err) + + // This method blocks until it gets a secret, so this test + // will only pass if we're receiving events correctly. + _, _, err := conn.Read(context.Background()) + require.NoError(t, err) + } +} + +// TestOpenWebSocketConnectionTestServer tests that the openWebSocketConnection function +// works as expected using vaulthttp.TestServer. This server isn't TLS enabled, so tests +// the ws path (as opposed to the wss) path. +func TestOpenWebSocketConnectionTestServer(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. + core := vault.TestCoreWithConfig(t, &vault.CoreConfig{}) + ln, addr := vaulthttp.TestServer(t, core) + defer ln.Close() + + keys, rootToken := vault.TestCoreInit(t, core) + for _, key := range keys { + _, err := core.Unseal(key) + require.NoError(t, err) + } + + config := api.DefaultConfig() + config.Address = addr + client, err := api.NewClient(config) + require.NoError(t, err) + client.SetToken(rootToken) + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + require.NoError(t, err) + require.NotNil(t, conn) +} + +// Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets tests that an event will +// properly update the corresponding secret in Proxy's cache. This is a little more end-to-end-y +// than TestUpdateStaticSecret, and essentially is testing a similar thing, though is +// ensuring that updateStaticSecret gets called by the event arriving +// (as part of streamStaticSecretEvents) instead of testing calling it explicitly. 
+func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + wg := &sync.WaitGroup{} + runStreamStaticSecretEvents := func() { + wg.Add(1) + err := updater.streamStaticSecretEvents(context.Background()) + require.NoError(t, err) + } + go runStreamStaticSecretEvents() + + // First, create the secret in the cache that we expect to be updated: + path := "secret-v2/data/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: path, + Versions: map[int][]byte{}, + LastRenewed: initialTime, + ID: indexId, + // Valid token provided, so update should work. + Tokens: map[string]struct{}{client.Token(): {}}, + Response: []byte{}, + } + err := leaseCache.db.Set(index) + require.NoError(t, err) + + secretData := map[string]interface{}{ + "foo": "bar", + } + + err = client.Sys().Mount("secret-v2", &api.MountInput{ + Type: "kv-v2", + }) + require.NoError(t, err) + + // Wait for the event stream to be fully up and running. Should be faster than this in reality, but + // we make it five seconds to protect against CI flakiness. + time.Sleep(5 * time.Second) + + // Put a secret, which should trigger an event + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData) + require.NoError(t, err) + + // Wait for the event to arrive. Events are usually much, much faster + // than this, but we make it five seconds to protect against CI flakiness. + time.Sleep(5 * time.Second) + + // Then, do a GET to see if the index got updated by the event + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.NoError(t, err) + require.NotNil(t, newIndex) + require.NotEqual(t, []byte{}, newIndex.Response) + require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") + require.Equal(t, index.RequestPath, newIndex.RequestPath) + require.Equal(t, index.Tokens, newIndex.Tokens) + + // Assert that the corresponding version got updated too + require.Len(t, newIndex.Versions, 1) + require.NotNil(t, newIndex.Versions) + require.NotNil(t, newIndex.Versions[1]) + require.Equal(t, newIndex.Versions[1], newIndex.Response) + + wg.Done() +} + +// TestUpdateStaticSecret tests that updateStaticSecret works as expected, reaching out +// to Vault to get an updated secret when called. +func TestUpdateStaticSecret(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. 
+ cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + path := "secret/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: "secret/foo", + LastRenewed: initialTime, + ID: indexId, + Versions: map[int][]byte{}, + // Valid token provided, so update should work. + Tokens: map[string]struct{}{client.Token(): {}}, + Response: []byte{}, + } + err := leaseCache.db.Set(index) + require.NoError(t, err) + + secretData := map[string]interface{}{ + "foo": "bar", + } + + // create the secret in Vault. n.b. the test cluster has already mounted the KVv1 backend at "secret" + err = client.KVv1("secret").Put(context.Background(), "foo", secretData) + require.NoError(t, err) + + // attempt the update + err = updater.updateStaticSecret(context.Background(), path) + require.NoError(t, err) + + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.NoError(t, err) + require.NotNil(t, newIndex) + require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") + require.NotEqual(t, []byte{}, newIndex.Response) + require.Equal(t, index.RequestPath, newIndex.RequestPath) + require.Equal(t, index.Tokens, newIndex.Tokens) + require.Len(t, newIndex.Versions, 0) +} + +// TestUpdateStaticSecret_KVv2 tests that updateStaticSecret works as expected, reaching out +// to Vault to get an updated secret when called. It should also update the corresponding +// version of that secret in the cache index's Versions field. +func TestUpdateStaticSecret_KVv2(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + path := "secret-v2/data/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: path, + LastRenewed: initialTime, + ID: indexId, + Versions: map[int][]byte{}, + // Valid token provided, so update should work. 
+ Tokens: map[string]struct{}{client.Token(): {}}, + Response: []byte{}, + } + err := leaseCache.db.Set(index) + require.NoError(t, err) + + secretData := map[string]interface{}{ + "foo": "bar", + } + + err = client.Sys().Mount("secret-v2", &api.MountInput{ + Type: "kv-v2", + }) + require.NoError(t, err) + + // create the secret in Vault + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData) + require.NoError(t, err) + + // attempt the update + err = updater.updateStaticSecret(context.Background(), path) + require.NoError(t, err) + + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.NoError(t, err) + require.NotNil(t, newIndex) + require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") + require.NotEqual(t, []byte{}, newIndex.Response) + require.Equal(t, index.RequestPath, newIndex.RequestPath) + require.Equal(t, index.Tokens, newIndex.Tokens) + + // It should have also updated version 1 with the same version. + require.Len(t, newIndex.Versions, 1) + require.NotNil(t, newIndex.Versions[1]) + require.Equal(t, newIndex.Versions[1], newIndex.Response) +} + +// TestUpdateStaticSecret_EvictsIfInvalidTokens tests that updateStaticSecret will +// evict secrets from the cache if no valid tokens are left. +func TestUpdateStaticSecret_EvictsIfInvalidTokens(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + path := "secret/foo" + indexId := hashStaticSecretIndex(path) + renewTime := time.Now().UTC() + + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: "secret/foo", + LastRenewed: renewTime, + ID: indexId, + // Note: invalid Tokens value provided, so this secret cannot be updated, and must be evicted + Tokens: map[string]struct{}{"invalid token": {}}, + } + err := leaseCache.db.Set(index) + require.NoError(t, err) + + secretData := map[string]interface{}{ + "foo": "bar", + } + + // create the secret in Vault. n.b. the test cluster has already mounted the KVv1 backend at "secret" + err = client.KVv1("secret").Put(context.Background(), "foo", secretData) + require.NoError(t, err) + + // attempt the update + err = updater.updateStaticSecret(context.Background(), path) + require.NoError(t, err) + + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.Equal(t, cachememdb.ErrCacheItemNotFound, err) + require.Nil(t, newIndex) +} + +// TestUpdateStaticSecret_HandlesNonCachedPaths tests that updateStaticSecret +// doesn't fail or error if we try and give it an update to a path that isn't cached. +func TestUpdateStaticSecret_HandlesNonCachedPaths(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + path := "secret/foo" + + // Attempt the update + err := updater.updateStaticSecret(context.Background(), path) + require.NoError(t, err) + require.Nil(t, err) +} + +// TestPreEventStreamUpdate tests that preEventStreamUpdate correctly +// updates old static secrets in the cache. 
+func TestPreEventStreamUpdate(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + // First, create the secret in the cache that we expect to be updated: + path := "secret-v2/data/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: path, + LastRenewed: initialTime, + ID: indexId, + Versions: map[int][]byte{}, + // Valid token provided, so update should work. + Tokens: map[string]struct{}{client.Token(): {}}, + Response: []byte{}, + Type: cacheboltdb.StaticSecretType, + } + err := leaseCache.db.Set(index) + require.NoError(t, err) + + secretData := map[string]interface{}{ + "foo": "bar", + } + + err = client.Sys().Mount("secret-v2", &api.MountInput{ + Type: "kv-v2", + }) + require.NoError(t, err) + + // Put a secret (with different values to what's currently in the cache) + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData) + require.NoError(t, err) + + // perform the pre-event stream update: + err = updater.preEventStreamUpdate(context.Background()) + require.Nil(t, err) + + // Then, do a GET to see if the event got updated + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.Nil(t, err) + require.NotNil(t, newIndex) + require.NotEqual(t, []byte{}, newIndex.Response) + require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") + require.Equal(t, index.RequestPath, newIndex.RequestPath) + require.Equal(t, index.Tokens, newIndex.Tokens) + require.Equal(t, index.Versions, newIndex.Versions) +} + +// TestPreEventStreamUpdateErrorUpdating tests that preEventStreamUpdate correctly responds +// to errors on secret updates +func TestPreEventStreamUpdateErrorUpdating(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + // First, create the secret in the cache that we expect to be updated: + path := "secret-v2/data/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: path, + LastRenewed: initialTime, + ID: indexId, + // Valid token provided, so update should work. 
+ Tokens: map[string]struct{}{client.Token(): {}}, + Response: []byte{}, + Type: cacheboltdb.StaticSecretType, + } + err := leaseCache.db.Set(index) + require.NoError(t, err) + + secretData := map[string]interface{}{ + "foo": "bar", + } + + err = client.Sys().Mount("secret-v2", &api.MountInput{ + Type: "kv-v2", + }) + require.NoError(t, err) + + // Put a secret (with different values to what's currently in the cache) + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData) + require.NoError(t, err) + + // Seal Vault, so that the update will fail + cluster.EnsureCoresSealed(t) + + // perform the pre-event stream update: + err = updater.preEventStreamUpdate(context.Background()) + require.Nil(t, err) + + // Then, we expect the index to be evicted since the token failed to update + _, err = leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.Equal(t, cachememdb.ErrCacheItemNotFound, err) +} + +// TestCheckForDeleteOrDestroyEvent tests the behaviour of checkForDeleteOrDestroyEvent +// and assures it gives the right responses for different events. +func TestCheckForDeleteOrDestroyEvent(t *testing.T) { + t.Parallel() + + expectedVersions := []int{1, 3, 5} + jsonFormatExpectedVersions := "[1,3,5]" + expectedPath := "secret-v2/data/my-secret" + deletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "deleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "delete", + "path": "secret-v2/delete/my-secret", + }, + }, + "event_type": "kv-v2/delete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + undeletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "undeleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "undelete", + "path": "secret-v2/undelete/my-secret", + }, + }, + "event_type": "kv-v2/undelete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + destroyedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "destroyed_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "destroy", + "path": "secret-v2/destroy/my-secret", + }, + }, + "event_type": "kv-v2/destroy", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + actualVersions, actualPath := checkForDeleteOrDestroyEvent(deletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(undeletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(destroyedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) +} + +// TestCheckForDeleteOrDestroyNamespacedEvent tests the behaviour of 
checkForDeleteOrDestroyEvent +// with namespaces in paths. +func TestCheckForDeleteOrDestroyNamespacedEvent(t *testing.T) { + t.Parallel() + + expectedVersions := []int{1, 3, 5} + jsonFormatExpectedVersions := "[1,3,5]" + expectedPath := "ns/secret-v2/data/my-secret" + deletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "deleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "delete", + "data_path": "secret-v2/data/my-secret", + "path": "secret-v2/delete/my-secret", + }, + }, + "namespace": "ns/", + "event_type": "kv-v2/delete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + undeletedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "undeleted_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "undelete", + "data_path": "secret-v2/data/my-secret", + "path": "secret-v2/undelete/my-secret", + }, + }, + "namespace": "ns/", + "event_type": "kv-v2/undelete", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + destroyedVersionEventMap := map[string]interface{}{ + "id": "abc", + "source": "abc", + "data": map[string]interface{}{ + "event": map[string]interface{}{ + "id": "bar", + "metadata": map[string]interface{}{ + "current_version": "2", + "destroyed_versions": jsonFormatExpectedVersions, + "modified": true, + "operation": "destroy", + "data_path": "secret-v2/data/my-secret", + "path": "secret-v2/destroy/my-secret", + }, + }, + "namespace": "ns/", + "event_type": "kv-v2/destroy", + "plugin_info": map[string]interface{}{ + "mount_path": "secret-v2/", + "plugin": "kv", + "version": 2, + }, + }, + } + + actualVersions, actualPath := checkForDeleteOrDestroyEvent(deletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(undeletedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) + + actualVersions, actualPath = checkForDeleteOrDestroyEvent(destroyedVersionEventMap) + require.Equal(t, expectedVersions, actualVersions) + require.Equal(t, expectedPath, actualPath) +} diff --git a/command/agentproxyshared/cache/static_secret_capability_manager.go b/command/agentproxyshared/cache/static_secret_capability_manager.go new file mode 100644 index 000000000000..b4dc3b2bd078 --- /dev/null +++ b/command/agentproxyshared/cache/static_secret_capability_manager.go @@ -0,0 +1,290 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "errors" + "fmt" + "slices" + "strings" + "time" + + "github.com/gammazero/workerpool" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/mitchellh/mapstructure" + "golang.org/x/exp/maps" +) + +type TokenCapabilityRefreshBehaviour int + +const ( + TokenCapabilityRefreshBehaviourOptimistic TokenCapabilityRefreshBehaviour = iota + TokenCapabilityRefreshBehaviourPessimistic +) + +const ( + // DefaultWorkers is the default number of workers for the worker pool. + DefaultWorkers = 5 + + // DefaultStaticSecretTokenCapabilityRefreshInterval is the default time + // between each capability poll. This is configured with the following config value: + // static_secret_token_capability_refresh_interval + DefaultStaticSecretTokenCapabilityRefreshInterval = 5 * time.Minute +) + +// StaticSecretCapabilityManager is a struct that utilizes +// a worker pool to keep capabilities up to date. +type StaticSecretCapabilityManager struct { + client *api.Client + leaseCache *LeaseCache + logger hclog.Logger + workerPool *workerpool.WorkerPool + staticSecretTokenCapabilityRefreshInterval time.Duration + tokenCapabilityRefreshBehaviour TokenCapabilityRefreshBehaviour +} + +// StaticSecretCapabilityManagerConfig is the configuration for initializing a new +// StaticSecretCapabilityManager. +type StaticSecretCapabilityManagerConfig struct { + LeaseCache *LeaseCache + Logger hclog.Logger + Client *api.Client + StaticSecretTokenCapabilityRefreshInterval time.Duration + StaticSecretTokenCapabilityRefreshBehaviour string +} + +// NewStaticSecretCapabilityManager creates a new instance of a StaticSecretCapabilityManager. 
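+//
+// A hedged construction sketch (the surrounding variables are assumed, not
+// part of this change); the refresh interval and behaviour fall back to their
+// defaults when left unset:
+//
+//	sscm, err := NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{
+//		LeaseCache: leaseCache,
+//		Logger:     logger,
+//		Client:     client,
+//		StaticSecretTokenCapabilityRefreshBehaviour: "pessimistic", // or "optimistic" (the default)
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer sscm.Stop() // stops the worker pool and any queued refresh jobs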
+func NewStaticSecretCapabilityManager(conf *StaticSecretCapabilityManagerConfig) (*StaticSecretCapabilityManager, error) { + if conf == nil { + return nil, errors.New("nil configuration provided") + } + + if conf.LeaseCache == nil { + return nil, fmt.Errorf("nil Lease Cache (a required parameter): %v", conf) + } + + if conf.Logger == nil { + return nil, fmt.Errorf("nil Logger (a required parameter): %v", conf) + } + + if conf.Client == nil { + return nil, fmt.Errorf("nil Client (a required parameter): %v", conf) + } + + if conf.StaticSecretTokenCapabilityRefreshInterval == 0 { + conf.StaticSecretTokenCapabilityRefreshInterval = DefaultStaticSecretTokenCapabilityRefreshInterval + } + + behaviour := TokenCapabilityRefreshBehaviourOptimistic + if conf.StaticSecretTokenCapabilityRefreshBehaviour != "" { + switch conf.StaticSecretTokenCapabilityRefreshBehaviour { + case "optimistic": + behaviour = TokenCapabilityRefreshBehaviourOptimistic + case "pessimistic": + behaviour = TokenCapabilityRefreshBehaviourPessimistic + default: + return nil, fmt.Errorf("TokenCapabilityRefreshBehaviour must be either \"optimistic\" or \"pessimistic\"") + } + } + + workerPool := workerpool.New(DefaultWorkers) + + return &StaticSecretCapabilityManager{ + client: conf.Client, + leaseCache: conf.LeaseCache, + logger: conf.Logger, + workerPool: workerPool, + staticSecretTokenCapabilityRefreshInterval: conf.StaticSecretTokenCapabilityRefreshInterval, + tokenCapabilityRefreshBehaviour: behaviour, + }, nil +} + +// submitWorkToPoolAfterInterval submits work to the pool after the defined +// staticSecretTokenCapabilityRefreshInterval +func (sscm *StaticSecretCapabilityManager) submitWorkToPoolAfterInterval(work func()) { + time.AfterFunc(sscm.staticSecretTokenCapabilityRefreshInterval, func() { + if !sscm.workerPool.Stopped() { + sscm.workerPool.Submit(work) + } + }) +} + +// Stop stops all ongoing jobs and ensures future jobs will not +// get added to the worker pool. +func (sscm *StaticSecretCapabilityManager) Stop() { + sscm.workerPool.Stop() +} + +// StartRenewingCapabilities takes a polling job and submits a constant renewal of capabilities to the worker pool. +// indexToRenew is the capabilities index we'll renew the capabilities for. +func (sscm *StaticSecretCapabilityManager) StartRenewingCapabilities(indexToRenew *cachememdb.CapabilitiesIndex) { + var work func() + work = func() { + if sscm.workerPool.Stopped() { + sscm.logger.Trace("worker pool stopped, stopping renewal") + return + } + + capabilitiesIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexToRenew.ID) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // This cache entry no longer exists, so there is no more work to do. 
+ sscm.logger.Trace("cache item not found for capabilities refresh, stopping the process") + return + } + if err != nil { + sscm.logger.Error("error when attempting to get capabilities index to refresh token capabilities", "indexToRenew.ID", indexToRenew.ID, "err", err) + sscm.submitWorkToPoolAfterInterval(work) + return + } + + capabilitiesIndex.IndexLock.RLock() + token := capabilitiesIndex.Token + indexReadablePathsMap := capabilitiesIndex.ReadablePaths + capabilitiesIndex.IndexLock.RUnlock() + indexReadablePaths := maps.Keys(indexReadablePathsMap) + + client, err := sscm.client.Clone() + if err != nil { + sscm.logger.Error("error when attempting clone client to refresh token capabilities", "indexToRenew.ID", indexToRenew.ID, "err", err) + sscm.submitWorkToPoolAfterInterval(work) + return + } + + client.SetToken(token) + + capabilities, err := getCapabilities(indexReadablePaths, client) + if err != nil { + sscm.logger.Warn("error when attempting to retrieve updated token capabilities", "indexToRenew.ID", indexToRenew.ID, "err", err) + if sscm.tokenCapabilityRefreshBehaviour == TokenCapabilityRefreshBehaviourPessimistic { + // Vault is be sealed or unreachable. If pessimistic, assume we might have + // lost access. Set capabilities to an empty set, so they are all removed. + capabilities = make(map[string][]string) + } else { + sscm.submitWorkToPoolAfterInterval(work) + return + } + } + + newReadablePaths := reconcileCapabilities(indexReadablePaths, capabilities) + if maps.Equal(indexReadablePathsMap, newReadablePaths) { + sscm.logger.Trace("capabilities were the same for index, nothing to do", "indexToRenew.ID", indexToRenew.ID) + // there's nothing to update! + sscm.submitWorkToPoolAfterInterval(work) + return + } + + // before updating or evicting the index, we must update the tokens on + // for each path, update the corresponding index with the diff + for _, path := range indexReadablePaths { + // If the old path isn't contained in the new readable paths, + // we must delete it from the tokens map for its corresponding + // path index. + if _, ok := newReadablePaths[path]; !ok { + indexId := hashStaticSecretIndex(path) + index, err := sscm.leaseCache.db.Get(cachememdb.IndexNameID, indexId) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // Nothing to update! 
+ continue + } + if err != nil { + sscm.logger.Error("error when attempting to update corresponding paths for capabilities index", "indexToRenew.ID", indexToRenew.ID, "err", err) + sscm.submitWorkToPoolAfterInterval(work) + return + } + sscm.logger.Trace("updating tokens for index, as capability has been lost", "index.ID", index.ID, "request_path", index.RequestPath) + index.IndexLock.Lock() + delete(index.Tokens, capabilitiesIndex.Token) + err = sscm.leaseCache.Set(context.Background(), index) + if err != nil { + sscm.logger.Error("error when attempting to update index in cache", "index.ID", index.ID, "err", err) + } + index.IndexLock.Unlock() + } + } + + // Lastly, we should update the capabilities index, either evicting or updating it + capabilitiesIndex.IndexLock.Lock() + defer capabilitiesIndex.IndexLock.Unlock() + if len(newReadablePaths) == 0 { + err := sscm.leaseCache.db.EvictCapabilitiesIndex(cachememdb.IndexNameID, indexToRenew.ID) + if err != nil { + sscm.logger.Error("error when attempting to evict capabilities from cache", "index.ID", indexToRenew.ID, "err", err) + sscm.submitWorkToPoolAfterInterval(work) + return + } + sscm.logger.Debug("successfully evicted capabilities index from cache", "index.ID", indexToRenew.ID) + // If we successfully evicted the index, no need to re-submit the work to the pool. + return + } + + // The token still has some capabilities, so update the capabilities index: + capabilitiesIndex.ReadablePaths = newReadablePaths + err = sscm.leaseCache.SetCapabilitiesIndex(context.Background(), capabilitiesIndex) + if err != nil { + sscm.logger.Error("error when attempting to update capabilities from cache", "index.ID", indexToRenew.ID, "err", err) + } + + // Finally, put ourselves back on the work pool after the refresh interval + sscm.submitWorkToPoolAfterInterval(work) + return + } + + sscm.submitWorkToPoolAfterInterval(work) +} + +// getCapabilities is a wrapper around a /sys/capabilities-self call that returns +// capabilities as a map with paths as keys, and capabilities as values. +func getCapabilities(paths []string, client *api.Client) (map[string][]string, error) { + body := make(map[string]interface{}) + body["paths"] = paths + capabilities := make(map[string][]string) + + secret, err := client.Logical().Write("sys/capabilities-self", body) + if err != nil && strings.Contains(err.Error(), "permission denied") { + // Token has expired. Return an empty set of capabilities: + return capabilities, nil + } + if err != nil { + return nil, err + } + + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + for _, path := range paths { + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + capabilities[path] = res + } + + return capabilities, nil +} + +// reconcileCapabilities takes a set of known readable paths, and a set of capabilities (a response from the +// sys/capabilities-self endpoint) and returns a subset of the readablePaths after taking into account any updated +// capabilities as a set, represented by a map of strings to structs. +// It will delete any path in readablePaths if it does not have a "root" or "read" capability listed in the +// capabilities map.
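As a worked example of the contract just described (values mirror the reconcile unit tests later in this diff), before the implementation that follows:

```go
// Worked example: only previously-readable paths that still carry
// "read" or "root" survive reconciliation.
paths := []string{"auth/token/create", "auth/token/lookup-self"}
caps := map[string][]string{
	"auth/token/create":      {"deny"}, // dropped: no read/root capability
	"auth/token/lookup-self": {"read"}, // kept
	"some/new/path":          {"read"}, // ignored: not previously readable
}
newPaths := reconcileCapabilities(paths, caps)
// newPaths == map[string]struct{}{"auth/token/lookup-self": {}}
```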
+func reconcileCapabilities(readablePaths []string, capabilities map[string][]string) map[string]struct{} { + newReadablePaths := make(map[string]struct{}) + for pathName, permissions := range capabilities { + if slices.Contains(permissions, "read") || slices.Contains(permissions, "root") { + // We do this as an additional sanity check. We never want to + // add permissions that weren't there before. + if slices.Contains(readablePaths, pathName) { + newReadablePaths[pathName] = struct{}{} + } + } + } + + return newReadablePaths +} diff --git a/command/agentproxyshared/cache/static_secret_capability_manager_test.go b/command/agentproxyshared/cache/static_secret_capability_manager_test.go new file mode 100644 index 000000000000..ea5efcd08b85 --- /dev/null +++ b/command/agentproxyshared/cache/static_secret_capability_manager_test.go @@ -0,0 +1,518 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/stretchr/testify/require" +) + +// testNewStaticSecretCapabilityManager returns a new StaticSecretCapabilityManager +// for use in tests. +func testNewStaticSecretCapabilityManager(t *testing.T, client *api.Client) *StaticSecretCapabilityManager { + t.Helper() + + lc := testNewLeaseCache(t, []*SendResponse{}) + + updater, err := NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager"), + Client: client, + StaticSecretTokenCapabilityRefreshInterval: 250 * time.Millisecond, + }) + if err != nil { + t.Fatal(err) + } + + return updater +} + +// TestNewStaticSecretCapabilityManager tests the NewStaticSecretCapabilityManager method, +// to ensure it errors out when appropriate. 
+func TestNewStaticSecretCapabilityManager(t *testing.T) { + t.Parallel() + + lc := testNewLeaseCache(t, []*SendResponse{}) + logger := logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager") + client, err := api.NewClient(api.DefaultConfig()) + require.Nil(t, err) + + // Expect an error if any of the arguments are nil: + updater, err := NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: nil, + Logger: logger, + Client: client, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: nil, + Client: client, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logger, + Client: nil, + }) + require.Error(t, err) + require.Nil(t, updater) + + // Don't expect an error if the arguments are as expected + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager"), + Client: client, + }) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, updater) + require.NotNil(t, updater.workerPool) + require.NotNil(t, updater.staticSecretTokenCapabilityRefreshInterval) + require.NotNil(t, updater.client) + require.NotNil(t, updater.leaseCache) + require.NotNil(t, updater.logger) + require.Equal(t, DefaultStaticSecretTokenCapabilityRefreshInterval, updater.staticSecretTokenCapabilityRefreshInterval) + + // Lastly, double check that the refresh interval can be properly set + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager"), + Client: client, + StaticSecretTokenCapabilityRefreshInterval: time.Hour, + }) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, updater) + require.NotNil(t, updater.workerPool) + require.NotNil(t, updater.staticSecretTokenCapabilityRefreshInterval) + require.NotNil(t, updater.client) + require.NotNil(t, updater.leaseCache) + require.NotNil(t, updater.logger) + require.Equal(t, time.Hour, updater.staticSecretTokenCapabilityRefreshInterval) +} + +// TestGetCapabilitiesRootToken tests the getCapabilities method with the root +// token, expecting to get "root" capabilities on valid paths +func TestGetCapabilitiesRootToken(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + capabilitiesToCheck := []string{"auth/token/create", "sys/health"} + capabilities, err := getCapabilities(capabilitiesToCheck, client) + require.NoError(t, err) + + expectedCapabilities := map[string][]string{ + "auth/token/create": {"root"}, + "sys/health": {"root"}, + } + require.Equal(t, expectedCapabilities, capabilities) +} + +// TestGetCapabilitiesLowPrivilegeToken tests the getCapabilities method with +// a low privilege token, expecting to get deny or non-root capabilities +func TestGetCapabilitiesLowPrivilegeToken(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + renewable := true + // Set the token's policies to 'default' and nothing else + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest) 
+ require.NoError(t, err) + token := secret.Auth.ClientToken + + client.SetToken(token) + + capabilitiesToCheck := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"} + capabilities, err := getCapabilities(capabilitiesToCheck, client) + require.NoError(t, err) + + expectedCapabilities := map[string][]string{ + "auth/token/create": {"deny"}, + "sys/capabilities-self": {"update"}, + "auth/token/lookup-self": {"read"}, + } + require.Equal(t, expectedCapabilities, capabilities) +} + +// TestGetCapabilitiesBadClientToken tests that getCapabilities +// returns an empty set of capabilities if the token is bad (and it gets a 403) +func TestGetCapabilitiesBadClientToken(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + client.SetToken("") + + capabilitiesToCheck := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"} + capabilities, err := getCapabilities(capabilitiesToCheck, client) + require.Nil(t, err) + require.Equal(t, map[string][]string{}, capabilities) +} + +// TestGetCapabilitiesEmptyPaths tests that getCapabilities will error on an empty +// set of paths to check +func TestGetCapabilitiesEmptyPaths(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + var capabilitiesToCheck []string + _, err := getCapabilities(capabilitiesToCheck, client) + require.Error(t, err) +} + +// TestReconcileCapabilities tests that reconcileCapabilities will +// correctly remove previously readable paths that we no longer have read access to. +func TestReconcileCapabilities(t *testing.T) { + t.Parallel() + paths := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"} + capabilities := map[string][]string{ + "auth/token/create": {"deny"}, + "sys/capabilities-self": {"update"}, + "auth/token/lookup-self": {"read"}, + } + + updatedCapabilities := reconcileCapabilities(paths, capabilities) + expectedUpdatedCapabilities := map[string]struct{}{ + "auth/token/lookup-self": {}, + } + require.Equal(t, expectedUpdatedCapabilities, updatedCapabilities) +} + +// TestReconcileCapabilitiesNoOp tests that reconcileCapabilities will +// correctly not remove capabilities when they all remain readable.
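Before the no-op test below, a sketch of how getCapabilities is consumed outside these tests. The function name printCapabilitiesSketch is hypothetical; client is assumed to be an authenticated *api.Client within this package.

```go
// Sketch (same package): consuming getCapabilities for a set of paths.
func printCapabilitiesSketch(client *api.Client, paths []string) error {
	caps, err := getCapabilities(paths, client)
	if err != nil {
		return err // network or non-403 API error
	}
	// Note: a 403 ("permission denied") is mapped to an empty map rather
	// than an error, as TestGetCapabilitiesBadClientToken above exercises.
	for path, perms := range caps {
		fmt.Printf("%s -> %v\n", path, perms)
	}
	return nil
}
```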
+func TestReconcileCapabilitiesNoOp(t *testing.T) { + t.Parallel() + paths := []string{"foo/bar", "bar/baz", "baz/foo"} + capabilities := map[string][]string{ + "foo/bar": {"read"}, + "bar/baz": {"root"}, + "baz/foo": {"read"}, + } + + updatedCapabilities := reconcileCapabilities(paths, capabilities) + expectedUpdatedCapabilities := map[string]struct{}{ + "foo/bar": {}, + "bar/baz": {}, + "baz/foo": {}, + } + require.Equal(t, expectedUpdatedCapabilities, updatedCapabilities) +} + +// TestReconcileCapabilitiesNoAdding tests that reconcileCapabilities will +// not add any capabilities that weren't present in the first argument to the function +func TestReconcileCapabilitiesNoAdding(t *testing.T) { + t.Parallel() + paths := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"} + capabilities := map[string][]string{ + "auth/token/create": {"deny"}, + "sys/capabilities-self": {"update"}, + "auth/token/lookup-self": {"read"}, + "some/new/path": {"read"}, + } + + updatedCapabilities := reconcileCapabilities(paths, capabilities) + expectedUpdatedCapabilities := map[string]struct{}{ + "auth/token/lookup-self": {}, + } + require.Equal(t, expectedUpdatedCapabilities, updatedCapabilities) +} + +// TestSubmitWorkNoOp tests that we will gracefully end if the capabilities index +// does not exist in the cache +func TestSubmitWorkNoOp(t *testing.T) { + t.Parallel() + client, err := api.NewClient(api.DefaultConfig()) + require.Nil(t, err) + sscm := testNewStaticSecretCapabilityManager(t, client) + // This index will be a no-op, as this does not exist in the cache + index := &cachememdb.CapabilitiesIndex{ + ID: "test", + } + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete... + time.Sleep(1 * time.Second) + require.Equal(t, 0, sscm.workerPool.WaitingQueueSize()) +} + +// TestSubmitWorkUpdatesIndex tests that an index will be correctly updated if the capabilities differ. +func TestSubmitWorkUpdatesIndex(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + // Create a low permission token + renewable := true + // Set the token's policies to 'default' and nothing else + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest) + require.NoError(t, err) + token := secret.Auth.ClientToken + indexId := hashStaticSecretIndex(token) + + sscm := testNewStaticSecretCapabilityManager(t, client) + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + // The token will (perhaps obviously) not have + // read access to /foo/bar, but will to /auth/token/lookup-self + ReadablePaths: map[string]struct{}{ + "foo/bar": {}, + "auth/token/lookup-self": {}, + }, + } + err = sscm.leaseCache.db.SetCapabilitiesIndex(index) + require.Nil(t, err) + + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete at least once... 
+ time.Sleep(3 * time.Second) + + newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + require.Nil(t, err) + newIndex.IndexLock.RLock() + require.Equal(t, map[string]struct{}{ + "auth/token/lookup-self": {}, + }, newIndex.ReadablePaths) + newIndex.IndexLock.RUnlock() + + // Forcefully stop any remaining workers + sscm.workerPool.Stop() +} + +// TestSubmitWorkUpdatesIndexWithBadToken tests that an index will be correctly updated if the token +// has expired and we cannot access the sys capabilities endpoint. +func TestSubmitWorkUpdatesIndexWithBadToken(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + token := "not real token" + indexId := hashStaticSecretIndex(token) + + sscm := testNewStaticSecretCapabilityManager(t, client) + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + ReadablePaths: map[string]struct{}{ + "foo/bar": {}, + "auth/token/lookup-self": {}, + }, + } + err := sscm.leaseCache.db.SetCapabilitiesIndex(index) + require.Nil(t, err) + + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete at least once... + time.Sleep(3 * time.Second) + + // This entry should be evicted. + newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + require.Equal(t, err, cachememdb.ErrCacheItemNotFound) + require.Nil(t, newIndex) + + // Forcefully stop any remaining workers + sscm.workerPool.Stop() +} + +// TestSubmitWorkSealedVaultOptimistic tests that the capability manager +// behaves as expected when +// sscm.tokenCapabilityRefreshBehaviour == TokenCapabilityRefreshBehaviourOptimistic +func TestSubmitWorkSealedVaultOptimistic(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + token := "not real token" + indexId := hashStaticSecretIndex(token) + + sscm := testNewStaticSecretCapabilityManager(t, client) + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + ReadablePaths: map[string]struct{}{ + "foo/bar": {}, + "auth/token/lookup-self": {}, + }, + } + err := sscm.leaseCache.db.SetCapabilitiesIndex(index) + require.Nil(t, err) + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete at least once... + time.Sleep(3 * time.Second) + + // This entry should not be evicted. 
+ newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + require.NoError(t, err) + require.NotNil(t, newIndex) + + // Forcefully stop any remaining workers + sscm.workerPool.Stop() +} + +// TestSubmitWorkSealedVaultPessimistic tests that the capability manager +// behaves as expected when +// sscm.tokenCapabilityRefreshBehaviour == TokenCapabilityRefreshBehaviourPessimistic +func TestSubmitWorkSealedVaultPessimistic(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + token := "not real token" + indexId := hashStaticSecretIndex(token) + + sscm := testNewStaticSecretCapabilityManager(t, client) + sscm.tokenCapabilityRefreshBehaviour = TokenCapabilityRefreshBehaviourPessimistic + + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + ReadablePaths: map[string]struct{}{ + "foo/bar": {}, + "auth/token/lookup-self": {}, + }, + } + err := sscm.leaseCache.db.SetCapabilitiesIndex(index) + require.Nil(t, err) + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete at least once... + time.Sleep(3 * time.Second) + + // This entry should be evicted. + newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + require.Error(t, err) + require.Nil(t, newIndex) + + // Forcefully stop any remaining workers + sscm.workerPool.Stop() +} + +// TestSubmitWorkUpdatesAllIndexes tests that an index will be correctly updated if the capabilities differ, as +// well as the indexes related to the paths that are being checked for. +func TestSubmitWorkUpdatesAllIndexes(t *testing.T) { + t.Parallel() + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + // Create a low permission token + renewable := true + // Set the token's policies to 'default' and nothing else + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest) + require.NoError(t, err) + token := secret.Auth.ClientToken + indexId := hashStaticSecretIndex(token) + + sscm := testNewStaticSecretCapabilityManager(t, client) + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + // The token will (perhaps obviously) not have + // read access to /foo/bar, but will to /auth/token/lookup-self + ReadablePaths: map[string]struct{}{ + "foo/bar": {}, + "auth/token/lookup-self": {}, + }, + } + err = sscm.leaseCache.db.SetCapabilitiesIndex(index) + require.Nil(t, err) + + pathIndexId1 := hashStaticSecretIndex("foo/bar") + pathIndex1 := &cachememdb.Index{ + ID: pathIndexId1, + Namespace: "root/", + Tokens: map[string]struct{}{ + token: {}, + }, + RequestPath: "foo/bar", + Response: []byte{}, + } + + pathIndexId2 := hashStaticSecretIndex("auth/token/lookup-self") + pathIndex2 := &cachememdb.Index{ + ID: pathIndexId2, + Namespace: "root/", + Tokens: map[string]struct{}{ + token: {}, + }, + RequestPath: "auth/token/lookup-self", + Response: []byte{}, + } + + err = sscm.leaseCache.db.Set(pathIndex1) + require.Nil(t, err) + + err = sscm.leaseCache.db.Set(pathIndex2) + require.Nil(t, err) + + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete at least once... 
+ time.Sleep(1 * time.Second) + + newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + require.Nil(t, err) + newIndex.IndexLock.RLock() + require.Equal(t, map[string]struct{}{ + "auth/token/lookup-self": {}, + }, newIndex.ReadablePaths) + newIndex.IndexLock.RUnlock() + + // For this, we expect the token to have been deleted + newPathIndex1, err := sscm.leaseCache.db.Get(cachememdb.IndexNameID, pathIndexId1) + require.Nil(t, err) + require.Equal(t, map[string]struct{}{}, newPathIndex1.Tokens) + + // For this, we expect no change + newPathIndex2, err := sscm.leaseCache.db.Get(cachememdb.IndexNameID, pathIndexId2) + require.Nil(t, err) + require.Equal(t, pathIndex2, newPathIndex2) + + // Forcefully stop any remaining workers + sscm.workerPool.Stop() +} diff --git a/command/agentproxyshared/cache/testing.go b/command/agentproxyshared/cache/testing.go new file mode 100644 index 000000000000..72ff895e5bdd --- /dev/null +++ b/command/agentproxyshared/cache/testing.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/useragent" +) + +// mockProxier is a mock implementation of the Proxier interface, used for testing purposes. +// The mock returns the provided responses in order, one per call to its Send method, and +// errors once all responses have been consumed. This lets tests control what the +// next/underlying Proxier layer would return. +type mockProxier struct { + proxiedResponses []*SendResponse + responseIndex int +} + +func NewMockProxier(responses []*SendResponse) *mockProxier { + return &mockProxier{ + proxiedResponses: responses, + } +} + +func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + if p.responseIndex >= len(p.proxiedResponses) { + return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses)) + } + resp := p.proxiedResponses[p.responseIndex] + + p.responseIndex++ + + return resp, nil +} + +func (p *mockProxier) ResponseIndex() int { + return p.responseIndex +} + +func newTestSendResponse(status int, body string) *SendResponse { + headers := make(http.Header) + headers.Add("User-Agent", useragent.AgentProxyString()) + resp := &SendResponse{ + Response: &api.Response{ + Response: &http.Response{ + StatusCode: status, + Header: headers, + }, + }, + } + resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat)) + + if body != "" { + resp.Response.Body = io.NopCloser(strings.NewReader(body)) + resp.ResponseBody = []byte(body) + } + + if json.Valid([]byte(body)) { + resp.Response.Header.Set("content-type", "application/json") + } + + return resp +} + +type mockTokenVerifierProxier struct { + currentToken string +} + +func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + p.currentToken = req.Token + resp := newTestSendResponse(http.StatusOK, + `{"data": {"id": "`+p.currentToken+`"}}`) + + return resp, nil +} + +func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string { + return p.currentToken +} + +type mockDelayProxier struct { + cacheableResp bool + delay int +} + +func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + if p.delay > 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case
<-time.After(time.Duration(p.delay) * time.Millisecond): + } + } + + // If this is a cacheable response, we return a unique response every time + if p.cacheableResp { + rand.Seed(time.Now().Unix()) + s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int()) + return newTestSendResponse(http.StatusOK, s), nil + } + + return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil +} diff --git a/command/agentproxyshared/cache/wheninconsistentaction_enumer.go b/command/agentproxyshared/cache/wheninconsistentaction_enumer.go new file mode 100644 index 000000000000..fdbf58e1baf7 --- /dev/null +++ b/command/agentproxyshared/cache/wheninconsistentaction_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=WhenInconsistentAction -trimprefix=WhenInconsistent"; DO NOT EDIT. + +package cache + +import ( + "fmt" +) + +const _WhenInconsistentActionName = "FailRetryForward" + +var _WhenInconsistentActionIndex = [...]uint8{0, 4, 9, 16} + +func (i WhenInconsistentAction) String() string { + if i < 0 || i >= WhenInconsistentAction(len(_WhenInconsistentActionIndex)-1) { + return fmt.Sprintf("WhenInconsistentAction(%d)", i) + } + return _WhenInconsistentActionName[_WhenInconsistentActionIndex[i]:_WhenInconsistentActionIndex[i+1]] +} + +var _WhenInconsistentActionValues = []WhenInconsistentAction{0, 1, 2} + +var _WhenInconsistentActionNameToValueMap = map[string]WhenInconsistentAction{ + _WhenInconsistentActionName[0:4]: 0, + _WhenInconsistentActionName[4:9]: 1, + _WhenInconsistentActionName[9:16]: 2, +} + +// WhenInconsistentActionString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func WhenInconsistentActionString(s string) (WhenInconsistentAction, error) { + if val, ok := _WhenInconsistentActionNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to WhenInconsistentAction values", s) +} + +// WhenInconsistentActionValues returns all values of the enum +func WhenInconsistentActionValues() []WhenInconsistentAction { + return _WhenInconsistentActionValues +} + +// IsAWhenInconsistentAction returns "true" if the value is listed in the enum definition. "false" otherwise +func (i WhenInconsistentAction) IsAWhenInconsistentAction() bool { + for _, v := range _WhenInconsistentActionValues { + if i == v { + return true + } + } + return false +} diff --git a/command/agentproxyshared/helpers.go b/command/agentproxyshared/helpers.go new file mode 100644 index 000000000000..f1ef47cfee33 --- /dev/null +++ b/command/agentproxyshared/helpers.go @@ -0,0 +1,240 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package agentproxyshared + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" + "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" + "github.com/hashicorp/vault/command/agentproxyshared/auth/azure" + "github.com/hashicorp/vault/command/agentproxyshared/auth/cert" + "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" + "github.com/hashicorp/vault/command/agentproxyshared/auth/gcp" + "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" + "github.com/hashicorp/vault/command/agentproxyshared/auth/kerberos" + "github.com/hashicorp/vault/command/agentproxyshared/auth/kubernetes" + "github.com/hashicorp/vault/command/agentproxyshared/auth/ldap" + "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" + token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" +) + +// GetAutoAuthMethodFromConfig Calls the appropriate NewAutoAuthMethod function, initializing +// the auto-auth method, based on the auto-auth method type. Returns an error if one happens or +// the method type is invalid. +func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) { + switch autoAuthMethodType { + case "alicloud": + return alicloud.NewAliCloudAuthMethod(authConfig) + case "aws": + return aws.NewAWSAuthMethod(authConfig) + case "azure": + return azure.NewAzureAuthMethod(authConfig) + case "cert": + return cert.NewCertAuthMethod(authConfig) + case "cf": + return cf.NewCFAuthMethod(authConfig) + case "gcp": + return gcp.NewGCPAuthMethod(authConfig) + case "jwt": + return jwt.NewJWTAuthMethod(authConfig) + case "kerberos": + return kerberos.NewKerberosAuthMethod(authConfig) + case "kubernetes": + return kubernetes.NewKubernetesAuthMethod(authConfig) + case "approle": + return approle.NewApproleAuthMethod(authConfig) + case "oci": + return oci.NewOCIAuthMethod(authConfig, vaultAddress) + case "token_file": + return token_file.NewTokenFileAuthMethod(authConfig) + case "pcf": // Deprecated. 
+ return cf.NewCFAuthMethod(authConfig) + case "ldap": + return ldap.NewLdapAuthMethod(authConfig) + default: + return nil, fmt.Errorf("unknown auth method %q", autoAuthMethodType) + } +} + +// PersistConfig contains configuration needed for persistent caching +type PersistConfig struct { + Type string + Path string `hcl:"path"` + KeepAfterImport bool `hcl:"keep_after_import"` + ExitOnErr bool `hcl:"exit_on_err"` + ServiceAccountTokenFile string `hcl:"service_account_token_file"` +} + +// AddPersistentStorageToLeaseCache adds persistence to a lease cache, based on a given PersistConfig. +// Returns a close function to be deferred and the old token, if found, or an error. +func AddPersistentStorageToLeaseCache(ctx context.Context, leaseCache *cache.LeaseCache, persistConfig *PersistConfig, logger log.Logger) (func() error, string, error) { + if persistConfig == nil { + return nil, "", errors.New("persist config was nil") + } + + if persistConfig.Path == "" { + return nil, "", errors.New("must specify persistent cache path") + } + + // Set AAD based on key protection type + var aad string + var err error + switch persistConfig.Type { + case "kubernetes": + aad, err = getServiceAccountJWT(persistConfig.ServiceAccountTokenFile) + if err != nil { + tokenFileName := persistConfig.ServiceAccountTokenFile + if len(tokenFileName) == 0 { + tokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + return nil, "", fmt.Errorf("failed to read service account token from %s: %w", tokenFileName, err) + } + default: + return nil, "", fmt.Errorf("persistent key protection type %q not supported", persistConfig.Type) + } + + // Check if bolt file exists already + dbFileExists, err := cacheboltdb.DBFileExists(persistConfig.Path) + if err != nil { + return nil, "", fmt.Errorf("failed to check if bolt file exists at path %s: %w", persistConfig.Path, err) + } + if dbFileExists { + // Open the bolt file, but wait to set up encryption + ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: persistConfig.Path, + Logger: logger.Named("cacheboltdb"), + }) + if err != nil { + return nil, "", fmt.Errorf("error opening persistent cache: %v", err) + } + + // Get the token from bolt for retrieving the encryption key, + // then set up encryption so that restore is possible + token, err := ps.GetRetrievalToken() + if err != nil { + return nil, "", fmt.Errorf("error getting retrieval token from persistent cache: %w", err) + } + + if err := ps.Close(); err != nil { + return nil, "", fmt.Errorf("failed to close persistent cache file after getting retrieval token: %w", err) + } + + km, err := keymanager.NewPassthroughKeyManager(ctx, token) + if err != nil { + return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err) + } + + // Open the bolt file with the wrapper provided + ps, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: persistConfig.Path, + Logger: logger.Named("cacheboltdb"), + Wrapper: km.Wrapper(), + AAD: aad, + }) + if err != nil { + return nil, "", fmt.Errorf("error opening persistent cache with wrapper: %w", err) + } + + // Restore anything in the persistent cache to the memory cache + if err := leaseCache.Restore(ctx, ps); err != nil { + logger.Error(fmt.Sprintf("error restoring in-memory cache from persisted file: %v", err)) + if persistConfig.ExitOnErr { + return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") + } + } + logger.Info("loaded memcache from persistent storage") + + // Check for previous auto-auth token + oldTokenBytes, err := ps.GetAutoAuthToken(ctx) + if err != nil { + logger.Error(fmt.Sprintf("error in fetching previous auto-auth token: %v", err)) + if persistConfig.ExitOnErr { + return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") + } + } + var previousToken string + if len(oldTokenBytes) > 0 { + oldToken, err := cachememdb.Deserialize(oldTokenBytes) + if err != nil { + logger.Error(fmt.Sprintf("error in deserializing previous auto-auth token cache entry: %v", err)) + if persistConfig.ExitOnErr { + return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") + } + } + previousToken = oldToken.Token + } + + // If keep_after_import is true, set persistent storage layer in + // leaseCache, else remove db file + if persistConfig.KeepAfterImport { + leaseCache.SetPersistentStorage(ps) + return ps.Close, previousToken, nil + } else { + if err := ps.Close(); err != nil { + logger.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err)) + } + dbFile := filepath.Join(persistConfig.Path, cacheboltdb.DatabaseFileName) + if err := os.Remove(dbFile); err != nil { + logger.Error(fmt.Sprintf("failed to remove persistent storage file %s: %v", dbFile, err)) + if persistConfig.ExitOnErr { + return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") + } + } + return nil, previousToken, nil + } + } else { + km, err := keymanager.NewPassthroughKeyManager(ctx, nil) + if err != nil { + return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err) + } + ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: persistConfig.Path, + Logger: logger.Named("cacheboltdb"), + Wrapper: km.Wrapper(), + AAD: aad, + }) + if err != nil { + return nil, "", fmt.Errorf("error creating persistent cache: %w", err) + } + logger.Info("configured persistent storage", "path", persistConfig.Path) + + // Stash the key material in bolt + token, err := km.RetrievalToken(ctx) + if err != nil { + return nil, "", fmt.Errorf("error getting persistence key: %w", err) + } + if err := ps.StoreRetrievalToken(token); err != nil { + return nil, "", fmt.Errorf("error setting key in persistent cache: %w", err) + } + + leaseCache.SetPersistentStorage(ps) + return ps.Close, "", nil + } +} + +// getServiceAccountJWT attempts to read the service account JWT from the specified token file path. +// Defaults to using the Kubernetes default service account file path if token file path is empty. +func getServiceAccountJWT(tokenFile string) (string, error) { + if len(tokenFile) == 0 { + tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + token, err := os.ReadFile(tokenFile) + if err != nil { + return "", err + } + return strings.TrimSpace(string(token)), nil +} diff --git a/command/agentproxyshared/helpers_test.go b/command/agentproxyshared/helpers_test.go new file mode 100644 index 000000000000..e5f1d6007c86 --- /dev/null +++ b/command/agentproxyshared/helpers_test.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package agentproxyshared + +import ( + "context" + "os" + "testing" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func testNewLeaseCache(t *testing.T, responses []*cache.SendResponse) *cache.LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + lc, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: cache.NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + if err != nil { + t.Fatal(err) + } + return lc +} + +func populateTempFile(t *testing.T, name, contents string) *os.File { + t.Helper() + + file, err := os.CreateTemp(t.TempDir(), name) + if err != nil { + t.Fatal(err) + } + + _, err = file.WriteString(contents) + if err != nil { + t.Fatal(err) + } + + err = file.Close() + if err != nil { + t.Fatal(err) + } + + return file +} + +// Test_AddPersistentStorageToLeaseCache Tests that AddPersistentStorageToLeaseCache() correctly +// adds persistent storage to a lease cache +func Test_AddPersistentStorageToLeaseCache(t *testing.T) { + tempDir := t.TempDir() + serviceAccountTokenFile := populateTempFile(t, "proxy-config.hcl", "token") + + persistConfig := &PersistConfig{ + Type: "kubernetes", + Path: tempDir, + KeepAfterImport: false, + ExitOnErr: false, + ServiceAccountTokenFile: serviceAccountTokenFile.Name(), + } + + leaseCache := testNewLeaseCache(t, nil) + if leaseCache.PersistentStorage() != nil { + t.Fatal("persistent storage was available before ours was added") + } + + deferFunc, token, err := AddPersistentStorageToLeaseCache(context.Background(), leaseCache, persistConfig, logging.NewVaultLogger(hclog.Info)) + if err != nil { + t.Fatal(err) + } + + if leaseCache.PersistentStorage() == nil { + t.Fatal("persistent storage was not added") + } + + if token != "" { + t.Fatal("expected token to be empty") + } + + if deferFunc == nil { + t.Fatal("expected deferFunc to not be nil") + } +} diff --git a/command/agent/sink/file/file_sink.go b/command/agentproxyshared/sink/file/file_sink.go similarity index 80% rename from command/agent/sink/file/file_sink.go rename to command/agentproxyshared/sink/file/file_sink.go index 572320fc69ec..6e1b71aa2e58 100644 --- a/command/agent/sink/file/file_sink.go +++ b/command/agentproxyshared/sink/file/file_sink.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package file @@ -12,13 +12,15 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" ) // fileSink is a Sink implementation that writes a token to a file type fileSink struct { path string mode os.FileMode + owner int + group int logger hclog.Logger } @@ -33,6 +35,8 @@ func NewFileSink(conf *sink.SinkConfig) (sink.Sink, error) { f := &fileSink{ logger: conf.Logger, mode: 0o640, + owner: os.Getuid(), + group: os.Getgid(), } pathRaw, ok := conf.Config["path"] @@ -61,11 +65,31 @@ func NewFileSink(conf *sink.SinkConfig) (sink.Sink, error) { f.mode = os.FileMode(mode) } + if ownerRaw, ok := conf.Config["owner"]; ok { + owner, typeOK := ownerRaw.(int) + if !typeOK { + return nil, errors.New("could not parse 'owner' as integer") + } + + f.logger.Debug("overriding default file sink", "owner", owner) + f.owner = owner + } + + if groupRaw, ok := conf.Config["group"]; ok { + group, typeOK := groupRaw.(int) + if !typeOK { + return nil, errors.New("could not parse 'group' as integer") + } + + f.logger.Debug("overriding default file sink", "group", group) + f.group = group + } + if err := f.WriteToken(""); err != nil { return nil, fmt.Errorf("error during write check: %w", err) } - f.logger.Info("file sink configured", "path", f.path, "mode", f.mode) + f.logger.Info("file sink configured", "path", f.path, "mode", f.mode, "owner", f.owner, "group", f.group) return f, nil } @@ -93,6 +117,10 @@ func (f *fileSink) WriteToken(token string) error { return fmt.Errorf("error opening temp file in dir %s for writing: %w", targetDir, err) } + if err := tmpFile.Chown(f.owner, f.group); err != nil { + return fmt.Errorf("error changing ownership of %s: %w", tmpFile.Name(), err) + } + valToWrite := token if token == "" { valToWrite = u diff --git a/command/agentproxyshared/sink/file/file_sink_test.go b/command/agentproxyshared/sink/file/file_sink_test.go new file mode 100644 index 000000000000..de7840748d14 --- /dev/null +++ b/command/agentproxyshared/sink/file/file_sink_test.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package file + +import ( + "os" + "path/filepath" + "syscall" + "testing" + + hclog "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func testFileSink(t *testing.T, log hclog.Logger) (*sink.SinkConfig, string) { + tmpDir := t.TempDir() + + path := filepath.Join(tmpDir, "token") + + config := &sink.SinkConfig{ + Logger: log.Named("sink.file"), + Config: map[string]interface{}{ + "path": path, + }, + } + + s, err := NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = s + + return config, tmpDir +} + +func TestFileSink(t *testing.T) { + log := logging.NewVaultLogger(hclog.Trace) + + fs, tmpDir := testFileSink(t, log) + defer os.RemoveAll(tmpDir) + + path := filepath.Join(tmpDir, "token") + + uuidStr, _ := uuid.GenerateUUID() + if err := fs.WriteToken(uuidStr); err != nil { + t.Fatal(err) + } + + file, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + if fi.Mode() != os.FileMode(0o640) { + t.Fatalf("wrong file mode was detected at %s", path) + } + err = file.Close() + if err != nil { + t.Fatal(err) + } + + fileBytes, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + if string(fileBytes) != uuidStr { + t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) + } +} + +func testFileSinkMode(t *testing.T, log hclog.Logger, gid int) (*sink.SinkConfig, string) { + tmpDir := t.TempDir() + + path := filepath.Join(tmpDir, "token") + + config := &sink.SinkConfig{ + Logger: log.Named("sink.file"), + Config: map[string]interface{}{ + "path": path, + "mode": 0o644, + "group": gid, + }, + } + + s, err := NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = s + + return config, tmpDir +} + +func TestFileSinkMode(t *testing.T) { + log := logging.NewVaultLogger(hclog.Trace) + + fs, tmpDir := testFileSinkMode(t, log, os.Getegid()) + defer os.RemoveAll(tmpDir) + + path := filepath.Join(tmpDir, "token") + + uuidStr, _ := uuid.GenerateUUID() + if err := fs.WriteToken(uuidStr); err != nil { + t.Fatal(err) + } + + file, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + if fi.Mode() != os.FileMode(0o644) { + t.Fatalf("wrong file mode was detected at %s", path) + } + + fileBytes, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + if string(fileBytes) != uuidStr { + t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) + } +} + +// TestFileSinkMode_Ownership tests that the file is owned by the group specified +// in the configuration. This test requires the current user to be in at least two +// groups. If the user is not in two groups, the test will be skipped. 
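The ownership test that follows exercises the new owner/group options added to file_sink.go above. For orientation, a configuration sketch; the path is hypothetical, and owner/group must be provided as ints, per the type assertions in NewFileSink.

```go
// Sketch: configuring the new owner/group options on the file sink.
func newOwnedFileSinkSketch(logger hclog.Logger) (sink.Sink, error) {
	return file.NewFileSink(&sink.SinkConfig{
		Logger: logger.Named("sink.file"),
		Config: map[string]interface{}{
			"path":  "/var/run/vault/token", // hypothetical token path
			"mode":  0o640,
			"owner": os.Getuid(), // chown target user for the token file
			"group": os.Getgid(), // chown target group for the token file
		},
	})
}
```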
+func TestFileSinkMode_Ownership(t *testing.T) { + groups, err := os.Getgroups() + if err != nil { + t.Fatal(err) + } + + if len(groups) < 2 { + t.Skip("not enough groups to test file ownership") + } + + // find a group that is not the current group + var gid int + for _, g := range groups { + if g != os.Getegid() { + gid = g + break + } + } + + log := logging.NewVaultLogger(hclog.Trace) + + fs, tmpDir := testFileSinkMode(t, log, gid) + defer os.RemoveAll(tmpDir) + + path := filepath.Join(tmpDir, "token") + + uuidStr, _ := uuid.GenerateUUID() + if err := fs.WriteToken(uuidStr); err != nil { + t.Fatal(err) + } + + file, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + if fi.Mode() != os.FileMode(0o644) { + t.Fatalf("wrong file mode was detected at %s", path) + } + // check if file is owned by the group + if fi.Sys().(*syscall.Stat_t).Gid != uint32(gid) { + t.Fatalf("file is not owned by the group %d", gid) + } + + fileBytes, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + if string(fileBytes) != uuidStr { + t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) + } +} diff --git a/command/agent/sink/file/sink_test.go b/command/agentproxyshared/sink/file/sink_test.go similarity index 77% rename from command/agent/sink/file/sink_test.go rename to command/agentproxyshared/sink/file/sink_test.go index 85089053e186..d08e813b1b2b 100644 --- a/command/agent/sink/file/sink_test.go +++ b/command/agentproxyshared/sink/file/sink_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package file @@ -13,9 +13,9 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "github.com/hashicorp/vault/sdk/helper/logging" ) @@ -37,8 +37,10 @@ func TestSinkServer(t *testing.T) { in := make(chan string) sinks := []*sink.SinkConfig{fs1, fs2} errCh := make(chan error) + tokenRenewalInProgress := &atomic.Bool{} + tokenRenewalInProgress.Store(true) go func() { - errCh <- ss.Run(ctx, in, sinks) + errCh <- ss.Run(ctx, in, sinks, tokenRenewalInProgress) }() // Seed a token @@ -67,6 +69,10 @@ func TestSinkServer(t *testing.T) { t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) } } + + if tokenRenewalInProgress.Load() { + t.Fatal("should have reset tokenRenewalInProgress to false") + } } type badSink struct { @@ -104,8 +110,11 @@ func TestSinkServerRetry(t *testing.T) { in := make(chan string) sinks := []*sink.SinkConfig{{Sink: b1}, {Sink: b2}} errCh := make(chan error) + tokenRenewalInProgress := &atomic.Bool{} + tokenRenewalInProgress.Store(true) + go func() { - errCh <- ss.Run(ctx, in, sinks) + errCh <- ss.Run(ctx, in, sinks, tokenRenewalInProgress) }() // Seed a token @@ -120,6 +129,10 @@ func TestSinkServerRetry(t *testing.T) { t.Fatal("bad try count") } + if !tokenRenewalInProgress.Load() { + t.Fatal("token renewal should still be in progress, sink server has not exited") + } + in <- "good" time.Sleep(2 * time.Second) @@ -138,4 +151,8 @@ func TestSinkServerRetry(t *testing.T) { t.Fatal(err) } } + + if tokenRenewalInProgress.Load() { + t.Fatal("should have reset tokenRenewalInProgress to false") + } } diff --git a/command/agent/sink/inmem/inmem_sink.go 
b/command/agentproxyshared/sink/inmem/inmem_sink.go similarity index 84% rename from command/agent/sink/inmem/inmem_sink.go rename to command/agentproxyshared/sink/inmem/inmem_sink.go index abe0ce639709..a9e7ee9a3036 100644 --- a/command/agent/sink/inmem/inmem_sink.go +++ b/command/agentproxyshared/sink/inmem/inmem_sink.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package inmem @@ -7,8 +7,8 @@ import ( "errors" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/cache" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "go.uber.org/atomic" ) diff --git a/command/agent/sink/mock/mock_sink.go b/command/agentproxyshared/sink/mock/mock_sink.go similarity index 76% rename from command/agent/sink/mock/mock_sink.go rename to command/agentproxyshared/sink/mock/mock_sink.go index d5f11dff56c1..c660da790624 100644 --- a/command/agent/sink/mock/mock_sink.go +++ b/command/agentproxyshared/sink/mock/mock_sink.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mock import ( - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" ) type mockSink struct { diff --git a/command/agent/sink/sink.go b/command/agentproxyshared/sink/sink.go similarity index 94% rename from command/agent/sink/sink.go rename to command/agentproxyshared/sink/sink.go index 2b64c1762781..0d3cb78061a7 100644 --- a/command/agent/sink/sink.go +++ b/command/agentproxyshared/sink/sink.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package sink @@ -13,7 +13,7 @@ import ( "sync/atomic" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/dhutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -72,7 +72,7 @@ func NewSinkServer(conf *SinkServerConfig) *SinkServer { // Run executes the server's run loop, which is responsible for reading // in new tokens and pushing them out to the various sinks. 
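The hunk below threads a sync/atomic Bool through Run so callers can observe when a token write is pending. A usage sketch mirroring the updated sink tests above; ss, ctx, in, and sinks are assumed to exist in the caller.

```go
// Sketch: invoking the updated Run signature.
tokenWriteInProgress := &atomic.Bool{}
tokenWriteInProgress.Store(true) // auto-auth produced a token not yet written
errCh := make(chan error)
go func() {
	errCh <- ss.Run(ctx, in, sinks, tokenWriteInProgress)
}()
// Run stores false once the token has been flushed to all sinks (or when it
// exits), letting other components see that no token write remains pending.
```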
-func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*SinkConfig) error { +func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*SinkConfig, tokenWriteInProgress *atomic.Bool) error { latestToken := new(string) writeSink := func(currSink *SinkConfig, currToken string) error { if currToken != *latestToken { @@ -101,6 +101,7 @@ func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*Si ss.logger.Info("starting sink server") defer func() { + tokenWriteInProgress.Store(false) ss.logger.Info("sink server stopped") }() @@ -138,6 +139,7 @@ func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*Si } } else { ss.logger.Trace("no sinks, ignoring new token") + tokenWriteInProgress.Store(false) if ss.exitAfterAuth { ss.logger.Trace("no sinks, exitAfterAuth, bye") return nil @@ -164,8 +166,11 @@ func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*Si sinkCh <- st } } else { - if atomic.LoadInt32(ss.remaining) == 0 && ss.exitAfterAuth { - return nil + if atomic.LoadInt32(ss.remaining) == 0 { + tokenWriteInProgress.Store(false) + if ss.exitAfterAuth { + return nil + } } } } diff --git a/command/agent/winsvc/service.go b/command/agentproxyshared/winsvc/service.go similarity index 87% rename from command/agent/winsvc/service.go rename to command/agentproxyshared/winsvc/service.go index edd234e0c57d..df15971c9ab8 100644 --- a/command/agent/winsvc/service.go +++ b/command/agentproxyshared/winsvc/service.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package winsvc diff --git a/command/agent/winsvc/service_windows.go b/command/agentproxyshared/winsvc/service_windows.go similarity index 96% rename from command/agent/winsvc/service_windows.go rename to command/agentproxyshared/winsvc/service_windows.go index bb16bf97aeea..f3807d9ec7db 100644 --- a/command/agent/winsvc/service_windows.go +++ b/command/agentproxyshared/winsvc/service_windows.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build windows diff --git a/command/approle_concurrency_integ_test.go b/command/approle_concurrency_integ_test.go index 934f8b33fd45..b40dd352c989 100644 --- a/command/approle_concurrency_integ_test.go +++ b/command/approle_concurrency_integ_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,6 @@ import ( "sync" "testing" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" auth "github.com/hashicorp/vault/api/auth/approle" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" @@ -20,9 +19,6 @@ import ( func TestAppRole_Integ_ConcurrentLogins(t *testing.T) { var err error coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, diff --git a/command/audit.go b/command/audit.go index 606de73eef81..1c59140a9a98 100644 --- a/command/audit.go +++ b/command/audit.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*AuditCommand)(nil) diff --git a/command/audit_disable.go b/command/audit_disable.go index ef9288b0859c..79914ea5d993 100644 --- a/command/audit_disable.go +++ b/command/audit_disable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/audit_disable_test.go b/command/audit_disable_test.go index 44b782f4dbe6..786140ee326e 100644 --- a/command/audit_disable_test.go +++ b/command/audit_disable_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testAuditDisableCommand(tb testing.TB) (*cli.MockUi, *AuditDisableCommand) { diff --git a/command/audit_enable.go b/command/audit_enable.go index 652c3c27efef..a163f471cc4f 100644 --- a/command/audit_enable.go +++ b/command/audit_enable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,8 +9,8 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/audit_enable_test.go b/command/audit_enable_test.go index e7dc4ae78604..409f2bc6f255 100644 --- a/command/audit_enable_test.go +++ b/command/audit_enable_test.go @@ -1,15 +1,14 @@ // Copyright (c) HashiCorp, Inc. 
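Aside: these files keep lines like `var _ cli.Command = (*AuditCommand)(nil)` while only the import path moves from `mitchellh/cli` to `hashicorp/cli`. That line is the standard Go compile-time interface assertion, so the import swap is verified by the compiler across every command. A self-contained sketch with a stand-in interface (illustrative; not the real `cli.Command`, though its method set matches the one these commands implement):

```go
package main

import "fmt"

// Command stands in for cli.Command. The blank-identifier assertion below
// costs nothing at runtime but breaks the build if the type ever stops
// satisfying the interface.
type Command interface {
	Help() string
	Run(args []string) int
	Synopsis() string
}

type AuditCommand struct{}

func (c *AuditCommand) Help() string          { return "Usage: vault audit <subcommand>" }
func (c *AuditCommand) Run(args []string) int { return 0 }
func (c *AuditCommand) Synopsis() string      { return "Interact with audit devices" }

// Compile-time proof that *AuditCommand implements Command.
var _ Command = (*AuditCommand)(nil)

func main() {
	fmt.Println((&AuditCommand{}).Synopsis())
}
```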
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( - "io/ioutil" "os" "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testAuditEnableCommand(tb testing.TB) (*cli.MockUi, *AuditEnableCommand) { @@ -169,45 +168,25 @@ func TestAuditEnableCommand_Run(t *testing.T) { client, closer := testVaultServerAllBackends(t) defer closer() - files, err := ioutil.ReadDir("../builtin/audit") - if err != nil { - t.Fatal(err) - } - - var backends []string - for _, f := range files { - if f.IsDir() { - backends = append(backends, f.Name()) - } - } - - for _, b := range backends { + for _, name := range []string{"file", "socket", "syslog"} { ui, cmd := testAuditEnableCommand(t) cmd.client = client - args := []string{ - b, - } - switch b { + args := []string{name} + switch name { case "file": args = append(args, "file_path=discard") case "socket": - args = append(args, "address=127.0.0.1:8888", - "skip_test=true") + args = append(args, "address=127.0.0.1:8888", "skip_test=true") case "syslog": if _, exists := os.LookupEnv("WSLENV"); exists { t.Log("skipping syslog test on WSL") continue } - if os.Getenv("CIRCLECI") == "true" { - // TODO install syslog in docker image we run our tests in - t.Log("skipping syslog test on CircleCI") - continue - } } code := cmd.Run(args) if exp := 0; code != exp { - t.Errorf("type %s, expected %d to be %d - %s", b, code, exp, ui.OutputWriter.String()+ui.ErrorWriter.String()) + t.Errorf("type %s, expected %d to be %d - %s", name, code, exp, ui.OutputWriter.String()+ui.ErrorWriter.String()) } } }) diff --git a/command/audit_list.go b/command/audit_list.go index e5af8525eef3..cf3a16f0f079 100644 --- a/command/audit_list.go +++ b/command/audit_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,8 +8,8 @@ import ( "sort" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/audit_list_test.go b/command/audit_list_test.go index c2e6eacf47b6..43ddbacf91f2 100644 --- a/command/audit_list_test.go +++ b/command/audit_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testAuditListCommand(tb testing.TB) (*cli.MockUi, *AuditListCommand) { diff --git a/command/auth.go b/command/auth.go index e2bdb81c1ea2..57489a186f1d 100644 --- a/command/auth.go +++ b/command/auth.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*AuthCommand)(nil) diff --git a/command/auth_disable.go b/command/auth_disable.go index 735103bdab60..1476b71d0f07 100644 --- a/command/auth_disable.go +++ b/command/auth_disable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
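Aside: the command tests above all share one shape: build the command around a `cli.MockUi`, run it with CLI-style args, then assert on the exit code and the combined output buffers. A self-contained illustration with a toy command (this assumes `hashicorp/cli` keeps the `MockUi` API of the `mitchellh/cli` package it forks, as the mechanical import swap in these hunks suggests):

```go
package demo

import (
	"testing"

	"github.com/hashicorp/cli"
)

// EchoCommand is a toy stand-in for the Vault commands under test.
type EchoCommand struct{ UI cli.Ui }

func (c *EchoCommand) Help() string     { return "echo ARG" }
func (c *EchoCommand) Synopsis() string { return "Echo the first argument" }
func (c *EchoCommand) Run(args []string) int {
	if len(args) == 0 {
		c.UI.Error("missing argument")
		return 1
	}
	c.UI.Output(args[0])
	return 0
}

func TestEchoCommand(t *testing.T) {
	ui := cli.NewMockUi()
	cmd := &EchoCommand{UI: ui}

	// Same assertion style as the diff: on failure, dump stdout+stderr.
	if code := cmd.Run([]string{"hello"}); code != 0 {
		t.Fatalf("expected exit 0, got %d: %s", code, ui.OutputWriter.String()+ui.ErrorWriter.String())
	}
	if got := ui.OutputWriter.String(); got != "hello\n" {
		t.Fatalf("unexpected output: %q", got)
	}
}
```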
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/auth_disable_test.go b/command/auth_disable_test.go index 385bc4ec73ac..f9da8a7d770c 100644 --- a/command/auth_disable_test.go +++ b/command/auth_disable_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testAuthDisableCommand(tb testing.TB) (*cli.MockUi, *AuthDisableCommand) { diff --git a/command/auth_enable.go b/command/auth_enable.go index 7c7af550dbad..dcea5141fcf0 100644 --- a/command/auth_enable.go +++ b/command/auth_enable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -40,6 +40,7 @@ type AuthEnableCommand struct { flagTokenType string flagVersion int flagPluginVersion string + flagIdentityTokenKey string } func (c *AuthEnableCommand) Synopsis() string { @@ -209,6 +210,13 @@ func (c *AuthEnableCommand) Flags() *FlagSets { Usage: "Select the semantic version of the plugin to enable.", }) + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -312,6 +320,10 @@ func (c *AuthEnableCommand) Run(args []string) int { if fl.Name == flagNamePluginVersion { authOpts.Config.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameIdentityTokenKey { + authOpts.Config.IdentityTokenKey = c.flagIdentityTokenKey + } }) if err := client.Sys().EnableAuthWithOptions(authPath, authOpts); err != nil { diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go index 4a4292ce80b5..3467c9b00657 100644 --- a/command/auth_enable_test.go +++ b/command/auth_enable_test.go @@ -1,17 +1,20 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "io/ioutil" + "sort" "strings" "testing" "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/sdk/helper/strutil" ) func testAuthEnableCommand(tb testing.TB) (*cli.MockUi, *AuthEnableCommand) { @@ -96,6 +99,7 @@ func TestAuthEnableCommand_Run(t *testing.T) { "-passthrough-request-headers", "www-authentication", "-allowed-response-headers", "authorization", "-listing-visibility", "unauth", + "-identity-token-key", "default", "userpass", }) if exp := 0; code != exp { @@ -135,6 +139,9 @@ func TestAuthEnableCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) } + if diff := deep.Equal("default", authInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. 
Difference is: %v", diff) + } }) t.Run("communication_failure", func(t *testing.T) { @@ -180,7 +187,7 @@ func TestAuthEnableCommand_Run(t *testing.T) { var backends []string for _, f := range files { - if f.IsDir() { + if f.IsDir() && f.Name() != "token" { backends = append(backends, f.Name()) } } @@ -205,12 +212,11 @@ func TestAuthEnableCommand_Run(t *testing.T) { // of credential backends. backends = append(backends, "pcf") - // Add 1 to account for the "token" backend, which is visible when you walk the filesystem but - // is treated as special and excluded from the registry. - // Subtract 1 to account for "oidc" which is an alias of "jwt" and not a separate plugin. - expected := len(builtinplugins.Registry.Keys(consts.PluginTypeCredential)) - if len(backends) != expected { - t.Fatalf("expected %d credential backends, got %d", expected, len(backends)) + regkeys := strutil.StrListDelete(builtinplugins.Registry.Keys(consts.PluginTypeCredential), "oidc") + sort.Strings(regkeys) + sort.Strings(backends) + if d := cmp.Diff(regkeys, backends); len(d) > 0 { + t.Fatalf("found credential registry mismatch: %v", d) } for _, b := range backends { diff --git a/command/auth_help.go b/command/auth_help.go index 34b6b9ffa930..68365e737c3b 100644 --- a/command/auth_help.go +++ b/command/auth_help.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/auth_help_test.go b/command/auth_help_test.go index a83695ee3e8f..3c0218b24772 100644 --- a/command/auth_help_test.go +++ b/command/auth_help_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" - + "github.com/hashicorp/cli" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" ) diff --git a/command/auth_list.go b/command/auth_list.go index 25103a14dc5a..d095156e155b 100644 --- a/command/auth_list.go +++ b/command/auth_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,8 +9,8 @@ import ( "strconv" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/auth_list_test.go b/command/auth_list_test.go index 2e96f9f2ca07..087010a8ce35 100644 --- a/command/auth_list_test.go +++ b/command/auth_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testAuthListCommand(tb testing.TB) (*cli.MockUi, *AuthListCommand) { diff --git a/command/auth_move.go b/command/auth_move.go index 2af5ab65130a..3ede5fc49fc9 100644 --- a/command/auth_move.go +++ b/command/auth_move.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/auth_move_test.go b/command/auth_move_test.go index 877afd27bfee..0b585e7e0031 100644 --- a/command/auth_move_test.go +++ b/command/auth_move_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testAuthMoveCommand(tb testing.TB) (*cli.MockUi, *AuthMoveCommand) { diff --git a/command/auth_test.go b/command/auth_test.go index dd8abb07f7e6..e6c895df8fe1 100644 --- a/command/auth_test.go +++ b/command/auth_test.go @@ -1,13 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "testing" - "github.com/mitchellh/cli" - + "github.com/hashicorp/cli" "github.com/hashicorp/vault/command/token" ) diff --git a/command/auth_tune.go b/command/auth_tune.go index a7a09797f906..56c2d25fae96 100644 --- a/command/auth_tune.go +++ b/command/auth_tune.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -39,6 +39,7 @@ type AuthTuneCommand struct { flagUserLockoutDuration time.Duration flagUserLockoutCounterResetDuration time.Duration flagUserLockoutDisable bool + flagIdentityTokenKey string } func (c *AuthTuneCommand) Synopsis() string { @@ -195,6 +196,13 @@ func (c *AuthTuneCommand) Flags() *FlagSets { "the plugin catalog, and will not start running until the plugin is reloaded.", }) + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -294,6 +302,10 @@ func (c *AuthTuneCommand) Run(args []string) int { if fl.Name == flagNamePluginVersion { mountConfigInput.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameIdentityTokenKey { + mountConfigInput.IdentityTokenKey = c.flagIdentityTokenKey + } }) // Append /auth (since that's where auths live) and a trailing slash to diff --git a/command/auth_tune_test.go b/command/auth_tune_test.go index aabcd8396020..c9b7923d83de 100644 --- a/command/auth_tune_test.go +++ b/command/auth_tune_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
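Aside: both `auth enable` and `auth tune` send `IdentityTokenKey` only from inside an `f.Visit` callback. `flag.FlagSet.Visit` walks just the flags the user explicitly set, so the client-side default of `"default"` never clobbers a value already configured on the server. A minimal demonstration of that stdlib mechanism:

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	tokenKey := fs.String("identity-token-key", "default", "key used to sign plugin identity tokens")
	if err := fs.Parse([]string{"-identity-token-key", "my-key"}); err != nil {
		panic(err)
	}

	// Visit walks only the flags that were explicitly set on the command
	// line, so a field is sent to the server only when the user asked for it.
	fs.Visit(func(fl *flag.Flag) {
		if fl.Name == "identity-token-key" {
			fmt.Println("sending identity_token_key =", *tokenKey)
		}
	})
}
```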
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,9 +8,9 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/mitchellh/cli" ) func testAuthTuneCommand(tb testing.TB) (*cli.MockUi, *AuthTuneCommand) { @@ -78,8 +78,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Run("flags_all", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -120,6 +119,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { "-allowed-response-headers", "authorization,www-authentication", "-listing-visibility", "unauth", "-plugin-version", version, + "-identity-token-key", "default", "my-auth/", }) if exp := 0; code != exp { @@ -168,6 +168,9 @@ func TestAuthTuneCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) } + if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff) + } }) t.Run("flags_description", func(t *testing.T) { diff --git a/command/base.go b/command/base.go index 72071587dc2d..47f7be04a8bc 100644 --- a/command/base.go +++ b/command/base.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,18 +8,22 @@ import ( "flag" "fmt" "io" - "io/ioutil" + "net/http" "os" "regexp" "strings" "sync" "time" + "github.com/hashicorp/cli" + hcpvlib "github.com/hashicorp/vault-hcp-lib" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/token" + "github.com/hashicorp/vault/api/cliconfig" + "github.com/hashicorp/vault/api/tokenhelper" + "github.com/hashicorp/vault/command/config" "github.com/hashicorp/vault/helper/namespace" "github.com/mattn/go-isatty" - "github.com/mitchellh/cli" + "github.com/mitchellh/go-homedir" "github.com/pkg/errors" "github.com/posener/complete" ) @@ -42,20 +46,20 @@ type BaseCommand struct { flags *FlagSets flagsOnce sync.Once - flagAddress string - flagAgentAddress string - flagCACert string - flagCAPath string - flagClientCert string - flagClientKey string - flagNamespace string - flagNS string - flagPolicyOverride bool - flagTLSServerName string - flagTLSSkipVerify bool - flagDisableRedirects bool - flagWrapTTL time.Duration - flagUnlockKey string + flagAddress string + flagAgentProxyAddress string + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagNamespace string + flagNS string + flagPolicyOverride bool + flagTLSServerName string + flagTLSSkipVerify bool + flagDisableRedirects bool + flagWrapTTL time.Duration + flagUnlockKey string flagFormat string flagField string @@ -63,12 +67,14 @@ type BaseCommand struct { flagOutputCurlString bool flagOutputPolicy bool flagNonInteractive bool + addrWarning string flagMFA []string flagHeader map[string]string - tokenHelper token.TokenHelper + tokenHelper tokenhelper.TokenHelper + hcpTokenHelper hcpvlib.HCPTokenHelper client *api.Client } @@ -78,6 +84,15 @@ type BaseCommand struct { func (c *BaseCommand) Client() 
(*api.Client, error) { // Read the test client if present if c.client != nil { + // Ignoring homedir errors here and moving on to avoid + // spamming user with warnings/errors that homedir isn't set. + path, err := homedir.Dir() + if err == nil { + if err := c.applyHCPConfig(path); err != nil { + return nil, err + } + } + return c.client, nil } @@ -90,8 +105,8 @@ func (c *BaseCommand) Client() (*api.Client, error) { if c.flagAddress != "" { config.Address = c.flagAddress } - if c.flagAgentAddress != "" { - config.Address = c.flagAgentAddress + if c.flagAgentProxyAddress != "" { + config.Address = c.flagAgentProxyAddress } if c.flagOutputCurlString { @@ -186,32 +201,89 @@ func (c *BaseCommand) Client() (*api.Client, error) { c.client = client + // Ignoring homedir errors here and moving on to avoid + // spamming user with warnings/errors that homedir isn't set. + path, err := homedir.Dir() + if err == nil { + if err := c.applyHCPConfig(path); err != nil { + return nil, err + } + } + + if c.addrWarning != "" && c.UI != nil { + if os.Getenv("VAULT_ADDR") == "" && !c.flags.hadAddressFlag { + if !c.flagNonInteractive && isatty.IsTerminal(os.Stdin.Fd()) { + c.UI.Warn(wrapAtLength(c.addrWarning)) + } + } + } + return client, nil } +func (c *BaseCommand) applyHCPConfig(path string) error { + if c.hcpTokenHelper == nil { + c.hcpTokenHelper = c.HCPTokenHelper() + } + + hcpToken, err := c.hcpTokenHelper.GetHCPToken(path) + if err != nil { + return err + } + + if hcpToken != nil { + cookie := &http.Cookie{ + Name: "hcp_access_token", + Value: hcpToken.AccessToken, + Expires: hcpToken.AccessTokenExpiry, + } + + if err := c.client.SetHCPCookie(cookie); err != nil { + return fmt.Errorf("unable to correctly connect to the HCP Vault cluster; please reconnect to HCP: %w", err) + } + + if err := c.client.SetAddress(hcpToken.ProxyAddr); err != nil { + return fmt.Errorf("unable to correctly set the HCP address: %w", err) + } + + // remove address warning since address was set to HCP's address + c.addrWarning = "" + } + + return nil +} + // SetAddress sets the token helper on the command; useful for the demo server and other outside cases. func (c *BaseCommand) SetAddress(addr string) { c.flagAddress = addr } // SetTokenHelper sets the token helper on the command. -func (c *BaseCommand) SetTokenHelper(th token.TokenHelper) { +func (c *BaseCommand) SetTokenHelper(th tokenhelper.TokenHelper) { c.tokenHelper = th } // TokenHelper returns the token helper attached to the command. -func (c *BaseCommand) TokenHelper() (token.TokenHelper, error) { +func (c *BaseCommand) TokenHelper() (tokenhelper.TokenHelper, error) { if c.tokenHelper != nil { return c.tokenHelper, nil } - helper, err := DefaultTokenHelper() + helper, err := cliconfig.DefaultTokenHelper() if err != nil { return nil, err } return helper, nil } +// HCPTokenHelper returns the HCPToken helper attached to the command. +func (c *BaseCommand) HCPTokenHelper() hcpvlib.HCPTokenHelper { + if c.hcpTokenHelper != nil { + return c.hcpTokenHelper + } + return config.DefaultHCPTokenHelper() +} + // DefaultWrappingLookupFunc is the default wrapping function based on the // CLI flag. 
func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { @@ -222,7 +294,7 @@ func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { return api.DefaultWrappingLookupFunc(operation, path) } -// getValidationRequired checks to see if the secret exists and has an MFA +// getMFAValidationRequired checks to see if the secret exists and has an MFA // requirement. If MFA is required and the number of constraints is greater than // 1, we can assert that interactive validation is not required. func (c *BaseCommand) getMFAValidationRequired(secret *api.Secret) bool { @@ -321,16 +393,18 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { Completion: complete.PredictAnything, Usage: "Address of the Vault server.", } + if c.flagAddress != "" { addrStringVar.Default = c.flagAddress } else { addrStringVar.Default = "https://127.0.0.1:8200" + c.addrWarning = fmt.Sprintf("WARNING! VAULT_ADDR and -address unset. Defaulting to %s.", addrStringVar.Default) } f.StringVar(addrStringVar) agentAddrStringVar := &StringVar{ Name: "agent-address", - Target: &c.flagAgentAddress, + Target: &c.flagAgentProxyAddress, EnvVar: api.EnvVaultAgentAddr, Completion: complete.PredictAnything, Usage: "Address of the Agent.", @@ -552,6 +626,10 @@ type FlagSets struct { hiddens map[string]struct{} completions complete.Flags ui cli.Ui + // hadAddressFlag signals if the FlagSet had an -address + // flag set, for the purposes of warning (see also: + // BaseCommand::addrWarning). + hadAddressFlag bool } // NewFlagSets creates a new flag sets. @@ -560,7 +638,7 @@ func NewFlagSets(ui cli.Ui) *FlagSets { // Errors and usage are controlled by the CLI. mainSet.Usage = func() {} - mainSet.SetOutput(ioutil.Discard) + mainSet.SetOutput(io.Discard) return &FlagSets{ flagSets: make([]*FlagSet, 0, 6), @@ -588,16 +666,34 @@ func (f *FlagSets) Completions() complete.Flags { type ( ParseOptions interface{} ParseOptionAllowRawFormat bool + DisableDisplayFlagWarning bool ) // Parse parses the given flags, returning any errors. // Warnings, if any, regarding the arguments format are sent to stdout func (f *FlagSets) Parse(args []string, opts ...ParseOptions) error { + // Before parsing, check to see if we have an address flag, for the + // purposes of warning later. This must be done now, as the argument + // will be removed during parsing. + for _, arg := range args { + if strings.HasPrefix(arg, "-address") { + f.hadAddressFlag = true + } + } + err := f.mainSet.Parse(args) - warnings := generateFlagWarnings(f.Args()) - if warnings != "" && Format(f.ui) == "table" { - f.ui.Warn(warnings) + displayFlagWarningsDisabled := false + for _, opt := range opts { + if value, ok := opt.(DisableDisplayFlagWarning); ok { + displayFlagWarningsDisabled = bool(value) + } + } + if !displayFlagWarningsDisabled { + warnings := generateFlagWarnings(f.Args()) + if warnings != "" && Format(f.ui) == "table" { + f.ui.Warn(warnings) + } } if err != nil { diff --git a/command/base_flags.go b/command/base_flags.go index c15b7b65551e..3f3fc1abdad1 100644 --- a/command/base_flags.go +++ b/command/base_flags.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
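Aside: `FlagSets.Parse` now records whether `-address` appeared *before* parsing, because the argument is consumed by the `flag` package and cannot be detected afterwards; the new warning in `Client()` then fires only when neither `VAULT_ADDR` nor `-address` was given. The detection itself reduces to a prefix scan over the raw args:

```go
package main

import (
	"fmt"
	"strings"
)

// hadAddressFlag reproduces the pre-scan from FlagSets.Parse: the -address
// argument disappears during flag parsing, so the "VAULT_ADDR and -address
// unset" warning has to detect it up front. The prefix check covers both the
// "-address=..." and "-address ..." forms.
func hadAddressFlag(args []string) bool {
	for _, arg := range args {
		if strings.HasPrefix(arg, "-address") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hadAddressFlag([]string{"-address=http://127.0.0.1:8200", "status"})) // true
	fmt.Println(hadAddressFlag([]string{"status"}))                                   // false
}
```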
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -594,7 +594,7 @@ type DurationVar struct { func (f *FlagSet) DurationVar(i *DurationVar) { initial := i.Default if v, exist := os.LookupEnv(i.EnvVar); exist { - if d, err := time.ParseDuration(appendDurationSuffix(v)); err == nil { + if d, err := parseutil.ParseDurationSecond(v); err == nil { initial = d } } @@ -634,7 +634,7 @@ func (d *durationValue) Set(s string) error { s = "-1" } - v, err := time.ParseDuration(appendDurationSuffix(s)) + v, err := parseutil.ParseDurationSecond(s) if err != nil { return err } @@ -989,33 +989,3 @@ func (d *timeValue) Get() interface{} { return *d.target } func (d *timeValue) String() string { return (*d.target).String() } func (d *timeValue) Example() string { return "time" } func (d *timeValue) Hidden() bool { return d.hidden } - -// -- helpers -func envDefault(key, def string) string { - if v, exist := os.LookupEnv(key); exist { - return v - } - return def -} - -func envBoolDefault(key string, def bool) bool { - if v, exist := os.LookupEnv(key); exist { - b, err := strconv.ParseBool(v) - if err != nil { - panic(err) - } - return b - } - return def -} - -func envDurationDefault(key string, def time.Duration) time.Duration { - if v, exist := os.LookupEnv(key); exist { - d, err := time.ParseDuration(v) - if err != nil { - panic(err) - } - return d - } - return def -} diff --git a/command/base_flags_test.go b/command/base_flags_test.go index 580e163de1f9..ea3561ef8f10 100644 --- a/command/base_flags_test.go +++ b/command/base_flags_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/base_helpers.go b/command/base_helpers.go index 2595dc56a6a5..06e8b8021932 100644 --- a/command/base_helpers.go +++ b/command/base_helpers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -80,7 +80,7 @@ func ensureNoLeadingSlash(s string) string { return s } -// columnOuput prints the list of items as a table with no headers. +// columnOutput prints the list of items as a table with no headers. func columnOutput(list []string, c *columnize.Config) string { if len(list) == 0 { return "" diff --git a/command/base_helpers_test.go b/command/base_helpers_test.go index 50cd26441ffa..c5268007368b 100644 --- a/command/base_helpers_test.go +++ b/command/base_helpers_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -61,7 +61,7 @@ func TestParseArgsData(t *testing.T) { if err != nil { t.Fatal(err) } - f.Write([]byte(`{"foo":"bar"}`)) + f.WriteString(`{"foo":"bar"}`) f.Close() defer os.Remove(f.Name()) @@ -82,7 +82,7 @@ func TestParseArgsData(t *testing.T) { if err != nil { t.Fatal(err) } - f.Write([]byte(`bar`)) + f.WriteString(`bar`) f.Close() defer os.Remove(f.Name()) diff --git a/command/base_predict.go b/command/base_predict.go index ee2a771c7a96..ed3edfa30f91 100644 --- a/command/base_predict.go +++ b/command/base_predict.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
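Aside: plain `time.ParseDuration` rejects a bare `"30"`; the removed code papered over that with `appendDurationSuffix`, and `parseutil.ParseDurationSecond` bakes the same unit-less-means-seconds convention into one shared helper instead. A small demo, assuming `parseutil` here is `github.com/hashicorp/go-secure-stdlib/parseutil` (the go-secure-stdlib module is already imported elsewhere in this diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// A bare integer is interpreted as seconds, matching how Vault's API
	// accepts durations; time.ParseDuration alone would reject this input.
	d, err := parseutil.ParseDurationSecond("30")
	fmt.Println(d, err) // 30s <nil>

	// Unit-suffixed strings still parse as usual.
	d, err = parseutil.ParseDurationSecond("5m")
	fmt.Println(d, err) // 5m0s <nil>
}
```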
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,6 +10,7 @@ import ( "sync" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/api/cliconfig" "github.com/posener/complete" ) @@ -28,7 +29,7 @@ func (p *Predict) Client() *api.Client { client, _ := api.NewClient(nil) if client.Token() == "" { - helper, err := DefaultTokenHelper() + helper, err := cliconfig.DefaultTokenHelper() if err != nil { return } diff --git a/command/base_predict_test.go b/command/base_predict_test.go index 20af0f68810f..387b8f0b84f6 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,6 +7,7 @@ import ( "reflect" "testing" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/api" "github.com/posener/complete" @@ -350,7 +351,6 @@ func TestPredict_Plugins(t *testing.T) { "aws", "azure", "cassandra-database-plugin", - "centrify", "cert", "cf", "consul", @@ -389,6 +389,7 @@ func TestPredict_Plugins(t *testing.T) { "redis-database-plugin", "redis-elasticache-database-plugin", "redshift-database-plugin", + "saml", "snowflake-database-plugin", "ssh", "terraform", @@ -435,8 +436,16 @@ func TestPredict_Plugins(t *testing.T) { } } } - if !reflect.DeepEqual(act, tc.exp) { - t.Errorf("expected: %q, got: %q, diff: %v", tc.exp, act, strutil.Difference(act, tc.exp, true)) + if !strutil.StrListContains(act, "saml") { + for i, v := range tc.exp { + if v == "saml" { + tc.exp = append(tc.exp[:i], tc.exp[i+1:]...) + break + } + } + } + if d := cmp.Diff(act, tc.exp); len(d) > 0 { + t.Errorf("expected: %q, got: %q, diff: %v", tc.exp, act, d) } }) } diff --git a/command/base_test.go b/command/base_test.go index af4f0a4d3b62..2be878a09b20 100644 --- a/command/base_test.go +++ b/command/base_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,6 +7,11 @@ import ( "net/http" "reflect" "testing" + + hcpvlib "github.com/hashicorp/vault-hcp-lib" + "github.com/hashicorp/vault/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func getDefaultCliHeaders(t *testing.T) http.Header { @@ -70,3 +75,37 @@ func TestClient_FlagHeader(t *testing.T) { } } } + +// TestClient_HCPConfiguration tests that the HCP configuration is applied correctly when it exists in cache. 
+func TestClient_HCPConfiguration(t *testing.T) { + cases := map[string]struct { + Valid bool + ExpectedAddr string + }{ + "valid hcp configuration": { + Valid: true, + ExpectedAddr: "https://hcp-proxy.addr:8200", + }, + "empty hcp configuration": { + Valid: false, + ExpectedAddr: api.DefaultAddress, + }, + } + + for n, tst := range cases { + t.Run(n, func(t *testing.T) { + bc := &BaseCommand{hcpTokenHelper: &hcpvlib.TestingHCPTokenHelper{tst.Valid}} + cli, err := bc.Client() + assert.NoError(t, err) + + if tst.Valid { + require.Equal(t, tst.ExpectedAddr, cli.Address()) + require.NotEmpty(t, cli.HCPCookie()) + require.Contains(t, cli.HCPCookie(), "hcp_access_token=Test.Access.Token") + } else { + require.Equal(t, tst.ExpectedAddr, cli.Address()) + require.Empty(t, cli.HCPCookie()) + } + }) + } +} diff --git a/command/command_stubs_oss.go b/command/command_stubs_oss.go new file mode 100644 index 000000000000..5d9331db2e83 --- /dev/null +++ b/command/command_stubs_oss.go @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package command + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +import ( + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/vault" +) + +func entInitCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) { +} + +func entAdjustCoreConfig(config *server.Config, coreConfig *vault.CoreConfig) { +} + +func entCheckStorageType(coreConfig *vault.CoreConfig) bool { + return true +} + +func entGetFIPSInfoKey() string { + return "" +} + +func entCheckRequestLimiter(_cmd *ServerCommand, _config *server.Config) { +} + +func entExtendAddonHandlers(handlers *vaultHandlers) {} diff --git a/command/command_test.go b/command/command_test.go index 73719d583c2f..def68c4fbc89 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
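Aside: `TestClient_HCPConfiguration` exercises the new HCP path in `Client()`: when a cached HCP token exists, it is attached as an `hcp_access_token` cookie and the client is re-pointed at the HCP proxy address. A sketch of that flow against a stand-in client; `SetHCPCookie` and `SetAddress` are the real `api.Client` methods named in the diff, everything else here is illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// hcpClient is a stand-in for the two api.Client methods this diff relies on.
type hcpClient interface {
	SetHCPCookie(c *http.Cookie) error
	SetAddress(addr string) error
}

type fakeClient struct{ addr string }

func (f *fakeClient) SetHCPCookie(c *http.Cookie) error { fmt.Println("cookie set:", c.Name); return nil }
func (f *fakeClient) SetAddress(addr string) error      { f.addr = addr; return nil }

// applyHCP mirrors the shape of BaseCommand.applyHCPConfig: attach the cached
// access token as a cookie, then retarget the client at the HCP proxy.
func applyHCP(c hcpClient, accessToken, proxyAddr string, expiry time.Time) error {
	if err := c.SetHCPCookie(&http.Cookie{
		Name:    "hcp_access_token",
		Value:   accessToken,
		Expires: expiry,
	}); err != nil {
		return fmt.Errorf("unable to connect to the HCP Vault cluster: %w", err)
	}
	return c.SetAddress(proxyAddr)
}

func main() {
	fc := &fakeClient{}
	if err := applyHCP(fc, "Test.Access.Token", "https://hcp-proxy.addr:8200", time.Now().Add(time.Hour)); err != nil {
		panic(err)
	}
	fmt.Println("client address:", fc.addr)
}
```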
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -12,25 +12,21 @@ import ( "testing" "time" + "github.com/hashicorp/cli" log "github.com/hashicorp/go-hclog" kv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" "github.com/hashicorp/vault/builtin/logical/pki" "github.com/hashicorp/vault/builtin/logical/ssh" "github.com/hashicorp/vault/builtin/logical/transit" - "github.com/hashicorp/vault/helper/benchhelpers" "github.com/hashicorp/vault/helper/builtinplugins" - "github.com/hashicorp/vault/sdk/helper/logging" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" - "github.com/mitchellh/cli" - - auditFile "github.com/hashicorp/vault/builtin/audit/file" - credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" ) var ( @@ -41,7 +37,7 @@ var ( } defaultVaultAuditBackends = map[string]audit.Factory{ - "file": auditFile.Factory, + "file": audit.NewFileBackend, } defaultVaultLogicalBackends = map[string]logical.Factory{ @@ -71,6 +67,50 @@ func testVaultServer(tb testing.TB) (*api.Client, func()) { return client, closer } +func testVaultServerWithSecrets(ctx context.Context, tb testing.TB) (*api.Client, func()) { + tb.Helper() + + client, _, closer := testVaultServerUnseal(tb) + + // enable kv-v1 backend + if err := client.Sys().Mount("kv-v1/", &api.MountInput{ + Type: "kv-v1", + }); err != nil { + tb.Fatal(err) + } + + // enable kv-v2 backend + if err := client.Sys().Mount("kv-v2/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + tb.Fatal(err) + } + + // populate dummy secrets + for _, path := range []string{ + "foo", + "app-1/foo", + "app-1/bar", + "app-1/nested/baz", + } { + if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ + "user": "test", + "password": "Hashi123", + }); err != nil { + tb.Fatal(err) + } + + if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ + "user": "test", + "password": "Hashi123", + }); err != nil { + tb.Fatal(err) + } + } + + return client, closer +} + func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) { tb.Helper() @@ -81,13 +121,11 @@ func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { tb.Helper() + handlers := newVaultHandlers() client, _, closer := testVaultServerCoreConfig(tb, &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: defaultVaultLogger, - CredentialBackends: credentialBackends, - AuditBackends: auditBackends, - LogicalBackends: logicalBackends, + CredentialBackends: handlers.credentialBackends, + AuditBackends: handlers.auditBackends, + LogicalBackends: handlers.logicalBackends, BuiltinRegistry: builtinplugins.Registry, }) return client, closer @@ -96,11 +134,8 @@ func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { // testVaultServerAutoUnseal creates a test vault cluster and sets it up with auto unseal // the function returns a client, the recovery keys, and a closer function func testVaultServerAutoUnseal(tb testing.TB) (*api.Client, []string, func()) { - testSeal := seal.NewTestSeal(nil) - autoSeal, err := vault.NewAutoSeal(testSeal) - if err 
!= nil { - tb.Fatal("unable to create autoseal", err) - } + testSeal, _ := seal.NewTestSeal(nil) + autoSeal := vault.NewAutoSeal(testSeal) return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", autoSeal) } @@ -112,16 +147,8 @@ func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) { func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, seal vault.Seal) (*api.Client, []string, func()) { tb.Helper() - logger := log.NewInterceptLogger(&log.LoggerOptions{ - Output: log.DefaultOutput, - Level: log.Debug, - JSONFormat: logging.ParseEnvLogFormat() == logging.JSONFormat, - }) return testVaultServerCoreConfigWithOpts(tb, &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: logger, CredentialBackends: defaultVaultCredentialBackends, AuditBackends: defaultVaultAuditBackends, LogicalBackends: defaultVaultLogicalBackends, @@ -141,9 +168,6 @@ func testVaultServerPluginDir(tb testing.TB, pluginDir string) (*api.Client, []s tb.Helper() return testVaultServerCoreConfig(tb, &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: defaultVaultLogger, CredentialBackends: defaultVaultCredentialBackends, AuditBackends: defaultVaultAuditBackends, LogicalBackends: defaultVaultLogicalBackends, @@ -165,12 +189,12 @@ func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*ap func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) { tb.Helper() - cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, opts) + cluster := vault.NewTestCluster(tb, coreConfig, opts) cluster.Start() // Make it easy to get access to the active core := cluster.Cores[0].Core - vault.TestWaitActive(benchhelpers.TBtoT(tb), core) + vault.TestWaitActive(tb, core) // Get the client already setup for us! client := cluster.Cores[0].Client @@ -207,8 +231,6 @@ func testVaultServerUninit(tb testing.TB) (*api.Client, func()) { core, err := vault.NewCore(&vault.CoreConfig{ DisableMlock: true, - DisableCache: true, - Logger: defaultVaultLogger, Physical: inm, CredentialBackends: defaultVaultCredentialBackends, AuditBackends: defaultVaultAuditBackends, diff --git a/command/command_testonly/operator_usage_testonly_test.go b/command/command_testonly/operator_usage_testonly_test.go new file mode 100644 index 000000000000..31de4b88eb15 --- /dev/null +++ b/command/command_testonly/operator_usage_testonly_test.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build testonly + +package command_testonly + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command" + "github.com/hashicorp/vault/helper/timeutil" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/clientcountutil" + "github.com/hashicorp/vault/sdk/helper/clientcountutil/generation" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" +) + +func testOperatorUsageCommand(tb testing.TB) (*cli.MockUi, *command.OperatorUsageCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &command.OperatorUsageCommand{ + BaseCommand: &command.BaseCommand{ + UI: ui, + }, + } +} + +// TestOperatorUsageCommandRun writes mock activity log data and runs the +// operator usage command. The test verifies that the output contains the +// expected values per client type. 
+// This test cannot be run in parallel because it sets the VAULT_TOKEN env +// var +func TestOperatorUsageCommandRun(t *testing.T) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }) + defer cluster.Cleanup() + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + + client := cluster.Cores[0].Client + _, err := client.Logical().Write("sys/internal/counters/config", map[string]interface{}{"enabled": "enable"}) + require.NoError(t, err) + + now := time.Now().UTC() + + _, err = clientcountutil.NewActivityLogData(client). + NewPreviousMonthData(1). + NewClientsSeen(6, clientcountutil.WithClientType("entity")). + NewClientsSeen(4, clientcountutil.WithClientType("non-entity-token")). + NewClientsSeen(2, clientcountutil.WithClientType("secret-sync")). + NewClientsSeen(7, clientcountutil.WithClientType("pki-acme")). + NewCurrentMonthData(). + NewClientsSeen(3, clientcountutil.WithClientType("entity")). + NewClientsSeen(4, clientcountutil.WithClientType("non-entity-token")). + NewClientsSeen(5, clientcountutil.WithClientType("secret-sync")). + NewClientsSeen(8, clientcountutil.WithClientType("pki-acme")). + Write(context.Background(), generation.WriteOptions_WRITE_ENTITIES, generation.WriteOptions_WRITE_PRECOMPUTED_QUERIES) + require.NoError(t, err) + + ui, cmd := testOperatorUsageCommand(t) + + t.Setenv("VAULT_TOKEN", client.Token()) + start := timeutil.MonthsPreviousTo(1, now).Format(time.RFC3339) + end := timeutil.EndOfMonth(now).UTC().Format(time.RFC3339) + // Reset and check output + code := cmd.Run([]string{ + "-address", client.Address(), + "-tls-skip-verify", + "-start-time", start, + "-end-time", end, + }) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + output := ui.OutputWriter.String() + outputLines := strings.Split(output, "\n") + require.Equal(t, fmt.Sprintf("Period start: %s", start), outputLines[0]) + require.Equal(t, fmt.Sprintf("Period end: %s", end), outputLines[1]) + + require.Contains(t, outputLines[3], "Secret sync") + require.Contains(t, outputLines[3], "ACME clients") + nsCounts := strings.Fields(outputLines[5]) + require.Equal(t, "[root]", nsCounts[0]) + require.Equal(t, "9", nsCounts[1]) + require.Equal(t, "8", nsCounts[2]) + require.Equal(t, "7", nsCounts[3]) + require.Equal(t, "15", nsCounts[4]) + require.Equal(t, "39", nsCounts[5]) + + totalCounts := strings.Fields(outputLines[7]) + require.Equal(t, "Total", totalCounts[0]) + require.Equal(t, nsCounts[1:], totalCounts[1:]) +} diff --git a/command/commands.go b/command/commands.go index 40fd57963e79..a4253dd192d0 100644 --- a/command/commands.go +++ b/command/commands.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,69 +8,25 @@ import ( "os/signal" "syscall" - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/builtin/plugin" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" - - /* - The builtinplugins package is initialized here because it, in turn, - initializes the database plugins. - They register multiple database drivers for the "database/sql" package. 
- */ - _ "github.com/hashicorp/vault/helper/builtinplugins" - - auditFile "github.com/hashicorp/vault/builtin/audit/file" - auditSocket "github.com/hashicorp/vault/builtin/audit/socket" - auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" - - credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" - credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" - credCF "github.com/hashicorp/vault-plugin-auth-cf" - credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" + "github.com/hashicorp/cli" + hcpvlib "github.com/hashicorp/vault-hcp-lib" credOIDC "github.com/hashicorp/vault-plugin-auth-jwt" - credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" - credOCI "github.com/hashicorp/vault-plugin-auth-oci" - credAws "github.com/hashicorp/vault/builtin/credential/aws" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/audit" credCert "github.com/hashicorp/vault/builtin/credential/cert" - credGitHub "github.com/hashicorp/vault/builtin/credential/github" - credLdap "github.com/hashicorp/vault/builtin/credential/ldap" - credOkta "github.com/hashicorp/vault/builtin/credential/okta" credToken "github.com/hashicorp/vault/builtin/credential/token" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - - logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" logicalDb "github.com/hashicorp/vault/builtin/logical/database" - - physAerospike "github.com/hashicorp/vault/physical/aerospike" - physAliCloudOSS "github.com/hashicorp/vault/physical/alicloudoss" - physAzure "github.com/hashicorp/vault/physical/azure" - physCassandra "github.com/hashicorp/vault/physical/cassandra" - physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb" - physConsul "github.com/hashicorp/vault/physical/consul" - physCouchDB "github.com/hashicorp/vault/physical/couchdb" - physDynamoDB "github.com/hashicorp/vault/physical/dynamodb" - physEtcd "github.com/hashicorp/vault/physical/etcd" - physFoundationDB "github.com/hashicorp/vault/physical/foundationdb" - physGCS "github.com/hashicorp/vault/physical/gcs" - physManta "github.com/hashicorp/vault/physical/manta" - physMSSQL "github.com/hashicorp/vault/physical/mssql" - physMySQL "github.com/hashicorp/vault/physical/mysql" - physOCI "github.com/hashicorp/vault/physical/oci" - physPostgreSQL "github.com/hashicorp/vault/physical/postgresql" + "github.com/hashicorp/vault/builtin/plugin" + _ "github.com/hashicorp/vault/helper/builtinplugins" physRaft "github.com/hashicorp/vault/physical/raft" - physS3 "github.com/hashicorp/vault/physical/s3" - physSpanner "github.com/hashicorp/vault/physical/spanner" - physSwift "github.com/hashicorp/vault/physical/swift" - physZooKeeper "github.com/hashicorp/vault/physical/zookeeper" - physFile "github.com/hashicorp/vault/sdk/physical/file" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" physInmem "github.com/hashicorp/vault/sdk/physical/inmem" - sr "github.com/hashicorp/vault/serviceregistration" csr "github.com/hashicorp/vault/serviceregistration/consul" ksr "github.com/hashicorp/vault/serviceregistration/kubernetes" + "github.com/hashicorp/vault/version" ) const ( @@ -96,10 +52,9 @@ const ( // logged at startup _per node_. This was initially introduced for the events // system being developed over multiple release cycles. 
EnvVaultExperiments = "VAULT_EXPERIMENTS" - - // DisableSSCTokens is an env var used to disable index bearing - // token functionality - DisableSSCTokens = "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS" + // EnvVaultPluginTmpdir sets the folder to use for Unix sockets when setting + // up containerized plugins. + EnvVaultPluginTmpdir = "VAULT_PLUGIN_TMPDIR" // flagNameAddress is the flag used in the base command to read in the // address of the Vault server. @@ -140,6 +95,8 @@ const ( flagNameAllowedManagedKeys = "allowed-managed-keys" // flagNamePluginVersion selects what version of a plugin should be used. flagNamePluginVersion = "plugin-version" + // flagNameIdentityTokenKey selects the key used to sign plugin identity tokens + flagNameIdentityTokenKey = "identity-token-key" // flagNameUserLockoutThreshold is the flag name used for tuning the auth mount lockout threshold parameter flagNameUserLockoutThreshold = "user-lockout-threshold" // flagNameUserLockoutDuration is the flag name used for tuning the auth mount lockout duration parameter @@ -152,6 +109,8 @@ const ( flagNameDisableRedirects = "disable-redirects" // flagNameCombineLogs is used to specify whether log output should be combined and sent to stdout flagNameCombineLogs = "combine-logs" + // flagDisableGatedLogs is used to disable gated logs and immediately show the vault logs as they become available + flagDisableGatedLogs = "disable-gated-logs" // flagNameLogFile is used to specify the path to the log file that Vault should use for logging flagNameLogFile = "log-file" // flagNameLogRotateBytes is the flag used to specify the number of bytes a log file should be before it is rotated. @@ -165,96 +124,81 @@ const ( // flagNameLogLevel is used to specify the log level applied to logging // Supported log levels: Trace, Debug, Error, Warn, Info flagNameLogLevel = "log-level" + // flagNameDelegatedAuthAccessors allows operators to specify the allowed mount accessors a backend can delegate + // authentication + flagNameDelegatedAuthAccessors = "delegated-auth-accessors" ) -var ( - auditBackends = map[string]audit.Factory{ - "file": auditFile.Factory, - "socket": auditSocket.Factory, - "syslog": auditSyslog.Factory, - } - - credentialBackends = map[string]logical.Factory{ - "plugin": plugin.Factory, - } - - logicalBackends = map[string]logical.Factory{ - "plugin": plugin.Factory, - "database": logicalDb.Factory, - // This is also available in the plugin catalog, but is here due to the need to - // automatically mount it. - "kv": logicalKv.Factory, - } +// vaultHandlers contains the handlers for creating the various Vault backends. 
+type vaultHandlers struct { + physicalBackends map[string]physical.Factory + loginHandlers map[string]LoginHandler + auditBackends map[string]audit.Factory + credentialBackends map[string]logical.Factory + logicalBackends map[string]logical.Factory + serviceRegistrations map[string]sr.Factory +} - physicalBackends = map[string]physical.Factory{ - "aerospike": physAerospike.NewAerospikeBackend, - "alicloudoss": physAliCloudOSS.NewAliCloudOSSBackend, - "azure": physAzure.NewAzureBackend, - "cassandra": physCassandra.NewCassandraBackend, - "cockroachdb": physCockroachDB.NewCockroachDBBackend, - "consul": physConsul.NewConsulBackend, - "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend, - "couchdb": physCouchDB.NewCouchDBBackend, - "dynamodb": physDynamoDB.NewDynamoDBBackend, - "etcd": physEtcd.NewEtcdBackend, - "file_transactional": physFile.NewTransactionalFileBackend, - "file": physFile.NewFileBackend, - "foundationdb": physFoundationDB.NewFDBBackend, - "gcs": physGCS.NewBackend, - "inmem_ha": physInmem.NewInmemHA, - "inmem_transactional_ha": physInmem.NewTransactionalInmemHA, - "inmem_transactional": physInmem.NewTransactionalInmem, - "inmem": physInmem.NewInmem, - "manta": physManta.NewMantaBackend, - "mssql": physMSSQL.NewMSSQLBackend, - "mysql": physMySQL.NewMySQLBackend, - "oci": physOCI.NewBackend, - "postgresql": physPostgreSQL.NewPostgreSQLBackend, - "s3": physS3.NewS3Backend, - "spanner": physSpanner.NewBackend, - "swift": physSwift.NewSwiftBackend, - "raft": physRaft.NewRaftBackend, - "zookeeper": physZooKeeper.NewZooKeeperBackend, +// newMinimalVaultHandlers returns a new vaultHandlers that a minimal Vault would use. +func newMinimalVaultHandlers() *vaultHandlers { + return &vaultHandlers{ + physicalBackends: map[string]physical.Factory{ + "inmem_ha": physInmem.NewInmemHA, + "inmem_transactional_ha": physInmem.NewTransactionalInmemHA, + "inmem_transactional": physInmem.NewTransactionalInmem, + "inmem": physInmem.NewInmem, + "raft": physRaft.NewRaftBackend, + }, + loginHandlers: map[string]LoginHandler{ + "cert": &credCert.CLIHandler{}, + "oidc": &credOIDC.CLIHandler{}, + "token": &credToken.CLIHandler{}, + "userpass": &credUserpass.CLIHandler{ + DefaultMount: "userpass", + }, + }, + auditBackends: map[string]audit.Factory{ + "file": audit.NewFileBackend, + "socket": audit.NewSocketBackend, + "syslog": audit.NewSyslogBackend, + }, + credentialBackends: map[string]logical.Factory{ + "plugin": plugin.Factory, + }, + logicalBackends: map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + }, + serviceRegistrations: map[string]sr.Factory{ + "consul": csr.NewServiceRegistration, + "kubernetes": ksr.NewServiceRegistration, + }, } +} - serviceRegistrations = map[string]sr.Factory{ - "consul": csr.NewServiceRegistration, - "kubernetes": ksr.NewServiceRegistration, - } +// newVaultHandlers returns a new vaultHandlers composed of newMinimalVaultHandlers() +// and any addon handlers from Vault CE and Vault Enterprise selected by Go build tags. 
+func newVaultHandlers() *vaultHandlers { + handlers := newMinimalVaultHandlers() + extendAddonHandlers(handlers) + entExtendAddonHandlers(handlers) - initCommandsEnt = func(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) {} -) + return handlers +} func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory { - loginHandlers := map[string]LoginHandler{ - "alicloud": &credAliCloud.CLIHandler{}, - "aws": &credAws.CLIHandler{}, - "centrify": &credCentrify.CLIHandler{}, - "cert": &credCert.CLIHandler{}, - "cf": &credCF.CLIHandler{}, - "gcp": &credGcp.CLIHandler{}, - "github": &credGitHub.CLIHandler{}, - "kerberos": &credKerb.CLIHandler{}, - "ldap": &credLdap.CLIHandler{}, - "oci": &credOCI.CLIHandler{}, - "oidc": &credOIDC.CLIHandler{}, - "okta": &credOkta.CLIHandler{}, - "pcf": &credCF.CLIHandler{}, // Deprecated. - "radius": &credUserpass.CLIHandler{ - DefaultMount: "radius", - }, - "token": &credToken.CLIHandler{}, - "userpass": &credUserpass.CLIHandler{ - DefaultMount: "userpass", - }, - } + handlers := newVaultHandlers() getBaseCommand := func() *BaseCommand { return &BaseCommand{ - UI: ui, - tokenHelper: runOpts.TokenHelper, - flagAddress: runOpts.Address, - client: runOpts.Client, + UI: ui, + tokenHelper: runOpts.TokenHelper, + flagAddress: runOpts.Address, + client: runOpts.Client, + hcpTokenHelper: runOpts.HCPTokenHelper, } } @@ -266,6 +210,12 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co }, ShutdownCh: MakeShutdownCh(), SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + }, nil + }, + "agent generate-config": func() (cli.Command, error) { + return &AgentGenerateConfigCommand{ + BaseCommand: getBaseCommand(), }, nil }, "audit": func() (cli.Command, error) { @@ -311,7 +261,7 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co "auth help": func() (cli.Command, error) { return &AuthHelpCommand{ BaseCommand: getBaseCommand(), - Handlers: loginHandlers, + Handlers: handlers.loginHandlers, }, nil }, "auth list": func() (cli.Command, error) { @@ -368,7 +318,7 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co "login": func() (cli.Command, error) { return &LoginCommand{ BaseCommand: getBaseCommand(), - Handlers: loginHandlers, + Handlers: handlers.loginHandlers, }, nil }, "namespace": func() (cli.Command, error) { @@ -439,7 +389,7 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co "operator migrate": func() (cli.Command, error) { return &OperatorMigrateCommand{ BaseCommand: getBaseCommand(), - PhysicalBackends: physicalBackends, + PhysicalBackends: handlers.physicalBackends, ShutdownCh: MakeShutdownCh(), }, nil }, @@ -483,6 +433,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "operator raft snapshot inspect": func() (cli.Command, error) { + return &OperatorRaftSnapshotInspectCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "operator raft snapshot restore": func() (cli.Command, error) { return &OperatorRaftSnapshotRestoreCommand{ BaseCommand: getBaseCommand(), @@ -518,6 +473,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "operator utilization": func() (cli.Command, error) { + return &OperatorUtilizationCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "operator unseal": func() (cli.Command, error) { return 
&OperatorUnsealCommand{ BaseCommand: getBaseCommand(), @@ -603,6 +563,41 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "plugin runtime": func() (cli.Command, error) { + return &PluginRuntimeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime register": func() (cli.Command, error) { + return &PluginRuntimeRegisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime deregister": func() (cli.Command, error) { + return &PluginRuntimeDeregisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime info": func() (cli.Command, error) { + return &PluginRuntimeInfoCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime list": func() (cli.Command, error) { + return &PluginRuntimeListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "proxy": func() (cli.Command, error) { + return &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + }, nil + }, "policy": func() (cli.Command, error) { return &PolicyCommand{ BaseCommand: getBaseCommand(), @@ -685,12 +680,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co tokenHelper: runOpts.TokenHelper, flagAddress: runOpts.Address, }, - AuditBackends: auditBackends, - CredentialBackends: credentialBackends, - LogicalBackends: logicalBackends, - PhysicalBackends: physicalBackends, - - ServiceRegistrations: serviceRegistrations, + AuditBackends: handlers.auditBackends, + CredentialBackends: handlers.credentialBackends, + LogicalBackends: handlers.logicalBackends, + PhysicalBackends: handlers.physicalBackends, + ServiceRegistrations: handlers.serviceRegistrations, ShutdownCh: MakeShutdownCh(), SighupCh: MakeSighupCh(), @@ -707,6 +701,21 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "transform": func() (cli.Command, error) { + return &TransformCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import": func() (cli.Command, error) { + return &TransformImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import-version": func() (cli.Command, error) { + return &TransformImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "transit": func() (cli.Command, error) { return &TransitCommand{ BaseCommand: getBaseCommand(), @@ -856,10 +865,25 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co }, } - initCommandsEnt(ui, serverCmdUi, runOpts, commands) + entInitCommands(ui, serverCmdUi, runOpts, commands) + initHCPCommands(ui, commands) + return commands } +func initHCPCommands(ui cli.Ui, commands map[string]cli.CommandFactory) { + for cmd, cmdFactory := range hcpvlib.InitHCPCommand(ui) { + // check for conflicts and only put command in the map in case it doesn't conflict with existing one + _, ok := commands[cmd] + if !ok { + commands[cmd] = cmdFactory + } else { + ui.Error("Failed to initialize HCP commands.") + break + } + } +} + // MakeShutdownCh returns a channel that can be used for shutdown // notifications for commands. This channel will send a message for every // SIGINT or SIGTERM received. diff --git a/command/commands_full.go b/command/commands_full.go new file mode 100644 index 000000000000..8db22350cf89 --- /dev/null +++ b/command/commands_full.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !minimal + +package command + +import ( + "maps" + + credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" + credCF "github.com/hashicorp/vault-plugin-auth-cf" + credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" + credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" + credOCI "github.com/hashicorp/vault-plugin-auth-oci" + credAws "github.com/hashicorp/vault/builtin/credential/aws" + credGitHub "github.com/hashicorp/vault/builtin/credential/github" + credLdap "github.com/hashicorp/vault/builtin/credential/ldap" + credOkta "github.com/hashicorp/vault/builtin/credential/okta" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + _ "github.com/hashicorp/vault/helper/builtinplugins" + physAerospike "github.com/hashicorp/vault/physical/aerospike" + physAliCloudOSS "github.com/hashicorp/vault/physical/alicloudoss" + physAzure "github.com/hashicorp/vault/physical/azure" + physCassandra "github.com/hashicorp/vault/physical/cassandra" + physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb" + physConsul "github.com/hashicorp/vault/physical/consul" + physCouchDB "github.com/hashicorp/vault/physical/couchdb" + physDynamoDB "github.com/hashicorp/vault/physical/dynamodb" + physEtcd "github.com/hashicorp/vault/physical/etcd" + physFoundationDB "github.com/hashicorp/vault/physical/foundationdb" + physGCS "github.com/hashicorp/vault/physical/gcs" + physManta "github.com/hashicorp/vault/physical/manta" + physMSSQL "github.com/hashicorp/vault/physical/mssql" + physMySQL "github.com/hashicorp/vault/physical/mysql" + physOCI "github.com/hashicorp/vault/physical/oci" + physPostgreSQL "github.com/hashicorp/vault/physical/postgresql" + physS3 "github.com/hashicorp/vault/physical/s3" + physSpanner "github.com/hashicorp/vault/physical/spanner" + physSwift "github.com/hashicorp/vault/physical/swift" + physZooKeeper "github.com/hashicorp/vault/physical/zookeeper" + "github.com/hashicorp/vault/sdk/physical" + physFile "github.com/hashicorp/vault/sdk/physical/file" +) + +func newFullAddonHandlers() (map[string]physical.Factory, map[string]LoginHandler) { + addonPhysicalBackends := map[string]physical.Factory{ + "aerospike": physAerospike.NewAerospikeBackend, + "alicloudoss": physAliCloudOSS.NewAliCloudOSSBackend, + "azure": physAzure.NewAzureBackend, + "cassandra": physCassandra.NewCassandraBackend, + "cockroachdb": physCockroachDB.NewCockroachDBBackend, + "consul": physConsul.NewConsulBackend, + "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend, + "couchdb": physCouchDB.NewCouchDBBackend, + "dynamodb": physDynamoDB.NewDynamoDBBackend, + "etcd": physEtcd.NewEtcdBackend, + "file_transactional": physFile.NewTransactionalFileBackend, + "file": physFile.NewFileBackend, + "foundationdb": physFoundationDB.NewFDBBackend, + "gcs": physGCS.NewBackend, + "manta": physManta.NewMantaBackend, + "mssql": physMSSQL.NewMSSQLBackend, + "mysql": physMySQL.NewMySQLBackend, + "oci": physOCI.NewBackend, + "postgresql": physPostgreSQL.NewPostgreSQLBackend, + "s3": physS3.NewS3Backend, + "spanner": physSpanner.NewBackend, + "swift": physSwift.NewSwiftBackend, + "zookeeper": physZooKeeper.NewZooKeeperBackend, + } + addonLoginHandlers := map[string]LoginHandler{ + "alicloud": &credAliCloud.CLIHandler{}, + "aws": &credAws.CLIHandler{}, + "cf": &credCF.CLIHandler{}, + "gcp": &credGcp.CLIHandler{}, + "github": &credGitHub.CLIHandler{}, + "kerberos": &credKerb.CLIHandler{}, + "ldap": &credLdap.CLIHandler{}, + "oci": 
&credOCI.CLIHandler{}, + "okta": &credOkta.CLIHandler{}, + "pcf": &credCF.CLIHandler{}, // Deprecated. + "radius": &credUserpass.CLIHandler{ + DefaultMount: "radius", + }, + } + + return addonPhysicalBackends, addonLoginHandlers +} + +func extendAddonHandlers(handlers *vaultHandlers) { + addonPhysicalBackends, addonLoginHandlers := newFullAddonHandlers() + + maps.Copy(handlers.physicalBackends, addonPhysicalBackends) + maps.Copy(handlers.loginHandlers, addonLoginHandlers) +} diff --git a/command/commands_full_test.go b/command/commands_full_test.go new file mode 100644 index 000000000000..e22c0fc5f102 --- /dev/null +++ b/command/commands_full_test.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise && !minimal + +package command + +import ( + "maps" + "testing" + + "github.com/stretchr/testify/require" +) + +// Test_extendAddonHandlers tests extendAddonHandlers() extends the minimal Vault handlers with handlers +// generated by newFullAddonHandlers() +func Test_extendAddonHandlers(t *testing.T) { + handlers := newMinimalVaultHandlers() + expMinPhysicalBackends := maps.Clone(handlers.physicalBackends) + expMinLoginHandlers := maps.Clone(handlers.loginHandlers) + + expAddonPhysicalBackends, expAddonLoginHandlers := newFullAddonHandlers() + + extendAddonHandlers(handlers) + + require.Equal(t, len(expMinPhysicalBackends)+len(expAddonPhysicalBackends), len(handlers.physicalBackends), + "extended total physical backends mismatch total of minimal and full addon physical backends") + require.Equal(t, len(expMinLoginHandlers)+len(expAddonLoginHandlers), len(handlers.loginHandlers), + "extended total login handlers mismatch total of minimal and full addon login handlers") + + for k := range expMinPhysicalBackends { + require.Contains(t, handlers.physicalBackends, k, "expected to contain minimal physical backend") + } + + for k := range expAddonPhysicalBackends { + require.Contains(t, handlers.physicalBackends, k, "expected to contain full addon physical backend") + } + + for k := range expMinLoginHandlers { + require.Contains(t, handlers.loginHandlers, k, "expected to contain minimal login handler") + } + + for k := range expAddonLoginHandlers { + require.Contains(t, handlers.loginHandlers, k, "expected to contain full addon login handler") + } +} diff --git a/command/commands_min.go b/command/commands_min.go new file mode 100644 index 000000000000..812c37a40c9e --- /dev/null +++ b/command/commands_min.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build minimal + +package command + +import ( + _ "github.com/hashicorp/vault/helper/builtinplugins" +) + +func extendAddonHandlers(*vaultHandlers) { + // No-op +} diff --git a/command/commands_nonwindows.go b/command/commands_nonwindows.go index f8d128c3fd42..90cfa253a2bc 100644 --- a/command/commands_nonwindows.go +++ b/command/commands_nonwindows.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !windows diff --git a/command/commands_test.go b/command/commands_test.go new file mode 100644 index 000000000000..681e62712599 --- /dev/null +++ b/command/commands_test.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
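The `commands_full.go` / `commands_min.go` pair above selects an `extendAddonHandlers` implementation at compile time via `//go:build` tags: full builds merge the optional backends into the base handler set with `maps.Copy`, while minimal builds compile a no-op. A condensed, single-file sketch of the pattern; the backend names and `factory` type are placeholders, not Vault's real handler sets:

```go
package main

import (
	"fmt"
	"maps"
)

type factory func() string

type vaultStyleHandlers struct {
	physicalBackends map[string]factory
}

func newMinimalHandlers() *vaultStyleHandlers {
	return &vaultStyleHandlers{physicalBackends: map[string]factory{
		"inmem": func() string { return "inmem" }, // placeholder minimal set
	}}
}

// In the real code this body lives behind //go:build !minimal, while the
// minimal build compiles a no-op with the same signature.
func extendAddonHandlers(h *vaultStyleHandlers) {
	addons := map[string]factory{
		"s3":     func() string { return "s3" },
		"consul": func() string { return "consul" },
	}
	maps.Copy(h.physicalBackends, addons) // later entries win on key collisions
}

func main() {
	h := newMinimalHandlers()
	extendAddonHandlers(h)
	fmt.Println(len(h.physicalBackends)) // 3
}
```

Because both build flavors call the same `extendAddonHandlers` symbol, the caller in `initCommands` needs no build-tag awareness at all.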
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "testing" + + "github.com/hashicorp/cli" + "github.com/stretchr/testify/require" +) + +func Test_Commands_HCPInit(t *testing.T) { + tests := map[string]struct { + expectError bool + expectedErrorMsg string + }{ + "initialize with success": { + expectError: false, + }, + "initialize with error: existing commands conflict with init commands": { + expectError: true, + expectedErrorMsg: "Failed to initialize HCP commands.", + }, + } + + for n, tst := range tests { + n := n + tst := tst + + t.Run(n, func(t *testing.T) { + t.Parallel() + + mockUi := cli.NewMockUi() + commands := initCommands(mockUi, nil, nil) + if tst.expectError { + initHCPCommands(mockUi, commands) + errMsg := mockUi.ErrorWriter.String() + require.NotEmpty(t, errMsg) + require.Contains(t, errMsg, tst.expectedErrorMsg) + } else { + errMsg := mockUi.ErrorWriter.String() + require.Empty(t, errMsg) + require.NotEmpty(t, commands) + } + }) + } +} diff --git a/command/commands_windows.go b/command/commands_windows.go index 541a6e4aea8e..016a2d2cb50e 100644 --- a/command/commands_windows.go +++ b/command/commands_windows.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build windows diff --git a/command/config.go b/command/config.go deleted file mode 100644 index 3fbc53a96670..000000000000 --- a/command/config.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "github.com/hashicorp/vault/command/config" -) - -const ( - // DefaultConfigPath is the default path to the configuration file - DefaultConfigPath = "~/.vault" - - // ConfigPathEnv is the environment variable that can be used to - // override where the Vault configuration is. - ConfigPathEnv = "VAULT_CONFIG_PATH" -) - -// Config is the CLI configuration for Vault that can be specified via -// a `$HOME/.vault` file which is HCL-formatted (therefore HCL or JSON). -type DefaultConfig struct { - // TokenHelper is the executable/command that is executed for storing - // and retrieving the authentication token for the Vault CLI. If this - // is not specified, then vault's internal token store will be used, which - // stores the token on disk unencrypted. - TokenHelper string `hcl:"token_helper"` -} - -// Config loads the configuration and returns it. If the configuration -// is already loaded, it is returned. -// -// Config just calls into config.Config for backwards compatibility purposes. -// Use config.Config instead. -func Config() (*DefaultConfig, error) { - conf, err := config.Config() - return (*DefaultConfig)(conf), err -} - -// LoadConfig reads the configuration from the given path. If path is -// empty, then the default path will be used, or the environment variable -// if set. -// -// LoadConfig just calls into config.LoadConfig for backwards compatibility -// purposes. Use config.LoadConfig instead. -func LoadConfig(path string) (*DefaultConfig, error) { - conf, err := config.LoadConfig(path) - return (*DefaultConfig)(conf), err -} - -// ParseConfig parses the given configuration as a string. -// -// ParseConfig just calls into config.ParseConfig for backwards compatibility -// purposes. Use config.ParseConfig instead. 
-func ParseConfig(contents string) (*DefaultConfig, error) { - conf, err := config.ParseConfig(contents) - return (*DefaultConfig)(conf), err -} diff --git a/command/config/config.go b/command/config/config.go deleted file mode 100644 index 71f9127887d7..000000000000 --- a/command/config/config.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package config - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/vault/sdk/helper/hclutil" - homedir "github.com/mitchellh/go-homedir" -) - -const ( - // DefaultConfigPath is the default path to the configuration file - DefaultConfigPath = "~/.vault" - - // ConfigPathEnv is the environment variable that can be used to - // override where the Vault configuration is. - ConfigPathEnv = "VAULT_CONFIG_PATH" -) - -// Config is the CLI configuration for Vault that can be specified via -// a `$HOME/.vault` file which is HCL-formatted (therefore HCL or JSON). -type DefaultConfig struct { - // TokenHelper is the executable/command that is executed for storing - // and retrieving the authentication token for the Vault CLI. If this - // is not specified, then vault's internal token store will be used, which - // stores the token on disk unencrypted. - TokenHelper string `hcl:"token_helper"` -} - -// Config loads the configuration and returns it. If the configuration -// is already loaded, it is returned. -func Config() (*DefaultConfig, error) { - var err error - config, err := LoadConfig("") - if err != nil { - return nil, err - } - - return config, nil -} - -// LoadConfig reads the configuration from the given path. If path is -// empty, then the default path will be used, or the environment variable -// if set. -func LoadConfig(path string) (*DefaultConfig, error) { - if path == "" { - path = DefaultConfigPath - } - if v := os.Getenv(ConfigPathEnv); v != "" { - path = v - } - - // NOTE: requires HOME env var to be set - path, err := homedir.Expand(path) - if err != nil { - return nil, fmt.Errorf("error expanding config path %q: %w", path, err) - } - - contents, err := ioutil.ReadFile(path) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - - conf, err := ParseConfig(string(contents)) - if err != nil { - return nil, fmt.Errorf("error parsing config file at %q: %w; ensure that the file is valid; Ansible Vault is known to conflict with it.", path, err) - } - - return conf, nil -} - -// ParseConfig parses the given configuration as a string. -func ParseConfig(contents string) (*DefaultConfig, error) { - root, err := hcl.Parse(contents) - if err != nil { - return nil, err - } - - // Top-level item should be the object list - list, ok := root.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("failed to parse config; does not contain a root object") - } - - valid := []string{ - "token_helper", - } - if err := hclutil.CheckHCLKeys(list, valid); err != nil { - return nil, err - } - - var c DefaultConfig - if err := hcl.DecodeObject(&c, list); err != nil { - return nil, err - } - return &c, nil -} diff --git a/command/config/config_test.go b/command/config/config_test.go deleted file mode 100644 index fef151622259..000000000000 --- a/command/config/config_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
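The deleted `ParseConfig` above drives HCL v1 by hand: parse, validate keys with `hclutil.CheckHCLKeys`, then decode. For reference, the same `token_helper` payload can be decoded with the library's one-shot `hcl.Decode` helper; note this sketch skips the unknown-key validation that `CheckHCLKeys` provided:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// cliConfig mirrors the DefaultConfig struct being removed above.
type cliConfig struct {
	TokenHelper string `hcl:"token_helper"`
}

func main() {
	var c cliConfig
	if err := hcl.Decode(&c, `token_helper = "/usr/local/bin/vault-token-helper"`); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.TokenHelper) // /usr/local/bin/vault-token-helper
}
```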
-// SPDX-License-Identifier: MPL-2.0 - -package config - -import ( - "path/filepath" - "reflect" - "strings" - "testing" -) - -const FixturePath = "../test-fixtures" - -func TestLoadConfig(t *testing.T) { - config, err := LoadConfig(filepath.Join(FixturePath, "config.hcl")) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &DefaultConfig{ - TokenHelper: "foo", - } - if !reflect.DeepEqual(expected, config) { - t.Fatalf("bad: %#v", config) - } -} - -func TestLoadConfig_noExist(t *testing.T) { - config, err := LoadConfig("nope/not-once/.never") - if err != nil { - t.Fatal(err) - } - - if config.TokenHelper != "" { - t.Errorf("expected %q to be %q", config.TokenHelper, "") - } -} - -func TestParseConfig_badKeys(t *testing.T) { - _, err := ParseConfig(` -token_helper = "/token" -nope = "true" -`) - if err == nil { - t.Fatal("expected error") - } - - if !strings.Contains(err.Error(), `invalid key "nope" on line 3`) { - t.Errorf("bad error: %s", err.Error()) - } -} diff --git a/command/config/hcp_token.go b/command/config/hcp_token.go new file mode 100644 index 000000000000..58bc7832e212 --- /dev/null +++ b/command/config/hcp_token.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package config + +import hcpvlib "github.com/hashicorp/vault-hcp-lib" + +// DefaultHCPTokenHelper returns the HCP token helper that is configured for Vault. +// This helper should only be used for non-server CLI commands. +func DefaultHCPTokenHelper() hcpvlib.HCPTokenHelper { + return &hcpvlib.InternalHCPTokenHelper{} +} diff --git a/command/config/util.go b/command/config/util.go deleted file mode 100644 index f295f462ae50..000000000000 --- a/command/config/util.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package config - -import ( - "github.com/hashicorp/vault/command/token" -) - -// DefaultTokenHelper returns the token helper that is configured for Vault. -// This helper should only be used for non-server CLI commands. -func DefaultTokenHelper() (token.TokenHelper, error) { - config, err := LoadConfig("") - if err != nil { - return nil, err - } - - path := config.TokenHelper - if path == "" { - return token.NewInternalTokenHelper() - } - - path, err = token.ExternalTokenHelperPath(path) - if err != nil { - return nil, err - } - return &token.ExternalTokenHelper{BinaryPath: path}, nil -} diff --git a/command/config/validate_listener.go b/command/config/validate_listener.go index e2d27166a04d..09123bdc75b3 100644 --- a/command/config/validate_listener.go +++ b/command/config/validate_listener.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !fips_140_3 diff --git a/command/config_test.go b/command/config_test.go deleted file mode 100644 index 787c6795765a..000000000000 --- a/command/config_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
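For context on the deleted `DefaultTokenHelper` above: an empty `token_helper` setting means the internal on-disk token store, and anything else is treated as an external helper binary. A sketch of just that selection logic, with stub types standing in for the real `command/token` package:

```go
package main

import "fmt"

type tokenHelper interface{ Describe() string }

type internalHelper struct{}

func (internalHelper) Describe() string { return "internal token store" }

type externalHelper struct{ binaryPath string }

func (e externalHelper) Describe() string { return "external helper at " + e.binaryPath }

// defaultTokenHelper mirrors the shape of the removed logic: fall back to
// the internal store unless a helper binary is configured.
func defaultTokenHelper(configuredPath string) tokenHelper {
	if configuredPath == "" {
		return internalHelper{}
	}
	return externalHelper{binaryPath: configuredPath}
}

func main() {
	fmt.Println(defaultTokenHelper("").Describe())                // internal token store
	fmt.Println(defaultTokenHelper("/usr/bin/helper").Describe()) // external helper at /usr/bin/helper
}
```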
-// SPDX-License-Identifier: MPL-2.0 - -package command - -import ( - "path/filepath" - "reflect" - "strings" - "testing" -) - -const FixturePath = "./test-fixtures" - -func TestLoadConfig(t *testing.T) { - config, err := LoadConfig(filepath.Join(FixturePath, "config.hcl")) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &DefaultConfig{ - TokenHelper: "foo", - } - if !reflect.DeepEqual(expected, config) { - t.Fatalf("bad: %#v", config) - } -} - -func TestLoadConfig_noExist(t *testing.T) { - config, err := LoadConfig("nope/not-once/.never") - if err != nil { - t.Fatal(err) - } - - if config.TokenHelper != "" { - t.Errorf("expected %q to be %q", config.TokenHelper, "") - } -} - -func TestParseConfig_badKeys(t *testing.T) { - _, err := ParseConfig(` -token_helper = "/token" -nope = "true" -`) - if err == nil { - t.Fatal("expected error") - } - - if !strings.Contains(err.Error(), `invalid key "nope" on line 3`) { - t.Errorf("bad error: %s", err.Error()) - } -} diff --git a/command/debug.go b/command/debug.go index e5440b3b8888..7355372b5e48 100644 --- a/command/debug.go +++ b/command/debug.go @@ -1,13 +1,15 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "archive/tar" + "compress/gzip" "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/url" "os" "path/filepath" @@ -17,6 +19,7 @@ import ( "sync" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/gatedwriter" "github.com/hashicorp/go-secure-stdlib/strutil" @@ -25,8 +28,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/version" - "github.com/mholt/archiver/v3" - "github.com/mitchellh/cli" "github.com/oklog/run" "github.com/posener/complete" ) @@ -374,7 +375,7 @@ func (c *DebugCommand) generateIndex() error { } // Write out file - if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil { + if err := os.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil { return fmt.Errorf("error generating index file; %s", err) } @@ -687,17 +688,18 @@ func (c *DebugCommand) collectHostInfo(ctx context.Context) { return } if resp != nil { - defer resp.Body.Close() - secret, err := api.ParseSecret(resp.Body) if err != nil { c.captureError("host", err) + resp.Body.Close() return } if secret != nil && secret.Data != nil { hostEntry := secret.Data c.hostInfoCollection = append(c.hostInfoCollection, hostEntry) } + + resp.Body.Close() } } } @@ -777,7 +779,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600) + err = os.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600) if err != nil { c.captureError("pprof."+target, err) } @@ -795,13 +797,13 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600) + err = os.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600) if err != nil { c.captureError("pprof.goroutines-text", err) } }() - // If the our remaining duration is less than the interval value + // If our remaining duration is less than the interval value // skip profile and trace. 
runDuration := currentTimestamp.Sub(startTime) if (c.flagDuration+debugDurationGrace)-runDuration < c.flagInterval { @@ -819,7 +821,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600) + err = os.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600) if err != nil { c.captureError("pprof.profile", err) } @@ -835,7 +837,7 @@ func (c *DebugCommand) collectPprof(ctx context.Context) { return } - err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600) + err = os.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600) if err != nil { c.captureError("pprof.trace", err) } @@ -971,7 +973,7 @@ func (c *DebugCommand) persistCollection(collection []map[string]interface{}, ou if err != nil { return err } - if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil { + if err := os.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil { return err } @@ -983,14 +985,100 @@ func (c *DebugCommand) compress(dst string) error { defer osutil.Umask(osutil.Umask(0o077)) } - tgz := archiver.NewTarGz() - if err := tgz.Archive([]string{c.flagOutput}, dst); err != nil { - return fmt.Errorf("failed to compress data: %s", err) + if err := archiveToTgz(c.flagOutput, dst); err != nil { + return fmt.Errorf("failed to compress data: %w", err) } // If everything is fine up to this point, remove original directory if err := os.RemoveAll(c.flagOutput); err != nil { - return fmt.Errorf("failed to remove data directory: %s", err) + return fmt.Errorf("failed to remove data directory: %w", err) + } + + return nil +} + +// archiveToTgz compresses all the files in sourceDir to +// a tarball at destination. +func archiveToTgz(sourceDir, destination string) error { + file, err := os.Create(destination) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + defer file.Close() + + gzipWriter := gzip.NewWriter(file) + defer gzipWriter.Close() + + tarWriter := tar.NewWriter(gzipWriter) + defer tarWriter.Close() + + err = filepath.Walk(sourceDir, + func(filePath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + return addFileToTar(sourceDir, filePath, tarWriter) + }) + + return err +} + +// addFileToTar takes a file at filePath and adds it to the tar +// being written to by tarWriter, alongside its header. +// The tar header name will be relative. Example: If we're tarring +// a file in ~/a/b/c/foo/bar.json, the header name will be foo/bar.json +func addFileToTar(sourceDir, filePath string, tarWriter *tar.Writer) error { + file, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("failed to open file %q: %w", filePath, err) + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return fmt.Errorf("failed to stat file %q: %w", filePath, err) + } + + var link string + mode := stat.Mode() + if mode&os.ModeSymlink != 0 { + if link, err = os.Readlink(filePath); err != nil { + return fmt.Errorf("failed to read symlink for file %q: %w", filePath, err) + } + } + tarHeader, err := tar.FileInfoHeader(stat, link) + if err != nil { + return fmt.Errorf("failed to create tar header for file %q: %w", filePath, err) + } + + // The tar header name should be relative, so remove the sourceDir from it, + // but preserve the last directory name.
+ // Example: If we're tarring a file in ~/a/b/c/foo/bar.json + // The name should be foo/bar.json + sourceDirExceptLastDir := filepath.Dir(sourceDir) + headerName := strings.TrimPrefix(filepath.Clean(filePath), filepath.Clean(sourceDirExceptLastDir)+"/") + + // Directories should end with a slash. + if stat.IsDir() && !strings.HasSuffix(headerName, "/") { + headerName += "/" + } + tarHeader.Name = headerName + + err = tarWriter.WriteHeader(tarHeader) + if err != nil { + return fmt.Errorf("failed to write tar header for file %q: %w", filePath, err) + } + + // If it's not a regular file (e.g. link or directory) we shouldn't + // copy the file. The body of a tar entry (i.e. what's done by the + // below io.Copy call) is only required for tar files of TypeReg. + if tarHeader.Typeflag != tar.TypeReg { + return nil + } + + _, err = io.Copy(tarWriter, file) + if err != nil { + return fmt.Errorf("failed to copy file %q into tarball: %w", filePath, err) } return nil @@ -1007,7 +1095,7 @@ func pprofTarget(ctx context.Context, client *api.Client, target string, params } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -1027,7 +1115,7 @@ func pprofProfile(ctx context.Context, client *api.Client, duration time.Duratio } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -1047,7 +1135,7 @@ func pprofTrace(ctx context.Context, client *api.Client, duration time.Duration) } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/command/debug_test.go b/command/debug_test.go index e63e1ebed058..16d297bf920f 100644 --- a/command/debug_test.go +++ b/command/debug_test.go @@ -1,13 +1,14 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "archive/tar" + "compress/gzip" "encoding/json" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "runtime" @@ -16,9 +17,9 @@ import ( "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mholt/archiver/v3" - "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" ) func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) { @@ -35,11 +36,7 @@ func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) { func TestDebugCommand_Run(t *testing.T) { t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() cases := []struct { name string @@ -104,6 +101,54 @@ func TestDebugCommand_Run(t *testing.T) { } } +// expectHeaderNamesInTarGzFile asserts that the expectedHeaderNames +// match exactly to the header names in the tar.gz file at tarballPath. +// Will error if there are more or less than expected. +// ignoreUnexpectedHeaders toggles ignoring the presence of headers not +// in expectedHeaderNames. +func expectHeaderNamesInTarGzFile(t *testing.T, tarballPath string, expectedHeaderNames []string, ignoreUnexpectedHeaders bool) { + t.Helper() + + file, err := os.Open(tarballPath) + require.NoError(t, err) + + uncompressedStream, err := gzip.NewReader(file) + require.NoError(t, err) + + tarReader := tar.NewReader(uncompressedStream) + headersFoundMap := make(map[string]any) + + for { + header, err := tarReader.Next() + if err == io.EOF { + // We're at the end of the tar. 
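The new `archiveToTgz`/`addFileToTar` pair above replaces the `archiver` dependency with plain `archive/tar` plus `compress/gzip`, and the test helper that follows reads archives back the same way. A standalone sketch of that read path, listing the entries of a .tgz with the standard library only (the archive path is illustrative):

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("vault-debug.tar.gz") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
```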
+ break + } + require.NoError(t, err) + + // Ignore directories. + if header.Typeflag == tar.TypeDir { + continue + } + + for _, name := range expectedHeaderNames { + if header.Name == name { + headersFoundMap[header.Name] = struct{}{} + } + } + if _, ok := headersFoundMap[header.Name]; !ok && !ignoreUnexpectedHeaders { + t.Fatalf("unexpected file: %s", header.Name) + } + } + + // Expect that every expectedHeader was found at some point + for _, name := range expectedHeaderNames { + if _, ok := headersFoundMap[name]; !ok { + t.Fatalf("missing header from tar: %s", name) + } + } +} + func TestDebugCommand_Archive(t *testing.T) { t.Parallel() @@ -137,11 +182,7 @@ func TestDebugCommand_Archive(t *testing.T) { // Create temp dirs for each test case since os.Stat and tgz.Walk // (called down below) exhibits raciness otherwise. - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -177,32 +218,14 @@ func TestDebugCommand_Archive(t *testing.T) { } bundlePath := filepath.Join(testDir, basePath+expectedExt) - _, err = os.Stat(bundlePath) + _, err := os.Stat(bundlePath) if os.IsNotExist(err) { t.Log(ui.OutputWriter.String()) t.Fatal(err) } - tgz := archiver.NewTarGz() - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - return fmt.Errorf("invalid file header: %#v", f.Header) - } - - // Ignore base directory and index file - if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") { - return nil - } - - if fh.Name != filepath.Join(basePath, "server_status.json") { - return fmt.Errorf("unexpected file: %s", fh.Name) - } - return nil - }) - if err != nil { - t.Fatal(err) - } + expectedHeaders := []string{filepath.Join(basePath, "index.json"), filepath.Join(basePath, "server_status.json")} + expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, false) }) } } @@ -258,11 +281,7 @@ func TestDebugCommand_CaptureTargets(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -287,45 +306,22 @@ func TestDebugCommand_CaptureTargets(t *testing.T) { } bundlePath := filepath.Join(testDir, basePath+debugCompressionExt) - _, err = os.Open(bundlePath) + _, err := os.Open(bundlePath) if err != nil { t.Fatalf("failed to open archive: %s", err) } - tgz := archiver.NewTarGz() - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - t.Fatalf("invalid file header: %#v", f.Header) - } - - // Ignore base directory and index file - if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") { - return nil - } - - for _, fileName := range tc.expectedFiles { - if fh.Name == filepath.Join(basePath, fileName) { - return nil - } - } - - // If we reach here, it means that this is an unexpected file - return fmt.Errorf("unexpected file: %s", fh.Name) - }) - if err != nil { - t.Fatal(err) + expectedHeaders := []string{filepath.Join(basePath, "index.json")} + for _, fileName := range tc.expectedFiles { + expectedHeaders = append(expectedHeaders, filepath.Join(basePath, fileName)) } + expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, false) }) } } func TestDebugCommand_Pprof(t *testing.T) { - testDir, err := ioutil.TempDir("", 
"vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -379,11 +375,7 @@ func TestDebugCommand_Pprof(t *testing.T) { func TestDebugCommand_IndexFile(t *testing.T) { t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -409,7 +401,7 @@ func TestDebugCommand_IndexFile(t *testing.T) { t.Fatalf("expected %d to be %d", code, exp) } - content, err := ioutil.ReadFile(filepath.Join(outputPath, "index.json")) + content, err := os.ReadFile(filepath.Join(outputPath, "index.json")) if err != nil { t.Fatal(err) } @@ -426,11 +418,7 @@ func TestDebugCommand_IndexFile(t *testing.T) { func TestDebugCommand_TimingChecks(t *testing.T) { t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() cases := []struct { name string @@ -585,11 +573,7 @@ func TestDebugCommand_OutputExists(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -602,12 +586,12 @@ func TestDebugCommand_OutputExists(t *testing.T) { // Create a conflicting file/directory if tc.compress { - _, err = os.Create(outputPath) + _, err := os.Create(outputPath) if err != nil { t.Fatal(err) } } else { - err = os.Mkdir(outputPath, 0o700) + err := os.Mkdir(outputPath, 0o700) if err != nil { t.Fatal(err) } @@ -639,11 +623,7 @@ func TestDebugCommand_OutputExists(t *testing.T) { func TestDebugCommand_PartialPermissions(t *testing.T) { t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -680,38 +660,14 @@ func TestDebugCommand_PartialPermissions(t *testing.T) { t.Fatalf("failed to open archive: %s", err) } - tgz := archiver.NewTarGz() - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - t.Fatalf("invalid file header: %#v", f.Header) - } - - // Ignore base directory and index file - if fh.Name == basePath+"/" { - return nil - } - - // Ignore directories, which still get created by pprof but should - // otherwise be empty. - if fh.FileInfo().IsDir() { - return nil - } - - switch { - case fh.Name == filepath.Join(basePath, "index.json"): - case fh.Name == filepath.Join(basePath, "replication_status.json"): - case fh.Name == filepath.Join(basePath, "server_status.json"): - case fh.Name == filepath.Join(basePath, "vault.log"): - default: - return fmt.Errorf("unexpected file: %s", fh.Name) - } - - return nil - }) - if err != nil { - t.Fatal(err) + expectedHeaders := []string{ + filepath.Join(basePath, "index.json"), filepath.Join(basePath, "server_status.json"), + filepath.Join(basePath, "vault.log"), } + + // We set ignoreUnexpectedHeaders to true as replication_status.json is only sometimes + // produced. Relying on it being or not being there would be racy. 
+ expectHeaderNamesInTarGzFile(t, bundlePath, expectedHeaders, true) } // set insecure umask to see if the files and directories get created with right permissions @@ -748,11 +704,7 @@ func TestDebugCommand_InsecureUmask(t *testing.T) { // set insecure umask defer syscall.Umask(syscall.Umask(0)) - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) + testDir := t.TempDir() client, closer := testVaultServer(t) defer closer() @@ -796,20 +748,22 @@ func TestDebugCommand_InsecureUmask(t *testing.T) { // check permissions of the files within the parent directory switch tc.compress { case true: - tgz := archiver.NewTarGz() + file, err := os.Open(bundlePath) + require.NoError(t, err) - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - return fmt.Errorf("invalid file header: %#v", f.Header) - } - err = isValidFilePermissions(fh.FileInfo()) - if err != nil { - t.Fatalf(err.Error()) - } - return nil - }) + uncompressedStream, err := gzip.NewReader(file) + require.NoError(t, err) + + tarReader := tar.NewReader(uncompressedStream) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + err = isValidFilePermissions(header.FileInfo()) + require.NoError(t, err) + } case false: err = filepath.Walk(bundlePath, func(path string, info os.FileInfo, err error) error { err = isValidFilePermissions(info) @@ -820,9 +774,7 @@ func TestDebugCommand_InsecureUmask(t *testing.T) { }) } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) }) } } diff --git a/command/delete.go b/command/delete.go index 7da6dd2d9847..6986a84ec343 100644 --- a/command/delete.go +++ b/command/delete.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/delete_test.go b/command/delete_test.go index 629be7abb42c..6da1d1d7fa91 100644 --- a/command/delete_test.go +++ b/command/delete_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testDeleteCommand(tb testing.TB) (*cli.MockUi, *DeleteCommand) { diff --git a/command/events.go b/command/events.go index 353c97947d96..48f1fdd22681 100644 --- a/command/events.go +++ b/command/events.go @@ -1,17 +1,18 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "context" + "errors" "fmt" "net/http" "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" "nhooyr.io/websocket" ) @@ -23,6 +24,9 @@ var ( type EventsSubscribeCommands struct { *BaseCommand + + namespaces []string + bexprFilter string } func (c *EventsSubscribeCommands) Synopsis() string { @@ -31,10 +35,11 @@ func (c *EventsSubscribeCommands) Synopsis() string { func (c *EventsSubscribeCommands) Help() string { helpText := ` -Usage: vault events subscribe [-format=json] [-timeout=XYZs] eventType +Usage: vault events subscribe [-namespaces=ns1] [-timeout=XYZs] [-filter=filterExpression] eventType - Subscribe to events of the given event type (topic). The events will be - output to standard out. 
+ Subscribe to events of the given event type (topic), which may be a glob + pattern (with "*" treated as a wildcard). The events will be sent to + standard out. The output will be a JSON object serialized using the default protobuf JSON serialization format, with one line per event received. @@ -44,7 +49,27 @@ func (c *EventsSubscribeCommands) Flags() *FlagSets { set := c.flagSet(FlagSetHTTP) - + f := set.NewFlagSet("Subscribe Options") + f.StringVar(&StringVar{ + Name: "filter", + Usage: `A boolean expression to use to filter events. Only events matching + the filter will be subscribed to. This is applied after any filtering + by event type or namespace.`, + Default: "", + Target: &c.bexprFilter, + }) + f.StringSliceVar(&StringSliceVar{ + Name: "namespaces", + Usage: `Specifies one or more patterns of additional child namespaces + to subscribe to. The namespace of the request is automatically + prepended, so specifying 'ns2' when the request is in the 'ns1' + namespace will result in subscribing to 'ns1/ns2', in addition to + 'ns1'. Patterns can include "*" characters to indicate + wildcards. The default is to subscribe only to the request's + namespace.`, + Default: []string{}, + Target: &c.namespaces, + }) return set } @@ -88,6 +113,22 @@ func (c *EventsSubscribeCommands) Run(args []string) int { return 0 } +// cleanNamespace removes leading and trailing space and /'s from the namespace path. +func cleanNamespace(ns string) string { + ns = strings.TrimSpace(ns) + ns = strings.Trim(ns, "/") + return ns +} + +// cleanNamespaces removes leading and trailing space and /'s from the namespace paths. +func cleanNamespaces(namespaces []string) []string { + cleaned := make([]string, len(namespaces)) + for i, ns := range namespaces { + cleaned[i] = cleanNamespace(ns) + } + return cleaned +} + func (c *EventsSubscribeCommands) subscribeRequest(client *api.Client, path string) error { r := client.NewRequest("GET", "/v1/"+path) u := r.URL @@ -98,19 +139,48 @@ func (c *EventsSubscribeCommands) subscribeRequest(client *api.Client, path stri } q := u.Query() q.Set("json", "true") + if len(c.namespaces) > 0 { + q["namespaces"] = cleanNamespaces(c.namespaces) + } + bexprFilter := strings.TrimSpace(c.bexprFilter) + if bexprFilter != "" { + q.Set("filter", bexprFilter) + } u.RawQuery = q.Encode() client.AddHeader("X-Vault-Token", client.Token()) - client.AddHeader("X-Vault-Namesapce", client.Namespace()) + client.AddHeader("X-Vault-Namespace", client.Namespace()) ctx := context.Background() - conn, resp, err := websocket.Dial(ctx, u.String(), &websocket.DialOptions{ - HTTPClient: client.CloneConfig().HttpClient, - HTTPHeader: client.Headers(), - }) - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - return fmt.Errorf("events endpoint not found; check `vault read sys/experiments` to see if an events experiment is available but disabled") + + // Follow redirects in case our request is forwarded to the leader.
+ url := u.String() + var conn *websocket.Conn + var err error + for attempt := 0; attempt < 10; attempt++ { + var resp *http.Response + conn, resp, err = websocket.Dial(ctx, url, &websocket.DialOptions{ + HTTPClient: client.CloneConfig().HttpClient, + HTTPHeader: client.Headers(), + }) + + if err == nil { + break + } + + switch { + case resp == nil: + return err + case resp.StatusCode == http.StatusTemporaryRedirect: + url = resp.Header.Get("Location") + continue + case resp.StatusCode == http.StatusNotFound: + return errors.New("events endpoint not found; check `vault read sys/experiments` to see if an events experiment is available but disabled") + default: + return err } - return err + } + + if conn == nil { + return fmt.Errorf("too many redirects") } defer conn.Close(websocket.StatusNormalClosure, "") diff --git a/command/events_test.go b/command/events_test.go index bb2aef0b37ba..dfeb12d706b2 100644 --- a/command/events_test.go +++ b/command/events_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testEventsSubscribeCommand(tb testing.TB) (*cli.MockUi, *EventsSubscribeCommands) { diff --git a/command/format.go b/command/format.go index 5e42d31d1f9a..548a9a089c85 100644 --- a/command/format.go +++ b/command/format.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -14,8 +14,8 @@ import ( "time" "github.com/ghodss/yaml" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/ryanuber/columnize" ) @@ -326,13 +326,14 @@ func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) func (t TableFormatter) OutputSealStatusStruct(ui cli.Ui, secret *api.Secret, data interface{}) error { var status SealStatusOutput = data.(SealStatusOutput) var sealPrefix string - if status.RecoverySeal { - sealPrefix = "Recovery " - } out := []string{} out = append(out, "Key | Value") - out = append(out, fmt.Sprintf("%sSeal Type | %s", sealPrefix, status.Type)) + out = append(out, fmt.Sprintf("Seal Type | %s", status.Type)) + if status.RecoverySeal { + sealPrefix = "Recovery " + out = append(out, fmt.Sprintf("Recovery Seal Type | %s", status.RecoverySealType)) + } out = append(out, fmt.Sprintf("Initialized | %t", status.Initialized)) out = append(out, fmt.Sprintf("Sealed | %t", status.Sealed)) out = append(out, fmt.Sprintf("Total %sShares | %d", sealPrefix, status.N)) diff --git a/command/format_test.go b/command/format_test.go index 2bdc45ebe57a..77093a3e29b7 100644 --- a/command/format_test.go +++ b/command/format_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
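Backing up to the `events subscribe` hunks above: `subscribeRequest` always sets `json=true`, encodes `namespaces` as a repeated query parameter, and adds `filter` only when non-empty. A standalone sketch of that query construction (the filter string is an illustrative go-bexpr-style expression, not a documented example):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func buildSubscribeQuery(namespaces []string, filter string) string {
	q := url.Values{}
	q.Set("json", "true")
	if len(namespaces) > 0 {
		q["namespaces"] = namespaces // repeated ?namespaces=... parameters
	}
	if f := strings.TrimSpace(filter); f != "" {
		q.Set("filter", f)
	}
	return q.Encode()
}

func main() {
	fmt.Println(buildSubscribeQuery([]string{"ns1", "ns1/ns2"}, `event_type != ""`))
	// filter=event_type+%21%3D+%22%22&json=true&namespaces=ns1&namespaces=ns1%2Fns2
}
```

Assigning the slice directly to `q["namespaces"]` is what produces one `namespaces=` pair per entry, rather than a single comma-joined value.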
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -108,6 +108,7 @@ func TestStatusFormat(t *testing.T) { expectedOutputString := `Key Value --- ----- +Seal Type type Recovery Seal Type type Initialized true Sealed true @@ -140,6 +141,7 @@ Warnings [warning]` expectedOutputString = `Key Value --- ----- +Seal Type type Recovery Seal Type type Initialized true Sealed true @@ -167,21 +169,22 @@ func getMockStatusData(emptyFields bool) SealStatusOutput { var sealStatusResponseMock api.SealStatusResponse if !emptyFields { sealStatusResponseMock = api.SealStatusResponse{ - Type: "type", - Initialized: true, - Sealed: true, - T: 1, - N: 2, - Progress: 3, - Nonce: "nonce", - Version: "version", - BuildDate: "build date", - Migration: true, - ClusterName: "cluster name", - ClusterID: "cluster id", - RecoverySeal: true, - StorageType: "storage type", - Warnings: []string{"warning"}, + Type: "type", + Initialized: true, + Sealed: true, + T: 1, + N: 2, + Progress: 3, + Nonce: "nonce", + Version: "version", + BuildDate: "build date", + Migration: true, + ClusterName: "cluster name", + ClusterID: "cluster id", + RecoverySeal: true, + RecoverySealType: "type", + StorageType: "storage type", + Warnings: []string{"warning"}, } // must initialize this struct without explicit field names due to embedding @@ -200,20 +203,21 @@ func getMockStatusData(emptyFields bool) SealStatusOutput { } } else { sealStatusResponseMock = api.SealStatusResponse{ - Type: "type", - Initialized: true, - Sealed: true, - T: 1, - N: 2, - Progress: 3, - Nonce: "nonce", - Version: "version", - BuildDate: "build date", - Migration: true, - ClusterName: "", - ClusterID: "", - RecoverySeal: true, - StorageType: "", + Type: "type", + Initialized: true, + Sealed: true, + T: 1, + N: 2, + Progress: 3, + Nonce: "nonce", + Version: "version", + BuildDate: "build date", + Migration: true, + ClusterName: "", + ClusterID: "", + RecoverySeal: true, + StorageType: "", + RecoverySealType: "type", } // must initialize this struct without explicit field names due to embedding diff --git a/command/generaterootkind_enumer.go b/command/generaterootkind_enumer.go new file mode 100644 index 000000000000..a53d2846de82 --- /dev/null +++ b/command/generaterootkind_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=generateRootKind -trimprefix=generateRoot"; DO NOT EDIT. + +package command + +import ( + "fmt" +) + +const _generateRootKindName = "RegularDRRecovery" + +var _generateRootKindIndex = [...]uint8{0, 7, 9, 17} + +func (i generateRootKind) String() string { + if i < 0 || i >= generateRootKind(len(_generateRootKindIndex)-1) { + return fmt.Sprintf("generateRootKind(%d)", i) + } + return _generateRootKindName[_generateRootKindIndex[i]:_generateRootKindIndex[i+1]] +} + +var _generateRootKindValues = []generateRootKind{0, 1, 2} + +var _generateRootKindNameToValueMap = map[string]generateRootKind{ + _generateRootKindName[0:7]: 0, + _generateRootKindName[7:9]: 1, + _generateRootKindName[9:17]: 2, +} + +// generateRootKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
+func generateRootKindString(s string) (generateRootKind, error) { + if val, ok := _generateRootKindNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to generateRootKind values", s) +} + +// generateRootKindValues returns all values of the enum +func generateRootKindValues() []generateRootKind { + return _generateRootKindValues +} + +// IsAgenerateRootKind returns "true" if the value is listed in the enum definition. "false" otherwise +func (i generateRootKind) IsAgenerateRootKind() bool { + for _, v := range _generateRootKindValues { + if i == v { + return true + } + } + return false +} diff --git a/command/healthcheck/healthcheck.go b/command/healthcheck/healthcheck.go index a6fb2040eedb..ba5c9b419638 100644 --- a/command/healthcheck/healthcheck.go +++ b/command/healthcheck/healthcheck.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 /* * The healthcheck package attempts to allow generic checks of arbitrary @@ -128,7 +128,7 @@ func (e *Executor) Execute() (map[string][]*Result, error) { for _, result := range results { result.Endpoint = e.templatePath(result.Endpoint) - result.StatusDisplay = ResultStatusNameMap[result.Status] + result.StatusDisplay = result.Status.String() } ret[checker.Name()] = results @@ -252,6 +252,7 @@ type Check interface { Evaluate(e *Executor) ([]*Result, error) } +//go:generate enumer -type=ResultStatus -trimprefix=Result -transform=snake type ResultStatus int const ( @@ -264,16 +265,6 @@ const ( ResultInsufficientPermissions ) -var ResultStatusNameMap = map[ResultStatus]string{ - ResultNotApplicable: "not_applicable", - ResultOK: "ok", - ResultInformational: "informational", - ResultWarning: "warning", - ResultCritical: "critical", - ResultInvalidVersion: "invalid_version", - ResultInsufficientPermissions: "insufficient_permissions", -} - var NameResultStatusMap = map[string]ResultStatus{ "not_applicable": ResultNotApplicable, "ok": ResultOK, diff --git a/command/healthcheck/pki.go b/command/healthcheck/pki.go index 42f4fc485865..6522cdbc83a7 100644 --- a/command/healthcheck/pki.go +++ b/command/healthcheck/pki.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_allow_acme_headers.go b/command/healthcheck/pki_allow_acme_headers.go new file mode 100644 index 000000000000..186cc40af966 --- /dev/null +++ b/command/healthcheck/pki_allow_acme_headers.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. 
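The generated file above, together with the new `//go:generate enumer` directive on `ResultStatus`, replaces hand-maintained name maps with generated `String`/`<Type>String` round-trip functions (the diff later calls `ResultStatusString` in `pki_ca_validity_period.go`). A hand-written miniature of the same round-trip shape, just to illustrate what enumer emits; this is not actual enumer output:

```go
package main

import "fmt"

type color int

const (
	Red color = iota
	Green
)

var colorNames = map[color]string{Red: "red", Green: "green"}

func (c color) String() string { return colorNames[c] }

// colorString is the reverse lookup, mirroring generateRootKindString above.
func colorString(s string) (color, error) {
	for c, name := range colorNames {
		if name == s {
			return c, nil
		}
	}
	return 0, fmt.Errorf("%s does not belong to color values", s)
}

func main() {
	fmt.Println(Green.String()) // green
	c, err := colorString("red")
	fmt.Println(c == Red, err) // true <nil>
}
```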
+// SPDX-License-Identifier: BUSL-1.1 + +package healthcheck + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type AllowAcmeHeaders struct { + Enabled bool + UnsupportedVersion bool + + TuneFetcher *PathFetch + TuneData map[string]interface{} + + AcmeConfigFetcher *PathFetch +} + +func NewAllowAcmeHeaders() Check { + return &AllowAcmeHeaders{} +} + +func (h *AllowAcmeHeaders) Name() string { + return "allow_acme_headers" +} + +func (h *AllowAcmeHeaders) IsEnabled() bool { + return h.Enabled +} + +func (h *AllowAcmeHeaders) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *AllowAcmeHeaders) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *AllowAcmeHeaders) FetchResources(e *Executor) error { + var err error + h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") + if err != nil { + return err + } + + if h.AcmeConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + _, h.TuneFetcher, h.TuneData, err = fetchMountTune(e, func() { + h.UnsupportedVersion = true + }) + if err != nil { + return err + } + + return nil +} + +func (h *AllowAcmeHeaders) Evaluate(e *Executor) ([]*Result, error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.AcmeConfigFetcher.IsSecretPermissionsError() { + msg := "Without read access to ACME configuration, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil + } + + acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) + if err != nil { + return nil, err + } + + if !acmeEnabled { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "ACME is not enabled, no additional response headers required.", + } + return []*Result{&ret}, nil + } + + if h.TuneFetcher.IsSecretPermissionsError() { + msg := "Without access to mount tune information, this health check is unable to function." 
+ return craftInsufficientPermissionResult(e, h.TuneFetcher.Path, msg), nil + } + + resp, err := StringList(h.TuneData["allowed_response_headers"]) + if err != nil { + return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) + } + + requiredResponseHeaders := []string{"Replay-Nonce", "Link", "Location"} + foundResponseHeaders := []string{} + for _, param := range resp { + for _, reqHeader := range requiredResponseHeaders { + if strings.EqualFold(param, reqHeader) { + foundResponseHeaders = append(foundResponseHeaders, reqHeader) + break + } + } + } + + foundAllHeaders := strutil.EquivalentSlices(requiredResponseHeaders, foundResponseHeaders) + + if !foundAllHeaders { + ret := Result{ + Status: ResultWarning, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount hasn't enabled 'Replay-Nonce', 'Link', 'Location' response headers, these are required for ACME to function.", + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultOK, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount has enabled 'Replay-Nonce', 'Link', 'Location' response headers.", + } + return []*Result{&ret}, nil +} + +func craftInsufficientPermissionResult(e *Executor, path, errorMsg string) []*Result { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: path, + Message: errorMsg, + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + return []*Result{&ret} +} diff --git a/command/healthcheck/pki_allow_if_modified_since.go b/command/healthcheck/pki_allow_if_modified_since.go index bb5306e05418..38eaee9aab65 100644 --- a/command/healthcheck/pki_allow_if_modified_since.go +++ b/command/healthcheck/pki_allow_if_modified_since.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_audit_visibility.go b/command/healthcheck/pki_audit_visibility.go index 6b3834fb0c9e..e58543cb6f12 100644 --- a/command/healthcheck/pki_audit_visibility.go +++ b/command/healthcheck/pki_audit_visibility.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_ca_validity_period.go b/command/healthcheck/pki_ca_validity_period.go index 511de757061d..9ef56d7b9f15 100644 --- a/command/healthcheck/pki_ca_validity_period.go +++ b/command/healthcheck/pki_ca_validity_period.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
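`AllowAcmeHeaders` above follows the package's check lifecycle: `LoadConfig` reads the enabled flag, `FetchResources` pulls API data through the `Executor`, and `Evaluate` turns it into `Result`s. A skeleton of a custom check in the same shape, with stub `Executor`/`Result` types standing in for the real ones; note the method set beyond `Evaluate` is inferred from the checks in this diff, since only `Evaluate` appears in the `Check` interface hunk:

```go
package main

import "fmt"

type Executor struct{} // stub for healthcheck.Executor

type Result struct { // stub for healthcheck.Result
	Status, Endpoint, Message string
}

type myCheck struct{ enabled bool }

func (c *myCheck) Name() string                          { return "my_check" }
func (c *myCheck) IsEnabled() bool                       { return c.enabled }
func (c *myCheck) DefaultConfig() map[string]interface{} { return map[string]interface{}{} }

func (c *myCheck) LoadConfig(cfg map[string]interface{}) error {
	v, ok := cfg["enabled"].(bool)
	if !ok {
		return fmt.Errorf("error parsing %v.enabled", c.Name())
	}
	c.enabled = v
	return nil
}

// FetchResources would issue the API reads; Evaluate turns them into results.
func (c *myCheck) FetchResources(e *Executor) error { return nil }

func (c *myCheck) Evaluate(e *Executor) ([]*Result, error) {
	return []*Result{{Status: "ok", Endpoint: "/{{mount}}/config", Message: "all good"}}, nil
}

func main() {
	chk := &myCheck{}
	if err := chk.LoadConfig(map[string]interface{}{"enabled": true}); err != nil {
		fmt.Println(err)
		return
	}
	results, _ := chk.Evaluate(&Executor{})
	fmt.Println(chk.Name(), chk.IsEnabled(), results[0].Message) // my_check true all good
}
```

Splitting fetching from evaluation lets the `Executor` cache responses, which is why these checks go through `FetchIfNotFetched` rather than hitting the API in `Evaluate`.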
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck @@ -65,9 +65,8 @@ func (h *CAValidityPeriod) LoadConfig(config map[string]interface{}) error { if len(name_split) != 3 || name_split[1] != "expiry" { return fmt.Errorf("bad parameter: %v / %v / %v", parameter, len(name_split), name_split[1]) } - - status, present := NameResultStatusMap[name_split[2]] - if !present { + status, err := ResultStatusString(name_split[2]) + if err != nil { return fmt.Errorf("bad parameter: %v's type %v isn't in name map", parameter, name_split[2]) } diff --git a/command/healthcheck/pki_crl_validity_period.go b/command/healthcheck/pki_crl_validity_period.go index 8450a058f994..ad591fa9fb7d 100644 --- a/command/healthcheck/pki_crl_validity_period.go +++ b/command/healthcheck/pki_crl_validity_period.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck @@ -8,9 +8,8 @@ import ( "fmt" "time" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" ) type CRLValidityPeriod struct { diff --git a/command/healthcheck/pki_enable_acme_issuance.go b/command/healthcheck/pki_enable_acme_issuance.go new file mode 100644 index 000000000000..990cc37e9d39 --- /dev/null +++ b/command/healthcheck/pki_enable_acme_issuance.go @@ -0,0 +1,235 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package healthcheck + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "net/http" + "net/url" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/acme" +) + +type EnableAcmeIssuance struct { + Enabled bool + UnsupportedVersion bool + + AcmeConfigFetcher *PathFetch + ClusterConfigFetcher *PathFetch + TotalIssuers int + RootIssuers int +} + +func NewEnableAcmeIssuance() Check { + return &EnableAcmeIssuance{} +} + +func (h *EnableAcmeIssuance) Name() string { + return "enable_acme_issuance" +} + +func (h *EnableAcmeIssuance) IsEnabled() bool { + return h.Enabled +} + +func (h *EnableAcmeIssuance) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *EnableAcmeIssuance) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *EnableAcmeIssuance) FetchResources(e *Executor) error { + var err error + h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") + if err != nil { + return err + } + + if h.AcmeConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + h.ClusterConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/cluster") + if err != nil { + return err + } + + if h.ClusterConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + h.TotalIssuers, h.RootIssuers, err = doesMountContainOnlyRootIssuers(e) + return err +} + +func doesMountContainOnlyRootIssuers(e *Executor) (int, int, error) { + exit, _, issuers, err := pkiFetchIssuersList(e, func() {}) + if exit || err != nil { + return 0, 0, err + } + + totalIssuers := 0 + rootIssuers := 0 + + for _, issuer := range issuers { + skip, _, cert, err := pkiFetchIssuer(e, issuer, func() {}) + + if skip || err != nil { + if err != nil { + return 0, 0, err + } 
+ continue + } + totalIssuers++ + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + continue + } + if err := cert.CheckSignatureFrom(cert); err != nil { + continue + } + rootIssuers++ + } + + return totalIssuers, rootIssuers, nil +} + +func isAcmeEnabled(fetcher *PathFetch) (bool, error) { + isEnabledRaw, ok := fetcher.Secret.Data["enabled"] + if !ok { + return false, fmt.Errorf("enabled configuration field missing from acme config") + } + + parseBool, err := parseutil.ParseBool(isEnabledRaw) + if err != nil { + return false, fmt.Errorf("failed parsing 'enabled' field from ACME config: %w", err) + } + + return parseBool, nil +} + +func verifyLocalPathUrl(h *EnableAcmeIssuance) error { + localPathRaw, ok := h.ClusterConfigFetcher.Secret.Data["path"] + if !ok { + return fmt.Errorf("'path' field missing from config") + } + + localPath, err := parseutil.ParseString(localPathRaw) + if err != nil { + return fmt.Errorf("failed converting 'path' field from local config: %w", err) + } + + if localPath == "" { + return fmt.Errorf("'path' field not configured within /{{mount}}/config/cluster") + } + + parsedUrl, err := url.Parse(localPath) + if err != nil { + return fmt.Errorf("failed to parse URL from path config: %v: %w", localPathRaw, err) + } + + if parsedUrl.Scheme != "https" { + return fmt.Errorf("the configured 'path' field in /{{mount}}/config/cluster was not using an https scheme") + } + + // Avoid issues with SSL certificates for this check, we just want to validate that we would + // hit an ACME server with the path they specified in configuration + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := &http.Client{Transport: tr} + acmeDirectoryUrl := parsedUrl.JoinPath("/acme/", "directory") + acmeClient := acme.Client{HTTPClient: client, DirectoryURL: acmeDirectoryUrl.String()} + _, err = acmeClient.Discover(context.Background()) + if err != nil { + return fmt.Errorf("using configured 'path' field ('%s') in /{{mount}}/config/cluster failed to reach the ACME"+ + " directory: %s: %w", parsedUrl.String(), acmeDirectoryUrl.String(), err) + } + + return nil +} + +func (h *EnableAcmeIssuance) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.AcmeConfigFetcher.IsSecretPermissionsError() { + msg := "Without this information, this health check is unable to function." 
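+ // Illustrative aside (comment added; host name hypothetical): given a
+ // cluster config of path = "https://vault.example.com/v1/pki",
+ // verifyLocalPathUrl above probes
+ //
+ //	parsedUrl.JoinPath("/acme/", "directory")
+ //	// => "https://vault.example.com/v1/pki/acme/directory"
+ //
+ // and succeeds only if an ACME directory document is served there.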
+ return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil + } + + acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) + if err != nil { + return nil, err + } + + if !acmeEnabled { + if h.TotalIssuers == 0 { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "No issuers in mount, ACME is not required.", + } + return []*Result{&ret}, nil + } + + if h.TotalIssuers == h.RootIssuers { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "Mount contains only root issuers, ACME is not required.", + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultInformational, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "Consider enabling ACME support to support a self-rotating PKI infrastructure.", + } + return []*Result{&ret}, nil + } + + if h.ClusterConfigFetcher.IsSecretPermissionsError() { + msg := "Without this information, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.ClusterConfigFetcher.Path, msg), nil + } + + localPathIssue := verifyLocalPathUrl(h) + + if localPathIssue != nil { + ret := Result{ + Status: ResultWarning, + Endpoint: h.ClusterConfigFetcher.Path, + Message: "ACME enabled in config but not functional: " + localPathIssue.Error(), + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultOK, + Endpoint: h.ClusterConfigFetcher.Path, + Message: "ACME enabled and successfully connected to the ACME directory.", + } + return []*Result{&ret}, nil +} diff --git a/command/healthcheck/pki_enable_auto_tidy.go b/command/healthcheck/pki_enable_auto_tidy.go index 1734d1adcbf1..d8a43ba0a606 100644 --- a/command/healthcheck/pki_enable_auto_tidy.go +++ b/command/healthcheck/pki_enable_auto_tidy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck @@ -7,9 +7,8 @@ import ( "fmt" "time" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" ) type EnableAutoTidy struct { diff --git a/command/healthcheck/pki_hardware_backed_root.go b/command/healthcheck/pki_hardware_backed_root.go index 2fdda6e4a62f..7fbe306ee746 100644 --- a/command/healthcheck/pki_hardware_backed_root.go +++ b/command/healthcheck/pki_hardware_backed_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_role_allows_glob_wildcards.go b/command/healthcheck/pki_role_allows_glob_wildcards.go index 83c55c23856d..f3edd8bd8a12 100644 --- a/command/healthcheck/pki_role_allows_glob_wildcards.go +++ b/command/healthcheck/pki_role_allows_glob_wildcards.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_role_allows_localhost.go b/command/healthcheck/pki_role_allows_localhost.go index 0c9b780abef7..aec00dc7942e 100644 --- a/command/healthcheck/pki_role_allows_localhost.go +++ b/command/healthcheck/pki_role_allows_localhost.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_role_no_store_false.go b/command/healthcheck/pki_role_no_store_false.go index 882955e2bd57..94330cc1b8e2 100644 --- a/command/healthcheck/pki_role_no_store_false.go +++ b/command/healthcheck/pki_role_no_store_false.go @@ -1,14 +1,13 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck import ( "fmt" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" ) type RoleNoStoreFalse struct { diff --git a/command/healthcheck/pki_root_issued_leaves.go b/command/healthcheck/pki_root_issued_leaves.go index 615684b0df49..85359b2e59a9 100644 --- a/command/healthcheck/pki_root_issued_leaves.go +++ b/command/healthcheck/pki_root_issued_leaves.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/pki_tidy_last_run.go b/command/healthcheck/pki_tidy_last_run.go index 9d07369c877d..a699325cbebf 100644 --- a/command/healthcheck/pki_tidy_last_run.go +++ b/command/healthcheck/pki_tidy_last_run.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck @@ -7,9 +7,8 @@ import ( "fmt" "time" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" ) type TidyLastRun struct { diff --git a/command/healthcheck/pki_too_many_certs.go b/command/healthcheck/pki_too_many_certs.go index 59722ab2eff0..f7873e640e00 100644 --- a/command/healthcheck/pki_too_many_certs.go +++ b/command/healthcheck/pki_too_many_certs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/resultstatus_enumer.go b/command/healthcheck/resultstatus_enumer.go new file mode 100644 index 000000000000..eb8182cb4aca --- /dev/null +++ b/command/healthcheck/resultstatus_enumer.go @@ -0,0 +1,54 @@ +// Code generated by "enumer -type=ResultStatus -trimprefix=Result -transform=snake"; DO NOT EDIT. + +package healthcheck + +import ( + "fmt" +) + +const _ResultStatusName = "not_applicableokinformationalwarningcriticalinvalid_versioninsufficient_permissions" + +var _ResultStatusIndex = [...]uint8{0, 14, 16, 29, 36, 44, 59, 83} + +func (i ResultStatus) String() string { + if i < 0 || i >= ResultStatus(len(_ResultStatusIndex)-1) { + return fmt.Sprintf("ResultStatus(%d)", i) + } + return _ResultStatusName[_ResultStatusIndex[i]:_ResultStatusIndex[i+1]] +} + +var _ResultStatusValues = []ResultStatus{0, 1, 2, 3, 4, 5, 6} + +var _ResultStatusNameToValueMap = map[string]ResultStatus{ + _ResultStatusName[0:14]: 0, + _ResultStatusName[14:16]: 1, + _ResultStatusName[16:29]: 2, + _ResultStatusName[29:36]: 3, + _ResultStatusName[36:44]: 4, + _ResultStatusName[44:59]: 5, + _ResultStatusName[59:83]: 6, +} + +// ResultStatusString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
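+// Illustrative usage (comment added; not generated output). Given a health
+// check parameter such as "root_expiry_critical" (parameter name
+// hypothetical), the LoadConfig change above extracts "critical" and
+// resolves it:
+//
+//	status, err := ResultStatusString("critical") // ResultCritical, nil
+//	_, err = ResultStatusString("bogus")          // error: unknown status name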
+func ResultStatusString(s string) (ResultStatus, error) { + if val, ok := _ResultStatusNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ResultStatus values", s) +} + +// ResultStatusValues returns all values of the enum +func ResultStatusValues() []ResultStatus { + return _ResultStatusValues +} + +// IsAResultStatus returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ResultStatus) IsAResultStatus() bool { + for _, v := range _ResultStatusValues { + if i == v { + return true + } + } + return false +} diff --git a/command/healthcheck/shared.go b/command/healthcheck/shared.go index 4097704da2c4..611b5337e618 100644 --- a/command/healthcheck/shared.go +++ b/command/healthcheck/shared.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/healthcheck/util.go b/command/healthcheck/util.go index dd5d66e88573..d8a7ba945e33 100644 --- a/command/healthcheck/util.go +++ b/command/healthcheck/util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package healthcheck diff --git a/command/kv.go b/command/kv.go index b0834c78c71d..f17baf5d3ab7 100644 --- a/command/kv.go +++ b/command/kv.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*KVCommand)(nil) diff --git a/command/kv_delete.go b/command/kv_delete.go index d10c9ebc3203..67cc56ac4e77 100644 --- a/command/kv_delete.go +++ b/command/kv_delete.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,8 +8,8 @@ import ( "path" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -156,7 +156,7 @@ func (c *KVDeleteCommand) Run(args []string) int { var fullPath string if v2 { secret, err = c.deleteV2(partialPath, mountPath, client) - fullPath = addPrefixToKVPath(partialPath, mountPath, "data") + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) } else { // v1 if mountFlagSyntax { @@ -195,13 +195,13 @@ func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) ( var secret *api.Secret switch { case len(c.flagVersions) > 0: - path = addPrefixToKVPath(path, mountPath, "delete") + path = addPrefixToKVPath(path, mountPath, "delete", false) data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } secret, err = client.Logical().Write(path, data) default: - path = addPrefixToKVPath(path, mountPath, "data") + path = addPrefixToKVPath(path, mountPath, "data", false) secret, err = client.Logical().Delete(path) } diff --git a/command/kv_destroy.go b/command/kv_destroy.go index 2ac115cd2e0e..0299be4cea87 100644 --- a/command/kv_destroy.go +++ b/command/kv_destroy.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -155,7 +155,7 @@ func (c *KVDestroyCommand) Run(args []string) int { c.UI.Error("Destroy not supported on KV Version 1") return 1 } - destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy") + destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy", false) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/kv_enable_versioning.go b/command/kv_enable_versioning.go index 8282dd296ff5..921c286e3aae 100644 --- a/command/kv_enable_versioning.go +++ b/command/kv_enable_versioning.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/kv_get.go b/command/kv_get.go index c5c93fc965c2..e31c8a32a5c7 100644 --- a/command/kv_get.go +++ b/command/kv_get.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -152,7 +152,7 @@ func (c *KVGetCommand) Run(args []string) int { var fullPath string // Add /data to v2 paths only if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data") + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) if c.flagVersion > 0 { versionParam = map[string]string{ diff --git a/command/kv_helpers.go b/command/kv_helpers.go index bcf5f6dd7d15..ed3bc38118e5 100644 --- a/command/kv_helpers.go +++ b/command/kv_helpers.go @@ -1,18 +1,20 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "context" "errors" "fmt" "io" - "path" + paths "path" + "sort" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func kvReadRequest(client *api.Client, path string, params map[string]string) (*api.Secret, error) { @@ -126,15 +128,15 @@ func isKVv2(path string, client *api.Client) (string, bool, error) { return mountPath, version == 2, nil } -func addPrefixToKVPath(p, mountPath, apiPrefix string) string { - if p == mountPath || p == strings.TrimSuffix(mountPath, "/") { - return path.Join(mountPath, apiPrefix) +func addPrefixToKVPath(path, mountPath, apiPrefix string, skipIfExists bool) string { + if path == mountPath || path == strings.TrimSuffix(mountPath, "/") { + return paths.Join(mountPath, apiPrefix) } - tp := strings.TrimPrefix(p, mountPath) + pathSuffix := strings.TrimPrefix(path, mountPath) for { // If the entire mountPath is included in the path, we are done - if tp != p { + if pathSuffix != path { break } // Trim the parts of the mountPath that are not included in the @@ -145,10 +147,16 @@ func addPrefixToKVPath(p, mountPath, apiPrefix string) string { break } mountPath = strings.TrimSuffix(partialMountPath[1], "/") - tp = strings.TrimPrefix(tp, mountPath) + pathSuffix = strings.TrimPrefix(pathSuffix, mountPath) } - return path.Join(mountPath, apiPrefix, tp) + if skipIfExists { + if strings.HasPrefix(pathSuffix, apiPrefix) || strings.HasPrefix(pathSuffix, "/"+apiPrefix) { + return paths.Join(mountPath, pathSuffix) + } + } + + return paths.Join(mountPath, apiPrefix, pathSuffix) } func getHeaderForMap(header string, data map[string]interface{}) string { @@ -197,3 +205,65 @@ func padEqualSigns(header string, totalLen int) string { return fmt.Sprintf("%s %s %s", strings.Repeat("=", equalSigns/2), header, strings.Repeat("=", equalSigns/2)) } + +// walkSecretsTree dfs-traverses the secrets tree rooted at the given path +// and calls the `visit` functor for each of the directory and leaf paths. +// Note: for kv-v2, a "metadata" path is expected and "metadata" paths will be +// returned in the visit functor. +func walkSecretsTree(ctx context.Context, client *api.Client, path string, visit func(path string, directory bool) error) error { + resp, err := client.Logical().ListWithContext(ctx, path) + if err != nil { + return fmt.Errorf("could not list %q path: %w", path, err) + } + + if resp == nil || resp.Data == nil { + return fmt.Errorf("no value found at %q", path) + } + + keysRaw, ok := resp.Data["keys"] + if !ok { + return fmt.Errorf("unexpected list response at %q", path) + } + + keysRawSlice, ok := keysRaw.([]interface{}) + if !ok { + return fmt.Errorf("unexpected list response type %T at %q", keysRaw, path) + } + + keys := make([]string, 0, len(keysRawSlice)) + + for _, keyRaw := range keysRawSlice { + key, ok := keyRaw.(string) + if !ok { + return fmt.Errorf("unexpected key type %T at %q", keyRaw, path) + } + keys = append(keys, key) + } + + // sort the keys for a deterministic output + sort.Strings(keys) + + for _, key := range keys { + // the keys are relative to the current path: combine them + child := paths.Join(path, key) + + if strings.HasSuffix(key, "/") { + // visit the directory + if err := visit(child, true); err != nil { + return err + } + + // this is not a leaf node: we need to go deeper...
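+ // NOTE (comment added for clarity): each recursive call descends into a
+ // strictly longer child path, so the traversal terminates once only leaf
+ // keys remain (assuming list responses contain well-formed child names).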
+ if err := walkSecretsTree(ctx, client, child, visit); err != nil { + return err + } + } else { + // this is a leaf node: add it to the list + if err := visit(child, false); err != nil { + return err + } + } + } + + return nil +} diff --git a/command/kv_helpers_test.go b/command/kv_helpers_test.go new file mode 100644 index 000000000000..06a1bb8ee9ab --- /dev/null +++ b/command/kv_helpers_test.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/api" +) + +// TestAddPrefixToKVPath tests the addPrefixToKVPath helper function +func TestAddPrefixToKVPath(t *testing.T) { + cases := map[string]struct { + path string + mountPath string + apiPrefix string + skipIfExists bool + expected string + }{ + "simple": { + path: "kv-v2/foo", + mountPath: "kv-v2/", + apiPrefix: "data", + skipIfExists: false, + expected: "kv-v2/data/foo", + }, + + "multi-part": { + path: "my/kv-v2/mount/path/foo/bar/baz", + mountPath: "my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: false, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + + "with-namespace": { + path: "my/kv-v2/mount/path/foo/bar/baz", + mountPath: "my/ns1/my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: false, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + + "skip-if-exists-true": { + path: "kv-v2/data/foo", + mountPath: "kv-v2/", + apiPrefix: "data", + skipIfExists: true, + expected: "kv-v2/data/foo", + }, + + "skip-if-exists-false": { + path: "kv-v2/data/foo", + mountPath: "kv-v2", + apiPrefix: "data", + skipIfExists: false, + expected: "kv-v2/data/data/foo", + }, + + "skip-if-exists-with-namespace": { + path: "my/kv-v2/mount/path/metadata/foo/bar/baz", + mountPath: "my/ns1/my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: true, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + actual := addPrefixToKVPath( + tc.path, + tc.mountPath, + tc.apiPrefix, + tc.skipIfExists, + ) + + if tc.expected != actual { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, actual) + } + }) + } +} + +// TestWalkSecretsTree tests the walkSecretsTree helper function +func TestWalkSecretsTree(t *testing.T) { + // test setup + client, closer := testVaultServer(t) + defer closer() + + // enable kv-v1 backend + if err := client.Sys().Mount("kv-v1/", &api.MountInput{ + Type: "kv-v1", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + // enable kv-v2 backend + if err := client.Sys().Mount("kv-v2/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + // populate secrets + for _, path := range []string{ + "foo", + "app-1/foo", + "app-1/bar", + "app-1/nested/x/y/z", + "app-1/nested/x/y", + "app-1/nested/bar", + } { + if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ + "password": "Hashi123", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ + "password": "Hashi123", + }); err != nil { + t.Fatal(err) + } + } + + type treePath struct { + path string + directory bool + } + + cases := map[string]struct { + path string + expected []treePath + expectedError bool + }{ + 
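+ // NOTE (comment added for clarity): the expected slices below pin an exact
+ // visit order; it is deterministic because walkSecretsTree sorts the keys
+ // at every level before descending.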
"kv-v1-simple": { + path: "kv-v1/app-1/nested/x/y", + expected: []treePath{ + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-simple": { + path: "kv-v2/metadata/app-1/nested/x/y", + expected: []treePath{ + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-nested": { + path: "kv-v1/app-1/nested/", + expected: []treePath{ + {path: "kv-v1/app-1/nested/bar", directory: false}, + {path: "kv-v1/app-1/nested/x", directory: true}, + {path: "kv-v1/app-1/nested/x/y", directory: false}, + {path: "kv-v1/app-1/nested/x/y", directory: true}, + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-nested": { + path: "kv-v2/metadata/app-1/nested/", + expected: []treePath{ + {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-all": { + path: "kv-v1", + expected: []treePath{ + {path: "kv-v1/app-1", directory: true}, + {path: "kv-v1/app-1/bar", directory: false}, + {path: "kv-v1/app-1/foo", directory: false}, + {path: "kv-v1/app-1/nested", directory: true}, + {path: "kv-v1/app-1/nested/bar", directory: false}, + {path: "kv-v1/app-1/nested/x", directory: true}, + {path: "kv-v1/app-1/nested/x/y", directory: false}, + {path: "kv-v1/app-1/nested/x/y", directory: true}, + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + {path: "kv-v1/foo", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-all": { + path: "kv-v2/metadata", + expected: []treePath{ + {path: "kv-v2/metadata/app-1", directory: true}, + {path: "kv-v2/metadata/app-1/bar", directory: false}, + {path: "kv-v2/metadata/app-1/foo", directory: false}, + {path: "kv-v2/metadata/app-1/nested", directory: true}, + {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + {path: "kv-v2/metadata/foo", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-not-found": { + path: "kv-v1/does/not/exist", + expected: nil, + expectedError: true, + }, + + "kv-v2-not-found": { + path: "kv-v2/metadata/does/not/exist", + expected: nil, + expectedError: true, + }, + + "kv-v1-not-listable-leaf-node": { + path: "kv-v1/foo", + expected: nil, + expectedError: true, + }, + + "kv-v2-not-listable-leaf-node": { + path: "kv-v2/metadata/foo", + expected: nil, + expectedError: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var descendants []treePath + + err := walkSecretsTree(ctx, client, tc.path, func(path string, directory bool) error { + descendants = append(descendants, treePath{ + path: path, + directory: directory, + }) + return nil + }) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, descendants) { + t.Fatalf("unexpected list output; want: %v, got: %v", tc.expected, descendants) + } + } + }) + } +} diff --git a/command/kv_list.go b/command/kv_list.go index 
5b74edcc1e09..4e19d9d7ae3b 100644 --- a/command/kv_list.go +++ b/command/kv_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -136,7 +136,7 @@ func (c *KVListCommand) Run(args []string) int { // Add /metadata to v2 paths only var fullPath string if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath = addPrefixToKVPath(partialPath, mountPath, "metadata", false) } else { // v1 if mountFlagSyntax { diff --git a/command/kv_metadata.go b/command/kv_metadata.go index 4350311aff32..14e1b5bfa3a5 100644 --- a/command/kv_metadata.go +++ b/command/kv_metadata.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*KVMetadataCommand)(nil) diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go index 5cb97b0ed6f3..6f672fc6aeac 100644 --- a/command/kv_metadata_delete.go +++ b/command/kv_metadata_delete.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -138,7 +138,7 @@ func (c *KVMetadataDeleteCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) if secret, err := client.Logical().Delete(fullPath); err != nil { c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) if secret != nil { diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go index db5e3f25aad5..2722c330efe0 100644 --- a/command/kv_metadata_get.go +++ b/command/kv_metadata_get.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,7 +10,7 @@ import ( "strconv" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -140,7 +140,7 @@ func (c *KVMetadataGetCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) secret, err := client.Logical().Read(fullPath) if err != nil { c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) diff --git a/command/kv_metadata_patch.go b/command/kv_metadata_patch.go index 3de4881ffc51..ff59c12fb7c7 100644 --- a/command/kv_metadata_patch.go +++ b/command/kv_metadata_patch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -211,7 +211,7 @@ func (c *KVMetadataPatchCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) data := make(map[string]interface{}, 0) diff --git a/command/kv_metadata_patch_test.go b/command/kv_metadata_patch_test.go index 58f4f9152332..1dc0e123773e 100644 --- a/command/kv_metadata_patch_test.go +++ b/command/kv_metadata_patch_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,8 +10,8 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testKVMetadataPatchCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPatchCommand) { diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 5acea825a74b..5b8124229de9 100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -199,7 +199,7 @@ func (c *KVMetadataPutCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) data := map[string]interface{}{} if c.flagMaxVersions >= 0 { diff --git a/command/kv_metadata_put_test.go b/command/kv_metadata_put_test.go index 008ded9fdb8d..50f815ac4878 100644 --- a/command/kv_metadata_put_test.go +++ b/command/kv_metadata_put_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,8 +9,8 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testKVMetadataPutCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPutCommand) { @@ -145,7 +145,6 @@ func TestKvMetadataPutCommand_CustomMetadata(t *testing.T) { } metadata, err = client.Logical().Read(metaFullPath) - if err != nil { t.Fatalf("Metadata read error: %#v", err) } diff --git a/command/kv_patch.go b/command/kv_patch.go index e6fad922bf35..791273558e4e 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,8 +11,8 @@ import ( "path" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -216,11 +216,11 @@ func (c *KVPatchCommand) Run(args []string) int { } if !v2 { - c.UI.Error("K/V engine mount must be version 2 for patch support") + c.UI.Error("KV engine mount must be version 2 for patch support") return 2 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "data") + fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) if err != nil { c.UI.Error(err.Error()) return 2 @@ -264,6 +264,11 @@ func (c *KVPatchCommand) Run(args []string) int { return PrintRawField(c.UI, secret, c.flagField) } + // If the secret is wrapped, return the wrapped response. + if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + if Format(c.UI) == "table" { outputPath(c.UI, fullPath, "Secret Path") metadata := secret.Data diff --git a/command/kv_put.go b/command/kv_put.go index bdbd34b0f710..b51705791f51 100644 --- a/command/kv_put.go +++ b/command/kv_put.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,7 +10,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -181,7 +181,7 @@ func (c *KVPutCommand) Run(args []string) int { // Add /data to v2 paths only var fullPath string if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data") + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) data = map[string]interface{}{ "data": data, "options": map[string]interface{}{}, @@ -219,6 +219,11 @@ func (c *KVPutCommand) Run(args []string) int { return PrintRawField(c.UI, secret, c.flagField) } + // If the secret is wrapped, return the wrapped response. + if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + if Format(c.UI) == "table" { outputPath(c.UI, fullPath, "Secret Path") metadata := secret.Data diff --git a/command/kv_rollback.go b/command/kv_rollback.go index a0aec415f820..d1f23caee05f 100644 --- a/command/kv_rollback.go +++ b/command/kv_rollback.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,7 +9,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -160,11 +160,11 @@ func (c *KVRollbackCommand) Run(args []string) int { } if !v2 { - c.UI.Error("K/V engine mount must be version 2 for rollback support") + c.UI.Error("KV engine mount must be version 2 for rollback support") return 2 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "data") + fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/kv_test.go b/command/kv_test.go index 6564208ed335..c5ca555be6be 100644 --- a/command/kv_test.go +++ b/command/kv_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
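An aside on the WrapInfo branches added to kv_patch.go and kv_put.go above: when a caller requests response wrapping, these commands now print the wrapped response (the wrapping token) rather than the secret's version metadata. A minimal illustration using Vault's standard -wrap-ttl flag (mount and key names hypothetical):

    $ vault kv put -wrap-ttl=5m -mount=secret creds password=Hashi123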
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,8 +11,8 @@ import ( "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testKVPutCommand(tb testing.TB) (*cli.MockUi, *KVPutCommand) { @@ -1523,6 +1523,156 @@ func TestPadEqualSigns(t *testing.T) { } } +func testKVUndeleteCommand(tb testing.TB) (*cli.MockUi, *KVUndeleteCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVUndeleteCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestKVUndeleteCommand(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + outStrings []string + code int + }{ + { + "not_enough_args", + []string{}, + []string{"Not enough arguments"}, + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + []string{"Too many arguments"}, + 1, + }, + { + "no_versions", + []string{"-mount", "kv", "/read/foo"}, + []string{"No versions provided"}, + 1, + }, + { + "v2_mount_flag_syntax", + []string{"-versions", "1", "-mount", "kv", "read/foo"}, + []string{"Success! Data written to: kv/undelete/read/foo"}, + 0, + }, + { + "v2_mount_flag_syntax_complex_1", + []string{"-versions", "1", "-mount", "secrets/testapp", "test"}, + []string{"Success! Data written to: secrets/testapp/undelete/test"}, + 0, + }, + { + "v2_mount_flag_syntax_complex_2", + []string{"-versions", "1", "-mount", "secrets/x/testapp", "test"}, + []string{"Success! Data written to: secrets/x/testapp/undelete/test"}, + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + if err := client.Sys().Mount("secrets/testapp", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Additional layer of mount path + if err := client.Sys().Mount("secrets/x/testapp", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Give time for the upgrade code to run/finish + time.Sleep(time.Second) + + if _, err := client.Logical().Write("kv/data/read/foo", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "bar", + }, + }); err != nil { + t.Fatal(err) + } + + // Delete the entry so we can undelete it + if _, err := client.Logical().Delete("kv/data/read/foo"); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("secrets/testapp/data/test", map[string]interface{}{ + "data": map[string]interface{}{ + "complex": "yes", + }, + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("secrets/x/testapp/data/test", map[string]interface{}{ + "data": map[string]interface{}{ + "complex": "yes", + }, + }); err != nil { + t.Fatal(err) + } + + // Delete the entry so we can undelete it + if _, err := client.Logical().Delete("secrets/x/testapp/data/test"); err != nil { + t.Fatal(err) + } + + // Delete the entry so we can undelete it + if _, err := client.Logical().Delete("secrets/testapp/data/test"); err != nil { + t.Fatal(err) + } + + ui, cmd := testKVUndeleteCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + for _, str := range tc.outStrings { + if !strings.Contains(combined, str) { + 
t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + }) +} + func createTokenForPolicy(t *testing.T, client *api.Client, policy string) (*api.SecretAuth, error) { t.Helper() diff --git a/command/kv_undelete.go b/command/kv_undelete.go index 8b8eab569c8e..7d438387193b 100644 --- a/command/kv_undelete.go +++ b/command/kv_undelete.go @@ -1,13 +1,14 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "fmt" + "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -35,12 +36,12 @@ Usage: vault kv undelete [options] KEY This restores the data, allowing it to be returned on get requests. To undelete version 3 of key "foo": - + $ vault kv undelete -mount=secret -versions=3 foo - The deprecated path-like syntax can also be used, but this should be avoided, - as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: + The deprecated path-like syntax can also be used, but this should be avoided, + as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: $ vault kv undelete -versions=3 secret/foo @@ -67,10 +68,10 @@ func (c *KVUndeleteCommand) Flags() *FlagSets { Name: "mount", Target: &c.flagMount, Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV v2 secrets.`, }) @@ -134,6 +135,14 @@ func (c *KVUndeleteCommand) Run(args []string) int { c.UI.Error(err.Error()) return 2 } + if v2 { + // Without this join, mountPaths that are deeper + // than the root path E.G. secrets/myapp will get + // pruned down to myapp/undelete/ which + // is incorrect. + // This technique was lifted from kv_delete.go. + partialPath = path.Join(mountPath, partialPath) + } } else { // In this case, this arg is a path-like combination of mountPath/secretPath. // (e.g. "secret/foo") @@ -150,7 +159,7 @@ func (c *KVUndeleteCommand) Run(args []string) int { return 1 } - undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete") + undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete", false) data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } diff --git a/command/lease.go b/command/lease.go index 29ef79fc2d8c..3e0817ffd0d8 100644 --- a/command/lease.go +++ b/command/lease.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*LeaseCommand)(nil) diff --git a/command/lease_lookup.go b/command/lease_lookup.go index ef53ce528635..51b7aab60415 100644 --- a/command/lease_lookup.go +++ b/command/lease_lookup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/lease_lookup_test.go b/command/lease_lookup_test.go index 536c29c58eee..2c9b81caf5fa 100644 --- a/command/lease_lookup_test.go +++ b/command/lease_lookup_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testLeaseLookupCommand(tb testing.TB) (*cli.MockUi, *LeaseLookupCommand) { diff --git a/command/lease_renew.go b/command/lease_renew.go index aad41d66b795..b0671c379682 100644 --- a/command/lease_renew.go +++ b/command/lease_renew.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/lease_renew_test.go b/command/lease_renew_test.go index c24b812b911a..eac098fe4634 100644 --- a/command/lease_renew_test.go +++ b/command/lease_renew_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testLeaseRenewCommand(tb testing.TB) (*cli.MockUi, *LeaseRenewCommand) { diff --git a/command/lease_revoke.go b/command/lease_revoke.go index 5efd5ecf198b..59a09de597e8 100644 --- a/command/lease_revoke.go +++ b/command/lease_revoke.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/lease_revoke_test.go b/command/lease_revoke_test.go index 261041e2331a..aeb9987e7ddd 100644 --- a/command/lease_revoke_test.go +++ b/command/lease_revoke_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testLeaseRevokeCommand(tb testing.TB) (*cli.MockUi, *LeaseRevokeCommand) { diff --git a/command/list.go b/command/list.go index 028f0d391c6c..6505f76af8c3 100644 --- a/command/list.go +++ b/command/list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/list_test.go b/command/list_test.go index 070184bd91b4..e7a870d7ffb6 100644 --- a/command/list_test.go +++ b/command/list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testListCommand(tb testing.TB) (*cli.MockUi, *ListCommand) { diff --git a/command/log_flags.go b/command/log_flags.go index 5213d06377ef..cbde95d33088 100644 --- a/command/log_flags.go +++ b/command/log_flags.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -15,6 +15,7 @@ import ( // logFlags are the 'log' related flags that can be shared across commands. type logFlags struct { flagCombineLogs bool + flagDisableGatedLogs bool flagLogLevel string flagLogFormat string flagLogFile string @@ -41,6 +42,13 @@ func (f *FlagSet) addLogFlags(l *logFlags) { Hidden: true, }) + f.BoolVar(&BoolVar{ + Name: flagDisableGatedLogs, + Target: &l.flagDisableGatedLogs, + Default: false, + Hidden: true, + }) + f.StringVar(&StringVar{ Name: flagNameLogLevel, Target: &l.flagLogLevel, diff --git a/command/log_flags_test.go b/command/log_flags_test.go index 38bfa52e95a3..1e54397f87fc 100644 --- a/command/log_flags_test.go +++ b/command/log_flags_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/login.go b/command/login.go index c8bc23254c29..6252b7219df8 100644 --- a/command/login.go +++ b/command/login.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/login_test.go b/command/login_test.go index 3d41d8e88147..3f7c01d3c86e 100644 --- a/command/login_test.go +++ b/command/login_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,8 +10,7 @@ import ( "testing" "time" - "github.com/mitchellh/cli" - + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" credToken "github.com/hashicorp/vault/builtin/credential/token" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" diff --git a/command/main.go b/command/main.go index 13fbe2181822..465ec5e6e864 100644 --- a/command/main.go +++ b/command/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -15,10 +15,11 @@ import ( "text/tabwriter" "github.com/fatih/color" + "github.com/hashicorp/cli" + hcpvlib "github.com/hashicorp/vault-hcp-lib" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/token" - colorable "github.com/mattn/go-colorable" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/api/tokenhelper" + "github.com/mattn/go-colorable" ) type VaultUI struct { @@ -134,11 +135,12 @@ func getGlobalFlagValue(arg string) string { } type RunOptions struct { - TokenHelper token.TokenHelper - Stdout io.Writer - Stderr io.Writer - Address string - Client *api.Client + TokenHelper tokenhelper.TokenHelper + HCPTokenHelper hcpvlib.HCPTokenHelper + Stdout io.Writer + Stderr io.Writer + Address string + Client *api.Client } func Run(args []string) int { diff --git a/command/monitor.go b/command/monitor.go index 7545b82da71a..f39ca72a360a 100644 --- a/command/monitor.go +++ b/command/monitor.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,8 +8,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/monitor_test.go b/command/monitor_test.go index 0cc722c98d91..fd1b288fd243 100644 --- a/command/monitor_test.go +++ b/command/monitor_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testMonitorCommand(tb testing.TB) (*cli.MockUi, *MonitorCommand) { diff --git a/command/namespace.go b/command/namespace.go index 18bc6e92eb55..c47b26648c89 100644 --- a/command/namespace.go +++ b/command/namespace.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*NamespaceCommand)(nil) diff --git a/command/namespace_api_lock.go b/command/namespace_api_lock.go index 57b196992c94..4193508ec4fe 100644 --- a/command/namespace_api_lock.go +++ b/command/namespace_api_lock.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/helper/namespace" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/namespace_api_unlock.go b/command/namespace_api_unlock.go index 77e829147afb..0c9cd22eadd9 100644 --- a/command/namespace_api_unlock.go +++ b/command/namespace_api_unlock.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/helper/namespace" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/namespace_create.go b/command/namespace_create.go index 60df834d739a..6499bf2a25c9 100644 --- a/command/namespace_create.go +++ b/command/namespace_create.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/namespace_delete.go b/command/namespace_delete.go index 5c79c35b1050..e7704ca5cd85 100644 --- a/command/namespace_delete.go +++ b/command/namespace_delete.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/namespace_list.go b/command/namespace_list.go index 7dfd257e8065..e8581670edb7 100644 --- a/command/namespace_list.go +++ b/command/namespace_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -40,7 +40,18 @@ Usage: vault namespace list [options] } func (c *NamespaceListCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed information such as namespace ID.", + }) + + return set } func (c *NamespaceListCommand) AutocompleteArgs() complete.Predictor { @@ -104,5 +115,9 @@ func (c *NamespaceListCommand) Run(args []string) int { return 2 } + if c.flagDetailed && Format(c.UI) != "table" { + return OutputData(c.UI, secret.Data["key_info"]) + } + return OutputList(c.UI, secret) } diff --git a/command/namespace_lookup.go b/command/namespace_lookup.go index ee18736633a5..376a0adc419e 100644 --- a/command/namespace_lookup.go +++ b/command/namespace_lookup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/namespace_patch.go b/command/namespace_patch.go index 2a4a6dc699a8..d3868c6134fe 100644 --- a/command/namespace_patch.go +++ b/command/namespace_patch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,8 +9,8 @@ import ( "net/http" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator.go b/command/operator.go index a79f7bff8406..8d918f3492ac 100644 --- a/command/operator.go +++ b/command/operator.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*OperatorCommand)(nil) diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index 5abddb7c980d..b530ada5f0cc 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
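A usage note on the namespace_list.go hunk above (invocation illustrative): with the new -detailed flag and a non-table output format, e.g.

    $ vault namespace list -detailed -format=json

the command returns the raw key_info map, which carries the namespace IDs mentioned in the flag's usage string; with the default table format it still prints the plain key list via OutputList.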
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -14,14 +14,12 @@ import ( "sync" "time" - "golang.org/x/term" - - wrapping "github.com/hashicorp/go-kms-wrapping/v2" - + "github.com/hashicorp/cli" "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" "github.com/hashicorp/go-secure-stdlib/reloadutil" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" cserver "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/helper/metricsutil" @@ -35,9 +33,10 @@ import ( "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/diagnose" "github.com/hashicorp/vault/vault/hcp_link" + "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" "github.com/posener/complete" + "golang.org/x/term" ) const CoreConfigUninitializedErr = "Diagnose cannot attempt this step because core config could not be set." @@ -70,7 +69,7 @@ func (c *OperatorDiagnoseCommand) Synopsis() string { func (c *OperatorDiagnoseCommand) Help() string { helpText := ` -Usage: vault operator diagnose +Usage: vault operator diagnose This command troubleshoots Vault startup issues, such as TLS configuration or auto-unseal. It should be run using the same environment variables and configuration @@ -78,7 +77,7 @@ Usage: vault operator diagnose reproduced. Start diagnose with a configuration file: - + $ vault operator diagnose -config=/etc/vault/config.hcl Perform a diagnostic check while Vault is still running: @@ -204,17 +203,19 @@ func (c *OperatorDiagnoseCommand) RunWithParsedFlags() int { func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error { rloadFuncs := make(map[string][]reloadutil.ReloadFunc) + handlers := newVaultHandlers() + server := &ServerCommand{ // TODO: set up a different one? // In particular, a UI instance that won't output? BaseCommand: c.BaseCommand, // TODO: refactor to a common place? - AuditBackends: auditBackends, - CredentialBackends: credentialBackends, - LogicalBackends: logicalBackends, - PhysicalBackends: physicalBackends, - ServiceRegistrations: serviceRegistrations, + AuditBackends: handlers.auditBackends, + CredentialBackends: handlers.credentialBackends, + LogicalBackends: handlers.logicalBackends, + PhysicalBackends: handlers.physicalBackends, + ServiceRegistrations: handlers.serviceRegistrations, // TODO: other ServerCommand options? 
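+ // NOTE (comment added for clarity; inferred from the removed lines above):
+ // these backends previously came from package-level maps (auditBackends,
+ // credentialBackends, ...); newVaultHandlers() now bundles the same sets,
+ // so diagnose mirrors the real server command's wiring.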
@@ -432,31 +433,30 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error }) sealcontext, sealspan := diagnose.StartSpan(ctx, "Create Vault Server Configuration Seals") - var seals []vault.Seal - var sealConfigError error - barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(server, config, make([]string, 0), make(map[string]string)) - // Check error here + var setSealResponse *SetSealResponse + var err error + var existingSealGenerationInfo *seal.SealGenerationInfo + if config.IsMultisealEnabled() { + existingSealGenerationInfo, err = vault.PhysicalSealGenInfo(sealcontext, *backend) + if err != nil { + diagnose.Fail(sealcontext, fmt.Sprintf("Unable to get Seal generation information from storage: %s.", err.Error())) + goto SEALFAIL + } + } + + setSealResponse, err = setSeal(server, config, make([]string, 0), make(map[string]string), existingSealGenerationInfo, false /* unsealed vault has no partially wrapped paths */) if err != nil { diagnose.Advise(ctx, "For assistance with the seal stanza, see the Vault configuration documentation.") diagnose.Fail(sealcontext, fmt.Sprintf("Seal creation resulted in the following error: %s.", err.Error())) goto SEALFAIL } - if sealConfigError != nil { - diagnose.Fail(sealcontext, "Seal could not be configured: seals may already be initialized.") - goto SEALFAIL - } - for _, seal := range seals { - // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir - // section. - if seal == nil { - continue - } + for _, seal := range setSealResponse.getCreatedSeals() { seal := seal // capture range variable // Ensure that the seal finalizer is called, even if using verify-only defer func(seal *vault.Seal) { - sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType().String()) + sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierSealConfigType().String()) finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "Finalize "+sealType+" Seal") err = (*seal).Finalize(finalizeSealContext) if err != nil { @@ -465,16 +465,26 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error finalizeSealSpan.End() } finalizeSealSpan.End() - }(&seal) + }(seal) } - if barrierSeal == nil { + if setSealResponse.sealConfigError != nil { + diagnose.Fail(sealcontext, "Seal could not be configured: seals may already be initialized.") + } else if setSealResponse.barrierSeal == nil { diagnose.Fail(sealcontext, "Could not create barrier seal. No error was generated, but it is likely that the seal stanza is misconfigured. 
For guidance, see Vault's configuration documentation on the seal stanza.") } SEALFAIL: sealspan.End() + var barrierSeal vault.Seal + var unwrapSeal vault.Seal + + if setSealResponse != nil { + barrierSeal = setSealResponse.barrierSeal + unwrapSeal = setSealResponse.unwrapSeal + } + diagnose.Test(ctx, "Check Transit Seal TLS", func(ctx context.Context) error { var checkSealTransit bool for _, seal := range config.Seals { @@ -531,9 +541,20 @@ SEALFAIL: var secureRandomReader io.Reader // prepare a secure random reader for core randReaderTestName := "Initialize Randomness for Core" - secureRandomReader, err = configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper) + var sources []*configutil.EntropySourcerInfo + if barrierSeal != nil { + for _, sealWrapper := range barrierSeal.GetAccess().GetEnabledSealWrappersByPriority() { + if s, ok := sealWrapper.Wrapper.(entropy.Sourcer); ok { + sources = append(sources, &configutil.EntropySourcerInfo{ + Sourcer: s, + Name: sealWrapper.Name, + }) + } + } + } + secureRandomReader, err = configutil.CreateSecureRandomReaderFunc(config.SharedConfig, sources, server.logger) if err != nil { - return diagnose.SpotError(ctx, randReaderTestName, fmt.Errorf("Could not initialize randomness for core: %w.", err)) + return diagnose.SpotError(ctx, randReaderTestName, fmt.Errorf("could not initialize randomness for core: %w", err)) } diagnose.SpotOk(ctx, randReaderTestName, "") coreConfig = createCoreConfig(server, config, *backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader) @@ -675,7 +696,7 @@ SEALFAIL: if barrierSeal == nil { return fmt.Errorf("Diagnose could not create a barrier seal object.") } - if barrierSeal.BarrierType() == wrapping.WrapperTypeShamir { + if barrierSeal.BarrierSealConfigType() == vault.SealConfigTypeShamir { diagnose.Skipped(ctx, "Skipping barrier encryption test. Only supported for auto-unseal.") return nil } @@ -684,11 +705,25 @@ SEALFAIL: return fmt.Errorf("Diagnose could not create unique UUID for unsealing.") } barrierEncValue := "diagnose-" + barrierUUID - ciphertext, err := barrierWrapper.Encrypt(ctx, []byte(barrierEncValue), nil) - if err != nil { - return fmt.Errorf("Error encrypting with seal barrier: %w.", err) + ciphertext, errMap := barrierSeal.GetAccess().Encrypt(ctx, []byte(barrierEncValue), nil) + if len(errMap) > 0 { + var sealErrors []error + for name, err := range errMap { + sealErrors = append(sealErrors, fmt.Errorf("error encrypting with seal %q: %w", name, err)) + } + if ciphertext == nil { + // Full failure + if len(sealErrors) == 1 { + return sealErrors[0] + } else { + return fmt.Errorf("complete seal encryption failure: %w", errors.Join(sealErrors...)) + } + } else { + // Partial failure + return fmt.Errorf("partial seal encryption failure: %w", errors.Join(sealErrors...)) + } } - plaintext, err := barrierWrapper.Decrypt(ctx, ciphertext, nil) + plaintext, _, err := barrierSeal.GetAccess().Decrypt(ctx, ciphertext, nil) if err != nil { return fmt.Errorf("Error decrypting with seal barrier: %w", err) } diff --git a/command/operator_diagnose_test.go b/command/operator_diagnose_test.go index 2c9a1a0363e3..8528637dc2e4 100644 --- a/command/operator_diagnose_test.go +++ b/command/operator_diagnose_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
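Since multiseal means one Encrypt call can fan out to several wrappers, the error handling above collapses a name-keyed error map into a single wrapped error. A self-contained sketch of the same aggregation pattern (seal names and failures invented):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Hypothetical per-seal failures, keyed by seal wrapper name, mirroring
	// the errMap returned by GetAccess().Encrypt above.
	errMap := map[string]error{
		"awskms":  errors.New("access denied"),
		"transit": errors.New("connection refused"),
	}

	var sealErrors []error
	for name, err := range errMap {
		sealErrors = append(sealErrors, fmt.Errorf("error encrypting with seal %q: %w", name, err))
	}

	// errors.Join (Go 1.20+) wraps every element, so errors.Is/As still see
	// each underlying failure through the combined error.
	joined := fmt.Errorf("complete seal encryption failure: %w", errors.Join(sealErrors...))
	fmt.Println(joined)
}
```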
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !race @@ -13,8 +13,9 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/vault/diagnose" - "github.com/mitchellh/cli" ) func testOperatorDiagnoseCommand(tb testing.TB) *OperatorDiagnoseCommand { @@ -40,7 +41,7 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) { { "diagnose_ok", []string{ - "-config", "./server/test-fixtures/config_diagnose_ok.hcl", + "-config", "./server/test-fixtures/config_diagnose_ok_singleseal.hcl", }, []*diagnose.Result{ { @@ -172,6 +173,138 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) { }, }, }, + { + "diagnose_ok_multiseal", + []string{ + "-config", "./server/test-fixtures/config_diagnose_ok.hcl", + }, + []*diagnose.Result{ + { + Name: "Parse Configuration", + Status: diagnose.OkStatus, + }, + { + Name: "Start Listeners", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "TLS is disabled in a listener config stanza.", + }, + }, + }, + }, + { + Name: "Check Storage", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "Check Service Discovery", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Check Consul Service Discovery TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Service Discovery", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "Create Vault Server Configuration Seals", + // We can't load from storage the existing seal generation info during the test, so we expect an error. 
+ Status: diagnose.ErrorStatus, + }, + { + Name: "Create Core Configuration", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Initialize Randomness for Core", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "HA Storage", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create HA Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check HA Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + }, + }, + { + Name: "Determine Redirect Address", + Status: diagnose.OkStatus, + }, + { + Name: "Check Cluster Address", + Status: diagnose.OkStatus, + }, + { + Name: "Check Core Creation", + Status: diagnose.OkStatus, + }, + { + Name: "Start Listeners", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "TLS is disabled in a listener config stanza.", + }, + }, + }, + }, + { + Name: "Check Autounseal Encryption", + Status: diagnose.ErrorStatus, + Message: "Diagnose could not create a barrier seal object.", + }, + { + Name: "Check Server Before Runtime", + Status: diagnose.OkStatus, + }, + }, + }, { "diagnose_raft_problems", []string{ @@ -478,17 +611,23 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) { for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() - client, closer := testVaultServer(t) - defer closer() - cmd := testOperatorDiagnoseCommand(t) - cmd.client = client + if tc.name == "diagnose_ok" && constants.IsEnterprise { + t.Skip("Test not valid in ENT") + } else if tc.name == "diagnose_ok_multiseal" && !constants.IsEnterprise { + t.Skip("Test not valid in community edition") + } else { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + cmd := testOperatorDiagnoseCommand(t) + cmd.client = client - cmd.Run(tc.args) - result := cmd.diagnose.Finalize(context.Background()) + cmd.Run(tc.args) + result := cmd.diagnose.Finalize(context.Background()) - if err := compareResults(tc.expected, result.Children); err != nil { - t.Fatalf("Did not find expected test results: %v", err) + if err := compareResults(tc.expected, result.Children); err != nil { + t.Fatalf("Did not find expected test results: %v", err) + } } }) } diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go index 6665e8bdbf8c..6a4d7bc9e4d5 100644 --- a/command/operator_generate_root.go +++ b/command/operator_generate_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
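The two fixtures above are edition-specific, and the runner skips whichever one does not apply. A minimal sketch of that gating pattern, with a stand-in for helper/constants.IsEnterprise (which the real tree sets via build tags):

```go
package command_test

import "testing"

// isEnterprise stands in for constants.IsEnterprise.
const isEnterprise = false

func TestEditionGated(t *testing.T) {
	cases := []struct {
		name    string
		entOnly bool
	}{
		{"diagnose_ok", false},          // community-edition fixture
		{"diagnose_ok_multiseal", true}, // enterprise fixture
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			// Skip when the fixture's edition doesn't match the build.
			if tc.entOnly != isEnterprise {
				t.Skip("test not valid in this edition")
			}
			t.Parallel()
			// ... run the command and compare diagnose.Result trees ...
		})
	}
}
```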
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,11 +10,11 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/password" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/sdk/helper/roottoken" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -23,6 +23,7 @@ var ( _ cli.CommandAutocomplete = (*OperatorGenerateRootCommand)(nil) ) +//go:generate enumer -type=generateRootKind -trimprefix=generateRoot type generateRootKind int const ( @@ -49,40 +50,98 @@ type OperatorGenerateRootCommand struct { } func (c *OperatorGenerateRootCommand) Synopsis() string { - return "Generates a new root token" + return "Generates a new root, DR operation, or recovery token" } func (c *OperatorGenerateRootCommand) Help() string { helpText := ` -Usage: vault operator generate-root [options] [KEY] +Usage: vault operator generate-root [options] -init [-otp=...] [-pgp-key=...] + vault operator generate-root [options] [-nonce=... KEY] + vault operator generate-root [options] -decode=... -otp=... + vault operator generate-root [options] -generate-otp + vault operator generate-root [options] -status + vault operator generate-root [options] -cancel - Generates a new root token by combining a quorum of share holders. One of - the following must be provided to start the root token generation: + Generates a new root token by combining a quorum of share holders. - - A base64-encoded one-time-password (OTP) provided via the "-otp" flag. - Use the "-generate-otp" flag to generate a usable value. The resulting - token is XORed with this value when it is returned. Use the "-decode" - flag to output the final value. + This command is unusual, as it is effectively six separate subcommands, + selected via the options -init, -decode, -generate-otp, -status, -cancel, + or the absence of any of the previous five options (which selects the + "provide a key share" form). - - A file containing a PGP key or a keybase username in the "-pgp-key" - flag. The resulting token is encrypted with this public key. + With the -dr-token or -recovery-token options, a DR operation token or a + recovery token is generated instead of a root token - the relevant option + must be included in every form of the generate-root command. - An unseal key may be provided directly on the command line as an argument to - the command. If key is specified as "-", the command will read from stdin. If - a TTY is available, the command will prompt for text. + Form 1 (-init) - Start a token generation: - Generate an OTP code for the final token: + When starting a root or privileged operation token generation, you must + choose one of the following protection methods for how the token will be + returned: - $ vault operator generate-root -generate-otp + - A base64-encoded one-time-password (OTP). The resulting token is XORed + with this value when it is returned. Use the "-decode" form of this + command to output the final value. - Start a root token generation: + The Vault server will generate a suitable OTP for you, and return it: - $ vault operator generate-root -init -otp="..." - $ vault operator generate-root -init -pgp-key="..." + $ vault operator generate-root -init - Enter an unseal key to progress root token generation: + Vault versions before 0.11.2, released in 2018, required you to + generate your own OTP (see the "-generate-otp" form) and pass it in, + but this is no longer necessary. 
The command is still supported for + compatibility, though: - $ vault operator generate-root -otp="..." + $ vault operator generate-root -init -otp="..." + + - A PGP key. The resulting token is encrypted with this public key. + The key may be specified as a path to a file, or a string of the + form "keybase:<username>" to fetch the key from the keybase.io API. + + $ vault operator generate-root -init -pgp-key="..." + + Form 2 (no option) - Enter an unseal key to progress root token generation: + + In the sub-form intended for interactive use, the command will + automatically look up the nonce of the currently active generation + operation, and will prompt for the key to be entered: + + $ vault operator generate-root + + In the sub-form intended for automation, the operation nonce must be + explicitly provided, and the key is provided directly on the command line: + + $ vault operator generate-root -nonce=... KEY + + If key is specified as "-", the command will read from stdin. + + Form 3 (-decode) - Decode a generated token protected with an OTP: + + $ vault operator generate-root -decode=ENCODED_TOKEN -otp=OTP + + If encoded token is specified as "-", the command will read from stdin. + + Form 4 (-generate-otp) - Generate an OTP code for the final token: + + $ vault operator generate-root -generate-otp + + Since Vault 0.11.2, there is no longer any reason to + use this form, as a suitable OTP will be returned as part of the "-init" + command. + + Form 5 (-status) - Get the status of a token generation that is in progress: + + $ vault operator generate-root -status + + This form also returns the length of a correct OTP for the running + version and configuration of Vault. + + Form 6 (-cancel) - Cancel a token generation that is in progress: + + This would be used to remove an in-progress generation operation so that + a new one can be started with different parameters. + + $ vault operator generate-root -cancel ` + c.Flags().Help() return strings.TrimSpace(helpText) @@ -149,7 +208,7 @@ func (c *OperatorGenerateRootCommand) Flags() *FlagSets { Default: false, EnvVar: "", Completion: complete.PredictNothing, - Usage: "Set this flag to do generate root operations on DR Operational " + + Usage: "Set this flag to do generate root operations on DR operation " + "tokens.", }) @@ -159,7 +218,7 @@ func (c *OperatorGenerateRootCommand) Flags() *FlagSets { Default: false, EnvVar: "", Completion: complete.PredictNothing, - Usage: "Set this flag to do generate root operations on Recovery Operational " + + Usage: "Set this flag to do generate root operations on recovery " + "tokens.", }) @@ -182,7 +241,7 @@ func (c *OperatorGenerateRootCommand) Flags() *FlagSets { "public PGP key. This can also be specified as a Keybase username " + "using the format \"keybase:<username>\". When supplied, the generated " + "root token will be encrypted and base64-encoded with the given public " + - "key.", + "key. Must be used with \"-init\".", }) f.StringVar(&StringVar{ @@ -191,8 +250,9 @@ func (c *OperatorGenerateRootCommand) Flags() *FlagSets { Default: "", EnvVar: "", Completion: complete.PredictAnything, - Usage: "Nonce value provided at initialization. The same nonce value " + - "must be provided with each unseal key.", + Usage: "Nonce value returned at initialization. The same nonce value " + + "must be provided with each unseal or recovery key. 
Only needed " + + "when providing an unseal or recovery key.", }) return set diff --git a/command/operator_generate_root_test.go b/command/operator_generate_root_test.go index 1436ab2ddfae..e27592d856a7 100644 --- a/command/operator_generate_root_test.go +++ b/command/operator_generate_root_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !race @@ -13,9 +13,9 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/sdk/helper/xor" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testOperatorGenerateRootCommand(tb testing.TB) (*cli.MockUi, *OperatorGenerateRootCommand) { diff --git a/command/operator_init.go b/command/operator_init.go index 080f5853def5..b4b5de9759b0 100644 --- a/command/operator_init.go +++ b/command/operator_init.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,12 +9,11 @@ import ( "runtime" "strings" + "github.com/hashicorp/cli" + consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" - "github.com/mitchellh/cli" "github.com/posener/complete" - - consulapi "github.com/hashicorp/consul/api" ) var ( diff --git a/command/operator_init_test.go b/command/operator_init_test.go index 06647d727824..73fe4ff59e93 100644 --- a/command/operator_init_test.go +++ b/command/operator_init_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !race @@ -13,10 +13,10 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testOperatorInitCommand(tb testing.TB) (*cli.MockUi, *OperatorInitCommand) { diff --git a/command/operator_key_status.go b/command/operator_key_status.go index 412a00cd20e6..015bd891f75f 100644 --- a/command/operator_key_status.go +++ b/command/operator_key_status.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_key_status_test.go b/command/operator_key_status_test.go index 9f8fbb0f6dc4..ccaac3883081 100644 --- a/command/operator_key_status_test.go +++ b/command/operator_key_status_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorKeyStatusCommand(tb testing.TB) (*cli.MockUi, *OperatorKeyStatusCommand) { diff --git a/command/operator_members.go b/command/operator_members.go index 986313a3201b..83d7a2d4301e 100644 --- a/command/operator_members.go +++ b/command/operator_members.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_migrate.go b/command/operator_migrate.go index 01e8bbb21322..3af73e696382 100644 --- a/command/operator_migrate.go +++ b/command/operator_migrate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/hashicorp/cli" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/hcl" @@ -23,7 +24,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" "github.com/pkg/errors" "github.com/posener/complete" "golang.org/x/sync/errgroup" diff --git a/command/operator_migrate_test.go b/command/operator_migrate_test.go index 3d2843091484..15190b2640f5 100644 --- a/command/operator_migrate_test.go +++ b/command/operator_migrate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -32,10 +32,11 @@ func init() { } func TestMigration(t *testing.T) { + handlers := newVaultHandlers() t.Run("Default", func(t *testing.T) { data := generateData() - fromFactory := physicalBackends["file"] + fromFactory := handlers.physicalBackends["file"] folder := t.TempDir() @@ -51,7 +52,7 @@ func TestMigration(t *testing.T) { t.Fatal(err) } - toFactory := physicalBackends["inmem"] + toFactory := handlers.physicalBackends["inmem"] confTo := map[string]string{} to, err := toFactory(confTo, nil) if err != nil { @@ -72,7 +73,7 @@ func TestMigration(t *testing.T) { t.Run("Concurrent migration", func(t *testing.T) { data := generateData() - fromFactory := physicalBackends["file"] + fromFactory := handlers.physicalBackends["file"] folder := t.TempDir() @@ -88,7 +89,7 @@ func TestMigration(t *testing.T) { t.Fatal(err) } - toFactory := physicalBackends["inmem"] + toFactory := handlers.physicalBackends["inmem"] confTo := map[string]string{} to, err := toFactory(confTo, nil) if err != nil { @@ -110,7 +111,7 @@ func TestMigration(t *testing.T) { t.Run("Start option", func(t *testing.T) { data := generateData() - fromFactory := physicalBackends["inmem"] + fromFactory := handlers.physicalBackends["inmem"] confFrom := map[string]string{} from, err := fromFactory(confFrom, nil) if err != nil { @@ -120,7 +121,7 @@ func TestMigration(t *testing.T) { t.Fatal(err) } - toFactory := physicalBackends["file"] + toFactory := handlers.physicalBackends["file"] folder := t.TempDir() confTo := map[string]string{ "path": folder, @@ -149,7 +150,7 @@ func TestMigration(t *testing.T) { t.Run("Start option (parallel)", func(t *testing.T) { data := generateData() - fromFactory := physicalBackends["inmem"] + fromFactory := handlers.physicalBackends["inmem"] confFrom := map[string]string{} from, err := fromFactory(confFrom, nil) if err != nil { @@ -159,7 +160,7 @@ func TestMigration(t *testing.T) { t.Fatal(err) } - toFactory := physicalBackends["file"] + toFactory := handlers.physicalBackends["file"] folder := t.TempDir() confTo := map[string]string{ "path": folder, @@ -269,7 +270,7 @@ storage_destination "dest_type2" { }) t.Run("DFS Scan", func(t *testing.T) { - s, _ := physicalBackends["inmem"](map[string]string{}, 
nil) + s, _ := handlers.physicalBackends["inmem"](map[string]string{}, nil) data := generateData() data["cc"] = []byte{} diff --git a/command/operator_raft.go b/command/operator_raft.go index 8720b78ce7cb..deaff14cdf2d 100644 --- a/command/operator_raft.go +++ b/command/operator_raft.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*OperatorRaftCommand)(nil) diff --git a/command/operator_raft_autopilot_get_config.go b/command/operator_raft_autopilot_get_config.go index 11d7da87d895..bdeeed6f8c08 100644 --- a/command/operator_raft_autopilot_get_config.go +++ b/command/operator_raft_autopilot_get_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,8 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" "github.com/posener/complete" ) @@ -18,6 +19,7 @@ var ( type OperatorRaftAutopilotGetConfigCommand struct { *BaseCommand + flagDRToken string } func (c *OperatorRaftAutopilotGetConfigCommand) Synopsis() string { @@ -37,6 +39,17 @@ Usage: vault operator raft autopilot get-config func (c *OperatorRaftAutopilotGetConfigCommand) Flags() *FlagSets { set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + return set } @@ -70,10 +83,16 @@ func (c *OperatorRaftAutopilotGetConfigCommand) Run(args []string) int { return 2 } - config, err := client.Sys().RaftAutopilotConfiguration() - if err != nil { - c.UI.Error(err.Error()) - return 2 + var config *api.AutopilotConfig + switch { + case c.flagDRToken != "": + config, err = client.Sys().RaftAutopilotConfigurationWithDRToken(c.flagDRToken) + default: + config, err = client.Sys().RaftAutopilotConfiguration() } + if err != nil { + c.UI.Error(err.Error()) + return 2 + } if config == nil { diff --git a/command/operator_raft_autopilot_set_config.go b/command/operator_raft_autopilot_set_config.go index 0d9a5f336295..39b9b2ddcb6d 100644 --- a/command/operator_raft_autopilot_set_config.go +++ b/command/operator_raft_autopilot_set_config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
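Since a DR secondary cannot authorize the normal call path, the command now branches on -dr-token. A short sketch of driving the same API from client code, using the api methods referenced in the hunk above (the address and token come from the environment; the DR token value is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	// api.DefaultConfig honors VAULT_ADDR and VAULT_TOKEN from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	drToken := "" // placeholder: set to a DR operation token on a DR secondary

	var config *api.AutopilotConfig
	switch {
	case drToken != "":
		config, err = client.Sys().RaftAutopilotConfigurationWithDRToken(drToken)
	default:
		config, err = client.Sys().RaftAutopilotConfiguration()
	}
	if err != nil {
		panic(err)
	}
	if config == nil {
		fmt.Println("no autopilot configuration returned")
		return
	}
	fmt.Printf("cleanup_dead_servers=%v\n", config.CleanupDeadServers)
}
```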
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -26,6 +26,7 @@ type OperatorRaftAutopilotSetConfigCommand struct { flagMinQuorum uint flagServerStabilizationTime time.Duration flagDisableUpgradeMigration BoolPtr + flagDRToken string } func (c *OperatorRaftAutopilotSetConfigCommand) Synopsis() string { @@ -50,36 +51,52 @@ func (c *OperatorRaftAutopilotSetConfigCommand) Flags() *FlagSets { f.BoolPtrVar(&BoolPtrVar{ Name: "cleanup-dead-servers", Target: &c.flagCleanupDeadServers, + Usage: "Controls whether to remove dead servers from the Raft peer list periodically or when a new server joins.", }) f.DurationVar(&DurationVar{ Name: "last-contact-threshold", Target: &c.flagLastContactThreshold, + Usage: "Limit on the amount of time a server can go without leader contact before being considered unhealthy.", }) f.DurationVar(&DurationVar{ Name: "dead-server-last-contact-threshold", Target: &c.flagDeadServerLastContactThreshold, + Usage: "Limit on the amount of time a server can go without leader contact before being considered failed. This takes effect only when cleanup_dead_servers is set.", }) f.Uint64Var(&Uint64Var{ Name: "max-trailing-logs", Target: &c.flagMaxTrailingLogs, + Usage: "Amount of entries in the Raft Log that a server can be behind before being considered unhealthy.", }) f.UintVar(&UintVar{ Name: "min-quorum", Target: &c.flagMinQuorum, + Usage: "Minimum number of servers allowed in a cluster before autopilot can prune dead servers. This should at least be 3.", }) f.DurationVar(&DurationVar{ Name: "server-stabilization-time", Target: &c.flagServerStabilizationTime, + Usage: "Minimum amount of time a server must be in a stable, healthy state before it can be added to the cluster.", }) f.BoolPtrVar(&BoolPtrVar{ Name: "disable-upgrade-migration", Target: &c.flagDisableUpgradeMigration, + Usage: "Whether or not to perform automated version upgrades.", + }) + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", }) return set @@ -137,6 +154,9 @@ func (c *OperatorRaftAutopilotSetConfigCommand) Run(args []string) int { if c.flagDisableUpgradeMigration.IsSet() { data["disable_upgrade_migration"] = c.flagDisableUpgradeMigration.Get() } + if c.flagDRToken != "" { + data["dr_operation_token"] = c.flagDRToken + } secret, err := client.Logical().Write("sys/storage/raft/autopilot/configuration", data) if err != nil { diff --git a/command/operator_raft_autopilot_state.go b/command/operator_raft_autopilot_state.go index 99f188aae78f..4fc4ca4445e8 100644 --- a/command/operator_raft_autopilot_state.go +++ b/command/operator_raft_autopilot_state.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
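The set-config flags rely on tri-state bool targets: only flags the operator actually set are written into the request payload, so an unset flag never clobbers the server-side default. BoolPtrVar is Vault-internal; a minimal stand-in built on the standard flag package:

```go
package main

import (
	"flag"
	"fmt"
	"strconv"
)

// boolPtr records whether the flag was seen at all, mirroring what Vault's
// internal BoolPtrVar exposes through IsSet and Get.
type boolPtr struct{ v *bool }

func (b *boolPtr) String() string {
	if b.v == nil {
		return "unset"
	}
	return strconv.FormatBool(*b.v)
}

func (b *boolPtr) Set(s string) error {
	val, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	b.v = &val
	return nil
}

func (b *boolPtr) IsSet() bool { return b.v != nil }
func (b *boolPtr) Get() bool   { return b.v != nil && *b.v }

func main() {
	var cleanup boolPtr
	fs := flag.NewFlagSet("autopilot", flag.ExitOnError)
	fs.Var(&cleanup, "cleanup-dead-servers", "tri-state bool")
	_ = fs.Parse([]string{"-cleanup-dead-servers=false"})

	data := map[string]interface{}{}
	if cleanup.IsSet() { // an explicit false is still sent
		data["cleanup_dead_servers"] = cleanup.Get()
	}
	fmt.Println(data) // map[cleanup_dead_servers:false]
}
```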
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,8 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" "github.com/posener/complete" ) @@ -19,6 +20,7 @@ var ( type OperatorRaftAutopilotStateCommand struct { *BaseCommand + flagDRToken string } func (c *OperatorRaftAutopilotStateCommand) Synopsis() string { @@ -38,6 +40,17 @@ Usage: vault operator raft autopilot state func (c *OperatorRaftAutopilotStateCommand) Flags() *FlagSets { set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + // The output of the state endpoint contains nested values and is not fit for // the default "table" display format. Override the default display format to // "pretty", both in the flag and in the UI. @@ -83,7 +96,14 @@ func (c *OperatorRaftAutopilotStateCommand) Run(args []string) int { return 2 } - state, err := client.Sys().RaftAutopilotState() + var state *api.AutopilotState + switch { + case c.flagDRToken != "": + state, err = client.Sys().RaftAutopilotStateWithDRToken(c.flagDRToken) + default: + state, err = client.Sys().RaftAutopilotState() + } + if err != nil { c.UI.Error(fmt.Sprintf("Error checking autopilot state: %s", err)) return 2 diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go index 57e14a827edc..aaaaf2891e10 100644 --- a/command/operator_raft_join.go +++ b/command/operator_raft_join.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_listpeers.go b/command/operator_raft_listpeers.go index 4e82c15da98e..b92ab8f0b517 100644 --- a/command/operator_raft_listpeers.go +++ b/command/operator_raft_listpeers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_remove_peer.go b/command/operator_raft_remove_peer.go index 84b516cf6294..fabd9ce30d39 100644 --- a/command/operator_raft_remove_peer.go +++ b/command/operator_raft_remove_peer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_snapshot.go b/command/operator_raft_snapshot.go index 036c6ebae09f..02fc4c2b8a43 100644 --- a/command/operator_raft_snapshot.go +++ b/command/operator_raft_snapshot.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*OperatorRaftSnapshotCommand)(nil) @@ -35,6 +35,10 @@ Usage: vault operator raft snapshot [options] [args] $ vault operator raft snapshot save raft.snap + Inspects a snapshot based on a file: + + $ vault operator raft snapshot inspect raft.snap + Please see the individual subcommand help for detailed usage information. ` diff --git a/command/operator_raft_snapshot_inspect.go b/command/operator_raft_snapshot_inspect.go new file mode 100644 index 000000000000..43c3fb0e4a13 --- /dev/null +++ b/command/operator_raft_snapshot_inspect.go @@ -0,0 +1,568 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "os" + "sort" + "strconv" + "strings" + "text/tabwriter" + + "github.com/hashicorp/cli" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + protoio "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/plugin/pb" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftSnapshotInspectCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftSnapshotInspectCommand)(nil) +) + +type OperatorRaftSnapshotInspectCommand struct { + *BaseCommand + details bool + depth int + filter string +} + +func (c *OperatorRaftSnapshotInspectCommand) Synopsis() string { + return "Inspects raft snapshot" +} + +func (c *OperatorRaftSnapshotInspectCommand) Help() string { + helpText := ` + Usage: vault operator raft snapshot inspect + + Inspects a snapshot file. + + $ vault operator raft snapshot inspect raft.snap + + ` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotInspectCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "details", + Target: &c.details, + Default: true, + Usage: "Provides information about usage for data stored in the snapshot.", + }) + + f.IntVar(&IntVar{ + Name: "depth", + Target: &c.depth, + Default: 2, + Usage: "Can only be used with -details. The key prefix depth used to breakdown KV store data. If set to 0, all keys will be returned. Defaults to 2.", + }) + + f.StringVar(&StringVar{ + Name: "filter", + Target: &c.filter, + Default: "", + Usage: "Can only be used with -details. 
Limits the key breakdown using this prefix filter.", + }) + + return set +} + +func (c *OperatorRaftSnapshotInspectCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftSnapshotInspectCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +type OutputFormat struct { + Meta *MetadataInfo + StatsKV []typeStats + TotalCountKV int + TotalSizeKV int +} + +// SnapshotInfo is used for passing snapshot stat +// information between functions +type SnapshotInfo struct { + Meta MetadataInfo + StatsKV map[string]typeStats + TotalCountKV int + TotalSizeKV int +} + +type MetadataInfo struct { + ID string + Size int64 + Index uint64 + Term uint64 + Version raft.SnapshotVersion +} + +type typeStats struct { + Name string + Count int + Size int +} + +func (c *OperatorRaftSnapshotInspectCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Validate flags + if c.depth < 0 { + c.UI.Error("Depth must be equal to or greater than 0") + return 1 + } + + var file string + args = c.flags.Args() + + switch len(args) { + case 0: + c.UI.Error("Missing FILE argument") + return 1 + case 1: + file = args[0] + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + // Open the file. + f, err := os.Open(file) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening snapshot file: %s", err)) + return 1 + } + defer f.Close() + + // Extract metadata and snapshot info from snapshot file + var info *SnapshotInfo + var meta *raft.SnapshotMeta + info, meta, err = c.Read(hclog.New(nil), f) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading snapshot: %s", err)) + return 1 + } + + if info == nil { + c.UI.Error(fmt.Sprintf("Error calculating snapshot info: %s", err)) + return 1 + } + + // Generate structs for the formatter with information we read in + metaformat := &MetadataInfo{ + ID: meta.ID, + Size: meta.Size, + Index: meta.Index, + Term: meta.Term, + Version: meta.Version, + } + + formattedStatsKV := generateKVStats(*info) + + data := &OutputFormat{ + Meta: metaformat, + StatsKV: formattedStatsKV, + TotalCountKV: info.TotalCountKV, + TotalSizeKV: info.TotalSizeKV, + } + + if Format(c.UI) != "table" { + return OutputData(c.UI, data) + } + + tableData, err := formatTable(data) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + c.UI.Output(tableData) + + return 0 +} + +func (c *OperatorRaftSnapshotInspectCommand) kvEnhance(val *pb.StorageEntry, info *SnapshotInfo, read int) { + if !c.details { + return + } + + if val.Key == "" { + return + } + + // check for whether a filter is specified. if it is, skip + // any keys that don't match. + if len(c.filter) > 0 && !strings.HasPrefix(val.Key, c.filter) { + return + } + + split := strings.Split(val.Key, "/") + + // handle the situation where the key is shorter than + // the specified depth. 
+ actualDepth := c.depth + if c.depth == 0 || c.depth > len(split) { + actualDepth = len(split) + } + + prefix := strings.Join(split[0:actualDepth], "/") + kvs := info.StatsKV[prefix] + if kvs.Name == "" { + kvs.Name = prefix + } + + kvs.Count++ + kvs.Size += read + info.TotalCountKV++ + info.TotalSizeKV += read + info.StatsKV[prefix] = kvs +} + +// Read from snapshot's state.bin and update the SnapshotInfo struct +func (c *OperatorRaftSnapshotInspectCommand) parseState(r io.Reader) (SnapshotInfo, error) { + info := SnapshotInfo{ + StatsKV: make(map[string]typeStats), + } + + protoReader := protoio.NewDelimitedReader(r, math.MaxInt32) + + for { + s := new(pb.StorageEntry) + if err := protoReader.ReadMsg(s); err != nil { + if err == io.EOF { + break + } + return info, err + } + size := protoReader.GetLastReadSize() + c.kvEnhance(s, &info, size) + } + + return info, nil +} + +// Read contents of snapshot. Parse metadata and snapshot info +// Also, verify validity of snapshot +func (c *OperatorRaftSnapshotInspectCommand) Read(logger hclog.Logger, in io.Reader) (*SnapshotInfo, *raft.SnapshotMeta, error) { + // Wrap the reader in a gzip decompressor. + decomp, err := gzip.NewReader(in) + if err != nil { + return nil, nil, fmt.Errorf("failed to decompress snapshot: %v", err) + } + + defer func() { + if decomp == nil { + return + } + + if err := decomp.Close(); err != nil { + logger.Error("Failed to close snapshot decompressor", "error", err) + } + }() + + // Read the archive. + snapshotInfo, metadata, err := c.read(decomp) + if err != nil { + return nil, nil, fmt.Errorf("failed to read snapshot file: %v", err) + } + + if err := concludeGzipRead(decomp); err != nil { + return nil, nil, err + } + + if err := decomp.Close(); err != nil { + return nil, nil, err + } + decomp = nil + return snapshotInfo, metadata, nil +} + +func formatTable(info *OutputFormat) (string, error) { + var b bytes.Buffer + tw := tabwriter.NewWriter(&b, 8, 8, 6, ' ', 0) + + fmt.Fprintf(tw, " ID\t%s", info.Meta.ID) + fmt.Fprintf(tw, "\n Size\t%d", info.Meta.Size) + fmt.Fprintf(tw, "\n Index\t%d", info.Meta.Index) + fmt.Fprintf(tw, "\n Term\t%d", info.Meta.Term) + fmt.Fprintf(tw, "\n Version\t%d", info.Meta.Version) + fmt.Fprintf(tw, "\n") + + if info.StatsKV != nil { + fmt.Fprintf(tw, "\n") + fmt.Fprintln(tw, "\n Key Name\tCount\tSize") + fmt.Fprintf(tw, " %s\t%s\t%s", "----", "----", "----") + + for _, s := range info.StatsKV { + fmt.Fprintf(tw, "\n %s\t%d\t%s", s.Name, s.Count, ByteSize(uint64(s.Size))) + } + + fmt.Fprintf(tw, "\n %s\t%s", "----", "----") + fmt.Fprintf(tw, "\n Total Size\t\t%s", ByteSize(uint64(info.TotalSizeKV))) + } + + if err := tw.Flush(); err != nil { + return b.String(), err + } + + return b.String(), nil +} + +const ( + BYTE = 1 << (10 * iota) + KILOBYTE + MEGABYTE + GIGABYTE + TERABYTE +) + +func ByteSize(bytes uint64) string { + unit := "" + value := float64(bytes) + + switch { + case bytes >= TERABYTE: + unit = "TB" + value = value / TERABYTE + case bytes >= GIGABYTE: + unit = "GB" + value = value / GIGABYTE + case bytes >= MEGABYTE: + unit = "MB" + value = value / MEGABYTE + case bytes >= KILOBYTE: + unit = "KB" + value = value / KILOBYTE + case bytes >= BYTE: + unit = "B" + case bytes == 0: + return "0" + } + + result := strconv.FormatFloat(value, 'f', 1, 64) + result = strings.TrimSuffix(result, ".0") + return result + unit +} + +// sortTypeStats sorts the stat slice by count and then +// alphabetically in the case the counts are equal +func sortTypeStats(stats []typeStats) []typeStats { + // 
sort by count, breaking ties alphabetically + sort.Slice(stats, func(i, j int) bool { + // Sort alphabetically if count is equal + if stats[i].Count == stats[j].Count { + return stats[i].Name < stats[j].Name + } + return stats[i].Count > stats[j].Count + }) + + return stats +} + +// generateKVStats reformats the KV stats to work with +// the output struct that's used to produce the printed +// output the user sees. +func generateKVStats(info SnapshotInfo) []typeStats { + kvLen := len(info.StatsKV) + if kvLen > 0 { + ks := make([]typeStats, 0, kvLen) + + for _, s := range info.StatsKV { + ks = append(ks, s) + } + + ks = sortTypeStats(ks) + + return ks + } + + return nil +} + +// hashList manages a list of filenames and their hashes. +type hashList struct { + hashes map[string]hash.Hash +} + +// newHashList returns a new hashList. +func newHashList() *hashList { + return &hashList{ + hashes: make(map[string]hash.Hash), + } +} + +// Add creates a new hash for the given file. +func (hl *hashList) Add(file string) hash.Hash { + if existing, ok := hl.hashes[file]; ok { + return existing + } + + h := sha256.New() + hl.hashes[file] = h + return h +} + +// Encode takes the current sum of all the hashes and saves the hash list as a +// SHA256SUMS-style text file. +func (hl *hashList) Encode(w io.Writer) error { + for file, h := range hl.hashes { + if _, err := fmt.Fprintf(w, "%x %s\n", h.Sum([]byte{}), file); err != nil { + return err + } + } + return nil +} + +// DecodeAndVerify reads a SHA256SUMS-style text file and checks the results +// against the current sums for all the hashes. +func (hl *hashList) DecodeAndVerify(r io.Reader) error { + // Read the file and make sure everything in there has a matching hash. + seen := make(map[string]struct{}) + s := bufio.NewScanner(r) + for s.Scan() { + sha := make([]byte, sha256.Size) + var file string + if _, err := fmt.Sscanf(s.Text(), "%x %s", &sha, &file); err != nil { + return err + } + + h, ok := hl.hashes[file] + if !ok { + return fmt.Errorf("list missing hash for %q", file) + } + if !bytes.Equal(sha, h.Sum([]byte{})) { + return fmt.Errorf("hash check failed for %q", file) + } + seen[file] = struct{}{} + } + if err := s.Err(); err != nil { + return err + } + + // Make sure everything we had a hash for was seen. + for file := range hl.hashes { + if _, ok := seen[file]; !ok { + return fmt.Errorf("file missing for %q", file) + } + } + + return nil +} + +// read takes a reader and extracts the snapshot metadata and snapshot +// info. It also checks the integrity of the snapshot data. +func (c *OperatorRaftSnapshotInspectCommand) read(in io.Reader) (*SnapshotInfo, *raft.SnapshotMeta, error) { + // Start a new tar reader. + archive := tar.NewReader(in) + + // Create a hash list that we will use to compare with the SHA256SUMS + // file in the archive. + hl := newHashList() + + // Populate the hashes for all the files we expect to see. The check at + // the end will make sure these are all present in the SHA256SUMS file + // and that the hashes match. + metaHash := hl.Add("meta.json") + snapHash := hl.Add("state.bin") + + // Look through the archive for the pieces we care about. + var shaBuffer bytes.Buffer + var snapshotInfo SnapshotInfo + var metadata raft.SnapshotMeta + for { + hdr, err := archive.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, fmt.Errorf("failed reading snapshot: %v", err) + } + + switch hdr.Name { + case "meta.json": + // Previously we used json.Decode to decode the archive stream.
There are + // edge cases in which it doesn't read all the bytes from the stream, even + // though the JSON object is still being parsed properly. Since we + // simultaneously fed everything to metaHash, our hash ended up being + // different from what we calculated when creating the snapshot, which in + // turn made the snapshot verification fail. By explicitly reading the + // whole thing first we ensure that we calculate the correct hash + // independent of how json.Decode works internally. + buf, err := io.ReadAll(io.TeeReader(archive, metaHash)) + if err != nil { + return nil, nil, fmt.Errorf("failed to read snapshot metadata: %v", err) + } + if err := json.Unmarshal(buf, &metadata); err != nil { + return nil, nil, fmt.Errorf("failed to decode snapshot metadata: %v", err) + } + case "state.bin": + // create reader that writes to snapHash what it reads from archive + wrappedReader := io.TeeReader(archive, snapHash) + var err error + snapshotInfo, err = c.parseState(wrappedReader) + if err != nil { + return nil, nil, fmt.Errorf("error parsing snapshot state: %v", err) + } + + case "SHA256SUMS": + if _, err := io.CopyN(&shaBuffer, archive, 10000); err != nil && err != io.EOF { + return nil, nil, fmt.Errorf("failed to read snapshot hashes: %v", err) + } + + case "SHA256SUMS.sealed": + // Add verification of the sealed sum in the future + continue + + default: + return nil, nil, fmt.Errorf("unexpected file %q in snapshot", hdr.Name) + } + } + + // Verify all the hashes. + if err := hl.DecodeAndVerify(&shaBuffer); err != nil { + return nil, nil, fmt.Errorf("failed checking integrity of snapshot: %v", err) + } + + return &snapshotInfo, &metadata, nil +} + +// concludeGzipRead should be invoked after you think you've consumed all of +// the data from the gzip stream. It will error if the stream was corrupt. +// +// The docs for gzip.Reader say: "Clients should treat data returned by Read as +// tentative until they receive the io.EOF marking the end of the data." +func concludeGzipRead(decomp *gzip.Reader) error { + extra, err := io.ReadAll(decomp) // ReadAll consumes the EOF + if err != nil { + return err + } + if len(extra) != 0 { + return fmt.Errorf("%d unread uncompressed bytes remain", len(extra)) + } + return nil +} diff --git a/command/operator_raft_snapshot_inspect_test.go b/command/operator_raft_snapshot_inspect_test.go new file mode 100644 index 000000000000..d70037695606 --- /dev/null +++ b/command/operator_raft_snapshot_inspect_test.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc.
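To make the -depth behavior of kvEnhance concrete, the sketch below reproduces just the prefix-grouping step on invented keys: with depth 2, every key rolls up to its first two path segments before counts and sizes are accumulated.

```go
package main

import (
	"fmt"
	"strings"
)

// prefixAt truncates a key to at most depth path segments, the same rollup
// kvEnhance performs before accumulating counts and sizes; depth 0 keeps
// the whole key.
func prefixAt(key string, depth int) string {
	split := strings.Split(key, "/")
	if depth == 0 || depth > len(split) {
		depth = len(split)
	}
	return strings.Join(split[:depth], "/")
}

func main() {
	keys := []string{ // invented example keys
		"logical/abc/secret/foo",
		"logical/abc/secret/bar",
		"sys/token/id/xyz",
	}
	counts := map[string]int{}
	for _, k := range keys {
		counts[prefixAt(k, 2)]++
	}
	fmt.Println(counts) // map[logical/abc:2 sys/token:1]
}
```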
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/physical" +) + +func testOperatorRaftSnapshotInspectCommand(tb testing.TB) (*cli.MockUi, *OperatorRaftSnapshotInspectCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorRaftSnapshotInspectCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func createSnapshot(tb testing.TB) (*os.File, func(), error) { + // Create new raft backend + r, raftDir := raft.GetRaft(tb, true, false) + defer os.RemoveAll(raftDir) + + // Write some data + for i := 0; i < 100; i++ { + err := r.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + return nil, nil, fmt.Errorf("Error adding data to snapshot %s", err) + } + } + + // Create temporary file to save snapshot to + snap, err := os.CreateTemp("", "temp_snapshot.snap") + if err != nil { + return nil, nil, fmt.Errorf("Error creating temporary file %s", err) + } + + cleanup := func() { + err := os.RemoveAll(snap.Name()) + if err != nil { + tb.Errorf("Error deleting temporary snapshot %s", err) + } + } + + // Save snapshot + err = r.Snapshot(snap, nil) + if err != nil { + return nil, nil, fmt.Errorf("Error saving raft snapshot %s", err) + } + + return snap, cleanup, nil +} + +func TestOperatorRaftSnapshotInspectCommand_Run(t *testing.T) { + t.Parallel() + + file1, cleanup1, err := createSnapshot(t) + if err != nil { + t.Fatalf("Error creating snapshot %s", err) + } + + file2, cleanup2, err := createSnapshot(t) + if err != nil { + t.Fatalf("Error creating snapshot %s", err) + } + + cases := []struct { + name string + args []string + out string + code int + cleanup func() + }{ + { + "too_many_args", + []string{"test.snap", "test"}, + "Too many arguments", + 1, + nil, + }, + { + "default", + []string{file1.Name()}, + "ID bolt-snapshot", + 0, + cleanup1, + }, + { + "all_flags", + []string{"-details", "-depth", "10", "-filter", "key", file2.Name()}, + "Key Name", + 0, + cleanup2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRaftSnapshotInspectCommand(t) + + cmd.client = client + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + + if tc.cleanup != nil { + tc.cleanup() + } + }) + } + }) +} diff --git a/command/operator_raft_snapshot_restore.go b/command/operator_raft_snapshot_restore.go index 6067adca7904..516fba522fe8 100644 --- a/command/operator_raft_snapshot_restore.go +++ b/command/operator_raft_snapshot_restore.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_snapshot_save.go b/command/operator_raft_snapshot_save.go index 2abbb0ad2234..38580ed5e1f5 100644 --- a/command/operator_raft_snapshot_save.go +++ b/command/operator_raft_snapshot_save.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_rekey.go b/command/operator_rekey.go index dde0e5800528..9b4841568281 100644 --- a/command/operator_rekey.go +++ b/command/operator_rekey.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,10 +11,10 @@ import ( "strings" "github.com/fatih/structs" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/password" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_rekey_test.go b/command/operator_rekey_test.go index 570cfe447e69..d8a4ee2537bc 100644 --- a/command/operator_rekey_test.go +++ b/command/operator_rekey_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !race @@ -12,10 +12,9 @@ import ( "strings" "testing" - "github.com/hashicorp/vault/sdk/helper/roottoken" - + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/sdk/helper/roottoken" ) func testOperatorRekeyCommand(tb testing.TB) (*cli.MockUi, *OperatorRekeyCommand) { diff --git a/command/operator_seal.go b/command/operator_seal.go index f52665fd08b1..f390972f2045 100644 --- a/command/operator_seal.go +++ b/command/operator_seal.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_seal_test.go b/command/operator_seal_test.go index 43e150f7e37f..6208d6396328 100644 --- a/command/operator_seal_test.go +++ b/command/operator_seal_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorSealCommand(tb testing.TB) (*cli.MockUi, *OperatorSealCommand) { diff --git a/command/operator_step_down.go b/command/operator_step_down.go index bfa2d893f18f..e8b93acf0759 100644 --- a/command/operator_step_down.go +++ b/command/operator_step_down.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_step_down_test.go b/command/operator_step_down_test.go index fbe07794d297..8cb108be98c9 100644 --- a/command/operator_step_down_test.go +++ b/command/operator_step_down_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorStepDownCommand(tb testing.TB) (*cli.MockUi, *OperatorStepDownCommand) { diff --git a/command/operator_unseal.go b/command/operator_unseal.go index 32d9140900a5..a667f209dcba 100644 --- a/command/operator_unseal.go +++ b/command/operator_unseal.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,9 +9,9 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/password" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_unseal_test.go b/command/operator_unseal_test.go index cb4d19603ba4..42f603e4882c 100644 --- a/command/operator_unseal_test.go +++ b/command/operator_unseal_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,7 +11,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorUnsealCommand(tb testing.TB) (*cli.MockUi, *OperatorUnsealCommand) { diff --git a/command/operator_usage.go b/command/operator_usage.go index 8db538a298c5..e96b0ca33d92 100644 --- a/command/operator_usage.go +++ b/command/operator_usage.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,8 +11,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/ryanuber/columnize" ) @@ -56,7 +56,7 @@ func (c *OperatorUsageCommand) Flags() *FlagSets { f.TimeVar(&TimeVar{ Name: "start-time", - Usage: "Start of report period. Defaults to 'default_reporting_period' before end time.", + Usage: "Start of report period. Defaults to billing start time", Target: &c.flagStartTime, Completion: complete.PredictNothing, Default: time.Time{}, @@ -64,7 +64,7 @@ func (c *OperatorUsageCommand) Flags() *FlagSets { }) f.TimeVar(&TimeVar{ Name: "end-time", - Usage: "End of report period. Defaults to end of last month.", + Usage: "End of report period. Defaults to end of the current month.", Target: &c.flagEndTime, Completion: complete.PredictNothing, Default: time.Time{}, @@ -132,7 +132,7 @@ func (c *OperatorUsageCommand) Run(args []string) int { c.outputTimestamps(resp.Data) out := []string{ - "Namespace path | Distinct entities | Non-Entity tokens | Active clients", + "Namespace path | Entity Clients | Non-Entity clients | Secret syncs | ACME clients | Active clients", } out = append(out, c.namespacesOutput(resp.Data)...) 
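The new Secret syncs and ACME clients columns in this table come from counts keys that older servers do not emit, so (as the parsing changes below show) those keys are read leniently while entity_clients and non_entity_clients remain required. A standalone sketch of that tolerant extraction, with jsonNumberOK reimplemented locally as a stand-in for the helper used here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// jsonNumberOK mirrors the helper used by operator_usage.go: it returns the
// value as an int64 plus whether the key was present and numeric.
func jsonNumberOK(m map[string]interface{}, key string) (int64, bool) {
	n, ok := m[key].(json.Number)
	if !ok {
		return 0, false
	}
	v, err := n.Int64()
	return v, err == nil
}

func main() {
	dec := json.NewDecoder(strings.NewReader(
		`{"entity_clients": 10, "non_entity_clients": 4, "clients": 14}`))
	dec.UseNumber() // keep numbers as json.Number, as the API client does
	var counts map[string]interface{}
	if err := dec.Decode(&counts); err != nil {
		panic(err)
	}

	entities, ok := jsonNumberOK(counts, "entity_clients")
	if !ok {
		panic("missing entity_clients")
	}
	// Optional keys: absent on older servers, so the ok result is ignored.
	syncs, _ := jsonNumberOK(counts, "secret_syncs")
	fmt.Println(entities, syncs) // 10 0
}
```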
@@ -142,6 +142,12 @@ func (c *OperatorUsageCommand) Run(args []string) int { colConfig.Empty = " " // Do not show n/a on intentional blank lines colConfig.Glue = " " c.UI.Output(tableOutput(out, colConfig)) + + // Also, output the warnings returned, if any: + for _, warning := range resp.Warnings { + c.UI.Warn(warning) + } + return 0 } @@ -196,8 +202,9 @@ type UsageResponse struct { entityCount int64 // As per 1.9, the tokenCount field will contain the distinct non-entity // token clients instead of each individual token. - tokenCount int64 - + tokenCount int64 + secretSyncs int64 + acmeCount int64 clientCount int64 } @@ -232,16 +239,22 @@ func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageRes return ret, errors.New("missing counts") } - ret.entityCount, ok = jsonNumberOK(counts, "distinct_entities") + ret.entityCount, ok = jsonNumberOK(counts, "entity_clients") if !ok { - return ret, errors.New("missing distinct_entities") + return ret, errors.New("missing entity_clients") } - ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_tokens") + ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_clients") if !ok { - return ret, errors.New("missing non_entity_tokens") + return ret, errors.New("missing non_entity_clients") } + // don't error if the secret syncs key is missing + ret.secretSyncs, _ = jsonNumberOK(counts, "secret_syncs") + + // don't error if acme clients is missing + ret.acmeCount, _ = jsonNumberOK(counts, "acme_clients") + ret.clientCount, ok = jsonNumberOK(counts, "clients") if !ok { return ret, errors.New("missing clients") @@ -274,8 +287,8 @@ func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []s sortOrder = "2" + val.namespacePath } - formattedLine := fmt.Sprintf("%s | %d | %d | %d", - val.namespacePath, val.entityCount, val.tokenCount, val.clientCount) + formattedLine := fmt.Sprintf("%s | %d | %d | %d | %d | %d", + val.namespacePath, val.entityCount, val.tokenCount, val.secretSyncs, val.acmeCount, val.clientCount) nsOut = append(nsOut, UsageCommandNamespace{ formattedLine: formattedLine, sortOrder: sortOrder, @@ -296,7 +309,7 @@ func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []s func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string { // blank line separating it from namespaces - out := []string{" | | | "} + out := []string{" | | | | | "} total, ok := data["total"].(map[string]interface{}) if !ok { @@ -304,24 +317,30 @@ func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string return out } - entityCount, ok := jsonNumberOK(total, "distinct_entities") + entityCount, ok := jsonNumberOK(total, "entity_clients") if !ok { - c.UI.Error("missing distinct_entities in total") + c.UI.Error("missing entity_clients in total") return out } - tokenCount, ok := jsonNumberOK(total, "non_entity_tokens") + tokenCount, ok := jsonNumberOK(total, "non_entity_clients") if !ok { - c.UI.Error("missing non_entity_tokens in total") + c.UI.Error("missing non_entity_clients in total") return out } + // don't error if secret syncs key is missing + secretSyncs, _ := jsonNumberOK(total, "secret_syncs") + + // don't error if acme clients is missing + acmeCount, _ := jsonNumberOK(total, "acme_clients") + clientCount, ok := jsonNumberOK(total, "clients") if !ok { c.UI.Error("missing clients in total") return out } - out = append(out, fmt.Sprintf("Total | %d | %d | %d", - entityCount, tokenCount, clientCount)) + out = append(out, fmt.Sprintf("Total | %d | %d | %d | %d | %d", + 
entityCount, tokenCount, secretSyncs, acmeCount, clientCount)) return out } diff --git a/command/operator_utilization.go b/command/operator_utilization.go new file mode 100644 index 000000000000..71291cca73cd --- /dev/null +++ b/command/operator_utilization.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/base64" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/mapstructure" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorUtilizationCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorUtilizationCommand)(nil) +) + +type OperatorUtilizationCommand struct { + *BaseCommand + + flagMessage string + flagTodayOnly BoolPtr + flagOutput string +} + +func (c *OperatorUtilizationCommand) Synopsis() string { + return "Generates license utilization reporting bundle" +} + +func (c *OperatorUtilizationCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *OperatorUtilizationCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorUtilizationCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "message", + Target: &c.flagMessage, + Completion: complete.PredictAnything, + Usage: "Provide context about the conditions under which the report was generated and submitted. This message is not included in the license utilization bundle but will be included in the vault server logs.", + }) + + f.BoolPtrVar(&BoolPtrVar{ + Name: "today-only", + Target: &c.flagTodayOnly, + Usage: "To include only today’s snapshot, no historical snapshots. If no snapshots were persisted in the last 24 hrs, it takes a snapshot and exports it to a bundle.", + }) + + f.StringVar(&StringVar{ + Name: "output", + Target: &c.flagOutput, + Completion: complete.PredictAnything, + Usage: "Specifies the output path for the bundle. Defaults to a time-based generated file name.", + }) + + return set +} + +func (c *OperatorUtilizationCommand) Help() string { + helpText := ` +Usage: vault operator utilization [options] + +Produces a bundle of snapshots that contains license utilization data. If no snapshots were persisted in the last 24 hrs, it takes a snapshot and includes it in the bundle to prevent stale data. 
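// [Illustrative sketch, not part of the diff] The operator usage hunks
// above read each counter through a helper called jsonNumberOK, and they
// deliberately discard the "ok" result for the new secret_syncs and
// acme_clients keys so that responses from older servers, which omit those
// keys, still parse. A minimal sketch of such a helper, assuming the count
// values arrive as json.Number (the real helper lives elsewhere in this
// package and may differ):

package sketch

import "encoding/json"

// jsonNumberOK returns the int64 value stored under key, plus whether the
// key was present and actually held a usable number.
func jsonNumberOK(m map[string]interface{}, key string) (int64, bool) {
	val, ok := m[key].(json.Number)
	if !ok {
		return 0, false
	}
	n, err := val.Int64()
	if err != nil {
		return 0, false
	}
	return n, true
}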
+ + To create a license utilization bundle that includes all persisted historical snapshots and has the default bundle name: + + $ vault operator utilization + + To create a license utilization bundle with a message about the bundle (Note: this message is not included in the bundle but only included in server logs): + + $ vault operator utilization -message="Change Control 654987" + + To create a license utilization bundle with only today's snapshot: + + $ vault operator utilization -today-only + + To create a license utilization bundle with a specific name: + + $ vault operator utilization -output="/utilization/reports/latest.json" + +` + c.Flags().Help() + + return helpText +} + +func (c *OperatorUtilizationCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + parsedArgs := f.Args() + if len(parsedArgs) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(parsedArgs))) + return 1 + } + + outputBundleFile, err := getOutputFileName(time.Now().UTC(), c.flagOutput) + if err != nil { + c.UI.Error(fmt.Sprintf("Error during validation: %s", err)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Capture license utilization reporting data + bundleDataBytes, err := c.getManualReportingCensusData(client) + if err != nil { + c.UI.Error(fmt.Sprintf("Error capturing license utilization reporting data: %s", err)) + return 1 + } + + err = os.WriteFile(outputBundleFile, bundleDataBytes, 0o400) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing license utilization reporting data to bundle %q: %s", outputBundleFile, err)) + return 1 + } + + c.UI.Info(fmt.Sprintf("Success! License utilization reporting bundle written to: %s", outputBundleFile)) + return 0 +} + +// getOutputFileName returns the file name of the license utilization reporting bundle ending with .json +// If filename is a path with non-existing parent directory, it creates a new directory to which the file with returned filename is added +func getOutputFileName(inputTime time.Time, flagOutput string) (string, error) { + formattedTime := inputTime.Format(fileFriendlyTimeFormat) + switch len(flagOutput) { + case 0: + flagOutput = fmt.Sprintf("vault-utilization-%s.json", formattedTime) + default: + flagOutput = filepath.Clean(flagOutput) + ext := filepath.Ext(flagOutput) + switch ext { + case "": // it's a directory + flagOutput = filepath.Join(flagOutput, fmt.Sprintf("vault-utilization-%s.json", formattedTime)) + case ".json": + default: + return "", fmt.Errorf("invalid file extension %s, must be .json", ext) + } + } + + // Stat the file to ensure we don't override any existing data. 
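// [Illustrative sketch, not part of the diff] getOutputFileName above
// derives the bundle path from the -output flag: empty means a time-based
// name in the current directory, a path without an extension is treated as
// a directory, and any extension other than .json is rejected. The
// hypothetical helper below compresses just that naming rule, omitting the
// existence checks that follow in the real function:

package sketch

import (
	"fmt"
	"path/filepath"
)

func bundleName(flagOutput, timestamp string) (string, error) {
	defaultName := fmt.Sprintf("vault-utilization-%s.json", timestamp)
	if flagOutput == "" {
		return defaultName, nil
	}
	cleaned := filepath.Clean(flagOutput)
	switch ext := filepath.Ext(cleaned); ext {
	case "": // no extension: treat the flag value as a directory
		return filepath.Join(cleaned, defaultName), nil
	case ".json": // explicit file name, use as-is
		return cleaned, nil
	default:
		return "", fmt.Errorf("invalid file extension %s, must be .json", ext)
	}
}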
+ _, err := os.Stat(flagOutput) + switch { + case os.IsNotExist(err): + case err != nil: + return "", fmt.Errorf("unable to stat file: %s", err) + default: + return "", fmt.Errorf("output file already exists: %s", flagOutput) + } + + // output file does not exist, create the parent directory if it doesn't exist + _, err = os.Stat(filepath.Dir(flagOutput)) + switch { + case os.IsNotExist(err): + err := os.MkdirAll(filepath.Dir(flagOutput), 0o700) + if err != nil { + return "", fmt.Errorf("unable to create output directory: %s", err) + } + case err != nil: + return "", fmt.Errorf("unable to stat directory: %s", err) + } + return flagOutput, nil +} + +func (c *OperatorUtilizationCommand) getManualReportingCensusData(client *api.Client) ([]byte, error) { + data := make(map[string]interface{}) + if c.flagTodayOnly.IsSet() { + data["today_only"] = c.flagTodayOnly.Get() + } + if c.flagMessage != "" { + data["message"] = c.flagMessage + } + secret, err := client.Logical().Write("sys/utilization", data) + if err != nil { + return nil, fmt.Errorf("error getting license utilization reporting data: %w", err) + } + if secret == nil { + return nil, errors.New("no license utilization reporting data available") + } + + var bundleBase64Str string + err = mapstructure.Decode(secret.Data["utilization_bundle"], &bundleBase64Str) + if err != nil { + return nil, err + } + + bundleByteArray, err := base64.StdEncoding.DecodeString(bundleBase64Str) + if err != nil { + return nil, err + } + return bundleByteArray, nil +} diff --git a/command/patch.go b/command/patch.go index 9a4cd5886287..f7b006f0f125 100644 --- a/command/patch.go +++ b/command/patch.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,7 +10,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/patch_test.go b/command/patch_test.go index 410e64465618..357257937309 100644 --- a/command/patch_test.go +++ b/command/patch_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,8 +8,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testPatchCommand(tb testing.TB) (*cli.MockUi, *PatchCommand) { diff --git a/command/path_help.go b/command/path_help.go index 41f3bcee66a6..335de684008f 100644 --- a/command/path_help.go +++ b/command/path_help.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/path_help_test.go b/command/path_help_test.go index eaf4fe8a9f23..33c06b4fe553 100644 --- a/command/path_help_test.go +++ b/command/path_help_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPathHelpCommand(tb testing.TB) (*cli.MockUi, *PathHelpCommand) { diff --git a/command/pgp_test.go b/command/pgp_test.go index f37e488ed686..df14d895f7ef 100644 --- a/command/pgp_test.go +++ b/command/pgp_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -13,11 +13,10 @@ import ( "sort" "testing" - "github.com/hashicorp/vault/helper/pgpkeys" - "github.com/hashicorp/vault/vault" - "github.com/ProtonMail/go-crypto/openpgp" "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/vault" ) func getPubKeyFiles(t *testing.T) (string, []string, error) { diff --git a/command/pki.go b/command/pki.go index 89770fa44851..8b90a6aa7b38 100644 --- a/command/pki.go +++ b/command/pki.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*PKICommand)(nil) diff --git a/command/pki_health_check.go b/command/pki_health_check.go index c44245ccda81..d70e3ecb0e62 100644 --- a/command/pki_health_check.go +++ b/command/pki_health_check.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,10 +9,9 @@ import ( "os" "strings" - "github.com/hashicorp/vault/command/healthcheck" - "github.com/ghodss/yaml" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command/healthcheck" "github.com/posener/complete" "github.com/ryanuber/columnize" ) @@ -220,6 +219,8 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { executor.AddCheck(healthcheck.NewEnableAutoTidyCheck()) executor.AddCheck(healthcheck.NewTidyLastRunCheck()) executor.AddCheck(healthcheck.NewTooManyCertsCheck()) + executor.AddCheck(healthcheck.NewEnableAcmeIssuance()) + executor.AddCheck(healthcheck.NewAllowAcmeHeaders()) if c.flagDefaultDisabled { executor.DefaultEnabled = false } diff --git a/command/pki_health_check_test.go b/command/pki_health_check_test.go index d44f789f5150..d63574a270a9 100644 --- a/command/pki_health_check_test.go +++ b/command/pki_health_check_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,14 +7,14 @@ import ( "bytes" "encoding/json" "fmt" + "net/url" "strings" "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/healthcheck" - - "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) @@ -30,7 +30,7 @@ func TestPKIHC_AllGood(t *testing.T) { AuditNonHMACRequestKeys: healthcheck.VisibleReqParams, AuditNonHMACResponseKeys: healthcheck.VisibleRespParams, PassthroughRequestHeaders: []string{"If-Modified-Since"}, - AllowedResponseHeaders: []string{"Last-Modified"}, + AllowedResponseHeaders: []string{"Last-Modified", "Replay-Nonce", "Link", "Location"}, MaxLeaseTTL: "36500d", }, }); err != nil { @@ -69,6 +69,21 @@ func TestPKIHC_AllGood(t *testing.T) { t.Fatalf("failed to run tidy: %v", err) } + path, err := url.Parse(client.Address()) + require.NoError(t, err, "failed parsing client address") + + if _, err := client.Logical().Write("pki/config/cluster", map[string]interface{}{ + "path": path.JoinPath("/v1/", "pki/").String(), + }); err != nil { + t.Fatalf("failed to update local cluster: %v", err) + } + + if _, err := client.Logical().Write("pki/config/acme", map[string]interface{}{ + "enabled": "true", + }); err != nil { + t.Fatalf("failed to update acme config: %v", err) + } + _, _, results := execPKIHC(t, client, true) validateExpectedPKIHC(t, expectedAllGood, results) @@ -345,6 +360,11 @@ var expectedAllGood = map[string][]map[string]interface{}{ "status": "ok", }, }, + "allow_acme_headers": { + { + "status": "ok", + }, + }, "allow_if_modified_since": { { "status": "ok", @@ -355,6 +375,11 @@ var expectedAllGood = map[string][]map[string]interface{}{ "status": "ok", }, }, + "enable_acme_issuance": { + { + "status": "ok", + }, + }, "enable_auto_tidy": { { "status": "ok", @@ -406,6 +431,11 @@ var expectedAllBad = map[string][]map[string]interface{}{ "status": "critical", }, }, + "allow_acme_headers": { + { + "status": "not_applicable", + }, + }, "allow_if_modified_since": { { "status": "informational", @@ -503,6 +533,11 @@ var expectedAllBad = map[string][]map[string]interface{}{ "status": "informational", }, }, + "enable_acme_issuance": { + { + "status": "not_applicable", + }, + }, "enable_auto_tidy": { { "status": "informational", @@ -554,8 +589,18 @@ var expectedEmptyWithIssuer = map[string][]map[string]interface{}{ "status": "ok", }, }, + "allow_acme_headers": { + { + "status": "not_applicable", + }, + }, "allow_if_modified_since": nil, "audit_visibility": nil, + "enable_acme_issuance": { + { + "status": "not_applicable", + }, + }, "enable_auto_tidy": { { "status": "informational", @@ -598,8 +643,18 @@ var expectedNoPerm = map[string][]map[string]interface{}{ "status": "critical", }, }, + "allow_acme_headers": { + { + "status": "insufficient_permissions", + }, + }, "allow_if_modified_since": nil, "audit_visibility": nil, + "enable_acme_issuance": { + { + "status": "insufficient_permissions", + }, + }, "enable_auto_tidy": { { "status": "insufficient_permissions", diff --git a/command/pki_issue_intermediate.go b/command/pki_issue_intermediate.go index fe16fdaaca91..7545b22cc7dc 100644 --- a/command/pki_issue_intermediate.go +++ b/command/pki_issue_intermediate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/pki_issue_intermediate_test.go b/command/pki_issue_intermediate_test.go index 58f9e6271105..cb66d45e7c5c 100644 --- a/command/pki_issue_intermediate_test.go +++ b/command/pki_issue_intermediate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/pki_list_intermediate.go b/command/pki_list_intermediate.go index c62c58080691..7314e671bceb 100644 --- a/command/pki_list_intermediate.go +++ b/command/pki_list_intermediate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,9 +9,8 @@ import ( "strconv" "strings" - "github.com/hashicorp/vault/api" - "github.com/ghodss/yaml" + "github.com/hashicorp/vault/api" "github.com/ryanuber/columnize" ) diff --git a/command/pki_list_intermediate_test.go b/command/pki_list_intermediate_test.go index d494c193387a..5abfabd55994 100644 --- a/command/pki_list_intermediate_test.go +++ b/command/pki_list_intermediate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go index 852c0c0f1d24..7501d4c8622a 100644 --- a/command/pki_reissue_intermediate.go +++ b/command/pki_reissue_intermediate.go @@ -1,21 +1,17 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" "crypto/x509" "encoding/hex" "fmt" "io" - "net" - "net/url" "os" "strings" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/posener/complete" ) @@ -117,6 +113,10 @@ func (c *PKIReIssueCACommand) Run(args []string) int { } templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef) + if err != nil { + c.UI.Error(fmt.Sprintf("Error fetching parsing template certificate: %v", err)) + return 1 + } data := updateTemplateWithData(templateData, userData) return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) @@ -150,13 +150,13 @@ func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, // Generate Certificate Signing Parameters templateData = map[string]interface{}{ "common_name": certificate.Subject.CommonName, - "alt_names": makeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), - "ip_sans": makeIpAddressCommaSeparatedString(certificate.IPAddresses), - "uri_sans": makeUriCommaSeparatedString(certificate.URIs), + "alt_names": certutil.MakeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": certutil.MakeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": certutil.MakeUriCommaSeparatedString(certificate.URIs), // other_sans (string: "") - Specifies custom OID/UTF8-string SANs. These must match values specified on the role in allowed_other_sans (see role creation for allowed_other_sans globbing rules). The format is the same as OpenSSL: ;: where the only current valid type is UTF8. This can be a comma-delimited list or a JSON string slice. 
// Punting on Other_SANs, shouldn't really be on CAs - "signature_bits": findSignatureBits(certificate.SignatureAlgorithm), - "exclude_cn_from_sans": determineExcludeCnFromSans(certificate), + "signature_bits": certutil.FindSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": certutil.DetermineExcludeCnFromCertSans(certificate), "ou": certificate.Subject.OrganizationalUnit, "organization": certificate.Subject.Organization, "country": certificate.Subject.Country, @@ -168,7 +168,7 @@ func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), "max_path_length": certificate.MaxPathLen, "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), - "use_pss": isPSS(certificate.SignatureAlgorithm), + "use_pss": certutil.IsPSS(certificate.SignatureAlgorithm), } if useExistingKey { @@ -178,116 +178,9 @@ func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, } templateData["key_ref"] = keyRef } else { - templateData["key_type"] = getKeyType(certificate.PublicKeyAlgorithm.String()) - templateData["key_bits"] = findBitLength(certificate.PublicKey) + templateData["key_type"] = certutil.GetKeyType(certificate.PublicKeyAlgorithm.String()) + templateData["key_bits"] = certutil.FindBitLength(certificate.PublicKey) } return templateData, nil } - -func isPSS(algorithm x509.SignatureAlgorithm) bool { - switch algorithm { - case x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS, x509.SHA256WithRSAPSS: - return true - default: - return false - } -} - -func makeAltNamesCommaSeparatedString(names []string, emails []string) string { - return strings.Join(names, ",") + "," + strings.Join(emails, ",") -} - -func makeUriCommaSeparatedString(uris []*url.URL) string { - stringAddresses := make([]string, len(uris)) - for i, uri := range uris { - stringAddresses[i] = uri.String() - } - return strings.Join(stringAddresses, ",") -} - -func makeIpAddressCommaSeparatedString(addresses []net.IP) string { - stringAddresses := make([]string, len(addresses)) - for i, address := range addresses { - stringAddresses[i] = address.String() - } - return strings.Join(stringAddresses, ",") -} - -func determineExcludeCnFromSans(certificate x509.Certificate) bool { - cn := certificate.Subject.CommonName - if cn == "" { - return false - } - - emails := certificate.EmailAddresses - for _, email := range emails { - if email == cn { - return false - } - } - - dnses := certificate.DNSNames - for _, dns := range dnses { - if dns == cn { - return false - } - } - - return true -} - -func findBitLength(publicKey any) int { - if publicKey == nil { - return 0 - } - switch pub := publicKey.(type) { - case *rsa.PublicKey: - return pub.N.BitLen() - case *ecdsa.PublicKey: - switch pub.Curve { - case elliptic.P224(): - return 224 - case elliptic.P256(): - return 256 - case elliptic.P384(): - return 384 - case elliptic.P521(): - return 521 - default: - return 0 - } - default: - return 0 - } -} - -func findSignatureBits(algo x509.SignatureAlgorithm) int { - switch algo { - case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1: - return -1 - case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256, x509.SHA256WithRSAPSS: - return 256 - case x509.SHA384WithRSA, x509.ECDSAWithSHA384, x509.SHA384WithRSAPSS: - return 384 - case x509.SHA512WithRSA, x509.SHA512WithRSAPSS, x509.ECDSAWithSHA512: - return 512 - case x509.PureEd25519: - return 0 - default: - return -1 - } -} 
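// [Illustrative sketch, not part of the diff] The helpers deleted below
// (isPSS, findSignatureBits, findBitLength, getKeyType, and the SAN
// string builders) are not removed outright: the hunks earlier in this
// file now call exported equivalents in sdk/helper/certutil. A small
// usage sketch of those exported helpers, with the expected results taken
// from the deleted function bodies:

package main

import (
	"crypto/x509"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	fmt.Println(certutil.FindSignatureBits(x509.SHA256WithRSA)) // 256
	fmt.Println(certutil.IsPSS(x509.SHA384WithRSAPSS))          // true
	fmt.Println(certutil.GetKeyType("ECDSA"))                   // "ec"
}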
- -func getKeyType(goKeyType string) string { - switch goKeyType { - case "RSA": - return "rsa" - case "ECDSA": - return "ec" - case "Ed25519": - return "ed25519" - default: - return "" - } -} diff --git a/command/pki_reissue_intermediate_test.go b/command/pki_reissue_intermediate_test.go index e485f04d322b..45657fe11990 100644 --- a/command/pki_reissue_intermediate_test.go +++ b/command/pki_reissue_intermediate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/pki_verify_sign.go b/command/pki_verify_sign.go index b5a864f12c9f..f6d8dc93cd35 100644 --- a/command/pki_verify_sign.go +++ b/command/pki_verify_sign.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -11,10 +11,9 @@ import ( "strconv" "strings" - "github.com/hashicorp/vault/command/healthcheck" - "github.com/ghodss/yaml" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/healthcheck" "github.com/ryanuber/columnize" ) diff --git a/command/pki_verify_sign_test.go b/command/pki_verify_sign_test.go index 3f8986a5b6b1..4001aadbc92d 100644 --- a/command/pki_verify_sign_test.go +++ b/command/pki_verify_sign_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/plugin.go b/command/plugin.go index ca55a4bf7317..862b55bb046b 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*PluginCommand)(nil) diff --git a/command/plugin_deregister.go b/command/plugin_deregister.go index 86b329f1063d..1f1e4360acd5 100644 --- a/command/plugin_deregister.go +++ b/command/plugin_deregister.go @@ -1,15 +1,17 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "context" "fmt" + "net/http" "strings" + "github.com/hashicorp/cli" semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -83,18 +85,16 @@ func (c *PluginDeregisterCommand) Run(args []string) int { var pluginNameRaw, pluginTypeRaw string args = f.Args() - switch len(args) { - case 0: - c.UI.Error("Not enough arguments (expected 1, or 2, got 0)") + positionalArgsCount := len(args) + switch positionalArgsCount { + case 0, 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", positionalArgsCount)) return 1 - case 1: - pluginTypeRaw = "unknown" - pluginNameRaw = args[0] case 2: pluginTypeRaw = args[0] pluginNameRaw = args[1] default: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, or 2, got %d)", len(args))) + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", positionalArgsCount)) return 1 } @@ -118,7 +118,33 @@ func (c *PluginDeregisterCommand) Run(args []string) int { } } - if err := client.Sys().DeregisterPlugin(&api.DeregisterPluginInput{ + // The deregister endpoint returns 200 if the plugin doesn't exist, so first + // try fetching the plugin to help improve info printed to the user. + // 404 => Return early with a descriptive message. + // Other error => Continue attempting to deregister the plugin anyway. 
+ // Plugin exists but is builtin => Error early. + // Otherwise => If deregister succeeds, we can report that the plugin really + // was deregistered (and not just already absent). + var pluginExists bool + if info, err := client.Sys().GetPluginWithContext(context.Background(), &api.GetPluginInput{ + Name: pluginName, + Type: pluginType, + Version: c.flagPluginVersion, + }); err != nil { + if respErr, ok := err.(*api.ResponseError); ok && respErr.StatusCode == http.StatusNotFound { + c.UI.Output(fmt.Sprintf("Plugin %q (type: %q, version %q) does not exist in the catalog", pluginName, pluginType, c.flagPluginVersion)) + return 0 + } + // Best-effort check, continue trying to deregister. + } else if info != nil { + if info.Builtin { + c.UI.Error(fmt.Sprintf("Plugin %q (type: %q) is a builtin plugin and cannot be deregistered", pluginName, pluginType)) + return 2 + } + pluginExists = true + } + + if err := client.Sys().DeregisterPluginWithContext(context.Background(), &api.DeregisterPluginInput{ Name: pluginName, Type: pluginType, Version: c.flagPluginVersion, @@ -127,6 +153,10 @@ func (c *PluginDeregisterCommand) Run(args []string) int { return 2 } - c.UI.Output(fmt.Sprintf("Success! Deregistered plugin (if it was registered): %s", pluginName)) + if pluginExists { + c.UI.Output(fmt.Sprintf("Success! Deregistered %s plugin: %s", pluginType, pluginName)) + } else { + c.UI.Output(fmt.Sprintf("Success! Deregistered %s plugin (if it was registered): %s", pluginType, pluginName)) + } return 0 } diff --git a/command/plugin_deregister_test.go b/command/plugin_deregister_test.go index b05644e46d4f..46e52df7979a 100644 --- a/command/plugin_deregister_test.go +++ b/command/plugin_deregister_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,10 +7,10 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" ) func testPluginDeregisterCommand(tb testing.TB) (*cli.MockUi, *PluginDeregisterCommand) { @@ -35,7 +35,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { }{ { "not_enough_args", - nil, + []string{"foo"}, "Not enough arguments", 1, }, @@ -80,8 +80,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -109,7 +108,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Deregistered plugin (if it was registered): " + expected := "Success! 
Deregistered auth plugin: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) @@ -138,8 +137,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration with version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -159,7 +157,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Deregistered plugin (if it was registered): " + expected := "Success! Deregistered auth plugin: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) @@ -186,8 +184,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration with missing version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -206,7 +203,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Deregistered plugin (if it was registered): " + expected := "does not exist in the catalog" combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) @@ -230,6 +227,28 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { } }) + t.Run("deregister builtin", func(t *testing.T) { + t.Parallel() + + pluginDir := corehelpers.MakeTestPluginDir(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + expected := "is a builtin plugin" + if code := cmd.Run([]string{ + consts.PluginTypeCredential.String(), + "github", + }); code != 2 { + t.Errorf("expected %d to be %d", code, 2) + } else if !strings.Contains(ui.ErrorWriter.String(), expected) { + t.Errorf("expected %q to contain %q", ui.ErrorWriter.String(), expected) + } + }) + t.Run("communication_failure", func(t *testing.T) { t.Parallel() diff --git a/command/plugin_info.go b/command/plugin_info.go index 1fa9555ba9c2..e47a23c66519 100644 --- a/command/plugin_info.go +++ b/command/plugin_info.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -77,23 +77,19 @@ func (c *PluginInfoCommand) Run(args []string) int { var pluginNameRaw, pluginTypeRaw string args = f.Args() + positionalArgsCount := len(args) switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1 or 2, got %d)", len(args))) + case positionalArgsCount < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", positionalArgsCount)) return 1 - case len(args) > 2: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1 or 2, got %d)", len(args))) + case positionalArgsCount > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", positionalArgsCount)) return 1 - - // These cases should come after invalid cases have been checked - case len(args) == 1: - pluginTypeRaw = "unknown" - pluginNameRaw = args[0] - case len(args) == 2: - pluginTypeRaw = args[0] - pluginNameRaw = args[1] } + pluginTypeRaw = args[0] + pluginNameRaw = args[1] + client, err := c.Client() if err != nil { c.UI.Error(err.Error()) @@ -126,6 +122,8 @@ func (c *PluginInfoCommand) Run(args []string) int { "args": resp.Args, "builtin": resp.Builtin, "command": resp.Command, + "oci_image": resp.OCIImage, + "runtime": resp.Runtime, "name": resp.Name, "sha256": resp.SHA256, "deprecation_status": resp.DeprecationStatus, diff --git a/command/plugin_info_test.go b/command/plugin_info_test.go index 921014cec538..58525312d71b 100644 --- a/command/plugin_info_test.go +++ b/command/plugin_info_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,11 +7,11 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" ) func testPluginInfoCommand(tb testing.TB) (*cli.MockUi, *PluginInfoCommand) { @@ -34,6 +34,12 @@ func TestPluginInfoCommand_Run(t *testing.T) { out string code int }{ + { + "not_enough_args", + []string{"foo"}, + "Not enough arguments", + 1, + }, { "too_many_args", []string{"foo", "bar", "fizz"}, @@ -79,8 +85,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("default", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -110,8 +115,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("version flag", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -156,8 +160,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("field", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() diff --git a/command/plugin_list.go b/command/plugin_list.go index f1b0e5ebfa04..28714adf4f5b 100644 --- a/command/plugin_list.go +++ 
b/command/plugin_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -160,9 +160,9 @@ func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, plu } func (c *PluginListCommand) detailedResponse(plugins *api.ListPluginsResponse) []string { - out := []string{"Name | Type | Version | Deprecation Status"} + out := []string{"Name | Type | Version | Container | Deprecation Status"} for _, plugin := range plugins.Details { - out = append(out, fmt.Sprintf("%s | %s | %s | %s", plugin.Name, plugin.Type, plugin.Version, plugin.DeprecationStatus)) + out = append(out, fmt.Sprintf("%s | %s | %s | %v | %s", plugin.Name, plugin.Type, plugin.Version, plugin.OCIImage != "", plugin.DeprecationStatus)) } return out diff --git a/command/plugin_list_test.go b/command/plugin_list_test.go index edae76558f37..4ad37868b2c9 100644 --- a/command/plugin_list_test.go +++ b/command/plugin_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPluginListCommand(tb testing.TB) (*cli.MockUi, *PluginListCommand) { diff --git a/command/plugin_register.go b/command/plugin_register.go index e9d2e5b7c677..d124b38b8917 100644 --- a/command/plugin_register.go +++ b/command/plugin_register.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -20,10 +20,13 @@ var ( type PluginRegisterCommand struct { *BaseCommand - flagArgs []string - flagCommand string - flagSHA256 string - flagVersion string + flagArgs []string + flagCommand string + flagSHA256 string + flagVersion string + flagOCIImage string + flagRuntime string + flagEnv []string } func (c *PluginRegisterCommand) Synopsis() string { @@ -64,8 +67,8 @@ func (c *PluginRegisterCommand) Flags() *FlagSets { Name: "args", Target: &c.flagArgs, Completion: complete.PredictAnything, - Usage: "Arguments to pass to the plugin when starting. Separate " + - "multiple arguments with a comma.", + Usage: "Argument to pass to the plugin when starting. This " + + "flag can be specified multiple times to specify multiple args.", }) f.StringVar(&StringVar{ @@ -73,21 +76,44 @@ func (c *PluginRegisterCommand) Flags() *FlagSets { Target: &c.flagCommand, Completion: complete.PredictAnything, Usage: "Command to spawn the plugin. This defaults to the name of the " + - "plugin if unspecified.", + "plugin if both oci_image and command are unspecified.", }) f.StringVar(&StringVar{ Name: "sha256", Target: &c.flagSHA256, Completion: complete.PredictAnything, - Usage: "SHA256 of the plugin binary. This is required for all plugins.", + Usage: "SHA256 of the plugin binary or the oci_image provided. This is required for all plugins.", }) f.StringVar(&StringVar{ Name: "version", Target: &c.flagVersion, Completion: complete.PredictAnything, - Usage: "Semantic version of the plugin. Optional.", + Usage: "Semantic version of the plugin. 
Used as the tag when specifying oci_image, but with any leading 'v' trimmed. Optional.", + }) + + f.StringVar(&StringVar{ + Name: "oci_image", + Target: &c.flagOCIImage, + Completion: complete.PredictAnything, + Usage: "OCI image to run. If specified, setting command, args, and env will update the " + + "container's entrypoint, args, and environment variables (append-only) respectively.", + }) + + f.StringVar(&StringVar{ + Name: "runtime", + Target: &c.flagRuntime, + Completion: complete.PredictAnything, + Usage: "Vault plugin runtime to use if oci_image is specified.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "env", + Target: &c.flagEnv, + Completion: complete.PredictAnything, + Usage: "Environment variables to set for the plugin when starting. This " + + "flag can be specified multiple times to specify multiple environment variables.", }) return set @@ -145,17 +171,20 @@ func (c *PluginRegisterCommand) Run(args []string) int { pluginName := strings.TrimSpace(pluginNameRaw) command := c.flagCommand - if command == "" { + if command == "" && c.flagOCIImage == "" { command = pluginName } if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ - Name: pluginName, - Type: pluginType, - Args: c.flagArgs, - Command: command, - SHA256: c.flagSHA256, - Version: c.flagVersion, + Name: pluginName, + Type: pluginType, + Args: c.flagArgs, + Command: command, + SHA256: c.flagSHA256, + Version: c.flagVersion, + OCIImage: c.flagOCIImage, + Runtime: c.flagRuntime, + Env: c.flagEnv, }); err != nil { c.UI.Error(fmt.Sprintf("Error registering plugin %s: %s", pluginName, err)) return 2 diff --git a/command/plugin_register_test.go b/command/plugin_register_test.go index eccd5c1f0014..8d04b7733e48 100644 --- a/command/plugin_register_test.go +++ b/command/plugin_register_test.go @@ -1,18 +1,20 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "encoding/json" + "fmt" "reflect" "sort" "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" ) func testPluginRegisterCommand(tb testing.TB) (*cli.MockUi, *PluginRegisterCommand) { @@ -83,8 +85,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -132,8 +133,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration with version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -229,3 +229,107 @@ func TestPluginRegisterCommand_Run(t *testing.T) { assertNoTabs(t, cmd) }) } + +// TestFlagParsing ensures that flags passed to vault plugin register correctly +// translate into the expected JSON body and request path. 
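// [Illustrative sketch, not part of the diff] TestFlagParsing below relies
// on mockClient and recordingRoundTripper, which are defined elsewhere in
// this package. The idea is an http.RoundTripper that never reaches a real
// server and instead records the request path and body for assertions; a
// minimal sketch of that shape (the real helpers may differ):

package sketch

import (
	"io"
	"net/http"
	"strings"
)

type recordingRoundTripper struct {
	path string
	body []byte
}

func (r *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	r.path = req.URL.Path
	if req.Body != nil {
		r.body, _ = io.ReadAll(req.Body)
	}
	// Return an empty-but-successful response so the command under test
	// treats the request as having succeeded.
	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       io.NopCloser(strings.NewReader("{}")),
	}, nil
}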
+func TestFlagParsing(t *testing.T) { + for name, tc := range map[string]struct { + pluginType api.PluginType + name string + command string + ociImage string + runtime string + version string + sha256 string + args []string + env []string + expectedPayload string + }{ + "minimal": { + pluginType: api.PluginTypeUnknown, + name: "foo", + sha256: "abc123", + expectedPayload: `{"type":"unknown","command":"foo","sha256":"abc123"}`, + }, + "full": { + pluginType: api.PluginTypeCredential, + name: "name", + command: "cmd", + ociImage: "image", + runtime: "runtime", + version: "v1.0.0", + sha256: "abc123", + args: []string{"--a=b", "--b=c", "positional"}, + env: []string{"x=1", "y=2"}, + expectedPayload: `{"type":"auth","args":["--a=b","--b=c","positional"],"command":"cmd","sha256":"abc123","version":"v1.0.0","oci_image":"image","runtime":"runtime","env":["x=1","y=2"]}`, + }, + "command remains empty if oci_image specified": { + pluginType: api.PluginTypeCredential, + name: "name", + ociImage: "image", + sha256: "abc123", + expectedPayload: `{"type":"auth","sha256":"abc123","oci_image":"image"}`, + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + ui, cmd := testPluginRegisterCommand(t) + var requestLogger *recordingRoundTripper + cmd.client, requestLogger = mockClient(t) + + var args []string + if tc.command != "" { + args = append(args, "-command="+tc.command) + } + if tc.ociImage != "" { + args = append(args, "-oci_image="+tc.ociImage) + } + if tc.runtime != "" { + args = append(args, "-runtime="+tc.runtime) + } + if tc.sha256 != "" { + args = append(args, "-sha256="+tc.sha256) + } + if tc.version != "" { + args = append(args, "-version="+tc.version) + } + for _, arg := range tc.args { + args = append(args, "-args="+arg) + } + for _, env := range tc.env { + args = append(args, "-env="+env) + } + if tc.pluginType != api.PluginTypeUnknown { + args = append(args, tc.pluginType.String()) + } + args = append(args, tc.name) + t.Log(args) + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d\nstdout: %s\nstderr: %s", code, exp, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + actual := &api.RegisterPluginInput{} + expected := &api.RegisterPluginInput{} + err := json.Unmarshal(requestLogger.body, actual) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(tc.expectedPayload), expected) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected: %s\ngot: %s", tc.expectedPayload, requestLogger.body) + } + expectedPath := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", tc.pluginType.String(), tc.name) + if tc.pluginType == api.PluginTypeUnknown { + expectedPath = fmt.Sprintf("/v1/sys/plugins/catalog/%s", tc.name) + } + if requestLogger.path != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, requestLogger.path) + } + }) + } +} diff --git a/command/plugin_reload.go b/command/plugin_reload.go index 2e95fdd1430d..bdcd6f696efb 100644 --- a/command/plugin_reload.go +++ b/command/plugin_reload.go @@ -1,14 +1,15 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "context" "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -19,9 +20,10 @@ var ( type PluginReloadCommand struct { *BaseCommand - plugin string - mounts []string - scope string + plugin string + mounts []string + scope string + pluginType string } func (c *PluginReloadCommand) Synopsis() string { @@ -36,9 +38,16 @@ Usage: vault plugin reload [options] mount(s) must be provided, but not both. In case the plugin name is provided, all of its corresponding mounted paths that use the plugin backend will be reloaded. - Reload the plugin named "my-custom-plugin": + If run with a Vault namespace other than the root namespace, only plugins + running in the same namespace will be reloaded. - $ vault plugin reload -plugin=my-custom-plugin + Reload the secret plugin named "my-custom-plugin" on the current node: + + $ vault plugin reload -type=secret -plugin=my-custom-plugin + + Reload the secret plugin named "my-custom-plugin" across all nodes and replicated clusters: + + $ vault plugin reload -type=secret -plugin=my-custom-plugin -scope=global ` + c.Flags().Help() @@ -68,7 +77,15 @@ func (c *PluginReloadCommand) Flags() *FlagSets { Name: "scope", Target: &c.scope, Completion: complete.PredictAnything, - Usage: "The scope of the reload, omitted for local, 'global', for replicated reloads", + Usage: "The scope of the reload, omitted for local, 'global', for replicated reloads.", + }) + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.pluginType, + Completion: complete.PredictAnything, + Usage: "The type of plugin to reload, one of auth, secret, or database. Mutually " + + "exclusive with -mounts. 
If not provided, all plugins with a matching name will be reloaded.", }) return set @@ -90,15 +107,23 @@ func (c *PluginReloadCommand) Run(args []string) int { return 1 } + positionalArgs := len(f.Args()) switch { + case positionalArgs != 0: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", positionalArgs)) + return 1 case c.plugin == "" && len(c.mounts) == 0: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + c.UI.Error("No plugins specified, must specify exactly one of -plugin or -mounts") return 1 case c.plugin != "" && len(c.mounts) > 0: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + c.UI.Error("Must specify exactly one of -plugin or -mounts") return 1 case c.scope != "" && c.scope != "global": c.UI.Error(fmt.Sprintf("Invalid reload scope: %s", c.scope)) + return 1 + case len(c.mounts) > 0 && c.pluginType != "": + c.UI.Error("Cannot specify -type with -mounts") + return 1 } client, err := c.Client() @@ -107,25 +132,46 @@ func (c *PluginReloadCommand) Run(args []string) int { return 2 } - rid, err := client.Sys().ReloadPlugin(&api.ReloadPluginInput{ - Plugin: c.plugin, - Mounts: c.mounts, - Scope: c.scope, - }) + var reloadID string + if client.Namespace() == "" { + pluginType := api.PluginTypeUnknown + pluginTypeStr := strings.TrimSpace(c.pluginType) + if pluginTypeStr != "" { + var err error + pluginType, err = api.ParsePluginType(pluginTypeStr) + if err != nil { + c.UI.Error(fmt.Sprintf("Error parsing -type as a plugin type, must be unset or one of auth, secret, or database: %s", err)) + return 1 + } + } + + reloadID, err = client.Sys().RootReloadPlugin(context.Background(), &api.RootReloadPluginInput{ + Plugin: c.plugin, + Type: pluginType, + Scope: c.scope, + }) + } else { + reloadID, err = client.Sys().ReloadPlugin(&api.ReloadPluginInput{ + Plugin: c.plugin, + Mounts: c.mounts, + Scope: c.scope, + }) + } + if err != nil { c.UI.Error(fmt.Sprintf("Error reloading plugin/mounts: %s", err)) return 2 } if len(c.mounts) > 0 { - if rid != "" { - c.UI.Output(fmt.Sprintf("Success! Reloading mounts: %s, reload_id: %s", c.mounts, rid)) + if reloadID != "" { + c.UI.Output(fmt.Sprintf("Success! Reloading mounts: %s, reload_id: %s", c.mounts, reloadID)) } else { c.UI.Output(fmt.Sprintf("Success! Reloaded mounts: %s", c.mounts)) } } else { - if rid != "" { - c.UI.Output(fmt.Sprintf("Success! Reloading plugin: %s, reload_id: %s", c.plugin, rid)) + if reloadID != "" { + c.UI.Output(fmt.Sprintf("Success! Reloading plugin: %s, reload_id: %s", c.plugin, reloadID)) } else { c.UI.Output(fmt.Sprintf("Success! Reloaded plugin: %s", c.plugin)) } diff --git a/command/plugin_reload_status.go b/command/plugin_reload_status.go index e527a07d1bfb..c653955978aa 100644 --- a/command/plugin_reload_status.go +++ b/command/plugin_reload_status.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/plugin_reload_test.go b/command/plugin_reload_test.go index 646fda924505..d84062d8d251 100644 --- a/command/plugin_reload_test.go +++ b/command/plugin_reload_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,9 +7,9 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/mitchellh/cli" ) func testPluginReloadCommand(tb testing.TB) (*cli.MockUi, *PluginReloadCommand) { @@ -46,13 +46,25 @@ func TestPluginReloadCommand_Run(t *testing.T) { { "not_enough_args", nil, - "Not enough arguments", + "No plugins specified, must specify exactly one of -plugin or -mounts", 1, }, { "too_many_args", []string{"-plugin", "foo", "-mounts", "bar"}, - "Too many arguments", + "Must specify exactly one of -plugin or -mounts", + 1, + }, + { + "type_and_mounts_mutually_exclusive", + []string{"-mounts", "bar", "-type", "secret"}, + "Cannot specify -type with -mounts", + 1, + }, + { + "invalid_type", + []string{"-plugin", "bar", "-type", "unsupported"}, + "Error parsing -type as a plugin type", 1, }, } @@ -85,8 +97,7 @@ func TestPluginReloadCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -147,7 +158,7 @@ func TestPluginReloadStatusCommand_Run(t *testing.T) { client, closer := testVaultServer(t) defer closer() - ui, cmd := testPluginReloadCommand(t) + ui, cmd := testPluginReloadStatusCommand(t) cmd.client = client args := append([]string{}, tc.args...) diff --git a/command/plugin_runtime.go b/command/plugin_runtime.go new file mode 100644 index 000000000000..ce15bb31fdbf --- /dev/null +++ b/command/plugin_runtime.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" +) + +var _ cli.Command = (*PluginRuntimeCommand)(nil) + +type PluginRuntimeCommand struct { + *BaseCommand +} + +func (c *PluginRuntimeCommand) Synopsis() string { + return "Interact with Vault plugin runtimes catalog." +} + +func (c *PluginRuntimeCommand) Help() string { + helpText := ` +Usage: vault plugin runtime [options] [args] + + This command groups subcommands for interacting with Vault's plugin runtimes and the + plugin runtime catalog. The plugin runtime catalog is divided into types. Currently, + Vault only supports "container" plugin runtimes. A plugin runtime allows users to + fine-tune the parameters with which a plugin is executed. For example, you can select + a different OCI-compatible runtime, or set resource limits. A plugin runtime can + optionally be referenced during plugin registration. A type must be specified on each call. + Here are a few examples of the plugin runtime commands. + + List all available plugin runtimes in the catalog of a particular type: + + $ vault plugin runtime list -type=container + + Register a new plugin runtime to the catalog as a particular type: + + $ vault plugin runtime register -type=container -oci_runtime=my-oci-runtime my-custom-plugin-runtime + + Get information about a plugin runtime in the catalog listed under a particular type: + + $ vault plugin runtime info -type=container my-custom-plugin-runtime + + Please see the individual subcommand help for detailed usage information. 
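// [Illustrative sketch, not part of the diff] The catalog operations shown
// in the CLI examples above can also be driven from Go through the api
// package, using the input types that appear elsewhere in this diff
// (GetPluginRuntimeInput, DeregisterPluginRuntimeInput). A minimal sketch,
// where the runtime name is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	runtimeType, err := api.ParsePluginRuntimeType("container")
	if err != nil {
		log.Fatal(err)
	}
	rt, err := client.Sys().GetPluginRuntime(context.Background(), &api.GetPluginRuntimeInput{
		Name: "my-custom-plugin-runtime", // hypothetical runtime name
		Type: runtimeType,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rt.Name, rt.OCIRuntime)
}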
+` + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/plugin_runtime_deregister.go b/command/plugin_runtime_deregister.go new file mode 100644 index 000000000000..47b790f2cc03 --- /dev/null +++ b/command/plugin_runtime_deregister.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeDeregisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeDeregisterCommand)(nil) +) + +type PluginRuntimeDeregisterCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeDeregisterCommand) Synopsis() string { + return "Deregister an existing plugin runtime in the catalog" +} + +func (c *PluginRuntimeDeregisterCommand) Help() string { + helpText := ` +Usage: vault plugin runtime deregister [options] NAME + + Deregister an existing plugin runtime in the catalog with the given name. If + any registered plugin references the plugin runtime, an error is returned. If + the plugin runtime does not exist, an error is returned. The -type flag + currently only accepts "container". + + Deregister a plugin runtime: + + $ vault plugin runtime deregister -type=container my-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeDeregisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeDeregisterCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeDeregisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeDeregisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTyeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTyeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime deregistration") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTyeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + if err = client.Sys().DeregisterPluginRuntime(context.Background(), &api.DeregisterPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error deregistering plugin runtime named %s: %s", runtimeName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! 
Deregistered plugin runtime: %s", runtimeName)) + return 0 +} diff --git a/command/plugin_runtime_deregister_test.go b/command/plugin_runtime_deregister_test.go new file mode 100644 index 000000000000..1569fceb3f11 --- /dev/null +++ b/command/plugin_runtime_deregister_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testPluginRuntimeDeregisterCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeDeregisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeDeregisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeDeregisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{"-type=container"}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type=container", "foo", "baz"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo", "bar"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "info_container_on_empty_plugin_runtime_catalog", + []string{"-type=container", "my-plugin-runtime"}, + "Error deregistering plugin runtime named my-plugin-runtime", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeDeregisterCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeDeregisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container", "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error deregistering plugin runtime named my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeDeregisterCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_info.go b/command/plugin_runtime_info.go new file mode 100644 index 000000000000..22c95a233570 --- /dev/null +++ b/command/plugin_runtime_info.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeInfoCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeInfoCommand)(nil) +) + +type PluginRuntimeInfoCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeInfoCommand) Synopsis() string { + return "Read information about a plugin runtime in the catalog" +} + +func (c *PluginRuntimeInfoCommand) Help() string { + helpText := ` +Usage: vault plugin runtime info [options] NAME + + Displays information about a plugin runtime in the catalog with the given name. If + the plugin runtime does not exist, an error is returned. The -type flag + currently only accepts "container". + + Get info about a plugin runtime: + + $ vault plugin runtime info -type=container my-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeInfoCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeInfoCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeInfoCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeInfoCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime info retrieval") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + resp, err := client.Sys().GetPluginRuntime(context.Background(), &api.GetPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading plugin runtime named %s: %s", runtimeName, err)) + return 2 + } + + if resp == nil { + c.UI.Error(fmt.Sprintf("No value found for plugin runtime %q", runtimeName)) + return 2 + } + + data := map[string]interface{}{ + "name": resp.Name, + "type": resp.Type, + "oci_runtime": resp.OCIRuntime, + "cgroup_parent": resp.CgroupParent, + "cpu_nanos": resp.CPU, + "memory_bytes": resp.Memory, + } + + if c.flagField != "" { + return PrintRawField(c.UI, data, c.flagField) + } + return OutputData(c.UI, data) +} diff --git a/command/plugin_runtime_info_test.go b/command/plugin_runtime_info_test.go new file mode 100644 index 000000000000..40166b094bf9 --- /dev/null +++ b/command/plugin_runtime_info_test.go @@ -0,0 +1,116 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testPluginRuntimeInfoCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeInfoCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeInfoCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeInfoCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{"-type=container"}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type=container", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo", "bar"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "info_container_on_empty_plugin_runtime_catalog", + []string{"-type=container", "my-plugin-runtime"}, + "Error reading plugin runtime named my-plugin-runtime", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeInfoCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container", "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading plugin runtime named my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeInfoCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_list.go b/command/plugin_runtime_list.go new file mode 100644 index 000000000000..64cca1805f63 --- /dev/null +++ b/command/plugin_runtime_list.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeListCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeListCommand)(nil) +) + +type PluginRuntimeListCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeListCommand) Synopsis() string { + return "Lists available plugin runtimes" +} + +func (c *PluginRuntimeListCommand) Help() string { + helpText := ` +Usage: vault plugin runtime list [options] + + Lists available plugin runtimes registered in the catalog. This does not list whether + plugin runtimes are in use, but rather just their availability. 
+ + List all available plugin runtimes in the catalog: + + $ vault plugin runtime list + + List all available container plugin runtimes in the catalog: + + $ vault plugin runtime list -type=container + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeListCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if len(f.Args()) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(f.Args()))) + return 1 + } + + var input *api.ListPluginRuntimesInput + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) > 0 { + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + input = &api.ListPluginRuntimesInput{Type: runtimeType} + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Sys().ListPluginRuntimes(context.Background(), input) + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing available plugin runtimes: %s", err)) + return 2 + } + if resp == nil { + c.UI.Error("No response from server when listing plugin runtimes") + return 2 + } + + switch Format(c.UI) { + case "table": + c.UI.Output(tableOutput(c.tableResponse(resp), nil)) + return 0 + default: + return OutputData(c.UI, resp.Runtimes) + } +} + +func (c *PluginRuntimeListCommand) tableResponse(response *api.ListPluginRuntimesResponse) []string { + out := []string{"Name | Type | OCI Runtime | Parent Cgroup | CPU Nanos | Memory Bytes"} + for _, runtime := range response.Runtimes { + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %d | %d", + runtime.Name, runtime.Type, runtime.OCIRuntime, runtime.CgroupParent, runtime.CPU, runtime.Memory)) + } + + return out +} diff --git a/command/plugin_runtime_list_test.go b/command/plugin_runtime_list_test.go new file mode 100644 index 000000000000..8f8d209ca413 --- /dev/null +++ b/command/plugin_runtime_list_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testPluginRuntimeListCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "list container on empty plugin runtime catalog", + []string{"-type=container"}, + "OCI Runtime", + 0, + }, + { + "list on empty plugin runtime catalog", + nil, + "OCI Runtime", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeListCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing available plugin runtimes: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeListCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_register.go b/command/plugin_runtime_register.go new file mode 100644 index 000000000000..175be302f519 --- /dev/null +++ b/command/plugin_runtime_register.go @@ -0,0 +1,172 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeRegisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeRegisterCommand)(nil) +) + +type PluginRuntimeRegisterCommand struct { + *BaseCommand + + flagType string + flagOCIRuntime string + flagCgroupParent string + flagCPUNanos int64 + flagMemoryBytes int64 + flagRootless bool +} + +func (c *PluginRuntimeRegisterCommand) Synopsis() string { + return "Registers a new plugin runtime in the catalog" +} + +func (c *PluginRuntimeRegisterCommand) Help() string { + helpText := ` +Usage: vault plugin runtime register [options] NAME + + Registers a new plugin runtime in the catalog. Currently, Vault only supports registering runtimes of type "container". +The OCI runtime must be available on Vault's host. If no OCI runtime is specified, Vault will use "runsc", gVisor's OCI runtime. 
+ + Register the plugin runtime named my-custom-plugin-runtime: + + $ vault plugin runtime register -type=container -oci_runtime=my-oci-runtime my-custom-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeRegisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + f.StringVar(&StringVar{ + Name: "oci_runtime", + Target: &c.flagOCIRuntime, + Completion: complete.PredictAnything, + Usage: "OCI runtime. Default is \"runsc\", gVisor's OCI runtime.", + }) + + f.StringVar(&StringVar{ + Name: "cgroup_parent", + Target: &c.flagCgroupParent, + Completion: complete.PredictAnything, + Usage: "Parent cgroup to set for each container. This can be used to control the total resource usage for a group of plugins.", + }) + + f.Int64Var(&Int64Var{ + Name: "cpu_nanos", + Target: &c.flagCPUNanos, + Completion: complete.PredictAnything, + Usage: "CPU limit to set per container in nanos. Defaults to no limit.", + }) + + f.Int64Var(&Int64Var{ + Name: "memory_bytes", + Target: &c.flagMemoryBytes, + Completion: complete.PredictAnything, + Usage: "Memory limit to set per container in bytes. Defaults to no limit.", + }) + + f.BoolVar(&BoolVar{ + Name: "rootless", + Target: &c.flagRootless, + Completion: complete.PredictAnything, + Usage: "Whether the container runtime is configured to run as a " + + "non-privileged (non-root) user. Required if the plugin container " + + "image is also configured to run as a non-root user.", + }) + + return set +} + +func (c *PluginRuntimeRegisterCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeRegisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeRegisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime registration") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + ociRuntime := strings.TrimSpace(c.flagOCIRuntime) + cgroupParent := strings.TrimSpace(c.flagCgroupParent) + + if err := client.Sys().RegisterPluginRuntime(context.Background(), &api.RegisterPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + OCIRuntime: ociRuntime, + CgroupParent: cgroupParent, + CPU: c.flagCPUNanos, + Memory: c.flagMemoryBytes, + Rootless: c.flagRootless, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error registering plugin runtime %s: %s", runtimeName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! 
Registered plugin runtime: %s", runtimeName)) + return 0 +} diff --git a/command/plugin_runtime_register_test.go b/command/plugin_runtime_register_test.go new file mode 100644 index 000000000000..3b28587cf62a --- /dev/null +++ b/command/plugin_runtime_register_test.go @@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +func testPluginRuntimeRegisterCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeRegisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeRegisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeRegisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + flags []string + args []string + out string + code int + }{ + { + "no type specified", + []string{}, + []string{"foo"}, + "-type is required for plugin runtime registration", + 1, + }, + { + "invalid type", + []string{"-type", "foo"}, + []string{"not"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "not_enough_args", + []string{"-type", consts.PluginRuntimeTypeContainer.String()}, + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type", consts.PluginRuntimeTypeContainer.String()}, + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeRegisterCommand(t) + cmd.client = client + + args := append(tc.flags, tc.args...) + code := cmd.Run(args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeRegisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type", consts.PluginRuntimeTypeContainer.String(), "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error registering plugin runtime my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeRegisterCommand(t) + assertNoTabs(t, cmd) + }) +} + +// TestPluginRuntimeFlagParsing ensures that flags passed to vault plugin runtime register correctly +// translate into the expected JSON body and request path. 
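// For example, using the values from the "full" fixture below, the invocation
//
//	vault plugin runtime register -type=container -oci_runtime=runtime \
//	    -cgroup_parent=/cpulimit/ -cpu_nanos=5678 -memory_bytes=1234 -rootless=true foo
//
// is expected to send a request to /v1/sys/plugins/runtimes/catalog/container/foo
// whose JSON body unmarshals into the same api.RegisterPluginRuntimeInput as
// tc.expectedPayload (a worked instance of the assertion, not additional test logic).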
+func TestPluginRuntimeFlagParsing(t *testing.T) { + for name, tc := range map[string]struct { + runtimeType api.PluginRuntimeType + name string + ociRuntime string + cgroupParent string + cpu int64 + memory int64 + rootless bool + expectedPayload string + }{ + "minimal": { + runtimeType: api.PluginRuntimeTypeContainer, + name: "foo", + expectedPayload: `{"type":1,"name":"foo"}`, + }, + "full": { + runtimeType: api.PluginRuntimeTypeContainer, + name: "foo", + cgroupParent: "/cpulimit/", + ociRuntime: "runtime", + cpu: 5678, + memory: 1234, + rootless: true, + expectedPayload: `{"type":1,"cgroup_parent":"/cpulimit/","memory_bytes":1234,"cpu_nanos":5678,"oci_runtime":"runtime","rootless":true}`, + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + ui, cmd := testPluginRuntimeRegisterCommand(t) + var requestLogger *recordingRoundTripper + cmd.client, requestLogger = mockClient(t) + + var args []string + if tc.cgroupParent != "" { + args = append(args, "-cgroup_parent="+tc.cgroupParent) + } + if tc.ociRuntime != "" { + args = append(args, "-oci_runtime="+tc.ociRuntime) + } + if tc.memory != 0 { + args = append(args, fmt.Sprintf("-memory_bytes=%d", tc.memory)) + } + if tc.cpu != 0 { + args = append(args, fmt.Sprintf("-cpu_nanos=%d", tc.cpu)) + } + if tc.rootless { + args = append(args, "-rootless=true") + } + + if tc.runtimeType != api.PluginRuntimeTypeUnsupported { + args = append(args, "-type="+tc.runtimeType.String()) + } + args = append(args, tc.name) + t.Log(args) + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d\nstdout: %s\nstderr: %s", code, exp, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + actual := &api.RegisterPluginRuntimeInput{} + expected := &api.RegisterPluginRuntimeInput{} + err := json.Unmarshal(requestLogger.body, actual) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(tc.expectedPayload), expected) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected: %s\ngot: %s", tc.expectedPayload, requestLogger.body) + } + expectedPath := fmt.Sprintf("/v1/sys/plugins/runtimes/catalog/%s/%s", tc.runtimeType.String(), tc.name) + + if requestLogger.path != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, requestLogger.path) + } + }) + } +} diff --git a/command/plugin_test.go b/command/plugin_test.go index 08c350cbd088..2e72bb7c1898 100644 --- a/command/plugin_test.go +++ b/command/plugin_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command diff --git a/command/policy.go b/command/policy.go index 289aae134a4f..5e5f61bb2d72 100644 --- a/command/policy.go +++ b/command/policy.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*PolicyCommand)(nil) diff --git a/command/policy_delete.go b/command/policy_delete.go index 199fb74a9663..d5c3b8aabc11 100644 --- a/command/policy_delete.go +++ b/command/policy_delete.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_delete_test.go b/command/policy_delete_test.go index 008cd59766da..6b3bd01e3f32 100644 --- a/command/policy_delete_test.go +++ b/command/policy_delete_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyDeleteCommand(tb testing.TB) (*cli.MockUi, *PolicyDeleteCommand) { diff --git a/command/policy_fmt.go b/command/policy_fmt.go index 75a91791327c..ea3dd2ab9958 100644 --- a/command/policy_fmt.go +++ b/command/policy_fmt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,10 +8,10 @@ import ( "io/ioutil" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/hcl/hcl/printer" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" homedir "github.com/mitchellh/go-homedir" "github.com/posener/complete" ) diff --git a/command/policy_fmt_test.go b/command/policy_fmt_test.go index 89ed5215b6da..41de53c9e6c8 100644 --- a/command/policy_fmt_test.go +++ b/command/policy_fmt_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyFmtCommand(tb testing.TB) (*cli.MockUi, *PolicyFmtCommand) { @@ -89,7 +89,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() @@ -132,7 +132,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() @@ -167,7 +167,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() @@ -202,7 +202,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() diff --git a/command/policy_list.go b/command/policy_list.go index 7b5bfc12c98e..147efb971672 100644 --- a/command/policy_list.go +++ b/command/policy_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_list_test.go b/command/policy_list_test.go index 19766978c733..c603d310fcd2 100644 --- a/command/policy_list_test.go +++ b/command/policy_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyListCommand(tb testing.TB) (*cli.MockUi, *PolicyListCommand) { diff --git a/command/policy_read.go b/command/policy_read.go index 4f226444bea8..dd7a698de65c 100644 --- a/command/policy_read.go +++ b/command/policy_read.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_read_test.go b/command/policy_read_test.go index f091749176b5..e18298e5115e 100644 --- a/command/policy_read_test.go +++ b/command/policy_read_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyReadCommand(tb testing.TB) (*cli.MockUi, *PolicyReadCommand) { diff --git a/command/policy_write.go b/command/policy_write.go index 81ff2b3e1993..193c94968809 100644 --- a/command/policy_write.go +++ b/command/policy_write.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,7 +10,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_write_test.go b/command/policy_write_test.go index 8294ef1934db..64f67eb2a8b4 100644 --- a/command/policy_write_test.go +++ b/command/policy_write_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -12,7 +12,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyWriteCommand(tb testing.TB) (*cli.MockUi, *PolicyWriteCommand) { diff --git a/command/print.go b/command/print.go index 19ac0a674dbc..d5e3b2a5529b 100644 --- a/command/print.go +++ b/command/print.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/print_token.go b/command/print_token.go index 862af23e0b2d..9402e8a15238 100644 --- a/command/print_token.go +++ b/command/print_token.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/proxy.go b/command/proxy.go new file mode 100644 index 000000000000..05a52398224f --- /dev/null +++ b/command/proxy.go @@ -0,0 +1,1230 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "crypto/tls" + "flag" + "fmt" + "io" + "net" + "net/http" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/hashicorp/cli" + ctconfig "github.com/hashicorp/consul-template/config" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" + proxyConfig "github.com/hashicorp/vault/command/proxy/config" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/version" + "github.com/kr/pretty" + "github.com/oklog/run" + "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" + "google.golang.org/grpc/test/bufconn" +) + +var ( + _ cli.Command = (*ProxyCommand)(nil) + _ cli.CommandAutocomplete = (*ProxyCommand)(nil) +) + +const ( + // flagNameProxyExitAfterAuth is used as a Proxy specific flag to indicate + // that proxy should exit after a single successful auth + flagNameProxyExitAfterAuth = "exit-after-auth" + nameProxy = "proxy" +) + +type ProxyCommand struct { + *BaseCommand + logFlags logFlags + + config *proxyConfig.Config + + ShutdownCh chan struct{} + SighupCh chan struct{} + SigUSR2Ch chan struct{} + + tlsReloadFuncsLock sync.RWMutex + tlsReloadFuncs []reloadutil.ReloadFunc + + logWriter io.Writer + logGate *gatedwriter.Writer + logger log.Logger + + // Telemetry object + metricsHelper *metricsutil.MetricsHelper + + cleanupGuard sync.Once + + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests + + flagConfigs []string + flagExitAfterAuth bool + flagTestVerifyOnly bool +} + +func (c *ProxyCommand) Synopsis() string { + return "Start a Vault Proxy" +} + +func (c *ProxyCommand) Help() string { + helpText := ` +Usage: vault proxy [options] + + This command starts a Vault Proxy that can perform automatic authentication + in certain environments. + + Start a proxy with a configuration file: + + $ vault proxy -config=/etc/vault/config.hcl + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *ProxyCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + // Augment with the log flags + f.addLogFlags(&c.logFlags) + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + ), + Usage: "Path to a configuration file. 
This configuration file should " + "contain only proxy directives.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameProxyExitAfterAuth, + Target: &c.flagExitAfterAuth, + Default: false, + Usage: "If set to true, the proxy will exit with code 0 after a single " + "successful auth, where success means that a token was retrieved and " + "all sinks successfully wrote it", + }) + + // Internal-only flags to follow. + // + // Why hello there little source code reader! Welcome to the Vault source + // code. The remaining options are intentionally undocumented and come with + // no warranty or backwards-compatibility promise. Do not use these flags + // in production. Do not build automation using these flags. Unless you are + // developing against Vault, you should not need any of these flags. + f.BoolVar(&BoolVar{ + Name: "test-verify-only", + Target: &c.flagTestVerifyOnly, + Default: false, + Hidden: true, + }) + + // End internal-only flags. + + return set +} + +func (c *ProxyCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *ProxyCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *ProxyCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Create a logger. We wrap it in a gated writer so that it doesn't + // start logging too early. + c.logGate = gatedwriter.NewWriter(os.Stderr) + c.logWriter = c.logGate + + if c.logFlags.flagCombineLogs { + c.logWriter = os.Stdout + } + + // Validation + if len(c.flagConfigs) < 1 { + c.UI.Error("Must specify at least one config path using -config") + return 1 + } + + config, err := c.loadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + return 1 + } + + if config.AutoAuth == nil { + c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started") + } + + c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars + c.config = config + + l, err := c.newLogger() + if err != nil { + c.outputErrors(err) + return 1 + } + c.logger = l + + // release log gate if the disable-gated-logs flag is set + if c.logFlags.flagDisableGatedLogs { + c.logGate.Flush() + } + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { + c.UI.Output(fmt.Sprintf( + "\nConfiguration:\n%s\n", + pretty.Sprint(*c.config))) + } + return 0 + } + + // Ignore any setting of Agent/Proxy's address. This client is used by the Proxy + // to reach out to Vault. This should never loop back to the proxy. 
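// For instance, if a caller had exported an agent/proxy address so that other
// CLI commands are routed through this proxy (VAULT_AGENT_ADDR is one such
// variable), honoring that value here would make the proxy dial itself:
//
//	export VAULT_AGENT_ADDR=http://127.0.0.1:8100   # consumed by CLI clients,
//	                                                # deliberately ignored below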
+ c.flagAgentProxyAddress = "" + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf( + "Error fetching client: %v", + err)) + return 1 + } + + serverHealth, err := client.Sys().Health() + // We don't have any special behaviour if the error != nil, as this + // is not worth stopping the Proxy process over. + if err == nil { + // Note that we don't exit if the versions don't match, as this is a valid + // configuration, but we should still let the user know. + serverVersion := serverHealth.Version + proxyVersion := version.GetVersion().VersionNumber() + if serverVersion != proxyVersion { + c.UI.Info("==> Note: Vault Proxy version does not match Vault server version. " + + fmt.Sprintf("Vault Proxy version: %s, Vault server version: %s", proxyVersion, serverVersion)) + } + } + + // telemetry configuration + inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.ProxyString(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + // This indicates whether the namespace for the client has been set by environment variable. + // If it has, we don't touch it + namespaceSetByEnvironmentVariable := client.Namespace() != "" + + if !namespaceSetByEnvironmentVariable && config.Vault != nil && config.Vault.Namespace != "" { + client.SetNamespace(config.Vault.Namespace) + } + + var method auth.AuthMethod + var sinks []*sink.SinkConfig + if config.AutoAuth != nil { + // Note: This will only set namespace header to the value in config.AutoAuth.Method.Namespace + // only if it hasn't been set by config.Vault.Namespace above. In that case, the config value + // present at config.AutoAuth.Method.Namespace will still be used for auto-auth. 
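// Put differently, the namespace the client ends up with follows this
// precedence (a summary of the checks above and below, not additional logic):
//
//	1. VAULT_NAMESPACE environment variable, if set
//	2. vault { namespace = "..." } from the config file
//	3. the auto_auth method's namespace, applied for the auto-auth flow below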
+ if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + client.SetNamespace(config.AutoAuth.Method.Namespace) + } + + sinkClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + sinkClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + sinkClient.SetDisableKeepAlives(true) + } + + for _, sc := range config.AutoAuth.Sinks { + switch sc.Type { + case "file": + config := &sink.SinkConfig{ + Logger: c.logger.Named("sink.file"), + Config: sc.Config, + Client: sinkClient, + WrapTTL: sc.WrapTTL, + DHType: sc.DHType, + DeriveKey: sc.DeriveKey, + DHPath: sc.DHPath, + AAD: sc.AAD, + } + s, err := file.NewFileSink(config) + if err != nil { + c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) + return 1 + } + config.Sink = s + sinks = append(sinks, config) + default: + c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) + return 1 + } + } + + authConfig := &auth.AuthConfig{ + Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), + MountPath: config.AutoAuth.Method.MountPath, + Config: config.AutoAuth.Method.Config, + } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) + return 1 + } + } + + // We do this after auto-auth has been configured, because we don't want to + // confuse the issue of retries for auth failures which have their own + // config and are handled a bit differently. + if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(ctconfig.DefaultRetryAttempts) + if config.Vault != nil { + if config.Vault.Retry != nil { + client.SetMaxRetries(config.Vault.Retry.NumRetries) + } + } + } + + enforceConsistency := cache.EnforceConsistencyNever + whenInconsistent := cache.WhenInconsistentFail + if config.APIProxy != nil { + switch config.APIProxy.EnforceConsistency { + case "always": + enforceConsistency = cache.EnforceConsistencyAlways + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) + return 1 + } + + switch config.APIProxy.WhenInconsistent { + case "retry": + whenInconsistent = cache.WhenInconsistentRetry + case "forward": + whenInconsistent = cache.WhenInconsistentForward + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) + return 1 + } + } + + // Warn if cache _and_ cert auto-auth is enabled but certificates were not + // provided in the auto_auth.method["cert"].config stanza. + if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { + _, okCertFile := config.AutoAuth.Method.Config["client_cert"] + _, okCertKey := config.AutoAuth.Method.Config["client_key"] + + // If neither of these exists in the cert stanza, proxy will use the + // certs from the vault stanza. + if !okCertFile && !okCertKey { + c.UI.Warn(wrapAtLength("WARNING! Cache is enabled and using the same certificates " + + "from the 'cert' auto-auth method specified in the 'vault' stanza. 
Consider " + + "specifying certificate information in the 'cert' auto-auth's config stanza.")) + } + + } + + // Output the header that the proxy has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault Proxy started! Log data will stream in below:\n") + } + + var leaseCache *cache.LeaseCache + var previousToken string + + proxyClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) + return 1 + } + + if config.DisableIdleConnsAPIProxy { + proxyClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAPIProxy { + proxyClient.SetDisableKeepAlives(true) + } + + apiProxyLogger := c.logger.Named("apiproxy") + + // The API proxy to be used, if listeners are configured + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + PrependConfiguredNamespace: config.APIProxy != nil && config.APIProxy.PrependConfiguredNamespace, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) + return 1 + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, + // and other subsystems, so that they can listen for ctx.Done() to + // fire and shut down accordingly. + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + var updater *cache.StaticSecretCacheUpdater + + // Parse proxy cache configurations + if config.Cache != nil { + cacheLogger := c.logger.Named("cache") + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. 
+ leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheStaticSecrets: config.Cache.CacheStaticSecrets, + // dynamic secrets are configured as default-on to preserve backwards compatibility + CacheDynamicSecrets: !config.Cache.DisableCachingDynamicSecrets, + UserAgentToUse: useragent.AgentProxyString(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) + return 1 + } + + cacheLogger.Info("cache configured", "cache_static_secrets", config.Cache.CacheStaticSecrets, "disable_caching_dynamic_secrets", config.Cache.DisableCachingDynamicSecrets) + + // Configure persistent storage and add to LeaseCache + if config.Cache.Persist != nil { + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + return 1 + } + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() + } + } + + // If we're caching static secrets, we need to start the updater, too + if config.Cache.CacheStaticSecrets { + staticSecretCacheUpdaterLogger := c.logger.Named("cache.staticsecretcacheupdater") + inmemSink, err := inmem.New(&sink.SinkConfig{ + Logger: staticSecretCacheUpdaterLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for static secret updater subsystem: %v", err)) + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: staticSecretCacheUpdaterLogger, + Sink: inmemSink, + }) + + updater, err = cache.NewStaticSecretCacheUpdater(&cache.StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: leaseCache, + Logger: staticSecretCacheUpdaterLogger, + TokenSink: inmemSink, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating static secret cache updater: %v", err)) + return 1 + } + + capabilityManager, err := cache.NewStaticSecretCapabilityManager(&cache.StaticSecretCapabilityManagerConfig{ + LeaseCache: leaseCache, + Logger: c.logger.Named("cache.staticsecretcapabilitymanager"), + Client: client, + StaticSecretTokenCapabilityRefreshInterval: config.Cache.StaticSecretTokenCapabilityRefreshInterval, + StaticSecretTokenCapabilityRefreshBehaviour: config.Cache.StaticSecretTokenCapabilityRefreshBehaviour, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating static secret capability manager: %v", err)) + return 1 + } + leaseCache.SetCapabilityManager(capabilityManager) + } + } + + // Create the AuthHandler and the Sink Server so that we can pass AuthHandler struct + // values into the Proxy http.Handler. We will wait to actually start these servers + // once we have configured handlers for each listener below + authInProgress := &atomic.Bool{} + invalidTokenErrCh := make(chan error) + var ah *auth.AuthHandler + var ss *sink.SinkServer + if method != nil { + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. 
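// e.g. the handler below may adjust retries or the namespace on its clone
// without affecting the proxying client; CloneWithHeaders returns an
// independent *api.Client that also carries over the configured headers, so
//
//	ahClient.SetMaxRetries(0)
//
// leaves proxyClient's retry behaviour untouched.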
+ ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + // Override the set namespace with the auto-auth specific namespace + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + ahClient.SetNamespace(config.AutoAuth.Method.Namespace) + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah = auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.ProxyAutoAuthString(), + MetricsSignifier: "proxy", + }) + + authInProgress = ah.AuthInProgress + invalidTokenErrCh = ah.InvalidToken + + ss = sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + } + + var listeners []net.Listener + + // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. + c.tlsReloadFuncsLock.Lock() + + for i, lnConfig := range config.Listeners { + var ln net.Listener + var tlsCfg *tls.Config + + if lnConfig.Type == listenerutil.BufConnType { + inProcListener := bufconn.Listen(1024 * 1024) + if config.Cache != nil { + config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + } + ln = inProcListener + } else { + lnBundle, err := cache.StartListener(lnConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + + tlsCfg = lnBundle.TLSConfig + ln = lnBundle.Listener + + // Track the reload func, so we can reload later if needed. 
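// When reloadConfig (defined near the end of this file) runs on SIGHUP, it
// can then walk this slice and invoke every callback, roughly:
//
//	c.tlsReloadFuncsLock.RLock()
//	for _, relFunc := range c.tlsReloadFuncs {
//		if relFunc != nil {
//			errs = multierror.Append(errs, relFunc())
//		}
//	}
//	c.tlsReloadFuncsLock.RUnlock()
//
// (a sketch assuming reloadutil.ReloadFunc's func() error signature; the
// actual reloadCerts body is not part of this hunk).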
+ c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) + } + + listeners = append(listeners, ln) + + apiProxyLogger.Debug("configuring inmem auto-auth sink") + inmemSink, err := inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) + useAutoAuthToken := false + forceAutoAuthToken := false + if config.APIProxy != nil { + useAutoAuthToken = config.APIProxy.UseAutoAuthToken + forceAutoAuthToken = config.APIProxy.ForceAutoAuthToken + } + + var muxHandler http.Handler + if leaseCache != nil { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, forceAutoAuthToken, useAutoAuthToken, authInProgress, invalidTokenErrCh) + } else { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, forceAutoAuthToken, useAutoAuthToken, authInProgress, invalidTokenErrCh) + } + + // Parse 'require_request_header' listener config option, and wrap + // the request handler if necessary + if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { + muxHandler = verifyRequestHeader(muxHandler) + } + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + quitEnabled := lnConfig.ProxyAPI != nil && lnConfig.ProxyAPI.EnableQuit + + mux.Handle(consts.ProxyPathMetrics, c.handleMetrics()) + if "metrics_only" != lnConfig.Role { + mux.Handle(consts.ProxyPathCacheClear, leaseCache.HandleCacheClear(ctx)) + mux.Handle(consts.ProxyPathQuit, c.handleQuit(quitEnabled)) + mux.Handle("/", muxHandler) + } + + scheme := "https://" + if tlsCfg == nil { + scheme = "http://" + } + if ln.Addr().Network() == "unix" { + scheme = "unix://" + } + + infoKey := fmt.Sprintf("api address %d", i+1) + info[infoKey] = scheme + ln.Addr().String() + infoKeys = append(infoKeys, infoKey) + + server := &http.Server{ + Addr: ln.Addr().String(), + TLSConfig: tlsCfg, + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: apiProxyLogger.StandardLogger(nil), + } + + go server.Serve(ln) + } + + c.tlsReloadFuncsLock.Unlock() + + // Ensure that listeners are closed at all the exits + listenerCloseFunc := func() { + for _, ln := range listeners { + ln.Close() + } + } + defer c.cleanupGuard.Do(listenerCloseFunc) + + // Inform any tests that the server is ready + if c.startedCh != nil { + close(c.startedCh) + } + + var g run.Group + + g.Add(func() error { + for { + select { + case <-c.SighupCh: + c.UI.Output("==> Vault Proxy config reload triggered") + err := c.reloadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + } + // Send the 'reloaded' message on the relevant channel + select { + case c.reloadedCh <- struct{}{}: + default: + } + case <-c.SigUSR2Ch: + pprofPath := filepath.Join(os.TempDir(), "vault-proxy-pprof") + cpuProfileDuration := time.Second * 1 + err := WritePprofToFile(pprofPath, cpuProfileDuration) + if err != nil { + c.logger.Error(err.Error()) + continue + } + + c.logger.Info(fmt.Sprintf("Wrote pprof files to: %s", pprofPath)) + case <-ctx.Done(): + return nil + } + } + }, func(error) { + cancelFunc() + }) + + // This run group watches for signal termination + g.Add(func() error { + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault Proxy shutdown triggered") + // Notify systemd that the 
server is shutting down + // Let the lease cache know this is a shutdown; no need to evict everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + return nil + case <-ctx.Done(): + return nil + case <-winsvc.ShutdownChannel(): + return nil + } + } + }, func(error) {}) + + // Start auto-auth and sink servers + if method != nil { + g.Add(func() error { + return ah.Run(ctx, method) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + err := ss.Run(ctx, ah.OutputCh, sinks, ah.AuthInProgress) + c.logger.Info("sinks finished, exiting") + + // Start goroutine to drain from ah.OutputCh from this point onward + // to prevent ah.Run from being blocked. + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ah.OutputCh: + } + } + }() + + return err + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + } + + // Add the static secret cache updater, if appropriate + if updater != nil { + g.Add(func() error { + err := updater.Run(ctx, authInProgress, invalidTokenErrCh) + return err + }, func(error) { + cancelFunc() + }) + } + + // Server configuration output + padding := 24 + sort.Strings(infoKeys) + caser := cases.Title(language.English) + c.UI.Output("==> Vault Proxy configuration:\n") + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + caser.String(k), + info[k])) + } + c.UI.Output("") + + // Release the log gate. + c.logGate.Flush() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var exitCode int + if err := g.Run(); err != nil { + c.logger.Error("runtime error encountered", "error", err) + c.UI.Error("Error encountered during run, refer to logs for more details.") + exitCode = 1 + } + c.notifySystemd(systemd.SdNotifyStopping) + return exitCode +} + +// applyConfigOverrides ensures that the config object accurately reflects the desired +// settings as configured by the user. It applies the relevant config setting based +// on the precedence (env var overrides file config, cli overrides env var). +// It mutates the config object supplied. 
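// Worked example for the Vault address (the setStringFlag helper below
// implements this order):
//
//	-address flag on the command line         -> flag value wins
//	else VAULT_ADDR set in the environment    -> environment value
//	else vault { address = "..." } in config  -> config file value
//	otherwise                                 -> default https://127.0.0.1:8200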
+func (c *ProxyCommand) applyConfigOverrides(f *FlagSets, config *proxyConfig.Config) { + if config.Vault == nil { + config.Vault = &proxyConfig.Vault{} + } + + f.applyLogConfigOverrides(config.SharedConfig) + + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameProxyExitAfterAuth { + config.ExitAfterAuth = c.flagExitAfterAuth + } + }) + + c.setStringFlag(f, config.Vault.Address, &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + Default: "https://127.0.0.1:8200", + EnvVar: api.EnvVaultAddress, + }) + config.Vault.Address = c.flagAddress + c.setStringFlag(f, config.Vault.CACert, &StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + }) + config.Vault.CACert = c.flagCACert + c.setStringFlag(f, config.Vault.CAPath, &StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + }) + config.Vault.CAPath = c.flagCAPath + c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + }) + config.Vault.ClientCert = c.flagClientCert + c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + }) + config.Vault.ClientKey = c.flagClientKey + c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + }) + config.Vault.TLSSkipVerify = c.flagTLSSkipVerify + c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + }) + config.Vault.TLSServerName = c.flagTLSServerName +} + +func (c *ProxyCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + +func (c *ProxyCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue + case configVal != "": + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +func (c *ProxyCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + val, err := parseutil.ParseBool(flagEnvValue) + if err != nil { + c.logger.Error("error parsing bool from environment variable, using default instead", "environment variable", fVar.EnvVar, "provided value", flagEnvValue, "default", fVar.Default, "err", err) + val = fVar.Default + } + *fVar.Target = val + case configVal: + // Use value from config 
+ *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +// storePidFile is used to write out our PID to a file if necessary +func (c *ProxyCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("could not open pid file: %w", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %w", err) + } + return nil +} + +// removePidFile is used to cleanup the PID file if necessary +func (c *ProxyCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + +func (c *ProxyCommand) handleMetrics() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + logical.RespondError(w, http.StatusMethodNotAllowed, nil) + return + } + + if err := r.ParseForm(); err != nil { + logical.RespondError(w, http.StatusBadRequest, err) + return + } + + format := r.Form.Get("format") + if format == "" { + format = metricsutil.FormatFromRequest(&logical.Request{ + Headers: r.Header, + }) + } + + resp := c.metricsHelper.ResponseForFormat(format) + + status := resp.Data[logical.HTTPStatusCode].(int) + w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) + switch v := resp.Data[logical.HTTPRawBody].(type) { + case string: + w.WriteHeader(status) + w.Write([]byte(v)) + case []byte: + w.WriteHeader(status) + w.Write(v) + default: + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + } + }) +} + +func (c *ProxyCommand) handleQuit(enabled bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !enabled { + w.WriteHeader(http.StatusNotFound) + return + } + + switch r.Method { + case http.MethodPost: + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + c.logger.Debug("received quit request") + close(c.ShutdownCh) + }) +} + +// newLogger creates a logger based on parsed config field on the Proxy Command struct. 
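+// Level, format, and rotation duration are parsed here rather than at config
+// load time, so any bad values are reported together as a multierror from
+// this function.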
+func (c *ProxyCommand) newLogger() (log.InterceptLogger, error) {
+ if c.config == nil {
+ return nil, fmt.Errorf("cannot create logger, no config")
+ }
+
+ var errors error
+
+ // Parse all the log related config
+ logLevel, err := logging.ParseLogLevel(c.config.LogLevel)
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ logFormat, err := logging.ParseLogFormat(c.config.LogFormat)
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration)
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ if errors != nil {
+ return nil, errors
+ }
+
+ logCfg, err := logging.NewLogConfig(nameProxy)
+ if err != nil {
+ return nil, err
+ }
+ logCfg.Name = nameProxy
+ logCfg.LogLevel = logLevel
+ logCfg.LogFormat = logFormat
+ logCfg.LogFilePath = c.config.LogFile
+ logCfg.LogRotateDuration = logRotateDuration
+ logCfg.LogRotateBytes = c.config.LogRotateBytes
+ logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles
+
+ l, err := logging.Setup(logCfg, c.logWriter)
+ if err != nil {
+ return nil, err
+ }
+
+ return l, nil
+}
+
+// loadConfig attempts to generate a Proxy config from the file(s) specified.
+func (c *ProxyCommand) loadConfig(paths []string) (*proxyConfig.Config, error) {
+ var errors error
+ cfg := proxyConfig.NewConfig()
+
+ for _, configPath := range paths {
+ configFromPath, err := proxyConfig.LoadConfig(configPath)
+ if err != nil {
+ errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err))
+ } else {
+ cfg = cfg.Merge(configFromPath)
+ }
+ }
+
+ if errors != nil {
+ return nil, errors
+ }
+
+ if err := cfg.ValidateConfig(); err != nil {
+ return nil, fmt.Errorf("error validating configuration: %w", err)
+ }
+
+ return cfg, nil
+}
+
+// reloadConfig will attempt to reload the config from file(s) and adjust certain
+// config values without requiring a restart of the Vault Proxy.
+// If the config is retrieved without error, it is stored in the config field of the ProxyCommand.
+// This operation is not atomic and could result in an updated config whose settings are only partially applied.
+// The error returned from this func may be a multierror.
+// This function will most likely be called due to Vault Proxy receiving a SIGHUP signal.
+// Currently only the following can be reloaded:
+// * log level
+// * TLS certs for listeners
+func (c *ProxyCommand) reloadConfig(paths []string) error {
+ // Notify systemd that the server is reloading
+ c.notifySystemd(systemd.SdNotifyReloading)
+ defer c.notifySystemd(systemd.SdNotifyReady)
+
+ var errors error
+
+ // Reload the config
+ cfg, err := c.loadConfig(paths)
+ if err != nil {
+ // Return a single error, as we won't continue with a bad config and won't 'commit' it.
+ return err
+ }
+ c.config = cfg
+
+ // Update the log level
+ err = c.reloadLogLevel()
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ // Update certs
+ err = c.reloadCerts()
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ return errors
+}
+
+// reloadLogLevel will attempt to update the log level for the logger attached
+// to the ProxyCommand struct using the value currently set in config.
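+// Note that only the level can be changed at runtime; log format, file path,
+// and rotation settings require a restart to take effect.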
+func (c *ProxyCommand) reloadLogLevel() error { + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + return err + } + + c.logger.SetLevel(logLevel) + + return nil +} + +// reloadCerts will attempt to reload certificates using a reload func which +// was provided when the listeners were configured, only funcs that were appended +// to the ProxyCommand slice will be invoked. +// This function returns a multierror type so that every func can report an error +// if it encounters one. +func (c *ProxyCommand) reloadCerts() error { + var errors error + + c.tlsReloadFuncsLock.RLock() + defer c.tlsReloadFuncsLock.RUnlock() + + for _, reloadFunc := range c.tlsReloadFuncs { + // Non-TLS listeners will have a nil reload func. + if reloadFunc != nil { + err := reloadFunc() + if err != nil { + errors = multierror.Append(errors, err) + } + } + } + + return errors +} + +// outputErrors will take an error or multierror and handle outputting each to the UI +func (c *ProxyCommand) outputErrors(err error) { + if err != nil { + if me, ok := err.(*multierror.Error); ok { + for _, err := range me.Errors { + c.UI.Error(err.Error()) + } + } else { + c.UI.Error(err.Error()) + } + } +} diff --git a/command/proxy/config/config.go b/command/proxy/config/config.go new file mode 100644 index 000000000000..2f5f5b320181 --- /dev/null +++ b/command/proxy/config/config.go @@ -0,0 +1,865 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package config + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" +) + +// Config is the configuration for Vault Proxy. 
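+//
+// A minimal sketch of the HCL this struct models, with illustrative values
+// drawn from the test fixtures later in this change:
+//
+//	pid_file = "./pidfile"
+//
+//	listener "tcp" {
+//	  address     = "127.0.0.1:8300"
+//	  tls_disable = true
+//	}
+//
+//	vault {
+//	  address = "http://127.0.0.1:1111"
+//	}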
+type Config struct {
+ *configutil.SharedConfig `hcl:"-"`
+
+ AutoAuth *AutoAuth `hcl:"auto_auth"`
+ ExitAfterAuth bool `hcl:"exit_after_auth"`
+ Cache *Cache `hcl:"cache"`
+ APIProxy *APIProxy `hcl:"api_proxy"`
+ Vault *Vault `hcl:"vault"`
+ DisableIdleConns []string `hcl:"disable_idle_connections"`
+ DisableIdleConnsAPIProxy bool `hcl:"-"`
+ DisableIdleConnsAutoAuth bool `hcl:"-"`
+ DisableKeepAlives []string `hcl:"disable_keep_alives"`
+ DisableKeepAlivesAPIProxy bool `hcl:"-"`
+ DisableKeepAlivesAutoAuth bool `hcl:"-"`
+}
+
+const (
+ DisableIdleConnsEnv = "VAULT_PROXY_DISABLE_IDLE_CONNECTIONS"
+ DisableKeepAlivesEnv = "VAULT_PROXY_DISABLE_KEEP_ALIVES"
+)
+
+func (c *Config) Prune() {
+ for _, l := range c.Listeners {
+ l.RawConfig = nil
+ l.Profiling.UnusedKeys = nil
+ l.Telemetry.UnusedKeys = nil
+ l.CustomResponseHeaders = nil
+ }
+ c.FoundKeys = nil
+ c.UnusedKeys = nil
+ c.SharedConfig.FoundKeys = nil
+ c.SharedConfig.UnusedKeys = nil
+ if c.Telemetry != nil {
+ c.Telemetry.FoundKeys = nil
+ c.Telemetry.UnusedKeys = nil
+ }
+}
+
+type Retry struct {
+ NumRetries int `hcl:"num_retries"`
+}
+
+// Vault contains configuration for connecting to Vault servers
+type Vault struct {
+ Address string `hcl:"address"`
+ CACert string `hcl:"ca_cert"`
+ CAPath string `hcl:"ca_path"`
+ TLSSkipVerify bool `hcl:"-"`
+ TLSSkipVerifyRaw interface{} `hcl:"tls_skip_verify"`
+ ClientCert string `hcl:"client_cert"`
+ ClientKey string `hcl:"client_key"`
+ TLSServerName string `hcl:"tls_server_name"`
+ Namespace string `hcl:"namespace"`
+ Retry *Retry `hcl:"retry"`
+}
+
+// transportDialer is an interface that allows passing a custom dialer function
+// to an HTTP client's transport config
+type transportDialer interface {
+ // Dial is intended to match https://pkg.go.dev/net#Dialer.Dial
+ Dial(network, address string) (net.Conn, error)
+
+ // DialContext is intended to match https://pkg.go.dev/net#Dialer.DialContext
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// APIProxy contains any configuration needed for proxy mode
+type APIProxy struct {
+ UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"`
+ UseAutoAuthToken bool `hcl:"-"`
+ ForceAutoAuthToken bool `hcl:"-"`
+ EnforceConsistency string `hcl:"enforce_consistency"`
+ WhenInconsistent string `hcl:"when_inconsistent"`
+ PrependConfiguredNamespace bool `hcl:"prepend_configured_namespace"`
+}
+
+// Cache contains any configuration needed for Cache mode
+type Cache struct {
+ Persist *agentproxyshared.PersistConfig `hcl:"persist"`
+ InProcDialer transportDialer `hcl:"-"`
+ CacheStaticSecrets bool `hcl:"cache_static_secrets"`
+ DisableCachingDynamicSecrets bool `hcl:"disable_caching_dynamic_secrets"`
+ StaticSecretTokenCapabilityRefreshIntervalRaw interface{} `hcl:"static_secret_token_capability_refresh_interval"`
+ StaticSecretTokenCapabilityRefreshInterval time.Duration `hcl:"-"`
+ StaticSecretTokenCapabilityRefreshBehaviour string `hcl:"static_secret_token_capability_refresh_behavior"`
+}
+
+// AutoAuth is the configured authentication method and sinks
+type AutoAuth struct {
+ Method *Method `hcl:"-"`
+ Sinks []*Sink `hcl:"sinks"`
+
+ // NOTE: This is unsupported outside of testing and may disappear at any
+ // time.
+ EnableReauthOnNewCredentials bool `hcl:"enable_reauth_on_new_credentials"` +} + +// Method represents the configuration for the authentication backend +type Method struct { + Type string + MountPath string `hcl:"mount_path"` + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + MinBackoffRaw interface{} `hcl:"min_backoff"` + MinBackoff time.Duration `hcl:"-"` + MaxBackoffRaw interface{} `hcl:"max_backoff"` + MaxBackoff time.Duration `hcl:"-"` + Namespace string `hcl:"namespace"` + ExitOnError bool `hcl:"exit_on_err"` + Config map[string]interface{} +} + +// Sink defines a location to write the authenticated token +type Sink struct { + Type string + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + DHType string `hcl:"dh_type"` + DeriveKey bool `hcl:"derive_key"` + DHPath string `hcl:"dh_path"` + AAD string `hcl:"aad"` + AADEnvVar string `hcl:"aad_env_var"` + Config map[string]interface{} +} + +func NewConfig() *Config { + return &Config{ + SharedConfig: new(configutil.SharedConfig), + } +} + +// Merge merges two Proxy configurations. +func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := NewConfig() + + result.SharedConfig = c.SharedConfig + if c2.SharedConfig != nil { + result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) + } + + result.AutoAuth = c.AutoAuth + if c2.AutoAuth != nil { + result.AutoAuth = c2.AutoAuth + } + + result.Cache = c.Cache + if c2.Cache != nil { + result.Cache = c2.Cache + } + + result.APIProxy = c.APIProxy + if c2.APIProxy != nil { + result.APIProxy = c2.APIProxy + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + // For these, ignore the non-specific one and overwrite them all + result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth + if c2.DisableIdleConnsAutoAuth { + result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth + } + + result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy + if c2.DisableIdleConnsAPIProxy { + result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy + } + + result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth + if c2.DisableKeepAlivesAutoAuth { + result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth + } + + result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy + if c2.DisableKeepAlivesAPIProxy { + result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy + } + + result.ExitAfterAuth = c.ExitAfterAuth + if c2.ExitAfterAuth { + result.ExitAfterAuth = c2.ExitAfterAuth + } + + result.Vault = c.Vault + if c2.Vault != nil { + result.Vault = c2.Vault + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + return result +} + +// ValidateConfig validates a Vault configuration after it has been fully merged together, to +// ensure that required combinations of configs are there +func (c *Config) ValidateConfig() error { + if c.Cache != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("enabling the cache requires at least 1 listener to be defined") + } + } + + if c.APIProxy != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") + } + + if c.APIProxy.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return 
fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.AutoAuth != nil { + cacheStaticSecrets := c.Cache != nil && c.Cache.CacheStaticSecrets + if len(c.AutoAuth.Sinks) == 0 && + (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) && !cacheStaticSecrets { + return fmt.Errorf("auto_auth requires at least one sink, api_proxy.use_auto_auth_token=true, or cache.cache_static_secrets=true") + } + } + + if c.Cache != nil && c.Cache.CacheStaticSecrets && c.AutoAuth == nil { + return fmt.Errorf("cache.cache_static_secrets=true requires an auto-auth block configured, to use the token to connect with Vault's event system") + } + + if c.Cache != nil && !c.Cache.CacheStaticSecrets && c.Cache.DisableCachingDynamicSecrets { + return fmt.Errorf("to enable the cache, the cache must be configured to either cache static secrets or dynamic secrets") + } + + if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { + return fmt.Errorf("no auto_auth, cache, or listener block found in config") + } + + if c.Cache != nil && c.Cache.StaticSecretTokenCapabilityRefreshBehaviour != "" { + switch c.Cache.StaticSecretTokenCapabilityRefreshBehaviour { + case "pessimistic": + case "optimistic": + default: + return fmt.Errorf("cache.static_secret_token_capability_refresh_behavior must be either \"optimistic\" or \"pessimistic\"") + } + } + + return nil +} + +// LoadConfig loads the configuration at the given path, regardless if +// it's a file or directory. +func LoadConfig(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return LoadConfigDir(path) + } + return LoadConfigFile(path) +} + +// LoadConfigDir loads the configuration at the given path if it's a directory +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + result := NewConfig() + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, fmt.Errorf("error loading %q: %w", f, err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. 
+func isTemporaryFile(name string) bool {
+ return strings.HasSuffix(name, "~") || // vim
+ strings.HasPrefix(name, ".#") || // emacs
+ (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
+}
+
+// LoadConfigFile loads the configuration at the given path if it's a file
+func LoadConfigFile(path string) (*Config, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if fi.IsDir() {
+ return nil, fmt.Errorf("location is a directory, not a file")
+ }
+
+ // Read the file
+ d, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse!
+ obj, err := hcl.Parse(string(d))
+ if err != nil {
+ return nil, err
+ }
+
+ // Attribute each object key to the file it came from, so parse errors
+ // can point at the right path
+ ast.Walk(obj, func(n ast.Node) (ast.Node, bool) {
+ if k, ok := n.(*ast.ObjectKey); ok {
+ k.Token.Pos.Filename = path
+ }
+ return n, true
+ })
+
+ // Start building the result
+ result := NewConfig()
+ if err := hcl.DecodeObject(result, obj); err != nil {
+ return nil, err
+ }
+
+ sharedConfig, err := configutil.ParseConfig(string(d))
+ if err != nil {
+ return nil, err
+ }
+
+ // Pruning custom headers for Vault for now
+ for _, ln := range sharedConfig.Listeners {
+ ln.CustomResponseHeaders = nil
+ }
+
+ result.SharedConfig = sharedConfig
+
+ list, ok := obj.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+ }
+
+ if err := parseAutoAuth(result, list); err != nil {
+ return nil, fmt.Errorf("error parsing 'auto_auth': %w", err)
+ }
+
+ if err := parseCache(result, list); err != nil {
+ return nil, fmt.Errorf("error parsing 'cache': %w", err)
+ }
+
+ if err := parseAPIProxy(result, list); err != nil {
+ return nil, fmt.Errorf("error parsing 'api_proxy': %w", err)
+ }
+
+ err = parseVault(result, list)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing 'vault': %w", err)
+ }
+
+ if result.Vault != nil {
+ // Set defaults
+ if result.Vault.Retry == nil {
+ result.Vault.Retry = &Retry{}
+ }
+ switch result.Vault.Retry.NumRetries {
+ case 0:
+ result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts
+ case -1:
+ result.Vault.Retry.NumRetries = 0
+ }
+ }
+
+ if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" {
+ result.DisableIdleConns, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableIdleConnsEnv))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableIdleConnsEnv, err)
+ }
+ }
+
+ for _, subsystem := range result.DisableIdleConns {
+ switch subsystem {
+ case "auto-auth":
+ result.DisableIdleConnsAutoAuth = true
+ case "caching", "proxying":
+ result.DisableIdleConnsAPIProxy = true
+ case "":
+ continue
+ default:
+ return nil, fmt.Errorf("unknown disable_idle_connections value: %s", subsystem)
+ }
+ }
+
+ if disableKeepAlivesEnv := os.Getenv(DisableKeepAlivesEnv); disableKeepAlivesEnv != "" {
+ result.DisableKeepAlives, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableKeepAlivesEnv))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableKeepAlivesEnv, err)
+ }
+ }
+
+ for _, subsystem := range result.DisableKeepAlives {
+ switch subsystem {
+ case "auto-auth":
+ result.DisableKeepAlivesAutoAuth = true
+ case "caching", "proxying":
+ result.DisableKeepAlivesAPIProxy = true
+ case "":
+ continue
+ default:
+ return nil, fmt.Errorf("unknown disable_keep_alives value: %s", subsystem)
+ }
+ }
+
+ return result, nil
+}
+
+func parseVault(result *Config, list *ast.ObjectList) error {
+ name := "vault"
+
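+ // A sketch of the block being parsed here, with illustrative values taken
+ // from the test fixtures (see the Vault struct above for all fields):
+ //
+ //   vault {
+ //     address         = "http://127.0.0.1:1111"
+ //     tls_skip_verify = "true"
+ //     retry {
+ //       num_retries = 12
+ //     }
+ //   }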
vaultList := list.Filter(name) + if len(vaultList.Items) == 0 { + return nil + } + + if len(vaultList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := vaultList.Items[0] + + var v Vault + err := hcl.DecodeObject(&v, item.Val) + if err != nil { + return err + } + + if v.TLSSkipVerifyRaw != nil { + v.TLSSkipVerify, err = parseutil.ParseBool(v.TLSSkipVerifyRaw) + if err != nil { + return err + } + } + + result.Vault = &v + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + + if err := parseRetry(result, subs.List); err != nil { + return fmt.Errorf("error parsing 'retry': %w", err) + } + + return nil +} + +func parseRetry(result *Config, list *ast.ObjectList) error { + name := "retry" + + retryList := list.Filter(name) + if len(retryList.Items) == 0 { + return nil + } + + if len(retryList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := retryList.Items[0] + + var r Retry + err := hcl.DecodeObject(&r, item.Val) + if err != nil { + return err + } + + result.Vault.Retry = &r + + return nil +} + +func parseAPIProxy(result *Config, list *ast.ObjectList) error { + name := "api_proxy" + + apiProxyList := list.Filter(name) + if len(apiProxyList.Items) == 0 { + return nil + } + + if len(apiProxyList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := apiProxyList.Items[0] + + var apiProxy APIProxy + err := hcl.DecodeObject(&apiProxy, item.Val) + if err != nil { + return err + } + + if apiProxy.UseAutoAuthTokenRaw != nil { + apiProxy.UseAutoAuthToken, err = parseutil.ParseBool(apiProxy.UseAutoAuthTokenRaw) + if err != nil { + // Could be a value of "force" instead of "true"/"false" + switch apiProxy.UseAutoAuthTokenRaw.(type) { + case string: + v := apiProxy.UseAutoAuthTokenRaw.(string) + + if !strings.EqualFold(v, "force") { + return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", apiProxy.UseAutoAuthTokenRaw) + } + apiProxy.UseAutoAuthToken = true + apiProxy.ForceAutoAuthToken = true + + default: + return err + } + } + } + result.APIProxy = &apiProxy + + return nil +} + +func parseCache(result *Config, list *ast.ObjectList) error { + name := "cache" + + cacheList := list.Filter(name) + if len(cacheList.Items) == 0 { + return nil + } + + if len(cacheList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := cacheList.Items[0] + + var c Cache + err := hcl.DecodeObject(&c, item.Val) + if err != nil { + return err + } + + result.Cache = &c + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + subList := subs.List + if err := parsePersist(result, subList); err != nil { + return fmt.Errorf("error parsing persist: %w", err) + } + + if result.Cache.StaticSecretTokenCapabilityRefreshIntervalRaw != nil { + var err error + if result.Cache.StaticSecretTokenCapabilityRefreshInterval, err = parseutil.ParseDurationSecond(result.Cache.StaticSecretTokenCapabilityRefreshIntervalRaw); err != nil { + return fmt.Errorf("error parsing static_secret_token_capability_refresh_interval, must be provided as a duration string: %w", err) + } + result.Cache.StaticSecretTokenCapabilityRefreshIntervalRaw = nil + } + + return nil +} + +func parsePersist(result *Config, list *ast.ObjectList) error { + name := "persist" + + persistList := list.Filter(name) + if 
len(persistList.Items) == 0 {
+ return nil
+ }
+
+ if len(persistList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ item := persistList.Items[0]
+
+ var p agentproxyshared.PersistConfig
+ err := hcl.DecodeObject(&p, item.Val)
+ if err != nil {
+ return err
+ }
+
+ if p.Type == "" {
+ if len(item.Keys) == 1 {
+ p.Type = strings.ToLower(item.Keys[0].Token.Value().(string))
+ }
+ if p.Type == "" {
+ return errors.New("persist type must be specified")
+ }
+ }
+
+ result.Cache.Persist = &p
+
+ return nil
+}
+
+func parseAutoAuth(result *Config, list *ast.ObjectList) error {
+ name := "auto_auth"
+
+ autoAuthList := list.Filter(name)
+ if len(autoAuthList.Items) == 0 {
+ return nil
+ }
+ if len(autoAuthList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ // Get our item
+ item := autoAuthList.Items[0]
+
+ var a AutoAuth
+ if err := hcl.DecodeObject(&a, item.Val); err != nil {
+ return err
+ }
+
+ result.AutoAuth = &a
+
+ subs, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ return fmt.Errorf("could not parse %q as an object", name)
+ }
+ subList := subs.List
+
+ if err := parseMethod(result, subList); err != nil {
+ return fmt.Errorf("error parsing 'method': %w", err)
+ }
+ if a.Method == nil {
+ return fmt.Errorf("no 'method' block found")
+ }
+
+ if err := parseSinks(result, subList); err != nil {
+ return fmt.Errorf("error parsing 'sink' stanzas: %w", err)
+ }
+
+ if result.AutoAuth.Method.WrapTTL > 0 {
+ if len(result.AutoAuth.Sinks) != 1 {
+ return fmt.Errorf("error parsing auto_auth: wrapping enabled on auth method and zero or multiple sinks defined")
+ }
+
+ if result.AutoAuth.Sinks[0].WrapTTL > 0 {
+ return fmt.Errorf("error parsing auto_auth: wrapping enabled both on auth method and sink")
+ }
+ }
+
+ if result.AutoAuth.Method.MaxBackoffRaw != nil {
+ var err error
+ if result.AutoAuth.Method.MaxBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MaxBackoffRaw); err != nil {
+ return err
+ }
+ result.AutoAuth.Method.MaxBackoffRaw = nil
+ }
+
+ if result.AutoAuth.Method.MinBackoffRaw != nil {
+ var err error
+ if result.AutoAuth.Method.MinBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MinBackoffRaw); err != nil {
+ return err
+ }
+ result.AutoAuth.Method.MinBackoffRaw = nil
+ }
+
+ return nil
+}
+
+func parseMethod(result *Config, list *ast.ObjectList) error {
+ name := "method"
+
+ methodList := list.Filter(name)
+ if len(methodList.Items) != 1 {
+ return fmt.Errorf("one and only one %q block is required", name)
+ }
+
+ // Get our item
+ item := methodList.Items[0]
+
+ var m Method
+ if err := hcl.DecodeObject(&m, item.Val); err != nil {
+ return err
+ }
+
+ if m.Type == "" {
+ if len(item.Keys) == 1 {
+ m.Type = strings.ToLower(item.Keys[0].Token.Value().(string))
+ }
+ if m.Type == "" {
+ return errors.New("method type must be specified")
+ }
+ }
+
+ // Default to Vault's default
+ if m.MountPath == "" {
+ m.MountPath = fmt.Sprintf("auth/%s", m.Type)
+ }
+ // Standardize on no trailing slash
+ m.MountPath = strings.TrimSuffix(m.MountPath, "/")
+
+ if m.WrapTTLRaw != nil {
+ var err error
+ if m.WrapTTL, err = parseutil.ParseDurationSecond(m.WrapTTLRaw); err != nil {
+ return err
+ }
+ m.WrapTTLRaw = nil
+ }
+
+ // Canonicalize namespace path if provided
+ m.Namespace = namespace.Canonicalize(m.Namespace)
+
+ result.AutoAuth.Method = &m
+ return nil
+}
+
+func parseSinks(result *Config, list *ast.ObjectList) error {
+ name := "sink"
+
+ sinkList := list.Filter(name)
+ if len(sinkList.Items)
< 1 { + return nil + } + + var ts []*Sink + + for _, item := range sinkList.Items { + var s Sink + if err := hcl.DecodeObject(&s, item.Val); err != nil { + return err + } + + if s.Type == "" { + if len(item.Keys) == 1 { + s.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if s.Type == "" { + return errors.New("sink type must be specified") + } + } + + if s.WrapTTLRaw != nil { + var err error + if s.WrapTTL, err = parseutil.ParseDurationSecond(s.WrapTTLRaw); err != nil { + return multierror.Prefix(err, fmt.Sprintf("sink.%s", s.Type)) + } + s.WrapTTLRaw = nil + } + + switch s.DHType { + case "": + case "curve25519": + default: + return multierror.Prefix(errors.New("invalid value for 'dh_type'"), fmt.Sprintf("sink.%s", s.Type)) + } + + if s.AADEnvVar != "" { + s.AAD = os.Getenv(s.AADEnvVar) + s.AADEnvVar = "" + } + + switch { + case s.DHPath == "" && s.DHType == "": + if s.AAD != "" { + return multierror.Prefix(errors.New("specifying AAD data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + if s.DeriveKey { + return multierror.Prefix(errors.New("specifying 'derive_key' data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + case s.DHPath != "" && s.DHType != "": + default: + return multierror.Prefix(errors.New("'dh_type' and 'dh_path' must be specified together"), fmt.Sprintf("sink.%s", s.Type)) + } + + ts = append(ts, &s) + } + + result.AutoAuth.Sinks = ts + return nil +} diff --git a/command/proxy/config/config_test.go b/command/proxy/config/config_test.go new file mode 100644 index 000000000000..e0afc50de54a --- /dev/null +++ b/command/proxy/config/config_test.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package config + +import ( + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/internalshared/configutil" +) + +// TestLoadConfigFile_ProxyCache tests loading a config file containing a cache +// as well as a valid proxy config. 
+func TestLoadConfigFile_ProxyCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "unix", + Address: "/path/to/socket", + TLSDisable: true, + SocketMode: "configmode", + SocketUser: "configuser", + SocketGroup: "configgroup", + }, + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + { + Type: "tcp", + Address: "127.0.0.1:3000", + Role: "metrics_only", + TLSDisable: true, + }, + { + Type: "tcp", + Role: "default", + Address: "127.0.0.1:8400", + TLSKeyFile: "/path/to/cakey.pem", + TLSCertFile: "/path/to/cacert.pem", + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + EnforceConsistency: "always", + WhenInconsistent: "retry", + UseAutoAuthTokenRaw: true, + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") + if err != nil { + t.Fatal(err) + } + expected.Vault.TLSSkipVerifyRaw = interface{}(true) + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +// TestLoadConfigFile_NoCachingEnabled tests that you cannot enable a cache +// without either of the options to enable caching secrets +func TestLoadConfigFile_NoCachingEnabled(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-cache-but-no-secrets.hcl") + if err != nil { + t.Fatal(err) + } + + if err := cfg.ValidateConfig(); err == nil { + t.Fatalf("expected error, as you cannot configure a cache without caching secrets") + } +} + +// TestLoadConfigFile_StaticSecretCachingWithoutAutoAuth tests that loading +// a config file with static secret caching enabled but no auto auth will fail. 
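+// (ValidateConfig rejects this combination because static secret caching
+// uses the auto-auth token to connect to Vault's event system.)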
+func TestLoadConfigFile_StaticSecretCachingWithoutAutoAuth(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-cache-static-no-auto-auth.hcl") + if err != nil { + t.Fatal(err) + } + + if err := cfg.ValidateConfig(); err == nil { + t.Fatalf("expected error, as static secret caching requires auto-auth") + } +} + +// TestLoadConfigFile_ProxyCacheStaticSecrets tests loading a config file containing a cache +// as well as a valid proxy config with static secret caching enabled +func TestLoadConfigFile_ProxyCacheStaticSecrets(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-static-secret-cache.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Cache: &Cache{ + CacheStaticSecrets: true, + StaticSecretTokenCapabilityRefreshInterval: 1 * time.Hour, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + TLSSkipVerify: true, + TLSSkipVerifyRaw: interface{}("true"), + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/command/proxy/config/test-fixtures/config-cache-but-no-secrets.hcl b/command/proxy/config/test-fixtures/config-cache-but-no-secrets.hcl new file mode 100644 index 000000000000..edd8e6a2a584 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-but-no-secrets.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +cache { + cache_static_secrets = false + disable_caching_dynamic_secrets = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +vault { + address = "http://127.0.0.1:1111" + tls_skip_verify = "true" +} diff --git a/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl new file mode 100644 index 000000000000..cd953e7f3d0a --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "retry" +} + +cache { + persist "kubernetes" { + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener { + type = "unix" + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener { + type = "tcp" + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener { + type = "tcp" + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = true + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/proxy/config/test-fixtures/config-cache-static-no-auto-auth.hcl b/command/proxy/config/test-fixtures/config-cache-static-no-auto-auth.hcl new file mode 100644 index 000000000000..815d7fd8e615 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-static-no-auto-auth.hcl @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +cache { + cache_static_secrets = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +vault { + address = "http://127.0.0.1:1111" + tls_skip_verify = "true" +} diff --git a/command/proxy/config/test-fixtures/config-cache-static-secret-cache.hcl b/command/proxy/config/test-fixtures/config-cache-static-secret-cache.hcl new file mode 100644 index 000000000000..fa395bd8bdc5 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-static-secret-cache.hcl @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +cache { + cache_static_secrets = true + static_secret_token_capability_refresh_interval = "1h" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +vault { + address = "http://127.0.0.1:1111" + tls_skip_verify = "true" +} diff --git a/command/proxy/config/test-fixtures/config-cache.hcl b/command/proxy/config/test-fixtures/config-cache.hcl new file mode 100644 index 000000000000..caf153479560 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache.hcl @@ -0,0 +1,75 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "retry" +} + +cache { + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener "unix" { + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener "tcp" { + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/proxy/test-fixtures/reload/reload_bar.key b/command/proxy/test-fixtures/reload/reload_bar.key new file mode 100644 index 000000000000..10849fbe1d7f --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_bar.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju +Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj +7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl +/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz +q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 +XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ +ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF +V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q +g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ +zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt +V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC +is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS +Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU +8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB +1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L +m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti +y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ +XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z +kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 +qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX +Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft +b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT +9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH +4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab +JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil +-----END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_bar.pem b/command/proxy/test-fixtures/reload/reload_bar.pem new file mode 100644 index 000000000000..a8217be5c7df --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_bar.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- 
+MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw +MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon +mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm +MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy +uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e +e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c +NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F +7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw +SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP +UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC +a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q +W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj +RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= +-----END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_ca.pem b/command/proxy/test-fixtures/reload/reload_ca.pem new file mode 100644 index 000000000000..72a74440c482 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw +MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k +JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM +SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ +VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ +9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad +KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb +U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG +A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 +hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX +Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf +oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 +Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a +mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 +XRz2GCwCNyvW +-----END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.key b/command/proxy/test-fixtures/reload/reload_foo.key new file mode 100644 index 000000000000..86e6cce63e64 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_foo.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i +ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX +xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A +A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc +gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g +Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV +I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io +yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds +a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey 
+szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX +Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU +02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK +BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ +LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa +69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L +M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 +Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV +gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ +p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X +PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ +3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO +FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 +bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT +jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa +5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P +-----END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.pem b/command/proxy/test-fixtures/reload/reload_foo.pem new file mode 100644 index 000000000000..c8b868bcd0f0 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_foo.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw +MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D +j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 +bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 +EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 +sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l +8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ +dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy +fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc +sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh +RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 +oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene +Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= +-----END CERTIFICATE----- diff --git a/command/proxy_test.go b/command/proxy_test.go new file mode 100644 index 000000000000..3c2fdd001618 --- /dev/null +++ b/command/proxy_test.go @@ -0,0 +1,2198 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/go-hclog" + vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agent" + proxyConfig "github.com/hashicorp/vault/command/proxy/config" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func testProxyCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *ProxyCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + logger: logger, + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + } +} + +// TestProxy_ExitAfterAuth tests the exit_after_auth flag, provided both +// as config and via -exit-after-auth. +func TestProxy_ExitAfterAuth(t *testing.T) { + t.Run("via_config", func(t *testing.T) { + testProxyExitAfterAuth(t, false) + }) + + t.Run("via_flag", func(t *testing.T) { + testProxyExitAfterAuth(t, true) + }) +} + +func testProxyExitAfterAuth(t *testing.T, viaFlag bool) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": agent.TestECDSAPubKey, + "jwt_supported_algs": "ES256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": "https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + dir := t.TempDir() + inf, err := os.CreateTemp(dir, "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + // We remove these files in this test since we don't need the files, we just need + // a non-conflicting file name for the config. 
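+ // (os.CreateTemp is used only to reserve a unique path; the actual
+ // contents are written later with os.WriteFile.)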
+ os.Remove(in) + t.Logf("input: %s", in) + + sinkFileName1 := makeTempFile(t, "sink-file", "") + sinkFileName2 := makeTempFile(t, "sink-file", "") + + conff, err := os.CreateTemp(dir, "conf.jwt.test.") + if err != nil { + t.Fatal(err) + } + conf := conff.Name() + conff.Close() + os.Remove(conf) + t.Logf("config: %s", conf) + + jwtToken, _ := agent.GetTestJWT(t) + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + exitAfterAuthTemplText := "exit_after_auth = true" + if viaFlag { + exitAfterAuthTemplText = "" + } + + config := ` +%s + +auto_auth { + method { + type = "jwt" + config = { + role = "test" + path = "%s" + } + } + + sink { + type = "file" + config = { + path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +} +` + + config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sinkFileName1, sinkFileName2) + if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test config", "path", conf) + } + + doneCh := make(chan struct{}) + go func() { + ui, cmd := testProxyCommand(t, logger) + cmd.client = client + + args := []string{"-config", conf} + if viaFlag { + args = append(args, "-exit-after-auth") + } + + code := cmd.Run(args) + if code != 0 { + t.Errorf("expected %d to be %d", code, 0) + t.Logf("output from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("error from proxy:\n%s", ui.ErrorWriter.String()) + } + close(doneCh) + }() + + select { + case <-doneCh: + break + case <-time.After(1 * time.Minute): + t.Fatal("timeout reached while waiting for proxy to exit") + } + + sink1Bytes, err := os.ReadFile(sinkFileName1) + if err != nil { + t.Fatal(err) + } + if len(sink1Bytes) == 0 { + t.Fatal("got no output from sink 1") + } + + sink2Bytes, err := os.ReadFile(sinkFileName2) + if err != nil { + t.Fatal(err) + } + if len(sink2Bytes) == 0 { + t.Fatal("got no output from sink 2") + } + + if string(sink1Bytes) != string(sink2Bytes) { + t.Fatal("sink 1/2 values don't match") + } +} + +// TestProxy_NoTriggerAutoAuth_BadPolicy tests that auto auth is not re-triggered +// if Proxy uses a token with incorrect policy access. 
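+// A permission-denied response from Vault should be passed through to the
+// client as-is, not misread as an invalid token that would re-trigger
+// auto-auth and rewrite the sink.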
+func TestProxy_NoTriggerAutoAuth_BadPolicy(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Add a secret to the KV engine + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{"user": "something"}) + require.NoError(t, err) + + // Create kv read policy + noKvAccess := `path "secret/*" { +capabilities = ["deny"] +}` + err = serverClient.Sys().PutPolicy("noKvAccess", noKvAccess) + require.NoError(t, err) + + // Create a token with that policy + opts := &api.TokenCreateRequest{Policies: []string{"noKvAccess"}} + tokenResp, err := serverClient.Auth().Token().Create(opts) + require.NoError(t, err) + firstToken := tokenResp.Auth.ClientToken + + // Create token file + tokenFileName := makeTempFile(t, "token-file", firstToken) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` + listener "tcp" { + address = "%s" + tls_disable = true + } + `, listenAddr) + + config := fmt.Sprintf(` + vault { + address = "%s" + tls_skip_verify = true + } + api_proxy { + use_auto_auth_token = "force" + } + %s + %s + `, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) time.Time { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + return prevModTime + } + modTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return modTime.ModTime() + } + } + } + + // Wait for the token to be sent to syncs and be available to be used + initialModTime := waitForFile(time.Time{}) + req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _ = request(t, proxyClient, req, 200) + + // Write a new token to the token file + newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + secondToken := newTokenResp.Auth.ClientToken + err = os.WriteFile(tokenFileName, 
[]byte(secondToken), 0o600) + require.NoError(t, err) + + // Make a request to a path that the token does not have access to + req = proxyClient.NewRequest("GET", "/v1/secret/foo") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + require.ErrorContains(t, err, logical.ErrPermissionDenied.Error()) + require.NotContains(t, err.Error(), logical.ErrInvalidToken.Error()) + + // Wait long enough to confirm that auto auth is not re-triggered + newModTime := waitForFile(initialModTime) + if newModTime.After(initialModTime) { + t.Fatal("auto auth was incorrectly re-triggered") + } + + // Read from the sink file and verify that the token has not changed + newToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + require.Equal(t, firstToken, string(newToken)) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_NoTriggerAutoAuth_ProxyTokenNotAutoAuth tests that auto auth is not re-triggered +// if Proxy uses a token that is not equal to the auto auth token +func TestProxy_NoTriggerAutoAuth_ProxyTokenNotAutoAuth(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Info) + cluster := minimal.NewTestSoloCluster(t, nil) + + serverClient := cluster.Cores[0].Client + + // Create a token + tokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + firstToken := tokenResp.Auth.ClientToken + + // Create token file + tokenFileName := makeTempFile(t, "token-file", firstToken) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` + listener "tcp" { + address = "%s" + tls_disable = true + } + `, listenAddr) + + // Do not use the auto auth token if a token is provided with the proxy client + config := fmt.Sprintf(` + vault { + address = "%s" + tls_skip_verify = true + } + api_proxy { + use_auto_auth_token = true + } + %s + %s + `, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken(firstToken) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // waitForFile polls the sink file and returns its mod time once it is newer + // than prevModTime, or prevModTime if it times out + waitForFile := func(prevModTime time.Time) time.Time { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + return prevModTime + } + modTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return modTime.ModTime() + } + } + } + + // Wait for the
token to become available for use + createTime := waitForFile(time.Time{}) + _, err = serverClient.Auth().Token().LookupSelf() + require.NoError(t, err) + + // Revoke token + err = serverClient.Auth().Token().RevokeOrphan(firstToken) + require.NoError(t, err) + + // Write a new token to the token file + newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + secondToken := newTokenResp.Auth.ClientToken + err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600) + require.NoError(t, err) + + // Proxy makes a request with a token that is not the auto auth token, which should result in an error + proxyClient.SetToken("random token") + _, err = proxyClient.Auth().Token().LookupSelf() + require.Error(t, err) + + // Wait to see if the sink file is modified + newModTime := waitForFile(createTime) + if newModTime.After(createTime) { + t.Fatal("auto auth was incorrectly re-triggered") + } + + // Read from the sink and verify that the token has not changed + newToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + require.Equal(t, firstToken, string(newToken)) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ReTriggerAutoAuth_ForceAutoAuthToken tests that auto auth is re-triggered +// if Proxy always forcibly uses the auto auth token +func TestProxy_ReTriggerAutoAuth_ForceAutoAuthToken(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Create a token + tokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + firstToken := tokenResp.Auth.ClientToken + + // Create token file + tokenFileName := makeTempFile(t, "token-file", firstToken) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` + listener "tcp" { + address = "%s" + tls_disable = true + } + `, listenAddr) + + // Force the use of the auto auth token, even if a token is provided with the proxy client + config := fmt.Sprintf(` + vault { + address = "%s" + tls_skip_verify = true + } + api_proxy { + use_auto_auth_token = "force" + } + %s + %s + `, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken(firstToken) + err =
proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // waitForFile polls the sink file and returns its mod time once it is newer + // than prevModTime, or prevModTime if it times out + waitForFile := func(prevModTime time.Time) time.Time { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + return prevModTime + } + modTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return modTime.ModTime() + } + } + } + + // Wait for the token to become available for use + createTime := waitForFile(time.Time{}) + req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.NoError(t, err) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, firstToken)) + _ = request(t, serverClient, req, 204) + + // Create new token + newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{}) + require.NoError(t, err) + secondToken := newTokenResp.Auth.ClientToken + + // Proxy uses the same token in the token file to make a request, which should result in an error + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Write a new token to the token file so that auto auth can write the new token to the sink + err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600) + require.NoError(t, err) + + // Wait until the sink file is modified + waitForFile(createTime) + + // Read from the sink and verify that the sink contains the new token + newToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + require.Equal(t, secondToken, string(newToken)) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ReTriggerAutoAuth_ProxyIsAutoAuthToken tests that auto auth is re-triggered +// when the proxy client uses a token that is equal to the auto auth token +func TestProxy_ReTriggerAutoAuth_ProxyIsAutoAuthToken(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "4m", + "token_num_uses": "10", + "token_ttl": "4m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID :=
data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = true +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // Wait for re-triggered auto auth to write new token to sink + waitForFile := func(prevModTime time.Time) { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + t.Fatal("timed out waiting for re-triggered auto auth to complete") + } + modTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return + } + } + } + + // Wait for the token to be sent to syncs and be available to be used + waitForFile(time.Time{}) + oldToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + prevModTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + + // Set proxy token + proxyClient.SetToken(string(oldToken)) + + // Make request using proxy client to test that token is valid + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, oldToken)) + body = request(t, serverClient, req, 204) + + // Proxy uses revoked token to make request and should result in an error + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Wait for new token to be written and available to use + waitForFile(prevModTime.ModTime()) + + // Verify new token is not equal to the old token + newToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + require.NotEqual(t, string(newToken), string(oldToken)) + + // Verify that proxy no longer fails when making a request with the new token + proxyClient.SetToken(string(newToken)) + 
req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ReTriggerAutoAuth_RevokedToken tests that auto auth is re-triggered +// when Proxy uses a revoked auto auth token to make a request +func TestProxy_ReTriggerAutoAuth_RevokedToken(t *testing.T) { + proxyLogger := logging.NewVaultLogger(hclog.Trace) + vaultLogger := logging.NewVaultLogger(hclog.Info) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + Logger: vaultLogger, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "4m", + "token_num_uses": "10", + "token_ttl": "4m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + + sinkFileName := makeTempFile(t, "sink-file", "") + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = "force" +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, proxyLogger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatal(err) + } + + proxyClient.SetToken("") + 
err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + // waitForFile waits until the sink file is newer than prevModTime, failing the test on timeout + waitForFile := func(prevModTime time.Time) { + ticker := time.Tick(100 * time.Millisecond) + timeout := time.After(15 * time.Second) + for { + select { + case <-ticker: + case <-timeout: + t.Fatal("timed out waiting for re-triggered auto auth to complete") + } + modTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + if modTime.ModTime().After(prevModTime) { + return + } + } + } + + // Wait for the token to be sent to the sink and become available for use + waitForFile(time.Time{}) + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + oldToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + prevModTime, err := os.Stat(sinkFileName) + require.NoError(t, err) + + // Revoke token + req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke") + req.BodyBytes = []byte(fmt.Sprintf(`{ + "token": "%s" + }`, oldToken)) + body = request(t, serverClient, req, 204) + + // Proxy uses revoked token to make request and should result in an error + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + _, err = proxyClient.RawRequest(req) + require.Error(t, err) + + // Wait for new token to be written and available to use + waitForFile(prevModTime.ModTime()) + + // Verify new token is not equal to the old token + newToken, err := os.ReadFile(sinkFileName) + require.NoError(t, err) + require.NotEqual(t, string(newToken), string(oldToken)) + + // Verify that proxy no longer fails when making a request + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_AutoAuth_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct when performing Auto-Auth. +// Uses the custom handler userAgentHandler (defined above) so +// that Vault validates the User-Agent on requests sent by Proxy.
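+
+// A minimal sketch of the idea behind userAgentHandler (the real type is
+// defined earlier in this file); the names here are hypothetical, and net/http
+// and testing are assumed to be imported:
+type checkUserAgentHandler struct {
+	t    *testing.T
+	want string
+	next http.Handler
+}
+
+func (h *checkUserAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Validate the proxy's User-Agent before delegating to the real handler.
+	if got := r.UserAgent(); got != h.want {
+		h.t.Errorf("unexpected User-Agent: want %q, got %q", h.want, got)
+	}
+	h.next.ServeHTTP(w, r)
+}
+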
+func TestProxy_AutoAuth_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyAutoAuthString() + h.requestMethodToCheck = "PUT" + h.pathToCheck = "auth/approle/login" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "1m", + "token_num_uses": "10", + "token_ttl": "1m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + + sinkFileName := makeTempFile(t, "sink-file", "") + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sinkFileName) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = true +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatalf("err: %s", err) + } + + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // Wait for the token to 
be sent to the sink and become available for use + time.Sleep(5 * time.Second) + + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_APIProxyWithoutCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct using the API proxy without +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Proxy. +func TestProxy_APIProxyWithoutCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = proxyClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_APIProxyWithCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct using the API proxy with +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Proxy.
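+// (This mirrors TestProxy_APIProxyWithoutCache_UserAgent above; the point is
+// that adding the cache layer must not change the User-Agent the proxy sends
+// upstream.)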
+func TestProxy_APIProxyWithCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + cacheConfig := ` +cache { +}` + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), listenConfig, cacheConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start the proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = proxyClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_Cache_DynamicSecret tests that the cache successfully caches a dynamic secret +// going through the Proxy, and that a subsequent request will be served from the cache. 
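+// (The assertion below relies on cached responses being replayed verbatim:
+// two identical orphan-token create requests can only return the same client
+// token if the second response was served from the cache.)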
+func TestProxy_Cache_DynamicSecret(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + renewable := true + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + // Creating an orphan token is the simplest way to exercise the caching behaviour: + // the create response includes Auth, the token is renewable, and it is not + // managed elsewhere (since it is an orphan) + secret, err := proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token := secret.Auth.ClientToken + + secret, err = proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token2 := secret.Auth.ClientToken + + if token != token2 { + t.Fatalf("token create response not cached when it should have been, as tokens differ") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_NoAutoAuthTokenIfNotConfigured tests that Proxy will not use the auto-auth token +// unless configured to.
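+
+// Illustrative sketch, not used below: the fixed sleep in this test could be
+// replaced with polling, e.g. via testify's require.Eventually. The helper
+// name is hypothetical; it assumes this file's existing imports.
+func waitForNonEmptyFile(t *testing.T, path string) {
+	t.Helper()
+	require.Eventually(t, func() bool {
+		b, err := os.ReadFile(path)
+		return err == nil && len(b) > 0
+	}, 10*time.Second, 100*time.Millisecond, "sink file was never populated")
+}
+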
+func TestProxy_NoAutoAuthTokenIfNotConfigured(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Create token file + tokenFileName := makeTempFile(t, "token-file", serverClient.Token()) + + sinkFileName := makeTempFile(t, "sink-file", "") + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method { + type = "token_file" + config = { + token_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, tokenFileName, sinkFileName) + + apiProxyConfig := ` +api_proxy { + use_auto_auth_token = false +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +%s +`, serverClient.Address(), apiProxyConfig, listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + + // Start proxy + ui, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running proxy: %d", code) + t.Logf("STDOUT from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from proxy:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // Wait for the sink to be populated. + // Realistically won't be this long, but keeping it long just in case, for CI. 
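+ // (A polling helper like the waitForNonEmptyFile sketch above would also work here and return as soon as the sink is populated.)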
+ time.Sleep(10 * time.Second) + + secret, err := proxyClient.Auth().Token().CreateOrphan(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if secret != nil || err == nil { + t.Fatal("expected this to fail, since without a token you should not be able to make a token") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ApiProxy_Retry Tests the retry functionalities of Vault Proxy's API Proxy +func TestProxy_ApiProxy_Retry(t *testing.T) { + // ---------------------------------------------------- + // Start the server and proxy + // ---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h handler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{ + "bar": "baz", + }) + if err != nil { + t.Fatal(err) + } + + intRef := func(i int) *int { + return &i + } + + // start test cases here + testCases := map[string]struct { + retries *int + expectError bool + }{ + "none": { + retries: intRef(-1), + expectError: true, + }, + "one": { + retries: intRef(1), + expectError: true, + }, + "two": { + retries: intRef(2), + expectError: false, + }, + "missing": { + retries: nil, + expectError: false, + }, + "default": { + retries: intRef(0), + expectError: false, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + h.failCount = 2 + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var retryConf string + if tc.retries != nil { + retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) + } + + config := fmt.Sprintf(` +vault { + address = "%s" + %s + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), retryConf, cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + secret, err := client.Logical().Read("secret/foo") + switch { + case (err != nil || secret == nil) && tc.expectError: + case (err == nil || secret != nil) && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v secret=%v", tcname, tc.expectError, err, secret) + } + if secret != nil && secret.Data["foo"] != nil { + val := 
secret.Data["foo"].(map[string]interface{}) + if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { + t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) + } + } + time.Sleep(time.Second) + + close(cmd.ShutdownCh) + wg.Wait() + }) + } +} + +// TestProxy_Metrics tests that metrics are being properly reported. +func TestProxy_Metrics(t *testing.T) { + // Start a vault server + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Create a config file + listenAddr := generateListenerAddress(t) + config := fmt.Sprintf(` +cache {} + +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + configPath := makeTempFile(t, "config.hcl", config) + + ui, cmd := testProxyCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running proxy: %d", code) + t.Logf("STDOUT from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from proxy:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // defer proxy shutdown + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatalf("err: %s", err) + } + + req := proxyClient.NewRequest("GET", "/proxy/v1/metrics") + body := request(t, proxyClient, req, 200) + keys := []string{} + for k := range body { + keys = append(keys, k) + } + require.ElementsMatch(t, keys, []string{ + "Counters", + "Samples", + "Timestamp", + "Gauges", + "Points", + }) +} + +// TestProxy_QuitAPI Tests the /proxy/v1/quit API that can be enabled for the proxy. +func TestProxy_QuitAPI(t *testing.T) { + cluster := minimal.NewTestSoloCluster(t, nil) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + err := os.Unsetenv(api.EnvVaultAddress) + if err != nil { + t.Fatal(err) + } + + listenAddr := generateListenerAddress(t) + listenAddr2 := generateListenerAddress(t) + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} + +listener "tcp" { + address = "%s" + tls_disable = true +} + +listener "tcp" { + address = "%s" + tls_disable = true + proxy_api { + enable_quit = true + } +} + +cache {} +`, serverClient.Address(), listenAddr, listenAddr2) + + configPath := makeTempFile(t, "config.hcl", config) + + _, cmd := testProxyCommand(t, nil) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // First try on listener 1 where the API should be disabled. 
+ resp, err := client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) + if err == nil { + t.Fatalf("expected error") + } + if resp != nil && resp.StatusCode != http.StatusNotFound { + t.Fatalf("expected %d but got: %d", http.StatusNotFound, resp.StatusCode) + } + + // Now try on listener 2 where the quit API should be enabled. + err = client.SetAddress("http://" + listenAddr2) + if err != nil { + t.Fatal(err) + } + + _, err = client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-cmd.ShutdownCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + wg.Wait() +} + +// TestProxy_LogFile_CliOverridesConfig tests that the CLI values +// override the config for log files +func TestProxy_LogFile_CliOverridesConfig(t *testing.T) { + // Create basic config + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Sanity check that the config value is the current value + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile) + + // Initialize the command and parse any flags + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + // Simulate the flag being specified + err = f.Parse([]string{"-log-file=/foo/bar/test.log"}) + if err != nil { + t.Fatal(err) + } + + // Update the config based on the inputs. + cmd.applyConfigOverrides(f, cfg) + + assert.NotEqual(t, "TMPDIR/juan.log", cfg.LogFile) + assert.NotEqual(t, "/squiggle/logs.txt", cfg.LogFile) + assert.Equal(t, "/foo/bar/test.log", cfg.LogFile) +} + +// TestProxy_LogFile_Config tests log file config when loaded from config +func TestProxy_LogFile_Config(t *testing.T) { + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Sanity check that the config value is the current value + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "sanity check on log config failed") + assert.Equal(t, 2, cfg.LogRotateMaxFiles) + assert.Equal(t, 1048576, cfg.LogRotateBytes) + + // Parse the cli flags (but we pass in an empty slice) + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + err = f.Parse([]string{}) + if err != nil { + t.Fatal(err) + } + + // Should change nothing... + cmd.applyConfigOverrides(f, cfg) + + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "actual config check") + assert.Equal(t, 2, cfg.LogRotateMaxFiles) + assert.Equal(t, 1048576, cfg.LogRotateBytes) +} + +// TestProxy_EnvVar_Overrides tests that environment variables are properly +// parsed and override defaults. 
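+// (applyConfigOverrides re-reads the environment on each call, which is what
+// lets the t.Setenv toggles below take effect without reloading the config
+// file.)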
+func TestProxy_EnvVar_Overrides(t *testing.T) { + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + assert.Equal(t, false, cfg.Vault.TLSSkipVerify) + + t.Setenv("VAULT_SKIP_VERIFY", "true") + // Parse the cli flags (but we pass in an empty slice) + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + err = f.Parse([]string{}) + if err != nil { + t.Fatal(err) + } + + cmd.applyConfigOverrides(f, cfg) + assert.Equal(t, true, cfg.Vault.TLSSkipVerify) + + t.Setenv("VAULT_SKIP_VERIFY", "false") + + cmd.applyConfigOverrides(f, cfg) + assert.Equal(t, false, cfg.Vault.TLSSkipVerify) +} + +// TestProxy_Config_NewLogger_Default Tests defaults for log level and +// specifically cmd.newLogger() +func TestProxy_Config_NewLogger_Default(t *testing.T) { + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + cmd.config = proxyConfig.NewConfig() + logger, err := cmd.newLogger() + + assert.NoError(t, err) + assert.NotNil(t, logger) + assert.Equal(t, hclog.Info.String(), logger.GetLevel().String()) +} + +// TestProxy_Config_ReloadLogLevel Tests reloading updates the log +// level as expected. +func TestProxy_Config_ReloadLogLevel(t *testing.T) { + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + var err error + tempDir := t.TempDir() + + // Load an initial config + hcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) + configFile := populateTempFile(t, "proxy-config.hcl", hcl) + cmd.config, err = proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Tweak the loaded config to make sure we can put log files into a temp dir + // and systemd log attempts work fine, this would usually happen during Run. + cmd.logWriter = os.Stdout + cmd.logger, err = cmd.newLogger() + if err != nil { + t.Fatal("logger required for systemd log messages", err) + } + + // Sanity check + assert.Equal(t, "warn", cmd.config.LogLevel) + + // Load a new config + hcl = strings.ReplaceAll(BasicHclConfig2, "TMPDIR", tempDir) + configFile = populateTempFile(t, "proxy-config.hcl", hcl) + err = cmd.reloadConfig([]string{configFile.Name()}) + assert.NoError(t, err) + assert.Equal(t, "debug", cmd.config.LogLevel) +} + +// TestProxy_Config_ReloadTls Tests that the TLS certs for the listener are +// correctly reloaded. 
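+
+// Illustrative sketch, not used below: the repeated ReadFile/WriteFile pairs
+// in this test could be collapsed into a small helper. The name copyTestFile
+// is hypothetical; it assumes this file's existing imports.
+func copyTestFile(t *testing.T, src, dst string) {
+	t.Helper()
+	b, err := os.ReadFile(src)
+	require.NoError(t, err)
+	require.NoError(t, os.WriteFile(dst, b, 0o777))
+}
+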
+func TestProxy_Config_ReloadTls(t *testing.T) { + var wg sync.WaitGroup + wd, err := os.Getwd() + if err != nil { + t.Fatal("unable to get current working directory") + } + workingDir := filepath.Join(wd, "/proxy/test-fixtures/reload") + fooCert := "reload_foo.pem" + fooKey := "reload_foo.key" + + barCert := "reload_bar.pem" + barKey := "reload_bar.key" + + reloadCert := "reload_cert.pem" + reloadKey := "reload_key.pem" + caPem := "reload_ca.pem" + + tempDir := t.TempDir() + + // Set up initial 'foo' certs + inBytes, err := os.ReadFile(filepath.Join(workingDir, fooCert)) + if err != nil { + t.Fatal("unable to read cert required for test", fooCert, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert required for test", reloadCert, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, fooKey)) + if err != nil { + t.Fatal("unable to read cert key required for test", fooKey, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert key required for test", reloadKey, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, caPem)) + if err != nil { + t.Fatal("unable to read CA pem required for test", caPem, err) + } + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + replacedHcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) + configFile := populateTempFile(t, "proxy-config.hcl", replacedHcl) + + // Set up Proxy + logger := logging.NewVaultLogger(hclog.Trace) + ui, cmd := testProxyCommand(t, logger) + + var output string + var code int + wg.Add(1) + args := []string{"-config", configFile.Name()} + go func() { + if code = cmd.Run(args); code != 0 { + output = ui.ErrorWriter.String() + ui.OutputWriter.String() + } + wg.Done() + }() + + testCertificateName := func(cn string) error { + conn, err := tls.Dial("tcp", "127.0.0.1:8100", &tls.Config{ + RootCAs: certPool, + }) + if err != nil { + return err + } + defer conn.Close() + if err = conn.Handshake(); err != nil { + return err + } + servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName + if servName != cn { + return fmt.Errorf("expected %s, got %s", cn, servName) + } + return nil + } + + // Start + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("foo.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + // Swap out certs + inBytes, err = os.ReadFile(filepath.Join(workingDir, barCert)) + if err != nil { + t.Fatal("unable to read cert required for test", barCert, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert required for test", reloadCert, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, barKey)) + if err != nil { + t.Fatal("unable to read cert key required for test", barKey, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert key required for test", reloadKey, err) + } + + // Reload + cmd.SighupCh <- struct{}{} + select { + case <-cmd.reloadedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("bar.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + // 
Shut down + cmd.ShutdownCh <- struct{}{} + wg.Wait() + + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } +} diff --git a/command/read.go b/command/read.go index 17b85529e12d..67ee2d6d7cd5 100644 --- a/command/read.go +++ b/command/read.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,7 +10,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -36,9 +36,17 @@ Usage: vault read [options] PATH Reads data from Vault at the given path. This can be used to read secrets, generate dynamic credentials, get configuration details, and more. - Read a secret from the static secrets engine: + Read details of your own token: - $ vault read secret/my-secret + $ vault read auth/token/lookup-self + + Read entity details of a given ID: + + $ vault read identity/entity/id/2f09126d-d161-abb8-2241-555886491d97 + + Generate credentials for my-role in an AWS secrets engine: + + $ vault read aws/creds/my-role For a full list of examples and paths, please see the documentation that corresponds to the secrets engine in use. diff --git a/command/read_test.go b/command/read_test.go index fbe7ab414fa5..fe8961afb669 100644 --- a/command/read_test.go +++ b/command/read_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testReadCommand(tb testing.TB) (*cli.MockUi, *ReadCommand) { diff --git a/command/rotate.go b/command/rotate.go index 7a174f34eb9c..2a17e41f9b5e 100644 --- a/command/rotate.go +++ b/command/rotate.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/rotate_test.go b/command/rotate_test.go index bfd48f7b026c..927812934c3f 100644 --- a/command/rotate_test.go +++ b/command/rotate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorRotateCommand(tb testing.TB) (*cli.MockUi, *OperatorRotateCommand) { diff --git a/command/secrets.go b/command/secrets.go index 320167226c20..a205aae17443 100644 --- a/command/secrets.go +++ b/command/secrets.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*SecretsCommand)(nil) diff --git a/command/secrets_disable.go b/command/secrets_disable.go index 8d782a524577..163af4a785c7 100644 --- a/command/secrets_disable.go +++ b/command/secrets_disable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/secrets_disable_test.go b/command/secrets_disable_test.go index d7c7da713bd7..253107136430 100644 --- a/command/secrets_disable_test.go +++ b/command/secrets_disable_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testSecretsDisableCommand(tb testing.TB) (*cli.MockUi, *SecretsDisableCommand) { diff --git a/command/secrets_enable.go b/command/secrets_enable.go index 39ce3bf1b880..a73a5e49ef87 100644 --- a/command/secrets_enable.go +++ b/command/secrets_enable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -41,6 +41,8 @@ type SecretsEnableCommand struct { flagExternalEntropyAccess bool flagVersion int flagAllowedManagedKeys []string + flagDelegatedAuthAccessors []string + flagIdentityTokenKey string } func (c *SecretsEnableCommand) Synopsis() string { @@ -228,6 +230,21 @@ func (c *SecretsEnableCommand) Flags() *FlagSets { "each time with 1 key.", }) + f.StringSliceVar(&StringSliceVar{ + Name: flagNameDelegatedAuthAccessors, + Target: &c.flagDelegatedAuthAccessors, + Usage: "A list of permitted authentication accessors this backend can delegate authentication to. " + + "Note that multiple values may be specified by providing this option multiple times, " + + "each time with 1 accessor.", + }) + + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -331,9 +348,17 @@ func (c *SecretsEnableCommand) Run(args []string) int { mountInput.Config.AllowedManagedKeys = c.flagAllowedManagedKeys } + if fl.Name == flagNameDelegatedAuthAccessors { + mountInput.Config.DelegatedAuthAccessors = c.flagDelegatedAuthAccessors + } + if fl.Name == flagNamePluginVersion { mountInput.Config.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameIdentityTokenKey { + mountInput.Config.IdentityTokenKey = c.flagIdentityTokenKey + } }) if err := client.Sys().Mount(mountPath, mountInput); err != nil { diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go index 93984b3c33dd..3efc171a7be1 100644 --- a/command/secrets_enable_test.go +++ b/command/secrets_enable_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,20 +7,18 @@ import ( "errors" "io/ioutil" "os" + "sort" "strings" "testing" "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" + "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" ) -// logicalBackendAdjustmentFactor is set to plus 1 for the database backend -// which is a plugin but not found in go.mod files, and minus 1 for the ldap -// and openldap secret backends which have the same underlying plugin. -var logicalBackendAdjustmentFactor = 1 - 1 - func testSecretsEnableCommand(tb testing.TB) (*cli.MockUi, *SecretsEnableCommand) { tb.Helper() @@ -120,6 +118,8 @@ func TestSecretsEnableCommand_Run(t *testing.T) { "-passthrough-request-headers", "www-authentication", "-allowed-response-headers", "authorization", "-allowed-managed-keys", "key1,key2", + "-identity-token-key", "default", + "-delegated-auth-accessors", "authAcc1,authAcc2", "-force-no-cache", "pki", }) @@ -172,6 +172,12 @@ func TestSecretsEnableCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff) } + if diff := deep.Equal([]string{"authAcc1,authAcc2"}, mountInfo.Config.DelegatedAuthAccessors); len(diff) > 0 { + t.Errorf("Failed to find expected values in DelegatedAuthAccessors. Difference is: %v", diff) + } + if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff) + } }) t.Run("communication_failure", func(t *testing.T) { @@ -218,7 +224,7 @@ func TestSecretsEnableCommand_Run(t *testing.T) { var backends []string for _, f := range files { if f.IsDir() { - if f.Name() == "plugin" { + if f.Name() == "plugin" || f.Name() == "database" { continue } if _, err := os.Stat("../builtin/logical/" + f.Name() + "/backend.go"); errors.Is(err, os.ErrNotExist) { @@ -245,10 +251,12 @@ func TestSecretsEnableCommand_Run(t *testing.T) { } } - // backends are found by walking the directory, which includes the database backend, - // however, the plugins registry omits that one - if len(backends) != len(builtinplugins.Registry.Keys(consts.PluginTypeSecrets))+logicalBackendAdjustmentFactor { - t.Fatalf("expected %d logical backends, got %d", len(builtinplugins.Registry.Keys(consts.PluginTypeSecrets))+logicalBackendAdjustmentFactor, len(backends)) + regkeys := strutil.StrListDelete(builtinplugins.Registry.Keys(consts.PluginTypeSecrets), "ldap") + sort.Strings(regkeys) + sort.Strings(backends) + + if d := cmp.Diff(regkeys, backends); len(d) > 0 { + t.Fatalf("found logical registry mismatch: %v", d) } for _, b := range backends { diff --git a/command/secrets_list.go b/command/secrets_list.go index 90a8fe8ed973..2819e2a1d390 100644 --- a/command/secrets_list.go +++ b/command/secrets_list.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,8 +9,8 @@ import ( "strconv" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/secrets_list_test.go b/command/secrets_list_test.go index 95b60e34071a..dcc51eb01892 100644 --- a/command/secrets_list_test.go +++ b/command/secrets_list_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testSecretsListCommand(tb testing.TB) (*cli.MockUi, *SecretsListCommand) { diff --git a/command/secrets_move.go b/command/secrets_move.go index b74adcd6af7b..bd4062969a49 100644 --- a/command/secrets_move.go +++ b/command/secrets_move.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/secrets_move_test.go b/command/secrets_move_test.go index 3aabaa179bbe..ed7a5a5c629c 100644 --- a/command/secrets_move_test.go +++ b/command/secrets_move_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testSecretsMoveCommand(tb testing.TB) (*cli.MockUi, *SecretsMoveCommand) { diff --git a/command/secrets_tune.go b/command/secrets_tune.go index 74753e29d333..b853aec2711b 100644 --- a/command/secrets_tune.go +++ b/command/secrets_tune.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -35,6 +35,8 @@ type SecretsTuneCommand struct { flagVersion int flagPluginVersion string flagAllowedManagedKeys []string + flagDelegatedAuthAccessors []string + flagIdentityTokenKey string } func (c *SecretsTuneCommand) Synopsis() string { @@ -158,6 +160,21 @@ func (c *SecretsTuneCommand) Flags() *FlagSets { "the plugin catalog, and will not start running until the plugin is reloaded.", }) + f.StringSliceVar(&StringSliceVar{ + Name: flagNameDelegatedAuthAccessors, + Target: &c.flagDelegatedAuthAccessors, + Usage: "A list of permitted authentication accessors this backend can delegate authentication to. 
" + + "Note that multiple values may be specified by providing this option multiple times, " + + "each time with 1 accessor.", + }) + + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -242,6 +259,14 @@ func (c *SecretsTuneCommand) Run(args []string) int { if fl.Name == flagNamePluginVersion { mountConfigInput.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameDelegatedAuthAccessors { + mountConfigInput.DelegatedAuthAccessors = c.flagDelegatedAuthAccessors + } + + if fl.Name == flagNameIdentityTokenKey { + mountConfigInput.IdentityTokenKey = c.flagIdentityTokenKey + } }) if err := client.Sys().TuneMount(mountPath, mountConfigInput); err != nil { diff --git a/command/secrets_tune_test.go b/command/secrets_tune_test.go index 25b8a7ce791f..b2d932779fd8 100644 --- a/command/secrets_tune_test.go +++ b/command/secrets_tune_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,9 +8,9 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/mitchellh/cli" ) func testSecretsTuneCommand(tb testing.TB) (*cli.MockUi, *SecretsTuneCommand) { @@ -152,8 +152,7 @@ func TestSecretsTuneCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Run("flags_all", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -193,8 +192,10 @@ func TestSecretsTuneCommand_Run(t *testing.T) { "-passthrough-request-headers", "www-authentication", "-allowed-response-headers", "authorization,www-authentication", "-allowed-managed-keys", "key1,key2", + "-identity-token-key", "default", "-listing-visibility", "unauth", "-plugin-version", version, + "-delegated-auth-accessors", "authAcc1,authAcc2", "mount_tune_integration/", }) if exp := 0; code != exp { @@ -246,6 +247,12 @@ func TestSecretsTuneCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff) } + if diff := deep.Equal([]string{"authAcc1,authAcc2"}, mountInfo.Config.DelegatedAuthAccessors); len(diff) > 0 { + t.Errorf("Failed to find expected values in DelegatedAuthAccessors. Difference is: %v", diff) + } + if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff) + } }) t.Run("flags_description", func(t *testing.T) { diff --git a/command/server.go b/command/server.go index 2e25ee6f2806..8de060772971 100644 --- a/command/server.go +++ b/command/server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,9 +8,9 @@ import ( "crypto/sha256" "encoding/base64" "encoding/hex" + "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -25,8 +25,11 @@ import ( "time" systemd "github.com/coreos/go-systemd/daemon" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" wrapping "github.com/hashicorp/go-kms-wrapping/v2" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" "github.com/hashicorp/go-multierror" @@ -50,17 +53,17 @@ import ( "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/hcp_link" + "github.com/hashicorp/vault/vault/plugincatalog" vaultseal "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" - "github.com/mitchellh/go-testing-interface" - "github.com/pkg/errors" "github.com/posener/complete" + "github.com/sasha-s/go-deadlock" "go.uber.org/atomic" "golang.org/x/net/http/httpproxy" "google.golang.org/grpc/grpclog" @@ -73,11 +76,6 @@ var ( var memProfilerEnabled = false -var enableFourClusterDev = func(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { - c.logger.Error("-dev-four-cluster only supported in enterprise Vault") - return 1 -} - const ( storageMigrationLock = "core/migration" @@ -124,6 +122,7 @@ type ServerCommand struct { flagDev bool flagDevTLS bool flagDevTLSCertDir string + flagDevTLSSANs []string flagDevRootTokenID string flagDevListenAddr string flagDevNoStoreToken bool @@ -133,16 +132,18 @@ type ServerCommand struct { flagDevLatency int flagDevLatencyJitter int flagDevLeasedKV bool + flagDevNoKV bool flagDevKVV1 bool flagDevSkipInit bool - flagDevThreeNode bool - flagDevFourCluster bool flagDevTransactional bool flagDevAutoSeal bool + flagDevClusterJson string flagTestVerifyOnly bool flagTestServerConfig bool flagDevConsul bool flagExitOnCoreShutdown bool + + sealsToFinalize []*vault.Seal } func (c *ServerCommand) Synopsis() string { @@ -205,8 +206,8 @@ func (c *ServerCommand) Flags() *FlagSets { f.BoolVar(&BoolVar{ Name: "recovery", Target: &c.flagRecovery, - Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions." + - "Using a recovery operation token, \"sys/raw\" API can be used to manipulate the storage.", + Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions. " + + "Using a recovery token, \"sys/raw\" API can be used to manipulate the storage.", }) f.StringSliceVar(&StringSliceVar{ @@ -247,6 +248,18 @@ func (c *ServerCommand) Flags() *FlagSets { "specified. If left unset, files are generated in a temporary directory.", }) + f.StringSliceVar(&StringSliceVar{ + Name: "dev-tls-san", + Target: &c.flagDevTLSSANs, + Default: nil, + Usage: "Additional Subject Alternative Name (as a DNS name or IP address) " + + "to generate the certificate with if `-dev-tls` is specified. 
The " + + "certificate will always use localhost, localhost4, localhost6, " + + "localhost.localdomain, and the host name as alternate DNS names, " + + "and 127.0.0.1 as an alternate IP address. This flag can be specified " + + "multiple times to specify multiple SANs.", + }) + f.StringVar(&StringVar{ Name: "dev-root-token-id", Target: &c.flagDevRootTokenID, @@ -328,6 +341,13 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) + f.BoolVar(&BoolVar{ + Name: "dev-no-kv", + Target: &c.flagDevNoKV, + Default: false, + Hidden: true, + }) + f.BoolVar(&BoolVar{ Name: "dev-kv-v1", Target: &c.flagDevKVV1, @@ -349,20 +369,6 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) - f.BoolVar(&BoolVar{ - Name: "dev-three-node", - Target: &c.flagDevThreeNode, - Default: false, - Hidden: true, - }) - - f.BoolVar(&BoolVar{ - Name: "dev-four-cluster", - Target: &c.flagDevFourCluster, - Default: false, - Hidden: true, - }) - f.BoolVar(&BoolVar{ Name: "dev-consul", Target: &c.flagDevConsul, @@ -370,6 +376,12 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) + f.StringVar(&StringVar{ + Name: "dev-cluster-json", + Target: &c.flagDevClusterJson, + Usage: "File to write cluster definition to", + }) + // TODO: should the below flags be public? f.BoolVar(&BoolVar{ Name: "test-verify-only", @@ -428,6 +440,8 @@ func (c *ServerCommand) parseConfig() (*server.Config, []configutil.ConfigError, config.Entropy = nil } + entCheckRequestLimiter(c, config) + return config, configErrors, nil } @@ -449,7 +463,7 @@ func (c *ServerCommand) runRecoveryMode() int { } // Update the 'log' related aspects of shared config based on config/env var/cli - c.Flags().applyLogConfigOverrides(config.SharedConfig) + c.flags.applyLogConfigOverrides(config.SharedConfig) l, err := c.configureLogging(config) if err != nil { c.UI.Error(err.Error()) @@ -515,57 +529,37 @@ func (c *ServerCommand) runRecoveryMode() int { var barrierSeal vault.Seal var sealConfigError error - var wrapper wrapping.Wrapper if len(config.Seals) == 0 { config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) } - if len(config.Seals) > 1 { - c.UI.Error("Only one seal block is accepted in recovery mode") + ctx := context.Background() + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, backend) + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting seal generation info: %v", err)) return 1 } - configSeal := config.Seals[0] - sealType := wrapping.WrapperTypeShamir.String() - if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { - sealType = os.Getenv("VAULT_SEAL_TYPE") - configSeal.Type = sealType - } else { - sealType = configSeal.Type + hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend) + if err != nil { + c.UI.Error(fmt.Sprintf("Cannot determine if there are partially seal wrapped entries in storage: %v", err)) + return 1 } - - infoKeys = append(infoKeys, "Seal Type") - info["Seal Type"] = sealType - - var seal vault.Seal - defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(), - }) - sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) - wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger) - if sealConfigError != nil { - if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { - c.UI.Error(fmt.Sprintf( - "Error parsing Seal configuration: %s", sealConfigError)) - return 1 - } + setSealResponse, err := setSeal(c, config, infoKeys, info, 
existingSealGenerationInfo, hasPartialPaths) + if err != nil { + c.UI.Error(err.Error()) + return 1 } - if wrapper == nil { - seal = defaultSeal - } else { - seal, err = vault.NewAutoSeal(&vaultseal.Access{ - Wrapper: wrapper, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("error creating auto seal: %v", err)) - } + if setSealResponse.barrierSeal == nil { + c.UI.Error(fmt.Sprintf("Error setting up seal: %v", setSealResponse.sealConfigError)) + return 1 } - barrierSeal = seal + barrierSeal = setSealResponse.barrierSeal // Ensure that the seal finalizer is called, even if using verify-only defer func() { - err = seal.Finalize(context.Background()) + err = barrierSeal.Finalize(ctx) if err != nil { c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) } @@ -575,6 +569,7 @@ func (c *ServerCommand) runRecoveryMode() int { Physical: backend, StorageType: config.Storage.Type, Seal: barrierSeal, + UnwrapSeal: setSealResponse.unwrapSeal, LogLevel: config.LogLevel, Logger: c.logger, DisableMlock: config.DisableMlock, @@ -590,7 +585,7 @@ func (c *ServerCommand) runRecoveryMode() int { } } - if err := core.InitializeRecovery(context.Background()); err != nil { + if err := core.InitializeRecovery(ctx); err != nil { c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err)) return 1 } @@ -642,7 +637,7 @@ func (c *ServerCommand) runRecoveryMode() int { infoKeys = append(infoKeys, "go version") info["go version"] = runtime.Version() - fipsStatus := getFIPSInfoKey() + fipsStatus := entGetFIPSInfoKey() if fipsStatus != "" { infoKeys = append(infoKeys, "fips") info["fips"] = fipsStatus @@ -664,6 +659,12 @@ func (c *ServerCommand) runRecoveryMode() int { c.UI.Output("") + // Tests might not want to start a vault server and just want to verify + // the configuration. 
+ if c.flagTestVerifyOnly { + return 0 + } + for _, ln := range lns { handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ Core: core, @@ -685,7 +686,7 @@ func (c *ServerCommand) runRecoveryMode() int { } if sealConfigError != nil { - init, err := core.InitializedLocally(context.Background()) + init, err := core.InitializedLocally(ctx) if err != nil { c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) return 1 @@ -853,9 +854,9 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b } if reloadFunc != nil { - relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type] + relSlice := (*c.reloadFuncs)[fmt.Sprintf("listener|%s", lnConfig.Type)] relSlice = append(relSlice, reloadFunc) - (*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice + (*c.reloadFuncs)[fmt.Sprintf("listener|%s", lnConfig.Type)] = relSlice } if !disableClustering && lnConfig.Type == "tcp" { @@ -893,6 +894,12 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b } props["max_request_duration"] = lnConfig.MaxRequestDuration.String() + props["disable_request_limiter"] = strconv.FormatBool(lnConfig.DisableRequestLimiter) + + if lnConfig.ChrootNamespace != "" { + props["chroot_namespace"] = lnConfig.ChrootNamespace + } + lns = append(lns, listenerutil.Listener{ Listener: ln, Config: lnConfig, @@ -919,6 +926,79 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b return 0, lns, clusterAddrs, nil } +func configureDevTLS(c *ServerCommand) (func(), *server.Config, string, error) { + var devStorageType string + + switch { + case c.flagDevConsul: + devStorageType = "consul" + case c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional_ha" + case !c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional" + case c.flagDevHA && !c.flagDevTransactional: + devStorageType = "inmem_ha" + default: + devStorageType = "inmem" + } + + var certDir string + var err error + var config *server.Config + var f func() + + if c.flagDevTLS { + if c.flagDevTLSCertDir != "" { + if _, err = os.Stat(c.flagDevTLSCertDir); err != nil { + return nil, nil, "", err + } + + certDir = c.flagDevTLSCertDir + } else { + if certDir, err = os.MkdirTemp("", "vault-tls"); err != nil { + return nil, nil, certDir, err + } + } + extraSANs := c.flagDevTLSSANs + host, _, err := net.SplitHostPort(c.flagDevListenAddr) + if err == nil { + // 127.0.0.1 is the default, and already included in the SANs. + // Empty host means listen on all interfaces, but users should use the + // -dev-tls-san flag to get the right SANs in that case. + if host != "" && host != "127.0.0.1" { + extraSANs = append(extraSANs, host) + } + } + config, err = server.DevTLSConfig(devStorageType, certDir, extraSANs) + + f = func() { + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)); err != nil { + c.UI.Error(err.Error()) + } + + // Only delete temp directories we made. 
+ if c.flagDevTLSCertDir == "" { + if err := os.Remove(certDir); err != nil { + c.UI.Error(err.Error()) + } + } + } + + } else { + config, err = server.DevConfig(devStorageType) + } + + return f, config, certDir, err +} + func (c *ServerCommand) Run(args []string) int { f := c.Flags() @@ -927,6 +1007,9 @@ func (c *ServerCommand) Run(args []string) int { return 1 } + // Don't exit just because we saw a potential deadlock. + deadlock.Opts.OnPotentialDeadlock = func() {} + c.logGate = gatedwriter.NewWriter(os.Stderr) c.logWriter = c.logGate @@ -939,7 +1022,7 @@ func (c *ServerCommand) Run(args []string) int { } // Automatically enable dev mode if other dev flags are provided. - if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevTLS { + if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevNoKV || c.flagDevTLS { c.flagDev = true } @@ -959,68 +1042,11 @@ func (c *ServerCommand) Run(args []string) int { // Load the configuration var config *server.Config - var err error var certDir string if c.flagDev { - var devStorageType string - switch { - case c.flagDevConsul: - devStorageType = "consul" - case c.flagDevHA && c.flagDevTransactional: - devStorageType = "inmem_transactional_ha" - case !c.flagDevHA && c.flagDevTransactional: - devStorageType = "inmem_transactional" - case c.flagDevHA && !c.flagDevTransactional: - devStorageType = "inmem_ha" - default: - devStorageType = "inmem" - } - - if c.flagDevTLS { - if c.flagDevTLSCertDir != "" { - _, err := os.Stat(c.flagDevTLSCertDir) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - certDir = c.flagDevTLSCertDir - } else { - certDir, err = os.MkdirTemp("", "vault-tls") - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - } - config, err = server.DevTLSConfig(devStorageType, certDir) - - defer func() { - err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - // Only delete temp directories we made. 
- if c.flagDevTLSCertDir == "" { - err = os.Remove(certDir) - if err != nil { - c.UI.Error(err.Error()) - } - } - }() - - } else { - config, err = server.DevConfig(devStorageType) + df, cfg, dir, err := configureDevTLS(c) + if df != nil { + defer df() } if err != nil { @@ -1028,6 +1054,9 @@ func (c *ServerCommand) Run(args []string) int { return 1 } + config = cfg + certDir = dir + if c.flagDevListenAddr != "" { config.Listeners[0].Address = c.flagDevListenAddr } @@ -1057,11 +1086,6 @@ func (c *ServerCommand) Run(args []string) int { f.applyLogConfigOverrides(config.SharedConfig) - // Set 'trace' log level for the following 'dev' clusters - if c.flagDevThreeNode || c.flagDevFourCluster { - config.LogLevel = "trace" - } - l, err := c.configureLogging(config) if err != nil { c.UI.Error(err.Error()) @@ -1070,6 +1094,11 @@ func (c *ServerCommand) Run(args []string) int { c.logger = l c.allLoggers = append(c.allLoggers, l) + // flush logs right away if the server is started with the disable-gated-logs flag + if c.logFlags.flagDisableGatedLogs { + c.flushLog() + } + // reporting Errors found in the config for _, cErr := range configErrors { c.logger.Warn(cErr.String()) @@ -1111,14 +1140,9 @@ func (c *ServerCommand) Run(args []string) int { if envLicense := os.Getenv(EnvVaultLicense); envLicense != "" { config.License = envLicense } - if disableSSC := os.Getenv(DisableSSCTokens); disableSSC != "" { - var err error - config.DisableSSCTokens, err = strconv.ParseBool(disableSSC) - if err != nil { - c.UI.Warn(wrapAtLength("WARNING! failed to parse " + - "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS env var: " + - "setting to default value false")) - } + + if envPluginTmpdir := os.Getenv(EnvVaultPluginTmpdir); envPluginTmpdir != "" { + config.PluginTmpdir = envPluginTmpdir } if err := server.ExperimentsFromEnvAndCLI(config, EnvVaultExperiments, c.flagExperiments); err != nil { @@ -1126,6 +1150,12 @@ func (c *ServerCommand) Run(args []string) int { return 1 } + for _, experiment := range config.Experiments { + if experiments.IsUnused(experiment) { + c.UI.Warn(fmt.Sprintf("WARNING! Experiment %s is no longer used", experiment)) + } + } + // If mlockall(2) isn't supported, show a warning. We disable this in dev // because it is quite scary to see when first using Vault. We also disable // this if the user has explicitly disabled mlock in configuration. 
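The hunks above extract the inline dev-TLS setup into `configureDevTLS`, which returns its cleanup closure alongside the config and error; `Run` then defers the closure whenever it is non-nil, so certificate files created before a failure are still removed. A condensed sketch of that contract, with illustrative names:

```go
package main

import (
	"fmt"
	"os"
)

// makeTempCerts stands in for configureDevTLS: it may create state
// before failing, so the cleanup closure is returned alongside err.
func makeTempCerts() (cleanup func(), dir string, err error) {
	dir, err = os.MkdirTemp("", "vault-tls")
	if err != nil {
		return nil, "", err
	}
	cleanup = func() { os.RemoveAll(dir) }
	// ... write CA, cert, and key material into dir; a failure past
	// this point still hands cleanup back to the caller.
	return cleanup, dir, nil
}

func main() {
	cleanup, dir, err := makeTempCerts()
	if cleanup != nil {
		defer cleanup() // runs on the error path too
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("dev TLS material in", dir)
}
```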
@@ -1138,13 +1168,35 @@ func (c *ServerCommand) Run(args []string) int { "in a Docker container, provide the IPC_LOCK cap to the container.")) } + // Initialize the storage backend + var backend physical.Backend + if !c.flagDev || config.Storage != nil { + backend, err = c.setupStorage(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + // Prevent server startup if migration is active + // TODO: Use OpenTelemetry to integrate this into Diagnose + if c.storageMigrationActive(backend) { + return 1 + } + } + + clusterName := config.ClusterName + + // Attempt to retrieve cluster name from insecure storage + if clusterName == "" { + clusterName, err = c.readClusterNameFromInsecureStorage(backend) + } + inmemMetrics, metricSink, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ Config: config.Telemetry, Ui: c.UI, ServiceName: "vault", DisplayName: "Vault", UserAgent: useragent.String(), - ClusterName: config.ClusterName, + ClusterName: clusterName, }) if err != nil { c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) @@ -1152,19 +1204,6 @@ func (c *ServerCommand) Run(args []string) int { } metricsHelper := metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) - // Initialize the storage backend - backend, err := c.setupStorage(config) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - // Prevent server startup if migration is active - // TODO: Use OpenTelemetry to integrate this into Diagnose - if c.storageMigrationActive(backend) { - return 1 - } - // Initialize the Service Discovery, if there is one var configSR sr.ServiceRegistration if config.ServiceRegistration != nil { @@ -1200,49 +1239,20 @@ func (c *ServerCommand) Run(args []string) int { infoKeys = append(infoKeys, expKey) } - barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(c, config, infoKeys, info) - // Check error here - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - for _, seal := range seals { - // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir - // section. - if seal == nil { - continue - } - seal := seal // capture range variable - // Ensure that the seal finalizer is called, even if using verify-only - defer func(seal *vault.Seal) { - err = (*seal).Finalize(context.Background()) - if err != nil { - c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) - } - }(&seal) - } - - if barrierSeal == nil { - c.UI.Error("Could not create barrier seal! 
Most likely proper Seal configuration information was not set, but no error was generated.") - return 1 - } + ctx := context.Background() - // prepare a secure random reader for core - secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper) + setSealResponse, secureRandomReader, err := c.configureSeals(ctx, config, backend, infoKeys, info) if err != nil { c.UI.Error(err.Error()) return 1 } - coreConfig := createCoreConfig(c, config, backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader) - if c.flagDevThreeNode { - return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) - } + c.setSealsToFinalize(setSealResponse.getCreatedSeals()) + defer func() { + c.finalizeSeals(ctx, c.sealsToFinalize) + }() - if c.flagDevFourCluster { - return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) - } + coreConfig := createCoreConfig(c, config, backend, configSR, setSealResponse.barrierSeal, setSealResponse.unwrapSeal, metricsHelper, metricSink, secureRandomReader) if allowPendingRemoval := os.Getenv(consts.EnvVaultAllowPendingRemovalMounts); allowPendingRemoval != "" { var err error @@ -1293,9 +1303,9 @@ func (c *ServerCommand) Run(args []string) int { } // Apply any enterprise configuration onto the coreConfig. - adjustCoreConfigForEnt(config, &coreConfig) + entAdjustCoreConfig(config, &coreConfig) - if !storageSupportedForEnt(&coreConfig) { + if !entCheckStorageType(&coreConfig) { c.UI.Warn("") c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which is not supported for Vault Enterprise, must be \"raft\" or \"consul\"", coreConfig.StorageType))) c.UI.Warn("") @@ -1398,7 +1408,7 @@ func (c *ServerCommand) Run(args []string) int { infoKeys = append(infoKeys, "go version") info["go version"] = runtime.Version() - fipsStatus := getFIPSInfoKey() + fipsStatus := entGetFIPSInfoKey() if fipsStatus != "" { infoKeys = append(infoKeys, "fips") info["fips"] = fipsStatus @@ -1415,6 +1425,9 @@ func (c *ServerCommand) Run(args []string) int { info["HCP resource ID"] = config.HCPLinkConf.Resource.ID } + infoKeys = append(infoKeys, "administrative namespace") + info["administrative namespace"] = config.AdministrativeNamespacePath + sort.Strings(infoKeys) c.UI.Output("==> Vault server configuration:\n") @@ -1437,22 +1450,23 @@ func (c *ServerCommand) Run(args []string) int { // mode if it's set core.SetClusterListenerAddrs(clusterAddrs) core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{ - Core: core, + Core: core, + ListenerConfig: &configutil.Listener{}, })) // Attempt unsealing in a background goroutine. This is needed for when a // Vault cluster with multiple servers is configured with auto-unseal but is // uninitialized. Once one server initializes the storage backend, this // goroutine will pick up the unseal keys and unseal this instance. - if !core.IsInSealMigrationMode() { - go runUnseal(c, core, context.Background()) + if !core.IsInSealMigrationMode(true) { + go runUnseal(c, core, ctx) } // When the underlying storage is raft, kick off retry join if it was specified // in the configuration // TODO: Should we also support retry_join for ha_storage? 
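`Run` now tracks created seals in `c.sealsToFinalize`, as shown above: swapping in a new set finalizes the previous one, and a deferred closure (rather than deferred arguments, which would capture a stale slice) finalizes whatever set is current at exit. A toy illustration of the swap-and-finalize pattern with stand-in types:

```go
package main

import (
	"context"
	"fmt"
)

type Seal interface{ Finalize(context.Context) error }

type fakeSeal struct{ name string }

func (s *fakeSeal) Finalize(context.Context) error {
	fmt.Println("finalized", s.name)
	return nil
}

type server struct{ sealsToFinalize []Seal }

func (s *server) setSealsToFinalize(seals []Seal) {
	prev := s.sealsToFinalize
	s.sealsToFinalize = seals
	s.finalizeSeals(context.Background(), prev) // release the old set now
}

func (s *server) finalizeSeals(ctx context.Context, seals []Seal) {
	for _, seal := range seals {
		if err := seal.Finalize(ctx); err != nil {
			fmt.Println("error finalizing seal:", err)
		}
	}
}

func main() {
	srv := &server{}
	ctx := context.Background()
	// The closure reads sealsToFinalize at exit, not at defer time,
	// so it always finalizes the current set.
	defer func() { srv.finalizeSeals(ctx, srv.sealsToFinalize) }()

	srv.setSealsToFinalize([]Seal{&fakeSeal{"gen1"}})
	srv.setSealsToFinalize([]Seal{&fakeSeal{"gen2"}}) // finalizes gen1
}
```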
if config.Storage.Type == storageTypeRaft { - if err := core.InitiateRetryJoin(context.Background()); err != nil { + if err := core.InitiateRetryJoin(ctx); err != nil { c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error())) return 1 } @@ -1471,7 +1485,8 @@ func (c *ServerCommand) Run(args []string) int { } // If we're in Dev mode, then initialize the core - err = initDevCore(c, &coreConfig, config, core, certDir) + clusterJson := &testcluster.ClusterJson{} + err = initDevCore(c, &coreConfig, config, core, certDir, clusterJson) if err != nil { c.UI.Error(err.Error()) return 1 @@ -1496,8 +1511,8 @@ func (c *ServerCommand) Run(args []string) int { return 0 } - if sealConfigError != nil { - init, err := core.InitializedLocally(context.Background()) + if setSealResponse.sealConfigError != nil { + init, err := core.InitializedLocally(ctx) if err != nil { c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) return 1 @@ -1508,6 +1523,10 @@ func (c *ServerCommand) Run(args []string) int { } } + core.SetSealReloadFunc(func(ctx context.Context) error { + return c.reloadSealsOnLeaderActivation(ctx, core) + }) + // Output the header that the server has started if !c.logFlags.flagCombineLogs { c.UI.Output("==> Vault server started! Log data will stream in below:\n") @@ -1531,6 +1550,34 @@ func (c *ServerCommand) Run(args []string) int { // Notify systemd that the server is ready (if applicable) c.notifySystemd(systemd.SdNotifyReady) + if c.flagDev { + protocol := "http://" + if c.flagDevTLS { + protocol = "https://" + } + clusterJson.Nodes = []testcluster.ClusterNode{ + { + APIAddress: protocol + config.Listeners[0].Address, + }, + } + if c.flagDevTLS { + clusterJson.CACertPath = fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename) + } + + if c.flagDevClusterJson != "" { + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + } + defer func() { if err := c.removePidFile(config.PidFile); err != nil { c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) @@ -1562,22 +1609,10 @@ func (c *ServerCommand) Run(args []string) int { c.notifySystemd(systemd.SdNotifyReloading) // Check for new log level - var config *server.Config - var configErrors []configutil.ConfigError - for _, path := range c.flagConfigs { - current, err := server.LoadConfig(path) - if err != nil { - c.logger.Error("could not reload config", "path", path, "error", err) - goto RUNRELOADFUNCS - } - - configErrors = append(configErrors, current.Validate(path)...) - - if config == nil { - config = current - } else { - config = config.Merge(current) - } + config, configErrors, err := c.reloadConfigFiles() + if err != nil { + c.logger.Error("could not reload config", "error", err) + goto RUNRELOADFUNCS } // Ensure at least one config was found. @@ -1591,6 +1626,16 @@ func (c *ServerCommand) Run(args []string) int { c.logger.Warn(cErr.String()) } + // Note that seal reloading can also be triggered via Core.TriggerSealReload. + // See the call to Core.SetSealReloadFunc above. 
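When `-dev-cluster-json` is set, the block above serializes a `testcluster.ClusterJson` describing the node's API address, the dev CA path under TLS, and (via `initDevCore`, later in this diff) the root token, writing it with owner-only permissions because it can contain a credential. A sketch of that output step; the struct and its JSON tags are local stand-ins, since the real `testcluster.ClusterJson` definition is not part of this diff:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
)

type clusterNode struct {
	APIAddress string `json:"api_address"`
}

// clusterJSON mirrors the fields this diff assigns on
// testcluster.ClusterJson; field names and tags are assumptions.
type clusterJSON struct {
	Nodes      []clusterNode `json:"nodes"`
	CACertPath string        `json:"ca_cert_path,omitempty"`
	RootToken  string        `json:"root_token,omitempty"`
}

func main() {
	cj := clusterJSON{
		Nodes:      []clusterNode{{APIAddress: "https://127.0.0.1:8200"}},
		CACertPath: "/tmp/vault-tls/vault-ca.pem",
	}
	b, err := json.MarshalIndent(cj, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// 0o600 because the file may hold a root token.
	if err := os.WriteFile("cluster.json", b, 0o600); err != nil {
		log.Fatal(err)
	}
}
```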
+ if reloaded, err := c.reloadSealsOnSigHup(ctx, core, config); err != nil { + c.UI.Error(fmt.Errorf("error reloading seal config: %s", err).Error()) + config.Seals = core.GetCoreConfigInternal().Seals + goto RUNRELOADFUNCS + } else if !reloaded { + config.Seals = core.GetCoreConfigInternal().Seals + } + core.SetConfig(config) // reloading custom response headers to make sure we have @@ -1602,6 +1647,10 @@ func (c *ServerCommand) Run(args []string) int { // Setting log request with the new value in the config after reload core.ReloadLogRequestsLevel() + core.ReloadRequestLimiter() + + core.ReloadOverloadController() + // reloading HCP link hcpLink, err = c.reloadHCPLink(hcpLink, config, core, hcpLogger) if err != nil { @@ -1613,18 +1662,33 @@ func (c *ServerCommand) Run(args []string) int { level, err := loghelper.ParseLogLevel(config.LogLevel) if err != nil { c.logger.Error("unknown log level found on reload", "level", config.LogLevel) - goto RUNRELOADFUNCS + } else { + core.SetLogLevel(level) } - core.SetLogLevel(level) } + // notify ServiceRegistration that a configuration reload has occurred + if sr := coreConfig.GetServiceRegistration(); sr != nil { + var srConfig *map[string]string + if config.ServiceRegistration != nil { + srConfig = &config.ServiceRegistration.Config + } + sr.NotifyConfigurationReload(srConfig) + } + + if err := core.ReloadCensusManager(false); err != nil { + c.UI.Error(err.Error()) + } + + core.ReloadReplicationCanaryWriteInterval() + RUNRELOADFUNCS: if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs, core); err != nil { c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) } // Reload license file - if err = vault.LicenseReload(core); err != nil { + if err = core.EntReloadLicense(); err != nil { c.UI.Error(err.Error()) } @@ -1678,6 +1742,19 @@ func (c *ServerCommand) Run(args []string) int { c.logger.Info(fmt.Sprintf("Wrote stacktrace to: %s", f.Name())) f.Close() } + + // We can only get pprof outputs via the API but sometimes Vault can get + // into a state where it cannot process requests so we can get pprof outputs + // via SIGUSR2. + pprofPath := filepath.Join(os.TempDir(), "vault-pprof") + cpuProfileDuration := time.Second * 1 + err := WritePprofToFile(pprofPath, cpuProfileDuration) + if err != nil { + c.logger.Error(err.Error()) + continue + } + + c.logger.Info(fmt.Sprintf("Wrote pprof files to: %s", pprofPath)) } } // Notify systemd that the server is shutting down @@ -1704,39 +1781,120 @@ func (c *ServerCommand) Run(args []string) int { return retCode } -// configureLogging takes the configuration and attempts to parse config values into 'log' friendly configuration values -// If all goes to plan, a logger is created and setup. -func (c *ServerCommand) configureLogging(config *server.Config) (hclog.InterceptLogger, error) { - // Parse all the log related config - logLevel, err := loghelper.ParseLogLevel(config.LogLevel) - if err != nil { - return nil, err +func (c *ServerCommand) reloadConfigFiles() (*server.Config, []configutil.ConfigError, error) { + var config *server.Config + var configErrors []configutil.ConfigError + for _, path := range c.flagConfigs { + current, err := server.LoadConfig(path) + if err != nil { + return nil, nil, err + } + + configErrors = append(configErrors, current.Validate(path)...) 
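The new SIGUSR2 branch above writes pprof output straight to files because a wedged server may be unable to answer the usual pprof API. The body of `WritePprofToFile` is not shown in this diff; the sketch below approximates such a helper using only `runtime/pprof` from the standard library:

```go
package main

import (
	"os"
	"path/filepath"
	"runtime/pprof"
	"time"
)

// writePprof approximates a WritePprofToFile-style helper: capture a
// short CPU profile plus a heap snapshot into dir.
func writePprof(dir string, cpuDur time.Duration) error {
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return err
	}

	cpuFile, err := os.Create(filepath.Join(dir, "cpu.prof"))
	if err != nil {
		return err
	}
	defer cpuFile.Close()
	if err := pprof.StartCPUProfile(cpuFile); err != nil {
		return err
	}
	time.Sleep(cpuDur) // sample for the requested duration
	pprof.StopCPUProfile()

	heapFile, err := os.Create(filepath.Join(dir, "heap.prof"))
	if err != nil {
		return err
	}
	defer heapFile.Close()
	return pprof.WriteHeapProfile(heapFile)
}

func main() {
	if err := writePprof(filepath.Join(os.TempDir(), "vault-pprof"), time.Second); err != nil {
		panic(err)
	}
}
```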
+ + if config == nil { + config = current + } else { + config = config.Merge(current) + } } - logFormat, err := loghelper.ParseLogFormat(config.LogFormat) + return config, configErrors, nil +} + +func (c *ServerCommand) configureSeals(ctx context.Context, config *server.Config, backend physical.Backend, infoKeys []string, info map[string]string) (*SetSealResponse, io.Reader, error) { + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, backend) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("Error getting seal generation info: %v", err) } - logRotateDuration, err := parseutil.ParseDurationSecond(config.LogRotateDuration) + hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("Cannot determine if there are partially seal wrapped entries in storage: %v", err) } - - logCfg := &loghelper.LogConfig{ - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: config.LogRotateBytes, - LogRotateMaxFiles: config.LogRotateMaxFiles, + setSealResponse, err := setSeal(c, config, infoKeys, info, existingSealGenerationInfo, hasPartialPaths) + if err != nil { + return nil, nil, err + } + if setSealResponse.sealConfigWarning != nil { + c.UI.Warn(fmt.Sprintf("Warnings during seal configuration: %v", setSealResponse.sealConfigWarning)) } - return loghelper.Setup(logCfg, c.logWriter) -} + if setSealResponse.barrierSeal == nil { + return nil, nil, errors.New("Could not create barrier seal! Most likely proper Seal configuration information was not set, but no error was generated.") + } -func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.HCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.HCPLinkVault, error) { - // trigger a shutdown + // prepare a secure random reader for core + entropyAugLogger := c.logger.Named("entropy-augmentation") + var entropySources []*configutil.EntropySourcerInfo + for _, sealWrapper := range setSealResponse.barrierSeal.GetAccess().GetEnabledSealWrappersByPriority() { + if s, ok := sealWrapper.Wrapper.(entropy.Sourcer); ok { + entropySources = append(entropySources, &configutil.EntropySourcerInfo{ + Sourcer: s, + Name: sealWrapper.Name, + }) + } + } + secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, entropySources, entropyAugLogger) + if err != nil { + return nil, nil, err + } + + return setSealResponse, secureRandomReader, nil +} + +func (c *ServerCommand) setSealsToFinalize(seals []*vault.Seal) { + prev := c.sealsToFinalize + c.sealsToFinalize = seals + + c.finalizeSeals(context.Background(), prev) +} + +func (c *ServerCommand) finalizeSeals(ctx context.Context, seals []*vault.Seal) { + for _, seal := range seals { + // Ensure that the seal finalizer is called, even if using verify-only + err := (*seal).Finalize(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + } +} + +// configureLogging takes the configuration and attempts to parse config values into 'log' friendly configuration values +// If all goes to plan, a logger is created and setup. 
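`configureSeals` above probes every enabled seal wrapper for the optional `entropy.Sourcer` capability via a type assertion and hands the matches to `CreateSecureRandomReaderFunc`. A minimal sketch of that collect-by-interface idiom; `Sourcer` here is a stand-in, not the real go-kms-wrapping type:

```go
package main

import "fmt"

// Sourcer is a stand-in for the optional entropy interface a seal
// wrapper may or may not implement.
type Sourcer interface {
	GetRandomBytes(n int) ([]byte, error)
}

type entropyWrapper struct{}

func (entropyWrapper) GetRandomBytes(n int) ([]byte, error) {
	return make([]byte, n), nil
}

func main() {
	wrappers := []interface{}{entropyWrapper{}, struct{}{}}

	var sources []Sourcer
	for _, w := range wrappers {
		// Collect only wrappers that implement the optional interface.
		if s, ok := w.(Sourcer); ok {
			sources = append(sources, s)
		}
	}
	fmt.Println("entropy sources found:", len(sources))
}
```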
+func (c *ServerCommand) configureLogging(config *server.Config) (hclog.InterceptLogger, error) { + // Parse all the log related config + logLevel, err := loghelper.ParseLogLevel(config.LogLevel) + if err != nil { + return nil, err + } + + logFormat, err := loghelper.ParseLogFormat(config.LogFormat) + if err != nil { + return nil, err + } + + logRotateDuration, err := parseutil.ParseDurationSecond(config.LogRotateDuration) + if err != nil { + return nil, err + } + + logCfg, err := loghelper.NewLogConfig("vault") + if err != nil { + return nil, err + } + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = config.LogRotateBytes + logCfg.LogRotateMaxFiles = config.LogRotateMaxFiles + + return loghelper.Setup(logCfg, c.logWriter) +} + +func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.HCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.HCPLinkVault, error) { + // trigger a shutdown if hcpLinkVault != nil { err := hcpLinkVault.Shutdown() if err != nil { @@ -1780,6 +1938,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig barrierConfig := &vault.SealConfig{ SecretShares: 1, SecretThreshold: 1, + Name: "shamir", } if core.SealAccess().RecoveryKeySupported() { @@ -1891,228 +2050,34 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig } } - kvVer := "2" - if c.flagDevKVV1 || c.flagDevLeasedKV { - kvVer = "1" - } - req := &logical.Request{ - Operation: logical.UpdateOperation, - ClientToken: init.RootToken, - Path: "sys/mounts/secret", - Data: map[string]interface{}{ - "type": "kv", - "path": "secret/", - "description": "key/value secret storage", - "options": map[string]string{ - "version": kvVer, - }, - }, - } - resp, err := core.HandleRequest(ctx, req) - if err != nil { - return nil, fmt.Errorf("error creating default K/V store: %w", err) - } - if resp.IsError() { - return nil, fmt.Errorf("failed to create default K/V store: %w", resp.Error()) - } - - return init, nil -} - -func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { - testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - BaseListenAddress: c.flagDevListenAddr, - Logger: c.logger, - TempDir: tempDir, - }) - defer c.cleanupGuard.Do(testCluster.Cleanup) - - info["cluster parameters path"] = testCluster.TempDir - infoKeys = append(infoKeys, "cluster parameters path") - - for i, core := range testCluster.Cores { - info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String()) - infoKeys = append(infoKeys, fmt.Sprintf("node %d api address", i)) - } - - infoKeys = append(infoKeys, "version") - verInfo := version.GetVersion() - info["version"] = verInfo.FullVersionNumber(false) - if verInfo.Revision != "" { - info["version sha"] = strings.Trim(verInfo.Revision, "'") - infoKeys = append(infoKeys, "version sha") - } - - infoKeys = append(infoKeys, "cgo") - info["cgo"] = "disabled" - if version.CgoEnabled { - info["cgo"] = "enabled" - } - - infoKeys = append(infoKeys, "go version") - info["go version"] = runtime.Version() - - fipsStatus := getFIPSInfoKey() - if fipsStatus != "" { - infoKeys = append(infoKeys, "fips") - info["fips"] = fipsStatus - } - - // Server configuration output - padding := 24 - - 
sort.Strings(infoKeys) - c.UI.Output("==> Vault server configuration:\n") - - for _, k := range infoKeys { - c.UI.Output(fmt.Sprintf( - "%s%s: %s", - strings.Repeat(" ", padding-len(k)), - strings.Title(k), - info[k])) - } - - c.UI.Output("") - - for _, core := range testCluster.Cores { - core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{ - Core: core.Core, - }) - core.SetClusterHandler(core.Server.Handler) - } - - testCluster.Start() - - ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) - - if base.DevToken != "" { + if !c.flagDevNoKV { + kvVer := "2" + if c.flagDevKVV1 || c.flagDevLeasedKV { + kvVer = "1" + } req := &logical.Request{ - ID: "dev-gen-root", Operation: logical.UpdateOperation, - ClientToken: testCluster.RootToken, - Path: "auth/token/create", + ClientToken: init.RootToken, + Path: "sys/mounts/secret", Data: map[string]interface{}{ - "id": base.DevToken, - "policies": []string{"root"}, - "no_parent": true, - "no_default_policy": true, + "type": "kv", + "path": "secret/", + "description": "key/value secret storage", + "options": map[string]string{ + "version": kvVer, + }, }, } - resp, err := testCluster.Cores[0].HandleRequest(ctx, req) + resp, err := core.HandleRequest(ctx, req) if err != nil { - c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err)) - return 1 - } - if resp == nil { - c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken)) - return 1 + return nil, fmt.Errorf("error creating default KV store: %w", err) } - if resp.Auth == nil { - c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken)) - return 1 - } - - testCluster.RootToken = resp.Auth.ClientToken - - req.ID = "dev-revoke-init-root" - req.Path = "auth/token/revoke-self" - req.Data = nil - _, err = testCluster.Cores[0].HandleRequest(ctx, req) - if err != nil { - c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err)) - return 1 + if resp.IsError() { + return nil, fmt.Errorf("failed to create default KV store: %w", resp.Error()) } } - // Set the token - tokenHelper, err := c.TokenHelper() - if err != nil { - c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err)) - return 1 - } - if err := tokenHelper.Store(testCluster.RootToken); err != nil { - c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err)) - return 1 - } - - if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0o600); err != nil { - c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err)) - return 1 - } - - c.UI.Output(fmt.Sprintf( - "==> Three node dev mode is enabled\n\n" + - "The unseal key and root token are reproduced below in case you\n" + - "want to seal/unseal the Vault or play with authentication.\n", - )) - - for i, key := range testCluster.BarrierKeys { - c.UI.Output(fmt.Sprintf( - "Unseal Key %d: %s", - i+1, base64.StdEncoding.EncodeToString(key), - )) - } - - c.UI.Output(fmt.Sprintf( - "\nRoot Token: %s\n", testCluster.RootToken, - )) - - c.UI.Output(fmt.Sprintf( - "\nUseful env vars:\n"+ - "VAULT_TOKEN=%s\n"+ - "VAULT_ADDR=%s\n"+ - "VAULT_CACERT=%s/ca_cert.pem\n", - testCluster.RootToken, - testCluster.Cores[0].Client.Address(), - testCluster.TempDir, - )) - - // Output the header that the server has started - c.UI.Output("==> Vault server started! 
Log data will stream in below:\n") - - // Inform any tests that the server is ready - select { - case c.startedCh <- struct{}{}: - default: - } - - // Release the log gate. - c.flushLog() - - // Wait for shutdown - shutdownTriggered := false - - for !shutdownTriggered { - select { - case <-c.ShutdownCh: - c.UI.Output("==> Vault shutdown triggered") - - // Stop the listeners so that we don't process further client requests. - c.cleanupGuard.Do(testCluster.Cleanup) - - // Finalize will wait until after Vault is sealed, which means the - // request forwarding listeners will also be closed (and also - // waited for). - for _, core := range testCluster.Cores { - if err := core.Shutdown(); err != nil { - c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) - } - } - - shutdownTriggered = true - - case <-c.SighupCh: - c.UI.Output("==> Vault reload triggered") - for _, core := range testCluster.Cores { - if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil, core.Core); err != nil { - c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) - } - } - } - } - - return 0 + return init, nil } // addPlugin adds any plugins to the catalog @@ -2353,86 +2318,315 @@ func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) return &status, nil } -// setSeal return barrierSeal, barrierWrapper, unwrapSeal, and all the created seals from the configs so we can close them in Run -// The two errors are the sealConfigError and the regular error -func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string) (vault.Seal, wrapping.Wrapper, vault.Seal, []vault.Seal, error, error) { - var barrierSeal vault.Seal - var unwrapSeal vault.Seal +type SetSealResponse struct { + barrierSeal vault.Seal + unwrapSeal vault.Seal - var sealConfigError error - var wrapper wrapping.Wrapper - var barrierWrapper wrapping.Wrapper + // sealConfigError is present if there was an error configuring wrappers, other than KeyNotFound. 
+ sealConfigError error + sealConfigWarning error + hasPartiallyWrappedPaths bool +} + +func (r *SetSealResponse) getCreatedSeals() []*vault.Seal { + var ret []*vault.Seal + if r.barrierSeal != nil { + ret = append(ret, &r.barrierSeal) + } + if r.unwrapSeal != nil { + ret = append(ret, &r.unwrapSeal) + } + return ret +} + +// setSeal return barrierSeal, barrierWrapper, unwrapSeal, all the created seals, and all the provided seals from the configs so we can close them in Run +// The two errors are the sealConfigError and the regular error +func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string, existingSealGenerationInfo *vaultseal.SealGenerationInfo, hasPartiallyWrappedPaths bool) (*SetSealResponse, error) { if c.flagDevAutoSeal { - var err error - barrierSeal, err = vault.NewAutoSeal(vaultseal.NewTestSeal(nil)) - if err != nil { - return nil, nil, nil, nil, nil, err - } - return barrierSeal, nil, nil, nil, nil, nil + access, _ := vaultseal.NewTestSeal(nil) + barrierSeal := vault.NewAutoSeal(access) + + return &SetSealResponse{barrierSeal: barrierSeal}, nil } // Handle the case where no seal is provided switch len(config.Seals) { case 0: - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) - case 1: - // If there's only one seal and it's disabled assume they want to + config.Seals = append(config.Seals, &configutil.KMS{ + Type: vault.SealConfigTypeShamir.String(), + Priority: 1, + Name: "shamir", + }) + default: + allSealsDisabled := true + for _, c := range config.Seals { + if !c.Disabled { + allSealsDisabled = false + } else if c.Type == vault.SealConfigTypeShamir.String() { + return nil, errors.New("shamir seals cannot be set disabled (they should simply not be set)") + } + } + // If all seals are disabled assume they want to // migrate to a shamir seal and simply didn't provide it - if config.Seals[0].Disabled { - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + if allSealsDisabled { + config.Seals = append(config.Seals, &configutil.KMS{ + Type: vault.SealConfigTypeShamir.String(), + Priority: 1, + Name: "shamir", + }) } } - var createdSeals []vault.Seal = make([]vault.Seal, len(config.Seals)) + + var sealConfigError error + var sealConfigWarning error + recordSealConfigError := func(err error) { + sealConfigError = errors.Join(sealConfigError, err) + } + recordSealConfigWarning := func(err error) { + sealConfigWarning = errors.Join(sealConfigWarning, err) + } + enabledSealWrappers := make([]*vaultseal.SealWrapper, 0) + disabledSealWrappers := make([]*vaultseal.SealWrapper, 0) + allSealKmsConfigs := make([]*configutil.KMS, 0) + + type infoKeysAndMap struct { + keys []string + theMap map[string]string + } + sealWrapperInfoKeysMap := make(map[string]infoKeysAndMap) + + configuredSeals := 0 for _, configSeal := range config.Seals { - sealType := wrapping.WrapperTypeShamir.String() - if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { - sealType = os.Getenv("VAULT_SEAL_TYPE") + sealTypeEnvVarName := "VAULT_SEAL_TYPE" + if configSeal.Priority > 1 { + sealTypeEnvVarName = sealTypeEnvVarName + "_" + configSeal.Name + } + + if !configSeal.Disabled && os.Getenv(sealTypeEnvVarName) != "" { + sealType := os.Getenv(sealTypeEnvVarName) configSeal.Type = sealType - } else { - sealType = configSeal.Type } - var seal vault.Seal - sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) + sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", 
configSeal.Type)) c.allLoggers = append(c.allLoggers, sealLogger) - defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(), - }) - var sealInfoKeys []string - sealInfoMap := map[string]string{} - wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger) - if sealConfigError != nil { - if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { - return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, fmt.Errorf( - "Error parsing Seal configuration: %s", sealConfigError) + + allSealKmsConfigs = append(allSealKmsConfigs, configSeal) + var wrapperInfoKeys []string + wrapperInfoMap := map[string]string{} + wrapper, wrapperConfigError := configutil.ConfigureWrapper(configSeal, &wrapperInfoKeys, &wrapperInfoMap, sealLogger) + if wrapperConfigError == nil { + // for some reason configureWrapper in kms.go returns nil wrapper and nil error for wrapping.WrapperTypeShamir + if wrapper == nil { + wrapper = aeadwrapper.NewShamirWrapper() } - } - if wrapper == nil { - seal = defaultSeal + configuredSeals++ + } else if config.IsMultisealEnabled() { + recordSealConfigWarning(fmt.Errorf("error configuring seal: %v", wrapperConfigError)) } else { - var err error - seal, err = vault.NewAutoSeal(&vaultseal.Access{ - Wrapper: wrapper, - }) - if err != nil { - return nil, nil, nil, nil, nil, err + // It seems that we are checking for this particular error here is to distinguish between a + // mis-configured seal vs one that fails for another reason. Apparently the only other reason is + // a key not found error. It seems the intention is for the key not found error to be returned + // as a seal specific error later + if !errwrap.ContainsType(wrapperConfigError, new(logical.KeyNotFoundError)) { + return nil, fmt.Errorf("error parsing Seal configuration: %s", wrapperConfigError) + } else { + sealLogger.Error("error configuring seal", "name", configSeal.Name, "err", wrapperConfigError) + recordSealConfigError(wrapperConfigError) } } - infoPrefix := "" + + sealWrapper := vaultseal.NewSealWrapper( + wrapper, + configSeal.Priority, + configSeal.Name, + configSeal.Type, + configSeal.Disabled, + wrapperConfigError == nil, + ) + if configSeal.Disabled { - unwrapSeal = seal - infoPrefix = "Old " + disabledSealWrappers = append(disabledSealWrappers, sealWrapper) } else { - barrierSeal = seal - barrierWrapper = wrapper + enabledSealWrappers = append(enabledSealWrappers, sealWrapper) + } + + sealWrapperInfoKeysMap[sealWrapper.Name] = infoKeysAndMap{ + keys: wrapperInfoKeys, + theMap: wrapperInfoMap, + } + } + + if len(enabledSealWrappers) == 0 && len(disabledSealWrappers) == 0 && sealConfigWarning != nil { + // All of them errored out, so warnings are now errors + recordSealConfigError(sealConfigWarning) + sealConfigWarning = nil + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Set the info keys, this modifies the function arguments `info` and `infoKeys` + // TODO(SEALHA): Why are we doing this? What is its use? 
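In the seal loop above, a wrapper that fails to configure is recorded as a warning when multiseal is enabled (the node can limp along on the remaining seals) but as a hard error otherwise, with `errors.Join` accumulating either kind. A runnable sketch of that warn-versus-fail accumulation:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	var sealConfigError, sealConfigWarning error
	recordError := func(err error) { sealConfigError = errors.Join(sealConfigError, err) }
	recordWarning := func(err error) { sealConfigWarning = errors.Join(sealConfigWarning, err) }

	multisealEnabled := true
	for _, name := range []string{"kms-a", "kms-b"} {
		err := fmt.Errorf("error configuring seal %q", name)
		if multisealEnabled {
			recordWarning(err) // other seals may still bring the node up
		} else {
			recordError(err) // single-seal mode: treat as fatal
		}
	}

	fmt.Println("errors:  ", sealConfigError)
	fmt.Println("warnings:", sealConfigWarning)
}
```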
+ appendWrapperInfoKeys := func(prefix string, sealWrappers []*vaultseal.SealWrapper) { + if len(sealWrappers) == 0 { + return + } + useName := false + if len(sealWrappers) > 1 { + useName = true + } + for _, sealWrapper := range sealWrappers { + if useName { + prefix = fmt.Sprintf("%s %s ", prefix, sealWrapper.Name) + } + for _, k := range sealWrapperInfoKeysMap[sealWrapper.Name].keys { + infoKeys = append(infoKeys, prefix+k) + info[prefix+k] = sealWrapperInfoKeysMap[sealWrapper.Name].theMap[k] + } + } + } + appendWrapperInfoKeys("", enabledSealWrappers) + appendWrapperInfoKeys("Old", disabledSealWrappers) + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Compute seal generation + sealGenerationInfo, err := c.computeSealGenerationInfo(existingSealGenerationInfo, allSealKmsConfigs, hasPartiallyWrappedPaths, config.IsMultisealEnabled()) + if err != nil { + return nil, err + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Create the Seals + + containsShamir := func(sealWrappers []*vaultseal.SealWrapper) bool { + for _, si := range sealWrappers { + if vault.SealConfigTypeShamir.IsSameAs(si.SealConfigType) { + return true + } + } + return false + } + + var barrierSeal vault.Seal + var unwrapSeal vault.Seal + + sealLogger := c.logger + switch { + case len(enabledSealWrappers) == 0: + return nil, errors.Join(sealConfigWarning, errors.New("no enabled Seals in configuration")) + case configuredSeals == 0: + return nil, errors.Join(sealConfigWarning, errors.New("no seals were successfully initialized")) + case len(enabledSealWrappers) == 1 && containsShamir(enabledSealWrappers): + // The barrier seal is Shamir. If there are any disabled seals, then we put them all in the same + // autoSeal. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewDefaultSeal(a) + if len(disabledSealWrappers) > 0 { + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewAutoSeal(a) + } else if sealGenerationInfo.Generation == 1 { + // First generation, and shamir, with no disabled wrapperrs, so there can be no wrapped values + sealGenerationInfo.SetRewrapped(true) + } + + case len(disabledSealWrappers) == 1 && containsShamir(disabledSealWrappers): + // The unwrap seal is Shamir, we are migrating to an autoSeal. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewDefaultSeal(a) + + case config.IsMultisealEnabled(): + // We know we are not using Shamir seal, that we are not migrating away from one, and multi seal is supported, + // so just put enabled and disabled wrappers on the same seal Access + allSealWrappers := append(enabledSealWrappers, disabledSealWrappers...) + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, allSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + if configuredSeals < len(enabledSealWrappers) { + c.UI.Warn("WARNING: running with fewer than all configured seals during unseal. 
Will not be fully highly available until errors are corrected and Vault restarted.") + } + case len(enabledSealWrappers) == 1: + // We may have multiple seals disabled, but we know Shamir is not one of them. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + if len(disabledSealWrappers) > 0 { + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewAutoSeal(a) + } + + default: + // We know there are multiple enabled seals but multi seal is not supported. + return nil, errors.Join(sealConfigWarning, errors.New("error: more than one enabled seal found")) + } + + return &SetSealResponse{ + barrierSeal: barrierSeal, + unwrapSeal: unwrapSeal, + sealConfigError: sealConfigError, + sealConfigWarning: sealConfigWarning, + hasPartiallyWrappedPaths: hasPartiallyWrappedPaths, + }, nil +} + +func (c *ServerCommand) computeSealGenerationInfo(existingSealGenInfo *vaultseal.SealGenerationInfo, sealConfigs []*configutil.KMS, hasPartiallyWrappedPaths, multisealEnabled bool) (*vaultseal.SealGenerationInfo, error) { + generation := uint64(1) + + if existingSealGenInfo != nil { + // This forces a seal re-wrap on all seal related config changes, as we can't + // be sure what effect the config change might have. This is purposefully different + // from the Validate call below, which just matches on seal configs based + // on name/type. + if cmp.Equal(existingSealGenInfo.Seals, sealConfigs) { + return existingSealGenInfo, nil } - for _, k := range sealInfoKeys { - infoKeys = append(infoKeys, infoPrefix+k) - info[infoPrefix+k] = sealInfoMap[k] + generation = existingSealGenInfo.Generation + 1 + } + c.logger.Info("incrementing seal generation", "generation", generation) + + // If the stored copy doesn't match the current configuration, we introduce a new generation + // which keeps track of whether a rewrap of all CSPs and seal wrapped values has completed (initially false). + newSealGenInfo := &vaultseal.SealGenerationInfo{ + Generation: generation, + Seals: sealConfigs, + Enabled: multisealEnabled, + } + + if multisealEnabled || (existingSealGenInfo != nil && existingSealGenInfo.Enabled) { + err := newSealGenInfo.Validate(existingSealGenInfo, hasPartiallyWrappedPaths) + if err != nil { + return nil, err } - createdSeals = append(createdSeals, seal) } - return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, nil + + return newSealGenInfo, nil +} + +func hasPartiallyWrappedPaths(ctx context.Context, backend physical.Backend) (bool, error) { + paths, err := vault.GetPartiallySealWrappedPaths(ctx, backend) + if err != nil { + return false, err + } + + return len(paths) > 0, nil } func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.CoreConfig, backend physical.Backend) (bool, error) { @@ -2638,8 +2832,10 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. AuditBackends: c.AuditBackends, CredentialBackends: c.CredentialBackends, LogicalBackends: c.LogicalBackends, + LogLevel: config.LogLevel, Logger: c.logger, DetectDeadlocks: config.DetectDeadlocks, + ImpreciseLeaseRoleTracking: config.ImpreciseLeaseRoleTracking, DisableSentinelTrace: config.DisableSentinelTrace, DisableCache: config.DisableCache, DisableMlock: config.DisableMlock, @@ -2648,6 +2844,7 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. 
ClusterName: config.ClusterName, CacheSize: config.CacheSize, PluginDirectory: config.PluginDirectory, + PluginTmpdir: config.PluginTmpdir, PluginFileUid: config.PluginFileUid, PluginFilePermissions: config.PluginFilePermissions, EnableUI: config.EnableUI, @@ -2668,6 +2865,7 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. LicensePath: config.LicensePath, DisableSSCTokens: config.DisableSSCTokens, Experiments: config.Experiments, + AdministrativeNamespacePath: config.AdministrativeNamespacePath, } if c.flagDev { @@ -2701,7 +2899,7 @@ func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server return nil } -func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string) error { +func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string, clusterJSON *testcluster.ClusterJson) error { if c.flagDev && !c.flagDevSkipInit { init, err := c.enableDev(core, coreConfig) @@ -2709,6 +2907,10 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. return fmt.Errorf("Error initializing Dev mode: %s", err) } + if clusterJSON != nil { + clusterJSON.RootToken = init.RootToken + } + var plugins, pluginsNotLoaded []string if c.flagDevPluginDir != "" && c.flagDevPluginInit { @@ -2726,7 +2928,7 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. for _, name := range list { path := filepath.Join(f.Name(), name) if err := c.addPlugin(path, init.RootToken, core); err != nil { - if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) { + if !errwrap.Contains(err, plugincatalog.ErrPluginBadType.Error()) { return fmt.Errorf("Error enabling plugin %s: %s", name, err) } pluginsNotLoaded = append(pluginsNotLoaded, name) @@ -2837,7 +3039,7 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, lns []listenerutil.Listener) error { for _, ln := range lns { if ln.Config == nil { - return fmt.Errorf("Found nil listener config after parsing") + return fmt.Errorf("found nil listener config after parsing") } if err := config2.IsValidListener(ln.Config); err != nil { @@ -2888,6 +3090,187 @@ func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, return nil } +// reloadSealsOnLeaderActivation checks to see if the in-memory seal generation info is stale, and if so, +// reloads the seal configuration. 
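The staleness check described above compares seal generation numbers, and computeSealGenerationInfo earlier derives them by structurally comparing the stored seal stanzas with the freshly parsed ones. A minimal, hypothetical sketch of that bump rule, using simplified stand-in types in place of configutil.KMS and vaultseal.SealGenerationInfo:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// kmsConfig and sealGenerationInfo are invented stand-ins for
// configutil.KMS and vaultseal.SealGenerationInfo.
type kmsConfig struct {
	Type     string
	Name     string
	Priority int
}

type sealGenerationInfo struct {
	Generation uint64
	Seals      []kmsConfig
}

// computeGeneration keeps the stored generation when the seal stanzas are
// structurally unchanged, and bumps it on any change, which is what forces
// a re-wrap of seal-wrapped values.
func computeGeneration(existing *sealGenerationInfo, configured []kmsConfig) sealGenerationInfo {
	if existing != nil {
		if cmp.Equal(existing.Seals, configured) {
			return *existing // nothing changed; keep the stored generation
		}
		return sealGenerationInfo{Generation: existing.Generation + 1, Seals: configured}
	}
	return sealGenerationInfo{Generation: 1, Seals: configured}
}

func main() {
	gen1 := computeGeneration(nil, []kmsConfig{{Type: "awskms", Name: "a", Priority: 1}})
	same := computeGeneration(&gen1, []kmsConfig{{Type: "awskms", Name: "a", Priority: 1}})
	bumped := computeGeneration(&gen1, []kmsConfig{{Type: "awskms", Name: "a", Priority: 2}})
	fmt.Println(gen1.Generation, same.Generation, bumped.Generation) // 1 1 2
}
```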
+func (c *ServerCommand) reloadSealsOnLeaderActivation(ctx context.Context, core *vault.Core) error { + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, core.PhysicalAccess()) + if err != nil { + return fmt.Errorf("error checking for stale seal generation info: %w", err) + } + if existingSealGenerationInfo == nil { + c.logger.Debug("not reloading seals config since there is no seal generation info in storage") + return nil + } + + currentSealGenerationInfo := core.SealAccess().GetAccess().GetSealGenerationInfo() + if currentSealGenerationInfo == nil { + c.logger.Debug("not reloading seal config since there is no current generation info (the seal has not been initialized)") + return nil + } + if currentSealGenerationInfo.Generation >= existingSealGenerationInfo.Generation { + c.logger.Debug("seal generation info is up to date, not reloading seal configuration") + return nil + } + + // Reload seal configuration + + config, _, err := c.reloadConfigFiles() + if err != nil { + return fmt.Errorf("error reading configuration files while reloading seal configuration: %w", err) + } + if config == nil { + return errors.New("no configuration files found while reloading seal configuration") + } + reloaded, err := c.reloadSeals(ctx, false, core, config) + if reloaded { + core.SetConfig(config) + } + return err +} + +// reloadSealsOnSigHup will reload seal configuration as a result of receiving a SIGHUP signal. +func (c *ServerCommand) reloadSealsOnSigHup(ctx context.Context, core *vault.Core, config *server.Config) (bool, error) { + return c.reloadSeals(ctx, true, core, config) +} + +// reloadSeals reloads configuration files and determines whether it needs to re-create the Seal.Access() objects. +// This function needs to detect that core.SealAccess() is no longer using the seal Wrapper that is specified +// in the seal configuration files. +// This function returns true if the newConfig was used to re-create the Seal.Access() objects. In other words, +// if false is returned, no changes were made to the seals. +func (c *ServerCommand) reloadSeals(ctx context.Context, grabStateLock bool, core *vault.Core, newConfig *server.Config) (bool, error) { + if core.IsInSealMigrationMode(grabStateLock) { + c.logger.Debug("not reloading seal configuration since Vault is in migration mode") + return false, nil + } + + currentConfig := core.GetCoreConfigInternal() + + // We only want to reload if multiseal is currently enabled, or it is being enabled + if !(currentConfig.IsMultisealEnabled() || newConfig.IsMultisealEnabled()) { + c.logger.Debug("not reloading seal configuration since enable_multiseal is not set, nor is it being enabled") + return false, nil + } + + if conf, err := core.PhysicalBarrierSealConfig(ctx); err != nil { + return false, fmt.Errorf("error reading barrier seal configuration from storage while reloading seals: %w", err) + } else if conf == nil { + c.logger.Debug("not reloading seal configuration since there is no barrier config in storage (the seal has not been initialized)") + return false, nil + } + + if core.SealAccess().BarrierSealConfigType() == vault.SealConfigTypeShamir { + switch { + case len(newConfig.Seals) == 0: + // We are fine; our ServerCommand.reloadConfigFiles() does not do the "automagic" creation + // of the Shamir seal configuration. 
+ c.logger.Debug("not reloading seal configuration since the new one has no seal stanzas") + return false, nil + + case len(newConfig.Seals) == 1 && newConfig.Seals[0].Disabled: + // If we have only one seal and it is disabled, it means that the newConfig wants to migrate + // to Shamir, which is not supported by seal reloading. + c.logger.Debug("not reloading seal configuration since the new one specifies migration to Shamir") + return false, nil + + case len(newConfig.Seals) == 1 && newConfig.Seals[0].Type == vault.SealConfigTypeShamir.String(): + // Having a single Shamir seal in newConfig is not really possible, since a Shamir seal + // is specified in configuration by *not* having a seal stanza. If we were to hit this + // case, though, it is equivalent to trying to migrate to Shamir, which is not supported + // by seal reloading. + c.logger.Debug("not reloading seal configuration since the new one has single Shamir stanza") + return false, nil + } + } + + // Verify that the new config we picked up is not trying to migrate from autoseal to shamir + if len(newConfig.Seals) == 1 && newConfig.Seals[0].Disabled { + // If we get here, it means the node was not started in migration mode, but the new config says + // we should go into migration mode. This case should be caught by the core.IsInSealMigrationMode() + // above. + + return false, errors.New("not reloading seal configuration: moving from autoseal to shamir requires seal migration") + } + + // Verify that the new config we picked up is not trying to migrate shamir to autoseal + if core.SealAccess().BarrierSealConfigType() == vault.SealConfigTypeShamir { + return false, errors.New("not reloading seal configuration: moving from Shamir to autoseal requires seal migration") + } + + infoKeysReload := make([]string, 0) + infoReload := make(map[string]string) + + core.SetMultisealEnabled(newConfig.IsMultisealEnabled()) + setSealResponse, secureRandomReader, err := c.configureSeals(ctx, newConfig, core.PhysicalAccess(), infoKeysReload, infoReload) + if err != nil { + return false, fmt.Errorf("error reloading seal configuration: %w", err) + } + if setSealResponse.sealConfigError != nil { + return false, fmt.Errorf("error reloading seal configuration: %w", setSealResponse.sealConfigError) + } + + newGen := setSealResponse.barrierSeal.GetAccess().GetSealGenerationInfo() + + var standby, perf bool + if grabStateLock { + // If grabStateLock is false we know we are on a leader activation + standby, perf = core.StandbyStates() + } + switch { + case !perf && !standby: + c.logger.Debug("persisting reloaded seals as we are the active node") + err = core.SetSeals(ctx, grabStateLock, setSealResponse.barrierSeal, secureRandomReader, !newGen.IsRewrapped() || setSealResponse.hasPartiallyWrappedPaths) + if err != nil { + return false, fmt.Errorf("error setting seal: %s", err) + } + + if err := core.SetPhysicalSealGenInfo(ctx, newGen); err != nil { + c.logger.Warn("could not update seal information in storage", "err", err) + } + case perf: + c.logger.Debug("updating reloaded seals in memory on perf standby") + err = core.SetSealsOnPerfStandby(ctx, grabStateLock, setSealResponse.barrierSeal, secureRandomReader) + if err != nil { + return false, fmt.Errorf("error setting seal on perf standby: %s", err) + } + default: + return false, errors.New("skipping seal reload as we are a standby") + } + + // finalize the old seals and set the new seals as the current ones + c.setSealsToFinalize(setSealResponse.getCreatedSeals()) + + c.logger.Debug("seal configuration 
reloaded successfully") + + return true, nil +} + +// Attempt to read the cluster name from the insecure storage. +func (c *ServerCommand) readClusterNameFromInsecureStorage(b physical.Backend) (string, error) { + ctx := context.Background() + entry, err := b.Get(ctx, "core/cluster/local/name") + if err != nil { + return "", err + } + + var result map[string]interface{} + // Decode JSON data into the map + + if entry != nil { + if err := jsonutil.DecodeJSON(entry.Value, &result); err != nil { + return "", fmt.Errorf("failed to decode JSON data: %w", err) + } + } + + // Retrieve the value of the "name" field from the map + name, ok := result["name"].(string) + if !ok { + return "", fmt.Errorf("failed to extract name field from decoded JSON") + } + + return name, nil +} + func SetStorageMigration(b physical.Backend, active bool) error { if !active { return b.Delete(context.Background(), storageMigrationLock) diff --git a/command/server/config.go b/command/server/config.go index ebeb7753386d..9d31ed67b31e 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -24,6 +24,8 @@ import ( "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/mitchellh/mapstructure" ) const ( @@ -32,14 +34,8 @@ const ( VaultDevKeyFilename = "vault-key.pem" ) -var ( - entConfigValidate = func(_ *Config, _ string) []configutil.ConfigError { - return nil - } - - // Modified internally for testing. - validExperiments = experiments.ValidExperiments() -) +// Modified internally for testing. +var validExperiments = experiments.ValidExperiments() // Config is the configuration for the vault server. type Config struct { @@ -73,6 +69,7 @@ type Config struct { ClusterCipherSuites string `hcl:"cluster_cipher_suites"` PluginDirectory string `hcl:"plugin_directory"` + PluginTmpdir string `hcl:"plugin_tmpdir"` PluginFileUid int `hcl:"plugin_file_uid"` @@ -110,6 +107,8 @@ type Config struct { DetectDeadlocks string `hcl:"detect_deadlocks"` + ImpreciseLeaseRoleTracking bool `hcl:"imprecise_lease_role_tracking"` + EnableResponseHeaderRaftNodeID bool `hcl:"-"` EnableResponseHeaderRaftNodeIDRaw interface{} `hcl:"enable_response_header_raft_node_id"` @@ -133,14 +132,10 @@ func (c *Config) Validate(sourceFilePath string) []configutil.ConfigError { for _, l := range c.Listeners { results = append(results, l.Validate(sourceFilePath)...) } - results = append(results, c.validateEnt(sourceFilePath)...) + results = append(results, entValidateConfig(c, sourceFilePath)...) return results } -func (c *Config) validateEnt(sourceFilePath string) []configutil.ConfigError { - return entConfigValidate(c, sourceFilePath) -} - // DevConfig is a Config that is used for dev mode of Vault. func DevConfig(storageType string) (*Config, error) { hclStr := ` @@ -174,13 +169,13 @@ ui = true } // DevTLSConfig is a Config that is used for dev tls mode of Vault. 
-func DevTLSConfig(storageType, certDir string) (*Config, error) { +func DevTLSConfig(storageType, certDir string, extraSANs []string) (*Config, error) { ca, err := GenerateCA() if err != nil { return nil, err } - cert, key, err := GenerateCert(ca.Template, ca.Signer) + cert, key, err := generateCert(ca.Template, ca.Signer, extraSANs) if err != nil { return nil, err } @@ -196,7 +191,10 @@ func DevTLSConfig(storageType, certDir string) (*Config, error) { if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevKeyFilename), []byte(key), 0o400); err != nil { return nil, err } + return parseDevTLSConfig(storageType, certDir) +} +func parseDevTLSConfig(storageType, certDir string) (*Config, error) { hclStr := ` disable_mlock = true @@ -219,8 +217,8 @@ storage "%s" { ui = true ` - - hclStr = fmt.Sprintf(hclStr, certDir, certDir, storageType) + certDirEscaped := strings.Replace(certDir, "\\", "\\\\", -1) + hclStr = fmt.Sprintf(hclStr, certDirEscaped, certDirEscaped, storageType) parsed, err := ParseConfig(hclStr, "") if err != nil { return nil, err @@ -366,6 +364,11 @@ func (c *Config) Merge(c2 *Config) *Config { result.PluginDirectory = c2.PluginDirectory } + result.PluginTmpdir = c.PluginTmpdir + if c2.PluginTmpdir != "" { + result.PluginTmpdir = c2.PluginTmpdir + } + result.PluginFileUid = c.PluginFileUid if c2.PluginFileUid != 0 { result.PluginFileUid = c2.PluginFileUid @@ -407,6 +410,11 @@ func (c *Config) Merge(c2 *Config) *Config { result.DetectDeadlocks = c2.DetectDeadlocks } + result.ImpreciseLeaseRoleTracking = c.ImpreciseLeaseRoleTracking + if c2.ImpreciseLeaseRoleTracking { + result.ImpreciseLeaseRoleTracking = c2.ImpreciseLeaseRoleTracking + } + result.EnableResponseHeaderRaftNodeID = c.EnableResponseHeaderRaftNodeID if c2.EnableResponseHeaderRaftNodeID { result.EnableResponseHeaderRaftNodeID = c2.EnableResponseHeaderRaftNodeID @@ -442,6 +450,11 @@ func (c *Config) Merge(c2 *Config) *Config { } } + result.AdministrativeNamespacePath = c.AdministrativeNamespacePath + if c2.AdministrativeNamespacePath != "" { + result.AdministrativeNamespacePath = c2.AdministrativeNamespacePath + } + result.entConfig = c.entConfig.Merge(c2.entConfig) result.Experiments = mergeExperiments(c.Experiments, c2.Experiments) @@ -489,13 +502,21 @@ func CheckConfig(c *Config, e error) (*Config, error) { return c, e } - if len(c.Seals) == 2 { - switch { - case c.Seals[0].Disabled && c.Seals[1].Disabled: - return nil, errors.New("seals: two seals provided but both are disabled") - case !c.Seals[0].Disabled && !c.Seals[1].Disabled: - return nil, errors.New("seals: two seals provided but neither is disabled") + if err := c.checkSealConfig(); err != nil { + return nil, err + } + + sealMap := make(map[string]*configutil.KMS) + for _, seal := range c.Seals { + if seal.Name == "" { + return nil, errors.New("seals: seal name is empty") } + + if _, ok := sealMap[seal.Name]; ok { + return nil, errors.New("seals: seal names must be unique") + } + + sealMap[seal.Name] = seal } return c, nil @@ -727,7 +748,7 @@ func ParseConfig(d, source string) (*Config, error) { return nil, fmt.Errorf("error validating experiment(s) from config: %w", err) } - if err := result.parseConfig(list); err != nil { + if err := result.parseConfig(list, source); err != nil { return nil, fmt.Errorf("error parsing enterprise config: %w", err) } @@ -766,7 +787,7 @@ func ExperimentsFromEnvAndCLI(config *Config, envKey string, flagExperiments []s return nil } -// Validate checks each experiment is a known experiment. 
+// validateExperiments checks each experiment is a known experiment. func validateExperiments(experiments []string) error { var invalid []string @@ -1099,6 +1120,7 @@ func (c *Config) Sanitized() map[string]interface{} { "cluster_cipher_suites": c.ClusterCipherSuites, "plugin_directory": c.PluginDirectory, + "plugin_tmpdir": c.PluginTmpdir, "plugin_file_uid": c.PluginFileUid, @@ -1126,6 +1148,8 @@ func (c *Config) Sanitized() map[string]interface{} { "experiments": c.Experiments, "detect_deadlocks": c.DetectDeadlocks, + + "imprecise_lease_role_tracking": c.ImpreciseLeaseRoleTracking, } for k, v := range sharedResult { result[k] = v @@ -1133,23 +1157,39 @@ func (c *Config) Sanitized() map[string]interface{} { // Sanitize storage stanza if c.Storage != nil { + storageType := c.Storage.Type sanitizedStorage := map[string]interface{}{ - "type": c.Storage.Type, + "type": storageType, "redirect_addr": c.Storage.RedirectAddr, "cluster_addr": c.Storage.ClusterAddr, "disable_clustering": c.Storage.DisableClustering, } + + if storageType == "raft" { + sanitizedStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.Storage.Config["max_entry_size"], + } + } + result["storage"] = sanitizedStorage } // Sanitize HA storage stanza if c.HAStorage != nil { + haStorageType := c.HAStorage.Type sanitizedHAStorage := map[string]interface{}{ - "type": c.HAStorage.Type, + "type": haStorageType, "redirect_addr": c.HAStorage.RedirectAddr, "cluster_addr": c.HAStorage.ClusterAddr, "disable_clustering": c.HAStorage.DisableClustering, } + + if haStorageType == "raft" { + sanitizedHAStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.HAStorage.Config["max_entry_size"], + } + } + result["ha_storage"] = sanitizedHAStorage } @@ -1188,3 +1228,12 @@ func (c *Config) found(s, k string) { delete(c.UnusedKeys, s) c.FoundKeys = append(c.FoundKeys, k) } + +func (c *Config) ToVaultNodeConfig() (*testcluster.VaultNodeConfig, error) { + var vnc testcluster.VaultNodeConfig + err := mapstructure.Decode(c, &vnc) + if err != nil { + return nil, err + } + return &vnc, nil +} diff --git a/command/server/config_custom_response_headers_test.go b/command/server/config_custom_response_headers_test.go index 11c4300b4160..8db646bfd128 100644 --- a/command/server/config_custom_response_headers_test.go +++ b/command/server/config_custom_response_headers_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server diff --git a/command/server/config_oss.go b/command/server/config_oss.go new file mode 100644 index 000000000000..22abae3003f4 --- /dev/null +++ b/command/server/config_oss.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +func (c *Config) IsMultisealEnabled() bool { + return false +} diff --git a/command/server/config_oss_test.go b/command/server/config_oss_test.go index 4a08ddf78ad0..20e97c1cfe7e 100644 --- a/command/server/config_oss_test.go +++ b/command/server/config_oss_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise diff --git a/command/server/config_telemetry_test.go b/command/server/config_telemetry_test.go index 54245d05151a..1f29a3862e9e 100644 --- a/command/server/config_telemetry_test.go +++ b/command/server/config_telemetry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
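The ToVaultNodeConfig helper added above leans on mapstructure to copy identically named fields between two independently defined struct types. A small sketch of the same pattern, with invented types standing in for server.Config and testcluster.VaultNodeConfig:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// serverConfig and nodeConfig are hypothetical stand-ins for the real
// server.Config and testcluster.VaultNodeConfig types.
type serverConfig struct {
	ClusterName     string
	PluginDirectory string
}

type nodeConfig struct {
	ClusterName     string
	PluginDirectory string
}

func main() {
	src := serverConfig{ClusterName: "testcluster", PluginDirectory: "/path/to/plugins"}

	// Decode copies fields by name via reflection, with no hand-written
	// conversion code between the two types.
	var dst nodeConfig
	if err := mapstructure.Decode(src, &dst); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst)
}
```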
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMetricFilterConfigs(t *testing.T) { @@ -41,3 +42,35 @@ func TestMetricFilterConfigs(t *testing.T) { } }) } + +// TestRollbackMountPointMetricsConfig verifies that the add_mount_point_rollback_metrics +// config option is parsed correctly, when it is set to true. Also verifies that +// the default for this setting is false +func TestRollbackMountPointMetricsConfig(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + configFile string + wantMountPoint bool + }{ + { + name: "include mount point", + configFile: "./test-fixtures/telemetry/rollback_mount_point.hcl", + wantMountPoint: true, + }, + { + name: "exclude mount point", + configFile: "./test-fixtures/telemetry/valid_prefix_filter.hcl", + wantMountPoint: false, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + config, err := LoadConfigFile(tc.configFile) + require.NoError(t, err) + require.Equal(t, tc.wantMountPoint, config.Telemetry.RollbackMetricsIncludeMountPoint) + }) + } +} diff --git a/command/server/config_test.go b/command/server/config_test.go index 99a8e03d66de..9fa20b182fd2 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -8,6 +8,9 @@ import ( "reflect" "strings" "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/stretchr/testify/require" ) func TestLoadConfigFile(t *testing.T) { @@ -62,6 +65,12 @@ func TestParseStorage(t *testing.T) { testParseStorageTemplate(t) } +// TestConfigWithAdministrativeNamespace tests that .hcl and .json configurations are correctly parsed when the administrative_namespace_path is present. 
+func TestConfigWithAdministrativeNamespace(t *testing.T) { + testConfigWithAdministrativeNamespaceHcl(t) + testConfigWithAdministrativeNamespaceJson(t) +} + func TestUnknownFieldValidation(t *testing.T) { testUnknownFieldValidation(t) } @@ -186,3 +195,102 @@ func TestMerge(t *testing.T) { }) } } + +// Test_parseDevTLSConfig verifies that both Windows and Unix directories are correctly escaped when creating a dev TLS +// configuration in HCL +func Test_parseDevTLSConfig(t *testing.T) { + tests := []struct { + name string + certDirectory string + }{ + { + name: "windows path", + certDirectory: `C:\Users\ADMINI~1\AppData\Local\Temp\2\vault-tls4169358130`, + }, + { + name: "unix path", + certDirectory: "/tmp/vault-tls4169358130", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := parseDevTLSConfig("file", tt.certDirectory) + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevCertFilename), cfg.Listeners[0].TLSCertFile) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevKeyFilename), cfg.Listeners[0].TLSKeyFile) + }) + } +} + +func TestCheckConfig(t *testing.T) { + testCases := []struct { + name string + config *Config + expectError bool + }{ + { + name: "no-seals-configured", + config: &Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{}}}, + expectError: false, + }, + { + name: "seal-with-empty-name", + config: &Config{SharedConfig: &configutil.SharedConfig{ + Seals: []*configutil.KMS{ + { + Type: "awskms", + Disabled: false, + }, + }, + }}, + expectError: true, + }, + { + name: "seals-with-unique-names", + config: &Config{SharedConfig: &configutil.SharedConfig{ + Seals: []*configutil.KMS{ + { + Type: "awskms", + Disabled: false, + Name: "enabled-awskms", + }, + { + Type: "awskms", + Disabled: true, + Name: "disabled-awskms", + }, + }, + }}, + expectError: false, + }, + { + name: "seals-with-same-names", + config: &Config{SharedConfig: &configutil.SharedConfig{ + Seals: []*configutil.KMS{ + { + Type: "awskms", + Disabled: false, + Name: "awskms", + }, + { + Type: "awskms", + Disabled: true, + Name: "awskms", + }, + }, + }}, + expectError: true, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + _, err := CheckConfig(tt.config, nil) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index f0327f2e61ec..c017e15ec9d8 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
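Test_parseDevTLSConfig above exercises the escaping that parseDevTLSConfig now performs: backslashes in a Windows certificate directory must be doubled before the path is interpolated into the HCL template, or the HCL parser would read them as escape sequences. A minimal demo of the same strings.Replace call (the cert filename here is an assumed value for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A Windows-style certificate directory, as in the test case above.
	certDir := `C:\Users\ADMINI~1\AppData\Local\Temp\2\vault-tls4169358130`

	// Double every backslash so HCL sees literal path separators rather
	// than escape sequences; this mirrors the call in parseDevTLSConfig.
	escaped := strings.Replace(certDir, "\\", "\\\\", -1)

	fmt.Printf("tls_cert_file = \"%s/%s\"\n", escaped, "vault-cert.pem")
}
```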
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -11,13 +11,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/go-test/deep" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/token" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/stretchr/testify/require" ) var DefaultCustomHeaders = map[string]map[string]string{ @@ -102,18 +101,22 @@ func testLoadConfigFile_topLevel(t *testing.T, entropy *configutil.Entropy) { Seals: []*configutil.KMS{ { Type: "nopurpose", + Name: "nopurpose", }, { Type: "stringpurpose", Purpose: []string{"foo"}, + Name: "stringpurpose", }, { Type: "commastringpurpose", Purpose: []string{"foo", "bar"}, + Name: "commastringpurpose", }, { Type: "slicepurpose", Purpose: []string{"zip", "zap"}, + Name: "slicepurpose", }, }, }, @@ -472,6 +475,9 @@ func testLoadConfigFile(t *testing.T) { EnableResponseHeaderRaftNodeIDRaw: true, LicensePath: "/path/to/license", + + PluginDirectory: "/path/to/plugins", + PluginTmpdir: "/tmp/plugins", } addExpectedEntConfig(expected, []string{}) @@ -503,7 +509,7 @@ func testUnknownFieldValidation(t *testing.T) { Problem: "unknown or unsupported field bad_value found in configuration", Position: token.Pos{ Filename: "./test-fixtures/config.hcl", - Offset: 651, + Offset: 652, Line: 37, Column: 5, }, @@ -572,6 +578,28 @@ func testUnknownFieldValidationHcl(t *testing.T) { } } +// testConfigWithAdministrativeNamespaceJson tests that a config with a valid administrative namespace path is correctly validated and loaded. +func testConfigWithAdministrativeNamespaceJson(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_with_valid_admin_ns.json") + require.NoError(t, err) + + configErrors := config.Validate("./test-fixtures/config_with_valid_admin_ns.json") + require.Empty(t, configErrors) + + require.NotEmpty(t, config.AdministrativeNamespacePath) +} + +// testConfigWithAdministrativeNamespaceHcl tests that a config with a valid administrative namespace path is correctly validated and loaded. 
+func testConfigWithAdministrativeNamespaceHcl(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_with_valid_admin_ns.hcl") + require.NoError(t, err) + + configErrors := config.Validate("./test-fixtures/config_with_valid_admin_ns.hcl") + require.Empty(t, configErrors) + + require.NotEmpty(t, config.AdministrativeNamespacePath) +} + func testLoadConfigFile_json(t *testing.T) { config, err := LoadConfigFile("./test-fixtures/config.hcl.json") if err != nil { @@ -763,9 +791,11 @@ func testConfig_Sanitized(t *testing.T) { "listeners": []interface{}{ map[string]interface{}{ "config": map[string]interface{}{ - "address": "127.0.0.1:443", + "address": "127.0.0.1:443", + "chroot_namespace": "admin/", + "disable_request_limiter": false, }, - "type": "tcp", + "type": configutil.TCP, }, }, "log_format": "", @@ -773,10 +803,12 @@ func testConfig_Sanitized(t *testing.T) { "max_lease_ttl": (30 * 24 * time.Hour) / time.Second, "pid_file": "./pidfile", "plugin_directory": "", + "plugin_tmpdir": "", "seals": []interface{}{ map[string]interface{}{ "disabled": false, "type": "awskms", + "name": "awskms", }, }, "storage": map[string]interface{}{ @@ -818,7 +850,10 @@ func testConfig_Sanitized(t *testing.T) { "lease_metrics_epsilon": time.Hour, "num_lease_metrics_buckets": 168, "add_lease_metrics_namespace_labels": false, + "add_mount_point_rollback_metrics": false, }, + "administrative_namespace_path": "admin/", + "imprecise_lease_role_tracking": false, } addExpectedEntSanitizedConfig(expected, []string{"http"}) @@ -851,6 +886,24 @@ listener "tcp" { agent_api { enable_quit = true } + proxy_api { + enable_quit = true + } + chroot_namespace = "admin" + redact_addresses = true + redact_cluster_name = true + redact_version = true + disable_request_limiter = true +} +listener "unix" { + address = "/var/run/vault.sock" + socket_mode = "644" + socket_user = "1000" + socket_group = "1000" + redact_addresses = true + redact_cluster_name = true + redact_version = true + disable_request_limiter = true }`)) config := Config{ @@ -858,16 +911,21 @@ listener "tcp" { } list, _ := obj.Node.(*ast.ObjectList) objList := list.Filter("listener") - configutil.ParseListeners(config.SharedConfig, objList) - listeners := config.Listeners - if len(listeners) == 0 { - t.Fatalf("expected at least one listener in the config") - } - listener := listeners[0] - if listener.Type != "tcp" { - t.Fatalf("expected tcp listener in the config") + listeners, err := configutil.ParseListeners(objList) + require.NoError(t, err) + // Update the shared config + config.Listeners = listeners + // Track which types of listener were found. 
+ for _, l := range config.Listeners { + config.found(l.Type.String(), l.Type.String()) } + require.Len(t, config.Listeners, 2) + tcpListener := config.Listeners[0] + require.Equal(t, configutil.TCP, tcpListener.Type) + unixListener := config.Listeners[1] + require.Equal(t, configutil.Unix, unixListener.Type) + expected := &Config{ SharedConfig: &configutil.SharedConfig{ Listeners: []*configutil.Listener{ @@ -891,7 +949,26 @@ listener "tcp" { AgentAPI: &configutil.AgentAPI{ EnableQuit: true, }, + ProxyAPI: &configutil.ProxyAPI{ + EnableQuit: true, + }, CustomResponseHeaders: DefaultCustomHeaders, + ChrootNamespace: "admin/", + RedactAddresses: true, + RedactClusterName: true, + RedactVersion: true, + DisableRequestLimiter: true, + }, + { + Type: "unix", + Address: "/var/run/vault.sock", + SocketMode: "644", + SocketUser: "1000", + SocketGroup: "1000", + RedactAddresses: false, + RedactClusterName: false, + RedactVersion: false, + DisableRequestLimiter: true, }, }, }, @@ -1080,6 +1157,7 @@ func testParseSeals(t *testing.T) { "default_hmac_key_label": "vault-hsm-hmac-key", "generate_key": "true", }, + Name: "pkcs11", }, { Type: "pkcs11", @@ -1096,10 +1174,12 @@ func testParseSeals(t *testing.T) { "default_hmac_key_label": "vault-hsm-hmac-key", "generate_key": "true", }, + Name: "pkcs11-disabled", }, }, }, } + addExpectedDefaultEntConfig(expected) config.Prune() require.Equal(t, config, expected) } diff --git a/command/server/config_test_helpers_stubs_oss.go b/command/server/config_test_helpers_stubs_oss.go new file mode 100644 index 000000000000..f7b6ef7c115e --- /dev/null +++ b/command/server/config_test_helpers_stubs_oss.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func addExpectedEntConfig(c *Config, sentinelModules []string) {} +func addExpectedDefaultEntConfig(c *Config) {} +func addExpectedEntSanitizedConfig(c map[string]interface{}, sentinelModules []string) {} diff --git a/command/server/config_test_helpers_util.go b/command/server/config_test_helpers_util.go deleted file mode 100644 index ff72cd6dbc5a..000000000000 --- a/command/server/config_test_helpers_util.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !enterprise - -package server - -func addExpectedEntConfig(c *Config, sentinelModules []string) {} -func addExpectedEntSanitizedConfig(c map[string]interface{}, sentinelModules []string) {} diff --git a/command/server/config_util.go b/command/server/config_util.go index 3570b9a59bb2..9447ded65222 100644 --- a/command/server/config_util.go +++ b/command/server/config_util.go @@ -1,17 +1,20 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise package server import ( + "errors" + "fmt" + "github.com/hashicorp/hcl/hcl/ast" ) type entConfig struct{} -func (ec *entConfig) parseConfig(list *ast.ObjectList) error { +func (ec *entConfig) parseConfig(list *ast.ObjectList, source string) error { return nil } @@ -23,3 +26,30 @@ func (ec entConfig) Merge(ec2 entConfig) entConfig { func (ec entConfig) Sanitized() map[string]interface{} { return nil } + +func (c *Config) checkSealConfig() error { + if len(c.Seals) == 0 { + return nil + } + + if len(c.Seals) > 2 { + return fmt.Errorf("seals: at most 2 seals can be provided: received %d", len(c.Seals)) + } + + disabledSeals := 0 + for _, seal := range c.Seals { + if seal.Disabled { + disabledSeals++ + } + } + + if len(c.Seals) > 1 && disabledSeals == len(c.Seals) { + return errors.New("seals: seals provided but all are disabled") + } + + if disabledSeals < len(c.Seals)-1 { + return errors.New("seals: only one seal can be enabled") + } + + return nil +} diff --git a/command/server/config_util_test.go b/command/server/config_util_test.go new file mode 100644 index 000000000000..21e98a22f9be --- /dev/null +++ b/command/server/config_util_test.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +import ( + "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/stretchr/testify/require" +) + +func TestCheckSealConfig(t *testing.T) { + testCases := []struct { + name string + config Config + expectError bool + }{ + { + name: "no-seals", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{}}}, + }, + { + name: "one-seal", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: false, + }, + }}}, + }, + { + name: "one-disabled-seal", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: true, + }, + }}}, + }, + { + name: "two-seals-one-disabled", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: false, + }, + { + Disabled: true, + }, + }}}, + }, + { + name: "two-seals-enabled", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: false, + }, + { + Disabled: false, + }, + }}}, + expectError: true, + }, + { + name: "two-disabled-seals", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: true, + }, + { + Disabled: true, + }, + }}}, + expectError: true, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.checkSealConfig() + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/command/server/hcp_link_config_test.go b/command/server/hcp_link_config_test.go index 121b855138c3..c038e9b99062 100644 --- a/command/server/hcp_link_config_test.go +++ b/command/server/hcp_link_config_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server diff --git a/command/server/listener.go b/command/server/listener.go index 19b89565ab5c..bdc2117d77ed 100644 --- a/command/server/listener.go +++ b/command/server/listener.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -9,20 +9,17 @@ import ( "io" "net" - // We must import sha512 so that it registers with the runtime so that - // certificates that use it can be parsed. - + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/helper/proxyutil" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/cli" ) // ListenerFactory is the factory function to create a listener. type ListenerFactory func(*configutil.Listener, io.Writer, cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) // BuiltinListeners is the list of built-in listener types. -var BuiltinListeners = map[string]ListenerFactory{ +var BuiltinListeners = map[configutil.ListenerType]ListenerFactory{ "tcp": tcpListenerFactory, "unix": unixListenerFactory, } diff --git a/command/server/listener_tcp.go b/command/server/listener_tcp.go index 29b601c4bfaa..5ca52faa043c 100644 --- a/command/server/listener_tcp.go +++ b/command/server/listener_tcp.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -12,10 +12,10 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" - "github.com/mitchellh/cli" ) func tcpListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) { @@ -64,6 +64,14 @@ func tcpListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Lis if len(l.XForwardedForAuthorizedAddrs) > 0 { props["x_forwarded_for_reject_not_authorized"] = strconv.FormatBool(l.XForwardedForRejectNotAuthorized) } + + if len(l.XForwardedForAuthorizedAddrs) > 0 { + props["x_forwarded_for_client_cert_header"] = fmt.Sprintf("%s", l.XForwardedForClientCertHeader) + } + + if len(l.XForwardedForAuthorizedAddrs) > 0 { + props["x_forwarded_for_client_cert_header_decoders"] = fmt.Sprintf("%s", l.XForwardedForClientCertHeaderDecoders) + } } tlsConfig, reloadFunc, err := listenerutil.TLSConfig(l, props, ui) diff --git a/command/server/listener_tcp_test.go b/command/server/listener_tcp_test.go index 6d73cf2cb32d..6508e7a8afa4 100644 --- a/command/server/listener_tcp_test.go +++ b/command/server/listener_tcp_test.go @@ -1,23 +1,22 @@ // Copyright (c) HashiCorp, Inc. 
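The listener.go change above re-keys BuiltinListeners by configutil.ListenerType instead of a bare string, so unsupported listener kinds are caught in one place with a typed lookup. A stripped-down, hypothetical sketch of that registry pattern (all names below are invented):

```go
package main

import (
	"fmt"
	"net"
)

// ListenerType mirrors the idea behind configutil.ListenerType: a dedicated
// string type so listener kinds are distinct from arbitrary strings.
type ListenerType string

const (
	TCP  ListenerType = "tcp"
	Unix ListenerType = "unix"
)

type listenerFactory func(address string) (net.Listener, error)

// builtinListeners is a stand-in for the real BuiltinListeners map.
var builtinListeners = map[ListenerType]listenerFactory{
	TCP:  func(address string) (net.Listener, error) { return net.Listen("tcp", address) },
	Unix: func(address string) (net.Listener, error) { return net.Listen("unix", address) },
}

func newListener(t ListenerType, address string) (net.Listener, error) {
	factory, ok := builtinListeners[t]
	if !ok {
		return nil, fmt.Errorf("unsupported listener type %q", string(t))
	}
	return factory(address)
}

func main() {
	ln, err := newListener(TCP, "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}
```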
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server import ( "crypto/tls" "crypto/x509" - "fmt" "io/ioutil" - "math/rand" "net" "os" "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/cli" "github.com/pires/go-proxyproto" + "github.com/stretchr/testify/require" ) func TestTCPListener(t *testing.T) { @@ -25,9 +24,7 @@ func TestTCPListener(t *testing.T) { Address: "127.0.0.1:0", TLSDisable: true, }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) connFn := func(lnReal net.Listener) (net.Conn, error) { return net.Dial("tcp", ln.Addr().String()) @@ -41,19 +38,13 @@ func TestTCPListener_tls(t *testing.T) { wd, _ := os.Getwd() wd += "/test-fixtures/reload/" - td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63())) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - // Setup initial certs - inBytes, _ := ioutil.ReadFile(wd + "reload_ca.pem") + inBytes, err := os.ReadFile(wd + "reload_ca.pem") + require.NoError(t, err) + certPool := x509.NewCertPool() ok := certPool.AppendCertsFromPEM(inBytes) - if !ok { - t.Fatal("not ok when appending CA cert") - } + require.True(t, ok, "not ok when appending CA cert") ln, _, _, err := tcpListenerFactory(&configutil.Listener{ Address: "127.0.0.1:0", @@ -62,9 +53,8 @@ func TestTCPListener_tls(t *testing.T) { TLSRequireAndVerifyClientCert: true, TLSClientCAFile: wd + "reload_ca.pem", }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) + cwd, _ := os.Getwd() clientCert, _ := tls.LoadX509KeyPair( @@ -100,9 +90,7 @@ func TestTCPListener_tls(t *testing.T) { TLSDisableClientCerts: true, TLSClientCAFile: wd + "reload_ca.pem", }, nil, cli.NewMockUi()) - if err == nil { - t.Fatal("expected error due to mutually exclusive client cert options") - } + require.Error(t, err, "expected error due to mutually exclusive client cert options") ln, _, _, err = tcpListenerFactory(&configutil.Listener{ Address: "127.0.0.1:0", @@ -111,9 +99,7 @@ func TestTCPListener_tls(t *testing.T) { TLSDisableClientCerts: true, TLSClientCAFile: wd + "reload_ca.pem", }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) testListenerImpl(t, ln, connFn(false), "foo.example.com", 0, "127.0.0.1", false) } @@ -122,19 +108,11 @@ func TestTCPListener_tls13(t *testing.T) { wd, _ := os.Getwd() wd += "/test-fixtures/reload/" - td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63())) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - // Setup initial certs inBytes, _ := ioutil.ReadFile(wd + "reload_ca.pem") certPool := x509.NewCertPool() ok := certPool.AppendCertsFromPEM(inBytes) - if !ok { - t.Fatal("not ok when appending CA cert") - } + require.True(t, ok, "not ok when appending CA cert") ln, _, _, err := tcpListenerFactory(&configutil.Listener{ Address: "127.0.0.1:0", @@ -144,9 +122,8 @@ func TestTCPListener_tls13(t *testing.T) { TLSClientCAFile: wd + "reload_ca.pem", TLSMinVersion: "tls13", }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) + cwd, _ := os.Getwd() clientCert, _ := tls.LoadX509KeyPair( @@ -183,9 +160,7 @@ func TestTCPListener_tls13(t *testing.T) { TLSClientCAFile: wd + "reload_ca.pem", TLSMinVersion: "tls13", }, nil, 
cli.NewMockUi()) - if err == nil { - t.Fatal("expected error due to mutually exclusive client cert options") - } + require.Error(t, err, "expected error due to mutually exclusive client cert options") ln, _, _, err = tcpListenerFactory(&configutil.Listener{ Address: "127.0.0.1:0", @@ -195,9 +170,7 @@ func TestTCPListener_tls13(t *testing.T) { TLSClientCAFile: wd + "reload_ca.pem", TLSMinVersion: "tls13", }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) testListenerImpl(t, ln, connFn(false), "foo.example.com", tls.VersionTLS13, "127.0.0.1", false) @@ -209,9 +182,7 @@ func TestTCPListener_tls13(t *testing.T) { TLSClientCAFile: wd + "reload_ca.pem", TLSMaxVersion: "tls12", }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) testListenerImpl(t, ln, connFn(false), "foo.example.com", tls.VersionTLS12, "127.0.0.1", false) } @@ -429,9 +400,7 @@ func TestTCPListener_proxyProtocol(t *testing.T) { proxyProtocolAuthorizedAddrs := []*sockaddr.SockAddrMarshaler{} if tc.AuthorizedAddr != "" { sockAddr, err := sockaddr.NewSockAddr(tc.AuthorizedAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) proxyProtocolAuthorizedAddrs = append( proxyProtocolAuthorizedAddrs, &sockaddr.SockAddrMarshaler{SockAddr: sockAddr}, @@ -444,12 +413,11 @@ func TestTCPListener_proxyProtocol(t *testing.T) { ProxyProtocolBehavior: tc.Behavior, ProxyProtocolAuthorizedAddrs: proxyProtocolAuthorizedAddrs, }, nil, cli.NewMockUi()) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) connFn := func(lnReal net.Listener) (net.Conn, error) { - conn, err := net.Dial("tcp", ln.Addr().String()) + d := net.Dialer{Timeout: 3 * time.Second} + conn, err := d.Dial("tcp", lnReal.Addr().String()) if err != nil { return nil, err } @@ -464,3 +432,73 @@ func TestTCPListener_proxyProtocol(t *testing.T) { }) } } + +// TestTCPListener_proxyProtocol_keepAcceptingOnInvalidUpstream ensures that the server side listener +// never returns an error from the listener.Accept method if the error is that the +// upstream proxy isn't trusted. If an error is returned, underlying Go HTTP native +// libraries may close down a server and stop listening. +func TestTCPListener_proxyProtocol_keepAcceptingOnInvalidUpstream(t *testing.T) { + timeout := 3 * time.Second + + // Configure proxy so we hit the deny unauthorized behavior. + header := &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + } + + var authAddrs []*sockaddr.SockAddrMarshaler + sockAddr, err := sockaddr.NewSockAddr("10.0.0.1/32") + require.NoError(t, err) + authAddrs = append(authAddrs, &sockaddr.SockAddrMarshaler{SockAddr: sockAddr}) + + ln, _, _, err := tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSDisable: true, + ProxyProtocolBehavior: "deny_unauthorized", + ProxyProtocolAuthorizedAddrs: authAddrs, + }, nil, cli.NewMockUi()) + require.NoError(t, err) + + // Kick off setting up server side, if we ever accept a connection send it out + // via a channel. + serverConnCh := make(chan net.Conn, 1) + go func() { + serverConn, err := ln.Accept() + // We shouldn't ever have an error if the problem was only that the upstream + // proxy wasn't trusted. + // An error would lead to the http.Serve closing the listener and giving up. 
+ require.NoError(t, err, "server side listener errored") + serverConnCh <- serverConn + }() + + // Now try to connect as the client. + d := net.Dialer{Timeout: timeout} + clientConn, err := d.Dial("tcp", ln.Addr().String()) + require.NoError(t, err) + defer clientConn.Close() + _, err = header.WriteTo(clientConn) + require.NoError(t, err) + + // Wait for the server to have accepted a connection, or we time out. + select { + case <-time.After(timeout): + // The server still hasn't accepted any valid client connection. + // Try to write another header using the same connection, which should have + // been closed by the server; we expect that this client-side connection was + // closed as it is untrusted. + _, err = header.WriteTo(clientConn) + require.Error(t, err, "reused a rejected connection without error") + case serverConn := <-serverConnCh: + require.NotNil(t, serverConn) + defer serverConn.Close() + } +} diff --git a/command/server/listener_test.go b/command/server/listener_test.go index f4d555c5c2ca..cffba69eed02 100644 --- a/command/server/listener_test.go +++ b/command/server/listener_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -9,6 +9,7 @@ import ( "io" "net" "testing" + "time" ) type testListenerConnFn func(net.Listener) (net.Conn, error) @@ -60,7 +61,11 @@ func testListenerImpl(t *testing.T, ln net.Listener, connFn testListenerConnFn, } } - server := <-serverCh + var server net.Conn + select { + case <-time.After(3 * time.Second): + case server = <-serverCh: + } if server == nil { if !expectError { diff --git a/command/server/listener_unix.go b/command/server/listener_unix.go index d5ea772eb476..35306d166699 100644 --- a/command/server/listener_unix.go +++ b/command/server/listener_unix.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -7,10 +7,10 @@ import ( "io" "net" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" - "github.com/mitchellh/cli" ) func unixListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) { diff --git a/command/server/listener_unix_test.go b/command/server/listener_unix_test.go index 91eaf121ccde..72f21bb471cd 100644 --- a/command/server/listener_unix_test.go +++ b/command/server/listener_unix_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -8,8 +8,8 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/cli" ) func TestUnixListener(t *testing.T) { diff --git a/command/server/server_seal_transit_acc_test.go b/command/server/server_seal_transit_acc_test.go index 439dfa58a6c0..45e4e9b5165a 100644 --- a/command/server/server_seal_transit_acc_test.go +++ b/command/server/server_seal_transit_acc_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
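TestTCPListener_proxyProtocol_keepAcceptingOnInvalidUpstream above pins down an important property: rejecting an untrusted PROXY header must not surface as an error from Accept, because net/http would then stop serving; Vault's real wiring goes through helper/proxyutil and go-proxyproto. A hypothetical, stripped-down wrapper showing the same drop-and-keep-accepting shape, not the actual implementation:

```go
package main

import (
	"fmt"
	"net"
)

// filteringListener drops connections from untrusted sources instead of
// returning an error from Accept, so an http.Server keeps serving.
type filteringListener struct {
	net.Listener
	trusted func(net.Addr) bool
}

func (l *filteringListener) Accept() (net.Conn, error) {
	for {
		conn, err := l.Listener.Accept()
		if err != nil {
			return nil, err // real transport errors still propagate
		}
		if !l.trusted(conn.RemoteAddr()) {
			conn.Close() // reject the untrusted client and keep listening
			continue
		}
		return conn, nil
	}
}

func main() {
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln := &filteringListener{
		Listener: inner,
		trusted:  func(addr net.Addr) bool { return true }, // allow-all for the demo
	}
	defer ln.Close()
	fmt.Println("accepting on", ln.Addr())
}
```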
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -14,8 +14,8 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/docker" ) func TestTransitWrapper_Lifecycle(t *testing.T) { diff --git a/command/server/server_stubs_oss.go b/command/server/server_stubs_oss.go new file mode 100644 index 000000000000..6426318df2a6 --- /dev/null +++ b/command/server/server_stubs_oss.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +import "github.com/hashicorp/vault/internalshared/configutil" + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entValidateConfig(_ *Config, _ string) []configutil.ConfigError { + return nil +} diff --git a/command/server/test-fixtures/config-dir/baz.hcl b/command/server/test-fixtures/config-dir/baz.hcl index 171a07dd7a99..3f2e01d58dd6 100644 --- a/command/server/test-fixtures/config-dir/baz.hcl +++ b/command/server/test-fixtures/config-dir/baz.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 telemetry { statsd_address = "baz" diff --git a/command/server/test-fixtures/config-dir/foo.hcl b/command/server/test-fixtures/config-dir/foo.hcl index 0ef439a8037e..2731eb55191f 100644 --- a/command/server/test-fixtures/config-dir/foo.hcl +++ b/command/server/test-fixtures/config-dir/foo.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config.hcl b/command/server/test-fixtures/config.hcl index 2a53289bf4c5..7750e5e6565f 100644 --- a/command/server/test-fixtures/config.hcl +++ b/command/server/test-fixtures/config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true @@ -51,4 +51,6 @@ disable_sealwrap = true disable_printable_check = true enable_response_header_hostname = true enable_response_header_raft_node_id = true -license_path = "/path/to/license" \ No newline at end of file +license_path = "/path/to/license" +plugin_directory = "/path/to/plugins" +plugin_tmpdir = "/tmp/plugins" \ No newline at end of file diff --git a/command/server/test-fixtures/config2.hcl b/command/server/test-fixtures/config2.hcl index 4d9cdf7cba1a..0e383fb25910 100644 --- a/command/server/test-fixtures/config2.hcl +++ b/command/server/test-fixtures/config2.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config3.hcl b/command/server/test-fixtures/config3.hcl index 96b93318ffda..587698b35e9e 100644 --- a/command/server/test-fixtures/config3.hcl +++ b/command/server/test-fixtures/config3.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true @@ -12,6 +12,8 @@ cluster_addr = "top_level_cluster_addr" listener "tcp" { address = "127.0.0.1:443" + chroot_namespace="admin/" + disable_request_limiter = false } backend "consul" { @@ -55,3 +57,4 @@ pid_file = "./pidfile" raw_storage_endpoint = true disable_sealwrap = true disable_sentinel_trace = true +administrative_namespace_path = "admin/" diff --git a/command/server/test-fixtures/config4.hcl b/command/server/test-fixtures/config4.hcl index be49453da03f..69c767fd6973 100644 --- a/command/server/test-fixtures/config4.hcl +++ b/command/server/test-fixtures/config4.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config5.hcl b/command/server/test-fixtures/config5.hcl index 3f5b2460822f..5fc5935953b4 100644 --- a/command/server/test-fixtures/config5.hcl +++ b/command/server/test-fixtures/config5.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_bad_https_storage.hcl b/command/server/test-fixtures/config_bad_https_storage.hcl index b53673c35395..41b78ba57431 100644 --- a/command/server/test-fixtures/config_bad_https_storage.hcl +++ b/command/server/test-fixtures/config_bad_https_storage.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_custom_response_headers_1.hcl b/command/server/test-fixtures/config_custom_response_headers_1.hcl index bc458da53a25..b12f2059b822 100644 --- a/command/server/test-fixtures/config_custom_response_headers_1.hcl +++ b/command/server/test-fixtures/config_custom_response_headers_1.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "inmem" {} listener "tcp" { diff --git a/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl b/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl index 9ae5b408d647..99c62b537d95 100644 --- a/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl +++ b/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "inmem" {} listener "tcp" { diff --git a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl index 9e1ac5e10bd6..264c6dca701b 100644 --- a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl +++ b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_diagnose_ok.hcl b/command/server/test-fixtures/config_diagnose_ok.hcl index 0b903ee6e0ee..5e1986762828 100644 --- a/command/server/test-fixtures/config_diagnose_ok.hcl +++ b/command/server/test-fixtures/config_diagnose_ok.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true @@ -45,3 +45,4 @@ pid_file = "./pidfile" raw_storage_endpoint = true disable_sealwrap = true disable_printable_check = true +enable_multiseal = true \ No newline at end of file diff --git a/command/server/test-fixtures/config_diagnose_ok_singleseal.hcl b/command/server/test-fixtures/config_diagnose_ok_singleseal.hcl new file mode 100644 index 000000000000..761d87e7b2c6 --- /dev/null +++ b/command/server/test-fixtures/config_diagnose_ok_singleseal.hcl @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +backend "consul" { + address = "127.0.0.1:1025" +} + +ha_backend "consul" { + address = "127.0.0.1:8500" + bar = "baz" + advertise_addr = "https://127.0.0.1:8500" + disable_clustering = "true" +} + +service_registration "consul" { + address = "127.0.0.1:8500" + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/config_raft.hcl b/command/server/test-fixtures/config_raft.hcl index 7cb585b39b5c..9563d011d1f6 100644 --- a/command/server/test-fixtures/config_raft.hcl +++ b/command/server/test-fixtures/config_raft.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_seals.hcl b/command/server/test-fixtures/config_seals.hcl index 6fdd13381843..0761ff19ba7d 100644 --- a/command/server/test-fixtures/config_seals.hcl +++ b/command/server/test-fixtures/config_seals.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 listener "tcp" { address = "127.0.0.1:443" diff --git a/command/server/test-fixtures/config_small.hcl b/command/server/test-fixtures/config_small.hcl index a8e3c7a605c6..982162f98a61 100644 --- a/command/server/test-fixtures/config_small.hcl +++ b/command/server/test-fixtures/config_small.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "raft" { path = "/path/to/raft" diff --git a/command/server/test-fixtures/config_with_valid_admin_ns.hcl b/command/server/test-fixtures/config_with_valid_admin_ns.hcl new file mode 100644 index 000000000000..af8630612f14 --- /dev/null +++ b/command/server/test-fixtures/config_with_valid_admin_ns.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +storage "raft" { + path = "/path/to/raft" + node_id = "raft_node_1" +} +listener "tcp" { + address = "127.0.0.1:8200" + tls_cert_file = "/path/to/cert.pem" + tls_key_file = "/path/to/key.key" +} +seal "awskms" { + kms_key_id = "alias/kms-unseal-key" +} +service_registration "consul" { + address = "127.0.0.1:8500" +} +administrative_namespace_path = "admin/" \ No newline at end of file diff --git a/command/server/test-fixtures/config_with_valid_admin_ns.json b/command/server/test-fixtures/config_with_valid_admin_ns.json new file mode 100644 index 000000000000..9f6041381b09 --- /dev/null +++ b/command/server/test-fixtures/config_with_valid_admin_ns.json @@ -0,0 +1,28 @@ +{ + "listener": { + "tcp": { + "address": "0.0.0.0:8200", + "tls_cert_file": "/path/to/cert.pem", + "tls_key_file": "/path/to/key.key" + } + }, + "seal": { + "awskms": { + "kms_key_id": "alias/kms-unseal-key" + } + }, + "storage": { + "raft": { + "path": "/path/to/raft", + "node_id": "raft_node_1" + } + }, + "cluster_addr": "http://127.0.0.1:8201", + "api_addr": "http://127.0.0.1:8200", + "service_registration": { + "consul": { + "address": "127.0.0.1:8500" + } + }, + "administrative_namespace_path": "admin/" +} \ No newline at end of file diff --git a/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl b/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl index bc5a71e13a58..8019194148a5 100644 --- a/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl +++ b/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry1.hcl b/command/server/test-fixtures/diagnose_bad_telemetry1.hcl index a634b162ebc5..815b671df7dd 100644 --- a/command/server/test-fixtures/diagnose_bad_telemetry1.hcl +++ b/command/server/test-fixtures/diagnose_bad_telemetry1.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry2.hcl b/command/server/test-fixtures/diagnose_bad_telemetry2.hcl index afb195d55383..090a7ff6e3e3 100644 --- a/command/server/test-fixtures/diagnose_bad_telemetry2.hcl +++ b/command/server/test-fixtures/diagnose_bad_telemetry2.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry3.hcl b/command/server/test-fixtures/diagnose_bad_telemetry3.hcl index 422351febc33..0a41c80fec68 100644 --- a/command/server/test-fixtures/diagnose_bad_telemetry3.hcl +++ b/command/server/test-fixtures/diagnose_bad_telemetry3.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl index cda9e2a2da93..905345301179 100644 --- a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl +++ b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl b/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl index e28c1bcb1ae4..eaf3660d5975 100644 --- a/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl +++ b/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "raft" { path = "/path/to/raft/data" diff --git a/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl b/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl index 0c0edcfce22b..a7007d57313a 100644 --- a/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl +++ b/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true @@ -57,3 +57,4 @@ pid_file = "./pidfile" raw_storage_endpoint = true disable_sealwrap = true disable_printable_check = true +enable_multiseal = true \ No newline at end of file diff --git a/command/server/test-fixtures/hcp_link_config.hcl b/command/server/test-fixtures/hcp_link_config.hcl index 1a909e3b48a4..bffbe83ae607 100644 --- a/command/server/test-fixtures/hcp_link_config.hcl +++ b/command/server/test-fixtures/hcp_link_config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "inmem" {} listener "tcp" { diff --git a/command/server/test-fixtures/nostore_config.hcl b/command/server/test-fixtures/nostore_config.hcl index a80e385b3d6c..306ef7c9cacd 100644 --- a/command/server/test-fixtures/nostore_config.hcl +++ b/command/server/test-fixtures/nostore_config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/raft_retry_join.hcl b/command/server/test-fixtures/raft_retry_join.hcl index 6f7fe9e4771c..844dd744e40c 100644 --- a/command/server/test-fixtures/raft_retry_join.hcl +++ b/command/server/test-fixtures/raft_retry_join.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "raft" { path = "/storage/path/raft" diff --git a/command/server/test-fixtures/telemetry/filter_default_override.hcl b/command/server/test-fixtures/telemetry/filter_default_override.hcl index 4fc70e9ebf09..d3d540715ee5 100644 --- a/command/server/test-fixtures/telemetry/filter_default_override.hcl +++ b/command/server/test-fixtures/telemetry/filter_default_override.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_mlock = true ui = true diff --git a/command/server/test-fixtures/telemetry/rollback_mount_point.hcl b/command/server/test-fixtures/telemetry/rollback_mount_point.hcl new file mode 100644 index 000000000000..5aa5a287c8f8 --- /dev/null +++ b/command/server/test-fixtures/telemetry/rollback_mount_point.hcl @@ -0,0 +1,9 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +disable_mlock = true +ui = true + +telemetry { + add_mount_point_rollback_metrics = true +} \ No newline at end of file diff --git a/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl b/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl index 055f12259e46..a40e392c0921 100644 --- a/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl +++ b/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_mlock = true ui = true diff --git a/command/server/test-fixtures/tls_config_ok.hcl b/command/server/test-fixtures/tls_config_ok.hcl index 4cbd4fa4644c..02a2733d4138 100644 --- a/command/server/test-fixtures/tls_config_ok.hcl +++ b/command/server/test-fixtures/tls_config_ok.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/unauth_in_flight_access.hcl b/command/server/test-fixtures/unauth_in_flight_access.hcl index c191f139b35b..bb04d3d29ecb 100644 --- a/command/server/test-fixtures/unauth_in_flight_access.hcl +++ b/command/server/test-fixtures/unauth_in_flight_access.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 storage "inmem" {} listener "tcp" { diff --git a/command/server/tls_util.go b/command/server/tls_util.go index a038c2ae6290..cd07dde92758 100644 --- a/command/server/tls_util.go +++ b/command/server/tls_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package server @@ -27,8 +27,8 @@ type CaCert struct { Signer crypto.Signer } -// GenerateCert creates a new leaf cert from provided CA template and signer -func GenerateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer) (string, string, error) { +// generateCert creates a new leaf cert from provided CA template and signer +func generateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer, extraSANs []string) (string, string, error) { // Create the private key signer, keyPEM, err := privateKey() if err != nil { @@ -80,6 +80,13 @@ func GenerateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer) (str if !foundHostname { template.DNSNames = append(template.DNSNames, hostname) } + for _, san := range extraSANs { + if ip := net.ParseIP(san); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, san) + } + } bs, err := x509.CreateCertificate( rand.Reader, &template, caCertTemplate, signer.Public(), caSigner) diff --git a/command/server/tls_util_test.go b/command/server/tls_util_test.go new file mode 100644 index 000000000000..acb010d4109b --- /dev/null +++ b/command/server/tls_util_test.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package server + +import ( + "crypto/x509" + "encoding/pem" + "testing" + + "github.com/hashicorp/go-secure-stdlib/strutil" +) + +// TestGenerateCertExtraSans ensures the implementation backing the flag +// -dev-tls-san populates alternate DNS and IP address names in the generated +// certificate as expected. 
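Editor's note: the new `extraSANs` handling in `generateCert` above hinges on a single rule: any value `net.ParseIP` accepts becomes an IP SAN, everything else a DNS SAN. A minimal, self-contained sketch of that classification (the helper name `classifySANs` is ours, not Vault's):

```go
package main

import (
	"fmt"
	"net"
)

// classifySANs mirrors the rule used by generateCert: values that parse as
// IP addresses become IP SANs, everything else becomes a DNS SAN.
func classifySANs(sans []string) (ips []net.IP, dnsNames []string) {
	for _, san := range sans {
		if ip := net.ParseIP(san); ip != nil {
			ips = append(ips, ip)
		} else {
			dnsNames = append(dnsNames, san)
		}
	}
	return ips, dnsNames
}

func main() {
	ips, dns := classifySANs([]string{"bar", "0.0.0.0", "::1"})
	fmt.Println(ips, dns) // [0.0.0.0 ::1] [bar]
}
```

The test below exercises exactly this split: pure DNS inputs, pure IP inputs, and a mixed list.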
+func TestGenerateCertExtraSans(t *testing.T) { + ca, err := GenerateCA() + if err != nil { + t.Fatal(err) + } + + for name, tc := range map[string]struct { + extraSans []string + expectedDNSNames []string + expectedIPAddresses []string + }{ + "empty": {}, + "DNS names": { + extraSans: []string{"foo", "foo.bar"}, + expectedDNSNames: []string{"foo", "foo.bar"}, + }, + "IP addresses": { + extraSans: []string{"0.0.0.0", "::1"}, + expectedIPAddresses: []string{"0.0.0.0", "::1"}, + }, + "mixed": { + extraSans: []string{"bar", "0.0.0.0", "::1"}, + expectedDNSNames: []string{"bar"}, + expectedIPAddresses: []string{"0.0.0.0", "::1"}, + }, + } { + t.Run(name, func(t *testing.T) { + certStr, _, err := generateCert(ca.Template, ca.Signer, tc.extraSans) + if err != nil { + t.Fatal(err) + } + + block, _ := pem.Decode([]byte(certStr)) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + + expectedDNSNamesLen := len(tc.expectedDNSNames) + 5 + if len(cert.DNSNames) != expectedDNSNamesLen { + t.Errorf("Wrong number of DNS names, expected %d but got %v", expectedDNSNamesLen, cert.DNSNames) + } + expectedIPAddrLen := len(tc.expectedIPAddresses) + 1 + if len(cert.IPAddresses) != expectedIPAddrLen { + t.Errorf("Wrong number of IP addresses, expected %d but got %v", expectedIPAddrLen, cert.IPAddresses) + } + + for _, expected := range tc.expectedDNSNames { + if !strutil.StrListContains(cert.DNSNames, expected) { + t.Errorf("Missing DNS name %s", expected) + } + } + for _, expected := range tc.expectedIPAddresses { + var found bool + for _, ip := range cert.IPAddresses { + if ip.String() == expected { + found = true + break + } + } + if !found { + t.Errorf("Missing IP address %s", expected) + } + } + }) + } +} diff --git a/command/server_noprofile.go b/command/server_noprofile.go index 1cf516a1aaf3..07a9a3902e2a 100644 --- a/command/server_noprofile.go +++ b/command/server_noprofile.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !memprofiler diff --git a/command/server_profile.go b/command/server_profile.go index 42f07d2e8519..fe5cbf7087dc 100644 --- a/command/server_profile.go +++ b/command/server_profile.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build memprofiler diff --git a/command/server_test.go b/command/server_test.go index 8e71543ea24f..9a1328ad7eb9 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !race && !hsm && !fips_140_3 @@ -11,6 +11,7 @@ package command import ( + "context" "crypto/tls" "crypto/x509" "fmt" @@ -21,9 +22,14 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" physInmem "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func init() { @@ -89,29 +95,6 @@ cloud { ` ) -func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &ServerCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - SigUSR2Ch: MakeSigUSR2Ch(), - PhysicalBackends: map[string]physical.Factory{ - "inmem": physInmem.NewInmem, - "inmem_ha": physInmem.NewInmemHA, - }, - - // These prevent us from random sleep guessing... - startedCh: make(chan struct{}, 5), - reloadedCh: make(chan struct{}, 5), - licenseReloadedCh: make(chan error), - } -} - func TestServer_ReloadListener(t *testing.T) { t.Parallel() @@ -285,6 +268,13 @@ func TestServer(t *testing.T) { 0, []string{"-test-verify-only"}, }, + { + "recovery_mode", + testBaseHCL(t, "") + inmemHCL, + "", + 0, + []string{"-test-verify-only", "-recovery"}, + }, } for _, tc := range cases { @@ -294,26 +284,142 @@ func TestServer(t *testing.T) { t.Parallel() ui, cmd := testServerCommand(t) - f, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - f.WriteString(tc.contents) - f.Close() - defer os.Remove(f.Name()) - args := append(tc.args, "-config", f.Name()) + f, err := os.CreateTemp(t.TempDir(), "") + require.NoErrorf(t, err, "error creating temp dir: %v", err) + + _, err = f.WriteString(tc.contents) + require.NoErrorf(t, err, "cannot write temp file contents") + + err = f.Close() + require.NoErrorf(t, err, "unable to close temp file") + args := append(tc.args, "-config", f.Name()) code := cmd.Run(args) output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, tc.code, code, "expected %d to be %d: %s", code, tc.code, output) + require.Contains(t, output, tc.exp, "expected %q to contain %q", output, tc.exp) + }) + } +} - if code != tc.code { - t.Errorf("expected %d to be %d: %s", code, tc.code, output) - } +// TestServer_DevTLS verifies that a vault server starts up correctly with the -dev-tls flag +func TestServer_DevTLS(t *testing.T) { + ui, cmd := testServerCommand(t) + args := []string{"-dev-tls", "-dev-listen-address=127.0.0.1:0", "-test-server-config"} + retCode := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, 0, retCode, output) + require.Contains(t, output, `tls: "enabled"`) +} + +// TestConfigureDevTLS verifies the various logic paths that flow through the +// configureDevTLS function. 
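Editor's note: once `vault server -dev-tls` is up, a client needs the generated CA certificate to connect. A hedged sketch of that client side, using the public `api` package (the `vault-ca.pem` path is illustrative; `-dev-tls` writes the CA into the directory given by `-dev-tls-cert-dir`, or a temporary directory by default, and prints the location at startup):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "https://127.0.0.1:8200"
	// vault-ca.pem is a placeholder for the CA file the dev server generates.
	if err := cfg.ConfigureTLS(&api.TLSConfig{CACert: "vault-ca.pem"}); err != nil {
		log.Fatal(err)
	}
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	health, err := client.Sys().Health()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("initialized:", health.Initialized)
}
```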
+func TestConfigureDevTLS(t *testing.T) { + testcases := []struct { + ServerCommand *ServerCommand + DeferFuncNotNil bool + ConfigNotNil bool + TLSDisable bool + CertPathEmpty bool + ErrNotNil bool + TestDescription string + }{ + { + ServerCommand: &ServerCommand{ + flagDevTLS: false, + }, + ConfigNotNil: true, + TLSDisable: true, + CertPathEmpty: true, + ErrNotNil: false, + TestDescription: "flagDev is false, nothing will be configured", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "", + }, + DeferFuncNotNil: true, + ConfigNotNil: true, + ErrNotNil: false, + TestDescription: "flagDevTLSCertDir is empty", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "@/#", + }, + CertPathEmpty: true, + ErrNotNil: true, + TestDescription: "flagDevTLSCertDir is set to something invalid", + }, + } - if !strings.Contains(output, tc.exp) { - t.Fatalf("expected %q to contain %q", output, tc.exp) + for _, testcase := range testcases { + fun, cfg, certPath, err := configureDevTLS(testcase.ServerCommand) + if fun != nil { + // If a function is returned, call it right away to clean up + // files created in the temporary directory before anything else has + // a chance to fail this test. + fun() + } + + t.Run(testcase.TestDescription, func(t *testing.T) { + assert.Equal(t, testcase.DeferFuncNotNil, (fun != nil)) + assert.Equal(t, testcase.ConfigNotNil, cfg != nil) + if testcase.ConfigNotNil && cfg != nil { + assert.True(t, len(cfg.Listeners) > 0) + assert.Equal(t, testcase.TLSDisable, cfg.Listeners[0].TLSDisable) + } + assert.Equal(t, testcase.CertPathEmpty, len(certPath) == 0) + if testcase.ErrNotNil { + assert.Error(t, err) + } else { + assert.NoError(t, err) } }) } } + +func TestConfigureSeals(t *testing.T) { + testConfig := server.Config{SharedConfig: &configutil.SharedConfig{}} + _, testCommand := testServerCommand(t) + + logger := corehelpers.NewTestLogger(t) + backend, err := physInmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + testCommand.logger = logger + + setSealResponse, _, err := testCommand.configureSeals(context.Background(), &testConfig, backend, []string{}, map[string]string{}) + if err != nil { + t.Fatal(err) + } + + if len(setSealResponse.barrierSeal.GetAccess().GetAllSealWrappersByPriority()) != 1 { + t.Fatalf("expected 1 seal, got %d", len(setSealResponse.barrierSeal.GetAccess().GetAllSealWrappersByPriority())) + } + + if setSealResponse.barrierSeal.BarrierSealConfigType() != vault.SealConfigTypeShamir { + t.Fatalf("expected shamir seal, got seal type %s", setSealResponse.barrierSeal.BarrierSealConfigType()) + } +} + +func TestReloadSeals(t *testing.T) { + testCore := vault.TestCoreWithSeal(t, vault.NewTestSeal(t, &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedShamirRoot}), false) + _, testCommand := testServerCommand(t) + testConfig := server.Config{SharedConfig: &configutil.SharedConfig{}} + + testCommand.logger = corehelpers.NewTestLogger(t) + ctx := context.Background() + reloaded, err := testCommand.reloadSealsOnSigHup(ctx, testCore, &testConfig) + require.NoError(t, err) + require.False(t, reloaded, "reloadSeals does not support Shamir seals") + + testConfig = server.Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{{Disabled: true}}}} + reloaded, err = testCommand.reloadSealsOnSigHup(ctx, testCore, &testConfig) + require.NoError(t, err) + require.False(t, reloaded, "reloadSeals does not support Shamir seals") +} diff --git a/command/server_util.go 
b/command/server_util.go index 7bf3196fc8d5..c0227ea28ba7 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -1,27 +1,53 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( - "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/vault" -) + "testing" -var ( - adjustCoreConfigForEnt = adjustCoreConfigForEntNoop - storageSupportedForEnt = checkStorageTypeForEntNoop + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/sdk/physical" + physInmem "github.com/hashicorp/vault/sdk/physical/inmem" ) -func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) { +func TestServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { + tb.Helper() + return testServerCommand(tb) +} + +func (c *ServerCommand) StartedCh() chan struct{} { + return c.startedCh } -var getFIPSInfoKey = getFIPSInfoKeyNoop +func (c *ServerCommand) ReloadedCh() chan struct{} { + return c.reloadedCh +} -func getFIPSInfoKeyNoop() string { - return "" +func (c *ServerCommand) LicenseReloadedCh() chan error { + return c.licenseReloadedCh } -func checkStorageTypeForEntNoop(coreConfig *vault.CoreConfig) bool { - return true +func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ServerCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + PhysicalBackends: map[string]physical.Factory{ + "inmem": physInmem.NewInmem, + "inmem_ha": physInmem.NewInmemHA, + "inmem_transactional": physInmem.NewTransactionalInmem, + }, + + // These prevent us from random sleep guessing... + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + licenseReloadedCh: make(chan error), + } } diff --git a/command/ssh.go b/command/ssh.go index 6553a2b0a45a..cd39ad45782d 100644 --- a/command/ssh.go +++ b/command/ssh.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -14,9 +14,9 @@ import ( "strings" "syscall" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/ssh" - "github.com/mitchellh/cli" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" "github.com/posener/complete" @@ -241,7 +241,7 @@ type SSHCredentialResp struct { func (c *SSHCommand) Run(args []string) int { f := c.Flags() - if err := f.Parse(args); err != nil { + if err := f.Parse(args, DisableDisplayFlagWarning(true)); err != nil { c.UI.Error(err.Error()) return 1 } diff --git a/command/ssh_test.go b/command/ssh_test.go index 837865a9ae2f..b6dfd563d242 100644 --- a/command/ssh_test.go +++ b/command/ssh_test.go @@ -1,12 +1,13 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( + "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testSSHCommand(tb testing.TB) (*cli.MockUi, *SSHCommand) { @@ -217,3 +218,18 @@ func TestIsSingleSSHArg(t *testing.T) { }) } } + +// TestSSHCommandOmitFlagWarning checks if flags warning messages are printed +// in the output of the CLI command or not. If so, it will fail. 
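Editor's note: the `ssh.go` hunk above changes `f.Parse(args)` to `f.Parse(args, DisableDisplayFlagWarning(true))`, which implies a functional-option shape for flag parsing. A sketch of that pattern under stated assumptions; every name here is illustrative, not Vault's actual types:

```go
package main

import "fmt"

// parseOptions collects the knobs Parse accepts. The call
// DisableDisplayFlagWarning(true) in the diff suggests this shape.
type parseOptions struct {
	disableFlagWarning bool
}

type ParseOption func(*parseOptions)

func DisableFlagWarning(disable bool) ParseOption {
	return func(o *parseOptions) { o.disableFlagWarning = disable }
}

func Parse(args []string, opts ...ParseOption) {
	var o parseOptions
	for _, opt := range opts {
		opt(&o)
	}
	if !o.disableFlagWarning {
		fmt.Println("warning: flags after positional arguments will not be parsed as flags")
	}
	fmt.Println("parsed:", args)
}

func main() {
	Parse([]string{"user@host"}, DisableFlagWarning(true))
}
```

The `TestSSHCommandOmitFlagWarning` test below verifies the warning is suppressed for `vault ssh`, since SSH commonly passes flags through to the remote command after the positional target.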
+func TestSSHCommandOmitFlagWarning(t *testing.T) { + t.Parallel() + + ui, cmd := testSSHCommand(t) + + _ = cmd.Run([]string{"-mode", "ca", "-role", "otp_key_role", "user@1.2.3.4", "-extraFlag", "bug"}) + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if strings.Contains(combined, "Command flags must be provided before positional arguments. The following arguments will not be parsed as flags") { + t.Fatalf("ssh command displayed flag warnings") + } +} diff --git a/command/status.go b/command/status.go index 0b7c61974256..9f7c7010f86d 100644 --- a/command/status.go +++ b/command/status.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/status_test.go b/command/status_test.go index 5731945ae78f..47a2803d66cb 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testStatusCommand(tb testing.TB) (*cli.MockUi, *StatusCommand) { diff --git a/command/test-backend/main.go b/command/test-backend/main.go new file mode 100644 index 000000000000..69a6fcd0aa5f --- /dev/null +++ b/command/test-backend/main.go @@ -0,0 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package test_backend diff --git a/command/test-fixtures/config.hcl b/command/test-fixtures/config.hcl index 164acd29cc80..e15c243e41dc 100644 --- a/command/test-fixtures/config.hcl +++ b/command/test-fixtures/config.hcl @@ -1,4 +1,4 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 -token_helper = "foo" +token_helper = "foo" \ No newline at end of file diff --git a/command/test-fixtures/policy.hcl b/command/test-fixtures/policy.hcl index 267fc5cecdc5..6160bf780274 100644 --- a/command/test-fixtures/policy.hcl +++ b/command/test-fixtures/policy.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 path "secret/foo" { policy = "write" diff --git a/command/token.go b/command/token.go index 7b15275283bc..eb430b48daa0 100644 --- a/command/token.go +++ b/command/token.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*TokenCommand)(nil) diff --git a/command/token/helper_testing.go b/command/token/helper_testing.go index e95ff3558047..a536c3c3cc52 100644 --- a/command/token/helper_testing.go +++ b/command/token/helper_testing.go @@ -1,13 +1,15 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package token import ( "sync" + + "github.com/hashicorp/vault/api/tokenhelper" ) -var _ TokenHelper = (*TestingTokenHelper)(nil) +var _ tokenhelper.TokenHelper = (*TestingTokenHelper)(nil) // TestingTokenHelper implements token.TokenHelper which runs entirely // in-memory. This should not be used outside of testing. 
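Editor's note: the deleted `command/token/testing.go` below exercised the `Store`/`Get`/`Erase` contract of a token helper. A toy in-memory implementation of that contract, for orientation (the `Path` method is included on the assumption it is also part of `tokenhelper.TokenHelper`):

```go
package main

import "fmt"

// memoryTokenHelper is a minimal in-memory token helper satisfying the same
// contract the deleted token/testing.go tested. Not for production use.
type memoryTokenHelper struct {
	token string
}

func (h *memoryTokenHelper) Path() string             { return "" }
func (h *memoryTokenHelper) Store(token string) error { h.token = token; return nil }
func (h *memoryTokenHelper) Get() (string, error)     { return h.token, nil }
func (h *memoryTokenHelper) Erase() error             { h.token = ""; return nil }

func main() {
	h := &memoryTokenHelper{}
	_ = h.Store("foo")
	v, _ := h.Get()
	fmt.Println(v) // foo
	_ = h.Erase()
	v, _ = h.Get()
	fmt.Printf("%q\n", v) // ""
}
```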
diff --git a/command/token/testing.go b/command/token/testing.go deleted file mode 100644 index 85da0840c84a..000000000000 --- a/command/token/testing.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package token - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/mitchellh/cli" -) - -// Test is a public function that can be used in other tests to -// test that a helper is functioning properly. -func Test(t *testing.T, h TokenHelper) { - if err := h.Store("foo"); err != nil { - t.Fatalf("err: %s", err) - } - - v, err := h.Get() - if err != nil { - t.Fatalf("err: %s", err) - } - - if v != "foo" { - t.Fatalf("bad: %#v", v) - } - - if err := h.Erase(); err != nil { - t.Fatalf("err: %s", err) - } - - v, err = h.Get() - if err != nil { - t.Fatalf("err: %s", err) - } - - if v != "" { - t.Fatalf("bad: %#v", v) - } -} - -// TestProcess is used to re-execute this test in order to use it as the -// helper process. For this to work, the TestExternalTokenHelperProcess function must -// exist. -func TestProcess(t *testing.T, s ...string) { - h := &ExternalTokenHelper{BinaryPath: TestProcessPath(t, s...)} - Test(t, h) -} - -// TestProcessPath returns the path to the test process. -func TestProcessPath(t *testing.T, s ...string) string { - cs := []string{"-test.run=TestExternalTokenHelperProcess", "--", "GO_WANT_HELPER_PROCESS"} - cs = append(cs, s...) - return fmt.Sprintf( - "%s %s", - os.Args[0], - strings.Join(cs, " ")) -} - -// TestExternalTokenHelperProcessCLI can be called to implement TestExternalTokenHelperProcess -// for TestProcess that just executes a CLI command. -func TestExternalTokenHelperProcessCLI(t *testing.T, cmd cli.Command) { - args := os.Args - for len(args) > 0 { - if args[0] == "--" { - args = args[1:] - break - } - - args = args[1:] - } - if len(args) == 0 || args[0] != "GO_WANT_HELPER_PROCESS" { - return - } - args = args[1:] - - os.Exit(cmd.Run(args)) -} diff --git a/command/token_capabilities.go b/command/token_capabilities.go index f7e359c06f13..239793658bad 100644 --- a/command/token_capabilities.go +++ b/command/token_capabilities.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "sort" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -19,6 +19,8 @@ var ( type TokenCapabilitiesCommand struct { *BaseCommand + + flagAccessor bool } func (c *TokenCapabilitiesCommand) Synopsis() string { @@ -27,12 +29,15 @@ func (c *TokenCapabilitiesCommand) Synopsis() string { func (c *TokenCapabilitiesCommand) Help() string { helpText := ` -Usage: vault token capabilities [options] [TOKEN] PATH +Usage: vault token capabilities [options] [TOKEN | ACCESSOR] PATH - Fetches the capabilities of a token for a given path. If a TOKEN is provided - as an argument, the "/sys/capabilities" endpoint and permission is used. If - no TOKEN is provided, the "/sys/capabilities-self" endpoint and permission - is used with the locally authenticated token. + Fetches the capabilities of a token or accessor for a given path. If a TOKEN + is provided as an argument, the "/sys/capabilities" endpoint is used, which + returns the capabilities of the provided TOKEN. If an ACCESSOR is provided + as an argument along with the -accessor option, the "/sys/capabilities-accessor" + endpoint is used, which returns the capabilities of the token referenced by + ACCESSOR. 
If no TOKEN is provided, the "/sys/capabilities-self" endpoint + is used, which returns the capabilities of the locally authenticated token. List capabilities for the local token on the "secret/foo" path: @@ -42,6 +47,10 @@ Usage: vault token capabilities [options] [TOKEN] PATH $ vault token capabilities 96ddf4bc-d217-f3ba-f9bd-017055595017 cubbyhole/foo + List capabilities for a token on the "cubbyhole/foo" path via its accessor: + + $ vault token capabilities -accessor 9793c9b3-e04a-46f3-e7b8-748d7da248da cubbyhole/foo + For a full list of examples, please see the documentation. ` + c.Flags().Help() @@ -50,7 +59,20 @@ Usage: vault token capabilities [options] [TOKEN] PATH } func (c *TokenCapabilitiesCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "accessor", + Target: &c.flagAccessor, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Treat the argument as an accessor instead of a token.", + }) + + return set } func (c *TokenCapabilitiesCommand) AutocompleteArgs() complete.Predictor { @@ -72,13 +94,19 @@ func (c *TokenCapabilitiesCommand) Run(args []string) int { token := "" path := "" args = f.Args() - switch len(args) { - case 0: + switch { + case c.flagAccessor && len(args) < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments with -accessor (expected 2, got %d)", len(args))) + return 1 + case c.flagAccessor && len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments with -accessor (expected 2, got %d)", len(args))) + return 1 + case len(args) == 0: c.UI.Error("Not enough arguments (expected 1-2, got 0)") return 1 - case 1: + case len(args) == 1: path = args[0] - case 2: + case len(args) == 2: token, path = args[0], args[1] default: c.UI.Error(fmt.Sprintf("Too many arguments (expected 1-2, got %d)", len(args))) @@ -92,11 +120,15 @@ func (c *TokenCapabilitiesCommand) Run(args []string) int { } var capabilities []string - if token == "" { + switch { + case token == "": capabilities, err = client.Sys().CapabilitiesSelf(path) - } else { + case c.flagAccessor: + capabilities, err = client.Sys().CapabilitiesAccessor(token, path) + default: capabilities, err = client.Sys().Capabilities(token, path) } + if err != nil { c.UI.Error(fmt.Sprintf("Error listing capabilities: %s", err)) return 2 diff --git a/command/token_capabilities_test.go b/command/token_capabilities_test.go index 14b0f2765834..1588b14a330a 100644 --- a/command/token_capabilities_test.go +++ b/command/token_capabilities_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
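Editor's note: the new `-accessor` branch above ultimately calls `client.Sys().CapabilitiesAccessor(token, path)`. The programmatic equivalent of the CLI example, as a hedged sketch (the accessor value is illustrative; in the CLI flow it arrives as the first positional argument when `-accessor` is set):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Query capabilities via a token accessor rather than the token itself,
	// mirroring "vault token capabilities -accessor ACCESSOR PATH".
	caps, err := client.Sys().CapabilitiesAccessor(
		"9793c9b3-e04a-46f3-e7b8-748d7da248da", "cubbyhole/foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(caps) // e.g. [read]
}
```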
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testTokenCapabilitiesCommand(tb testing.TB) (*cli.MockUi, *TokenCapabilitiesCommand) { @@ -31,6 +31,24 @@ func TestTokenCapabilitiesCommand_Run(t *testing.T) { out string code int }{ + { + "accessor_no_args", + []string{"-accessor"}, + "Not enough arguments", + 1, + }, + { + "accessor_too_few_args", + []string{"-accessor", "abcd1234"}, + "Not enough arguments", + 1, + }, + { + "accessor_too_many_args", + []string{"-accessor", "abcd1234", "efgh5678", "ijkl9012"}, + "Too many arguments", + 1, + }, { "too_many_args", []string{"foo", "bar", "zip"}, @@ -103,6 +121,48 @@ func TestTokenCapabilitiesCommand_Run(t *testing.T) { } }) + t.Run("accessor", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + policy := `path "secret/foo" { capabilities = ["read"] }` + if err := client.Sys().PutPolicy("policy", policy); err != nil { + t.Error(err) + } + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"policy"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("missing auth data: %#v", secret) + } + accessor := secret.Auth.Accessor + + ui, cmd := testTokenCapabilitiesCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-accessor", + accessor, + "secret/foo", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "read" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + t.Run("local", func(t *testing.T) { t.Parallel() diff --git a/command/token_create.go b/command/token_create.go index 1efee5ebff06..3e49bb2ca72b 100644 --- a/command/token_create.go +++ b/command/token_create.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,8 +8,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/token_create_test.go b/command/token_create_test.go index b80eec5d7d2b..3acd2dd1474e 100644 --- a/command/token_create_test.go +++ b/command/token_create_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenCreateCommand(tb testing.TB) (*cli.MockUi, *TokenCreateCommand) { @@ -71,6 +71,12 @@ func TestTokenCreateCommand_Run(t *testing.T) { "not present in secret", 1, }, + { + "ttl", + []string{"-ttl", "1d", "-explicit-max-ttl", "2d"}, + "token", + 0, + }, } t.Run("validations", func(t *testing.T) { diff --git a/command/token_lookup.go b/command/token_lookup.go index 24161399a2c9..afb622372b1e 100644 --- a/command/token_lookup.go +++ b/command/token_lookup.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,8 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/token_lookup_test.go b/command/token_lookup_test.go index 3f5636527661..6a351f781c5a 100644 --- a/command/token_lookup_test.go +++ b/command/token_lookup_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenLookupCommand(tb testing.TB) (*cli.MockUi, *TokenLookupCommand) { diff --git a/command/token_renew.go b/command/token_renew.go index 7a61487b5a25..c354b4e6a506 100644 --- a/command/token_renew.go +++ b/command/token_renew.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -8,8 +8,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/token_renew_test.go b/command/token_renew_test.go index 29d9292018cf..4fc469995b05 100644 --- a/command/token_renew_test.go +++ b/command/token_renew_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenRenewCommand(tb testing.TB) (*cli.MockUi, *TokenRenewCommand) { diff --git a/command/token_revoke.go b/command/token_revoke.go index 48ccc27ac261..c9f6a2b7f22f 100644 --- a/command/token_revoke.go +++ b/command/token_revoke.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/token_revoke_test.go b/command/token_revoke_test.go index 6ff8898301a1..3cdf13d615e8 100644 --- a/command/token_revoke_test.go +++ b/command/token_revoke_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenRevokeCommand(tb testing.TB) (*cli.MockUi, *TokenRevokeCommand) { diff --git a/command/transform.go b/command/transform.go new file mode 100644 index 000000000000..46129cd32e29 --- /dev/null +++ b/command/transform.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" +) + +var _ cli.Command = (*TransformCommand)(nil) + +type TransformCommand struct { + *BaseCommand +} + +func (c *TransformCommand) Synopsis() string { + return "Interact with Vault's Transform Secrets Engine" +} + +func (c *TransformCommand) Help() string { + helpText := ` +Usage: vault transform [options] [args] + + This command has subcommands for interacting with Vault's Transform Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. 
+ + To import a key into a new FPE transformation: + + $ vault transform import transform/transformations/fpe/new-transformation @path/to/key \ + template=identifier \ + allowed_roles=physical-access + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *TransformCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/transform_import_key.go b/command/transform_import_key.go new file mode 100644 index 000000000000..d01100acea04 --- /dev/null +++ b/command/transform_import_key.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "errors" + "regexp" + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransformImportCommand)(nil) + _ cli.CommandAutocomplete = (*TransformImportCommand)(nil) + transformKeyPath = regexp.MustCompile("^(.*)/transformations/(fpe|tokenization)/([^/]*)$") +) + +type TransformImportCommand struct { + *BaseCommand +} + +func (c *TransformImportCommand) Synopsis() string { + return "Import a key into the Transform secrets engines." +} + +func (c *TransformImportCommand) Help() string { + helpText := ` +Usage: vault transform import PATH KEY [options...] + + Using the Transform key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new FPE or tokenization transformation whose API path is PATH. + + To import a new key version into an existing tokenization transformation, + use import_version. + + The remaining options after KEY (key=value style) are passed on to + Create/Update FPE Transformation or Create/Update Tokenization Transformation + API endpoints. + + For example: + $ vault transform import transform/transformations/tokenization/application-form @path/to/key \ + allowed_roles=legacy-system +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransformImportCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransformImportCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransformImportCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransformImportCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import", transformImportKeyPath, c.Flags(), args) +} + +func transformImportKeyPath(s string, operation string) (path string, apiPath string, err error) { + parts := transformKeyPath.FindStringSubmatch(s) + if len(parts) != 4 { + return "", "", errors.New("expected transform path and key name in the form :path:/transformations/fpe|tokenization/:name:") + } + path = parts[1] + transformation := parts[2] + keyName := parts[3] + apiPath = path + "/transformations/" + transformation + "/" + keyName + "/" + operation + + return path, apiPath, nil +} diff --git a/command/transform_import_key_version.go b/command/transform_import_key_version.go new file mode 100644 index 000000000000..61a6db45b674 --- /dev/null +++ b/command/transform_import_key_version.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. 
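Editor's note: `transformImportKeyPath` above decomposes the user-supplied path with the regexp `^(.*)/transformations/(fpe|tokenization)/([^/]*)$`. A standalone sketch of that decomposition, using the same pattern:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

var transformKeyPath = regexp.MustCompile("^(.*)/transformations/(fpe|tokenization)/([^/]*)$")

// splitTransformPath mirrors transformImportKeyPath: it pulls the mount
// path, transformation type, and key name out of the supplied path and
// rebuilds the API path for the given operation.
func splitTransformPath(s, operation string) (path, apiPath string, err error) {
	parts := transformKeyPath.FindStringSubmatch(s)
	if len(parts) != 4 {
		return "", "", errors.New("expected :path:/transformations/fpe|tokenization/:name:")
	}
	return parts[1], parts[1] + "/transformations/" + parts[2] + "/" + parts[3] + "/" + operation, nil
}

func main() {
	path, apiPath, _ := splitTransformPath("transform/transformations/fpe/new-transformation", "import")
	fmt.Println(path)    // transform
	fmt.Println(apiPath) // transform/transformations/fpe/new-transformation/import
}
```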
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransformImportVersionCommand)(nil) + _ cli.CommandAutocomplete = (*TransformImportVersionCommand)(nil) +) + +type TransformImportVersionCommand struct { + *BaseCommand +} + +func (c *TransformImportVersionCommand) Synopsis() string { + return "Import key material into a new key version in the Transform secrets engines." +} + +func (c *TransformImportVersionCommand) Help() string { + helpText := ` +Usage: vault transform import-version PATH KEY [...] + + Using the Transform key wrapping system, imports new key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into an existing tokenization transformation whose API path is PATH. + + The remaining options after KEY (key=value style) are passed on to + Create/Update Tokenization Transformation API endpoint. + + For example: + $ vault transform import-version transform/transformations/tokenization/application-form @path/to/new_version \ + allowed_roles=legacy-system +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransformImportVersionCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransformImportVersionCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransformImportVersionCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransformImportVersionCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import_version", transformImportKeyPath, c.Flags(), args) +} diff --git a/command/transit.go b/command/transit.go index 602bc198c997..9b988d7e3c70 100644 --- a/command/transit.go +++ b/command/transit.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*TransitCommand)(nil) @@ -27,7 +27,7 @@ Usage: vault transit [options] [args] Engine. Here are some simple examples, and more detailed examples are available in the subcommands or the documentation. - To import a key into the specified Transit or Transform mount: + To import a key into the specified Transit mount: $ vault transit import transit/keys/newly-imported @path/to/key type=rsa-2048 diff --git a/command/transit_import_key.go b/command/transit_import_key.go index 795545b69635..994bf14ede8a 100644 --- a/command/transit_import_key.go +++ b/command/transit_import_key.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -10,17 +10,16 @@ import ( "crypto/x509" "encoding/base64" "encoding/pem" + "errors" "fmt" "os" "regexp" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - - "github.com/google/tink/go/kwp/subtle" - - "github.com/mitchellh/cli" "github.com/posener/complete" + "github.com/tink-crypto/tink-go/v2/kwp/subtle" ) var ( @@ -34,18 +33,18 @@ type TransitImportCommand struct { } func (c *TransitImportCommand) Synopsis() string { - return "Import a key into the Transit or Transform secrets engines." + return "Import a key into the Transit secrets engines." } func (c *TransitImportCommand) Help() string { helpText := ` Usage: vault transit import PATH KEY [options...] 
- Using the Transit or Transform key wrapping system, imports key material from + Using the Transit key wrapping system, imports key material from the base64 encoded KEY (either directly on the CLI or via @path notation), into a new key whose API path is PATH. To import a new version into an existing key, use import_version. The remaining options after KEY (key=value - style) are passed on to the Transit or Transform create key endpoint. If your + style) are passed on to the Transit create key endpoint. If your system or device natively supports the RSA AES key wrap mechanism (such as the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly rather than this command. @@ -68,11 +67,25 @@ func (c *TransitImportCommand) AutocompleteFlags() complete.Flags { } func (c *TransitImportCommand) Run(args []string) int { - return importKey(c.BaseCommand, "import", c.Flags(), args) + return ImportKey(c.BaseCommand, "import", transitImportKeyPath, c.Flags(), args) } +func transitImportKeyPath(s string, operation string) (path string, apiPath string, err error) { + parts := keyPath.FindStringSubmatch(s) + if len(parts) != 3 { + return "", "", errors.New("expected transit path and key name in the form :path:/keys/:name:") + } + path = parts[1] + keyName := parts[2] + apiPath = path + "/keys/" + keyName + "/" + operation + + return path, apiPath, nil +} + +type ImportKeyFunc func(s string, operation string) (path string, apiPath string, err error) + // error codes: 1: user error, 2: internal computation error, 3: remote api call error -func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) int { +func ImportKey(c *BaseCommand, operation string, pathFunc ImportKeyFunc, flags *FlagSets, args []string) int { // Parse and validate the arguments. 
if err := flags.Parse(args); err != nil { c.UI.Error(err.Error()) @@ -96,14 +109,11 @@ func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) if err != nil { c.UI.Error(fmt.Sprintf("failed to generate ephemeral key: %v", err)) } - parts := keyPath.FindStringSubmatch(args[0]) - if len(parts) != 3 { - c.UI.Error("expected transit path and key name in the form :path:/keys/:name:") + path, apiPath, err := pathFunc(args[0], operation) + if err != nil { + c.UI.Error(err.Error()) return 1 } - path := parts[1] - keyName := parts[2] - keyMaterial := args[1] if keyMaterial[0] == '@' { keyMaterialBytes, err := os.ReadFile(keyMaterial[1:]) @@ -121,8 +131,8 @@ func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) return 1 } // Fetch the wrapping key - c.UI.Output("Retrieving transit wrapping key.") - wrappingKey, err := fetchWrappingKey(c, client, path) + c.UI.Output("Retrieving wrapping key.") + wrappingKey, err := fetchWrappingKey(client, path) if err != nil { c.UI.Error(fmt.Sprintf("failed to fetch wrapping key: %v", err)) return 3 @@ -138,11 +148,11 @@ func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) c.UI.Error(fmt.Sprintf("failure wrapping source key: %v", err)) return 2 } - c.UI.Output("Encrypting ephemeral key with transit wrapping key.") + c.UI.Output("Encrypting ephemeral key with wrapping key.") wrappedAESKey, err := rsa.EncryptOAEP( sha256.New(), rand.Reader, - wrappingKey.(*rsa.PublicKey), + wrappingKey, ephemeralAESKey, []byte{}, ) @@ -165,9 +175,10 @@ func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) data["ciphertext"] = importCiphertext - c.UI.Output("Submitting wrapped key to Vault transit.") + c.UI.Output("Submitting wrapped key.") // Finally, call import - _, err = client.Logical().Write(path+"/keys/"+keyName+"/"+operation, data) + + _, err = client.Logical().Write(apiPath, data) if err != nil { c.UI.Error(fmt.Sprintf("failed to call import: %v", err)) return 3 @@ -177,22 +188,29 @@ func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) } } -func fetchWrappingKey(c *BaseCommand, client *api.Client, path string) (any, error) { +func fetchWrappingKey(client *api.Client, path string) (*rsa.PublicKey, error) { resp, err := client.Logical().Read(path + "/wrapping_key") if err != nil { return nil, fmt.Errorf("error fetching wrapping key: %w", err) } if resp == nil { - return nil, fmt.Errorf("transit not mounted at %s: %v", path, err) + return nil, fmt.Errorf("no mount found at %s", path) } key, ok := resp.Data["public_key"] if !ok { - c.UI.Error("could not find wrapping key") + return nil, fmt.Errorf("missing public_key field in response") } keyBlock, _ := pem.Decode([]byte(key.(string))) + if keyBlock == nil { + return nil, fmt.Errorf("failed to decode PEM information from public_key response field") + } parsedKey, err := x509.ParsePKIXPublicKey(keyBlock.Bytes) if err != nil { return nil, fmt.Errorf("error parsing wrapping key: %w", err) } - return parsedKey, nil + rsaKey, ok := parsedKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("returned value was not an RSA public key but a %T", parsedKey) + } + return rsaKey, nil } diff --git a/command/transit_import_key_test.go b/command/transit_import_key_test.go index e01c03fa00bb..847ab59ff78f 100644 --- a/command/transit_import_key_test.go +++ b/command/transit_import_key_test.go @@ -1,18 +1,19 @@ // Copyright (c) HashiCorp, Inc.
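Editor's note: the import flow above follows the CKM_RSA_AES_KEY_WRAP shape: generate an ephemeral AES-256 key, wrap the target key with it (AES-KWP via the tink `kwp/subtle` package the diff imports), RSA-OAEP-encrypt the ephemeral key under the mount's wrapping key, and concatenate the two ciphertexts. A hedged sketch of that flow; the concatenation order shown (RSA-wrapped AES key first) is our reading of the surrounding code, not confirmed by it:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/tink-crypto/tink-go/v2/kwp/subtle"
)

// wrapTargetKey sketches the key-wrapping performed before calling the
// import endpoint: ephemeral AES key -> KWP-wrap target -> OAEP-encrypt
// the ephemeral key -> base64(concatenation).
func wrapTargetKey(wrappingKey *rsa.PublicKey, targetKey []byte) (string, error) {
	ephemeralAESKey := make([]byte, 32)
	if _, err := rand.Read(ephemeralAESKey); err != nil {
		return "", err
	}
	wrapper, err := subtle.NewKWP(ephemeralAESKey)
	if err != nil {
		return "", err
	}
	wrappedTargetKey, err := wrapper.Wrap(targetKey)
	if err != nil {
		return "", err
	}
	wrappedAESKey, err := rsa.EncryptOAEP(
		sha256.New(), rand.Reader, wrappingKey, ephemeralAESKey, []byte{})
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(append(wrappedAESKey, wrappedTargetKey...)), nil
}

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	ct, err := wrapTargetKey(&rsaKey.PublicKey, make([]byte, 32))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ciphertext bytes:", len(ct))
}
```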
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "bytes" + "context" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/base64" "testing" + "time" "github.com/hashicorp/vault/api" - "github.com/stretchr/testify/require" ) @@ -29,6 +30,15 @@ func TestTransitImport(t *testing.T) { t.Fatalf("transit mount error: %#v", err) } + // Force the generation of the Transit wrapping key now with a longer context + // to help the 32bit nightly tests. This creates a 4096-bit RSA key which can take + // a while on an overloaded system + genWrappingKeyCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + if _, err := client.Logical().ReadWithContext(genWrappingKeyCtx, "transit/wrapping_key"); err != nil { + t.Fatalf("transit failed generating wrapping key: %#v", err) + } + rsa1, rsa2, aes128, aes256 := generateKeys(t) type testCase struct { diff --git a/command/transit_import_key_version.go b/command/transit_import_key_version.go index 272fe01d11c2..cf248554f779 100644 --- a/command/transit_import_key_version.go +++ b/command/transit_import_key_version.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -20,18 +20,18 @@ type TransitImportVersionCommand struct { } func (c *TransitImportVersionCommand) Synopsis() string { - return "Import key material into a new key version in the Transit or Transform secrets engines." + return "Import key material into a new key version in the Transit secrets engines." } func (c *TransitImportVersionCommand) Help() string { helpText := ` Usage: vault transit import-version PATH KEY [...] - Using the Transit or Transform key wrapping system, imports key material from + Using the Transit key wrapping system, imports key material from the base64 encoded KEY (either directly on the CLI or via @path notation), - into a new key whose API path is PATH. To import a new Transit or Transform + into a new key whose API path is PATH. To import a new Transit key, use the import command instead. The remaining options after KEY - (key=value style) are passed on to the Transit or Transform create key endpoint. + (key=value style) are passed on to the Transit create key endpoint. If your system or device natively supports the RSA AES key wrap mechanism (such as the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly rather than this command. @@ -54,5 +54,5 @@ func (c *TransitImportVersionCommand) AutocompleteFlags() complete.Flags { } func (c *TransitImportVersionCommand) Run(args []string) int { - return importKey(c.BaseCommand, "import_version", c.Flags(), args) + return ImportKey(c.BaseCommand, "import_version", transitImportKeyPath, c.Flags(), args) } diff --git a/command/unwrap.go b/command/unwrap.go index 1f920e7806eb..a671071c15a0 100644 --- a/command/unwrap.go +++ b/command/unwrap.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package command @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/unwrap_test.go b/command/unwrap_test.go index 608edff51778..518d32fb12d6 100644 --- a/command/unwrap_test.go +++ b/command/unwrap_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
diff --git a/command/unwrap_test.go b/command/unwrap_test.go
index 608edff51778..518d32fb12d6 100644
--- a/command/unwrap_test.go
+++ b/command/unwrap_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
@@ -7,8 +7,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/vault/api"
-	"github.com/mitchellh/cli"
 )
 
 func testUnwrapCommand(tb testing.TB) (*cli.MockUi, *UnwrapCommand) {
diff --git a/command/util.go b/command/util.go
index e24d65d7259f..55aa7778c21a 100644
--- a/command/util.go
+++ b/command/util.go
@@ -1,25 +1,30 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
 import (
 	"fmt"
 	"io"
+	"net/http"
 	"os"
+	"path/filepath"
+	"runtime/pprof"
+	"testing"
 	"time"
 
 	"github.com/fatih/color"
+	"github.com/hashicorp/cli"
+	"github.com/hashicorp/go-cleanhttp"
 	"github.com/hashicorp/vault/api"
-	"github.com/hashicorp/vault/command/config"
-	"github.com/hashicorp/vault/command/token"
-	"github.com/mitchellh/cli"
+	"github.com/hashicorp/vault/api/cliconfig"
+	"github.com/hashicorp/vault/api/tokenhelper"
 )
 
 // DefaultTokenHelper returns the token helper that is configured for Vault.
 // This helper should only be used for non-server CLI commands.
-func DefaultTokenHelper() (token.TokenHelper, error) {
-	return config.DefaultTokenHelper()
+func DefaultTokenHelper() (tokenhelper.TokenHelper, error) {
+	return cliconfig.DefaultTokenHelper()
 }
 
 // RawField extracts the raw field from the given data and returns it as a
@@ -161,3 +166,77 @@ func getWriterFromUI(ui cli.Ui) io.Writer {
 		return os.Stdout
 	}
 }
+
+func mockClient(t *testing.T) (*api.Client, *recordingRoundTripper) {
+	t.Helper()
+
+	config := api.DefaultConfig()
+	httpClient := cleanhttp.DefaultClient()
+	roundTripper := &recordingRoundTripper{}
+	httpClient.Transport = roundTripper
+	config.HttpClient = httpClient
+	client, err := api.NewClient(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return client, roundTripper
+}
+
+var _ http.RoundTripper = (*recordingRoundTripper)(nil)
+
+type recordingRoundTripper struct {
+	path string
+	body []byte
+}
+
+func (r *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	r.path = req.URL.Path
+
+	defer req.Body.Close()
+	body, err := io.ReadAll(req.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	r.body = body
+	return &http.Response{
+		StatusCode: 200,
+	}, nil
+}
+
+// WritePprofToFile will create a temporary directory at the specified path
+// and generate pprof files at that location. The CPU profile requires sampling
+// over a duration; for most situations 1 second is enough.
+func WritePprofToFile(path string, cpuProfileDuration time.Duration) error {
+	err := os.MkdirAll(path, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("could not create temporary directory for pprof: %v", err)
+	}
+
+	dumps := []string{"goroutine", "heap", "allocs", "threadcreate", "profile"}
+	for _, dump := range dumps {
+		pFile, err := os.Create(filepath.Join(path, dump))
+		if err != nil {
+			return fmt.Errorf("error creating pprof file %s: %v", dump, err)
+		}
+
+		if dump != "profile" {
+			err = pprof.Lookup(dump).WriteTo(pFile, 0)
+			if err != nil {
+				pFile.Close()
+				return fmt.Errorf("error generating pprof data for %s: %v", dump, err)
+			}
+		} else {
+			// CPU profiles need to run for a duration, so sleep for the
+			// requested duration while the profile collects samples.
+			if err := pprof.StartCPUProfile(pFile); err != nil {
+				pFile.Close()
+				return fmt.Errorf("could not start CPU profile: %v", err)
+			}
+			time.Sleep(cpuProfileDuration)
+			pprof.StopCPUProfile()
+		}
+		pFile.Close()
+	}
+	return nil
+}
diff --git a/command/version.go b/command/version.go
index e9b17227b0bf..8b54511c099a 100644
--- a/command/version.go
+++ b/command/version.go
@@ -1,13 +1,13 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
 import (
 	"strings"
 
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/vault/version"
-	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 )
 
diff --git a/command/version_history.go b/command/version_history.go
index b2dfbae425d9..7326bffdeddf 100644
--- a/command/version_history.go
+++ b/command/version_history.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
@@ -7,7 +7,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/mitchellh/cli"
+	"github.com/hashicorp/cli"
 	"github.com/posener/complete"
 	"github.com/ryanuber/columnize"
 )
diff --git a/command/version_history_test.go b/command/version_history_test.go
index c011a4bf4d7b..8d2e18445107 100644
--- a/command/version_history_test.go
+++ b/command/version_history_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
@@ -9,8 +9,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/vault/version"
-	"github.com/mitchellh/cli"
 )
 
 func testVersionHistoryCommand(tb testing.TB) (*cli.MockUi, *VersionHistoryCommand) {
diff --git a/command/version_test.go b/command/version_test.go
index ede21e62a9cf..abacfd3662c0 100644
--- a/command/version_test.go
+++ b/command/version_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
@@ -7,8 +7,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/vault/version"
-	"github.com/mitchellh/cli"
 )
 
 func testVersionCommand(tb testing.TB) (*cli.MockUi, *VersionCommand) {
diff --git a/command/write.go b/command/write.go
index 2cc93b8a3504..0b10ec46564b 100644
--- a/command/write.go
+++ b/command/write.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
@@ -9,8 +9,8 @@ import (
 	"os"
 	"strings"
 
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/vault/api"
-	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 )
 
@@ -51,13 +51,15 @@ Usage: vault write [options] PATH [DATA K=V...]
   it is loaded from a file. If the value is "-", Vault will read the value
   from stdin.
 
-  Persist data in the generic secrets engine:
+  Store an arbitrary secret in the token's cubbyhole:
 
-      $ vault write secret/my-secret foo=bar
+      $ vault write cubbyhole/git-credentials username="student01" password="p@$$w0rd"
 
   Create a new encryption key in the transit secrets engine:
 
-      $ vault write -f transit/keys/my-key
+      $ vault write -force transit/keys/my-key
+
+  The -force / -f flag allows a write operation without any input data.
 
   Upload an AWS IAM policy from a file on disk:
 
@@ -67,6 +69,10 @@ Usage: vault write [options] PATH [DATA K=V...]
       $ echo $MY_TOKEN | vault write consul/config/access token=-
 
+  Create a token:
+
+      $ vault write auth/token/create policies="admin" policies="secops" ttl=8h num_uses=3
+
   For a full list of examples and paths, please see the documentation that
   corresponds to the secret engines in use.
 
@@ -155,7 +161,8 @@ func handleWriteSecretOutput(c *BaseCommand, path string, secret *api.Secret, er
 	}
 	if secret == nil {
 		// Don't output anything unless using the "table" format
-		if Format(c.UI) == "table" {
+		// and even then, don't output anything if a specific field was requested
+		if c.flagField == "" && Format(c.UI) == "table" {
 			c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path))
 		}
 		return 0
diff --git a/command/write_test.go b/command/write_test.go
index 9b76d391c854..2e7a32833fa1 100644
--- a/command/write_test.go
+++ b/command/write_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package command
 
@@ -8,8 +8,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/vault/api"
-	"github.com/mitchellh/cli"
 )
 
 func testWriteCommand(tb testing.TB) (*cli.MockUi, *WriteCommand) {
@@ -118,6 +118,30 @@ func TestWriteCommand_Run(t *testing.T) {
 		})
 	}
 
+	// If we ask for a field and get an empty result, do not output "Success!" or anything else
+	t.Run("field_from_nothing", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		ui, cmd := testWriteCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{
+			"-field", "somefield",
+			"secret/write/foo", "foo=bar",
+		})
+		if exp := 0; code != exp {
+			t.Fatalf("expected %d to be %d: %q", code, exp, ui.ErrorWriter.String())
+		}
+
+		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+		if combined != "" {
+			t.Errorf("expected %q to be empty", combined)
+		}
+	})
+
 	t.Run("force", func(t *testing.T) {
 		t.Parallel()
 
diff --git a/dependencies/2-25-21/deps-upgrade-output.txt b/dependencies/2-25-21/deps-upgrade-output.txt
deleted file mode 100644
index 52eea40c58e6..000000000000
--- a/dependencies/2-25-21/deps-upgrade-output.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-hridoyroy@Hridoys-MBP vault % python3 deps_upgrade.py dep.txt
-github.com/satori/go.uuid
-golang.org/x/text
-github.com/hashicorp/go-gcp-common
-github.com/hashicorp/vault-plugin-secrets-azure
-go.mongodb.org/mongo-driver
-github.com/Microsoft/hcsshim
-package github.com/Microsoft/hcsshim
-	imports github.com/Microsoft/go-winio/pkg/guid
-	imports golang.org/x/sys/windows: build constraints exclude all Go files in /Users/hridoyroy/go/pkg/mod/golang.org/x/sys@v0.0.0-20210124154548-22da62e12c0c/windows
-golang.org/x/crypto
-github.com/containerd/containerd
-github.com/aws/aws-sdk-go
-github.com/hashicorp/serf
-github.com/miekg/dns
-github.com/hashicorp/go-discover
-github.com/hashicorp/serf
\ No newline at end of file
diff --git a/dependencies/2-25-21/deps-upgrade.txt b/dependencies/2-25-21/deps-upgrade.txt
deleted file mode 100644
index 64c7dae59c45..000000000000
--- a/dependencies/2-25-21/deps-upgrade.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-golang.org/x/text
-github.com/hashicorp/go-gcp-common
-github.com/hashicorp/vault-plugin-secrets-azure
-go.mongodb.org/mongo-driver
-github.com/Microsoft/hcsshim
-golang.org/x/crypto
-github.com/containerd/containerd
-github.com/aws/aws-sdk-go
-github.com/hashicorp/serf
-github.com/miekg/dns
-github.com/hashicorp/go-discover
-github.com/hashicorp/serf
\ No newline at end of file
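As an aside on the new test helpers added to command/util.go above: mockClient swaps the API client's transport for a recordingRoundTripper, letting a unit test assert exactly which path and body a command wrote without standing up a Vault server. A minimal usage sketch follows; the path and payload asserted here are invented for illustration:

```go
// Illustrative test using the mockClient helper from command/util.go.
// The path and payload asserted below are hypothetical.
package command

import (
	"strings"
	"testing"
)

func TestRecordingRoundTripper(t *testing.T) {
	client, recorder := mockClient(t)

	// Any write now hits the in-memory round tripper instead of a live server.
	_, err := client.Logical().Write("secret/example", map[string]interface{}{
		"foo": "bar",
	})
	if err != nil {
		t.Fatal(err)
	}

	if recorder.path != "/v1/secret/example" {
		t.Fatalf("unexpected path: %s", recorder.path)
	}
	if !strings.Contains(string(recorder.body), `"foo":"bar"`) {
		t.Fatalf("unexpected body: %s", recorder.body)
	}
}
```

Similarly, the WritePprofToFile helper can be pointed at a scratch directory, e.g. WritePprofToFile(t.TempDir(), time.Second), to capture goroutine, heap, allocs, threadcreate, and CPU profiles while debugging a hung test.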
diff --git a/enos/Makefile b/enos/Makefile
index ad27fb0ffbe9..4a5532b2174a 100644
--- a/enos/Makefile
+++ b/enos/Makefile
@@ -1,11 +1,11 @@
 .PHONY: default
-default: check-fmt
+default: check-fmt shellcheck
 
 .PHONY: check-fmt
-check-fmt: check-fmt-enos check-fmt-modules
+check-fmt: check-fmt-enos check-fmt-modules check-shfmt
 
 .PHONY: fmt
-fmt: fmt-enos fmt-modules
+fmt: fmt-enos fmt-modules shfmt
 
 .PHONY: check-fmt-enos
 check-fmt-enos:
@@ -22,3 +22,22 @@ check-fmt-modules:
 .PHONY: fmt-modules
 fmt-modules:
 	terraform fmt -diff -recursive ./modules
+
+.PHONY: validate-enos
+validate-enos:
+	enos scenario validate --timeout 30m0s
+
+.PHONY: lint
+lint: check-fmt check-fmt-modules check-shfmt shellcheck validate-enos
+
+.PHONY: shellcheck
+shellcheck:
+	find ./modules/ -type f -name '*.sh' | xargs shellcheck
+
+.PHONY: shfmt
+shfmt:
+	find ./modules/ -type f -name '*.sh' | xargs shfmt -l -w -i 2 -bn -ci -kp -sr
+
+.PHONY: check-shfmt
+check-shfmt:
+	find ./modules/ -type f -name '*.sh' | xargs shfmt -l -d -i 2 -bn -ci -kp -sr
diff --git a/enos/README.md b/enos/README.md
index a33f4abe1b9e..1ec6b8e13d4b 100644
--- a/enos/README.md
+++ b/enos/README.md
@@ -18,34 +18,35 @@ is going to give you faster feedback and execution time, whereas Enos is going
 to give you a real-world execution and validation of the requirement.
 Consider the following cases as examples of when one might opt for an Enos
 scenario:
 
-* The feature require third-party integrations. Whether that be networked
+- The feature requires third-party integrations. Whether that be networked
   dependencies like a real Consul backend, a real KMS key to test awskms
   auto-unseal, auto-join discovery using AWS tags, or Cloud hardware KMS's.
-* The feature might behave differently under multiple configuration variants
+- The feature might behave differently under multiple configuration variants
   and therefore should be tested with both combinations, e.g. auto-unseal and
   manual shamir unseal or replication in HA mode with integrated storage or
  Consul storage.
-* The scenario requires coordination between multiple targets. For example,
+- The scenario requires coordination between multiple targets. For example,
   consider the complex lifecycle event of migrating the seal type or storage,
   or manually triggering a raft disaster scenario by partitioning the network
   between the leader and follower nodes. Or perhaps an auto-pilot upgrade
   between a stable version of Vault and our candidate version.
-* The scenario has specific deployment strategy requirements. For example,
+- The scenario has specific deployment strategy requirements. For example,
   if we want to add a regression test for an issue that only arises when
   the software is deployed in a certain manner.
-* The scenario needs to use actual build artifacts that will be promoted
+- The scenario needs to use actual build artifacts that will be promoted
   through the pipeline.
 
 ## Requirements
 
-* AWS access. HashiCorp Vault developers should use Doormat.
-* Terraform >= 1.2
-* Enos >= v0.0.10. You can [install it from a release channel](https://github.com/hashicorp/Enos-Docs/blob/main/installation.md).
-* Access to the QTI org in Terraform Cloud. HashiCorp Vault developers can
-  access a shared token in 1Password or request their own in #team-quality on
-  Slack.
-* An SSH keypair in the AWS region you wish to run the scenario. You can use
+- AWS access. HashiCorp Vault developers should use Doormat.
+- Terraform >= 1.7
+- Enos >= v0.0.28. You can [download a release](https://github.com/hashicorp/enos/releases/) or
+  install it with Homebrew:
+  ```shell
+  brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+  ```
+- An SSH keypair in the AWS region you wish to run the scenario. You can use
   Doormat to log in to the AWS console to create or upload an existing keypair.
-* A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants, from Artifactory when using `artifact_source:artifactory`, and is built locally from the current branch when using `artifact_source:local` variant.
+- A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants, from Artifactory when using `artifact_source:artifactory`, and is built locally from the current branch when using the `artifact_source:local` variant.
 
 ## Scenario Variables
 In CI, each scenario is executed via Github Actions and has been configured using
@@ -57,7 +58,6 @@ variables, or you can update `enos.vars.hcl` with values and uncomment the lines
 Variables that are required:
 * `aws_ssh_keypair_name`
 * `aws_ssh_private_key_path`
-* `tfc_api_token`
 * `vault_bundle_path`
 * `vault_license_path` (only required for non-OSS editions)
 
@@ -206,7 +206,6 @@ This variant is for running the Enos scenario to test an artifact from Artifactory
 * `artifactory_token`
 * `aws_ssh_keypair_name`
 * `aws_ssh_private_key_path`
-* `tfc_api_token`
 * `vault_product_version`
 * `vault_revision`
 
@@ -234,7 +233,6 @@ and destroyed each time a scenario is run, the Terraform state will be managed b
 Here are the steps to configure the GitHub Actions service user:
 
 #### Pre-requisites
-- Access to the `hashicorp-qti` organization in Terraform Cloud.
- Full access to the CI AWS account is required.
 
 **Notes:**
diff --git a/enos/ci/aws-nuke.yml b/enos/ci/aws-nuke.yml
index 50a567704925..c8e4204465fa 100644
--- a/enos/ci/aws-nuke.yml
+++ b/enos/ci/aws-nuke.yml
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 regions:
 - eu-north-1
diff --git a/enos/ci/bootstrap/main.tf b/enos/ci/bootstrap/main.tf
index c5ce812d90b2..db89663153e0 100644
--- a/enos/ci/bootstrap/main.tf
+++ b/enos/ci/bootstrap/main.tf
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 terraform {
   required_providers {
diff --git a/enos/ci/bootstrap/outputs.tf b/enos/ci/bootstrap/outputs.tf
index e6ff37270bd5..a83ef9eb080e 100644
--- a/enos/ci/bootstrap/outputs.tf
+++ b/enos/ci/bootstrap/outputs.tf
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 output "keys" {
   value = {
diff --git a/enos/ci/bootstrap/variables.tf b/enos/ci/bootstrap/variables.tf
index 3fb53bc51777..7e80d5ccc919 100644
--- a/enos/ci/bootstrap/variables.tf
+++ b/enos/ci/bootstrap/variables.tf
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 variable "aws_ssh_public_key" {
   description = "The public key to use for the ssh key"
diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf
index 6aafd9a3819f..24265052a45e 100644
--- a/enos/ci/service-user-iam/main.tf
+++ b/enos/ci/service-user-iam/main.tf
@@ -1,5 +1,5 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { @@ -31,6 +31,7 @@ resource "aws_iam_role" "role" { data "aws_iam_policy_document" "assume_role_policy_document" { provider = aws.us_east_1 + statement { effect = "Allow" actions = ["sts:AssumeRole"] @@ -46,11 +47,47 @@ resource "aws_iam_role_policy" "role_policy" { provider = aws.us_east_1 role = aws_iam_role.role.name name = "${local.service_user}_policy" - policy = data.aws_iam_policy_document.iam_policy_document.json + policy = data.aws_iam_policy_document.role_policy.json +} + +data "aws_iam_policy_document" "role_policy" { + source_policy_documents = [ + data.aws_iam_policy_document.enos_scenario.json, + data.aws_iam_policy_document.aws_nuke.json, + ] +} + +data "aws_iam_policy_document" "aws_nuke" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeRegions", + "ec2:DescribeVpnGateways", + "iam:DeleteAccessKey", + "iam:DeleteUser", + "iam:DeleteUserPolicy", + "iam:GetUser", + "iam:ListAccessKeys", + "iam:ListAccountAliases", + "iam:ListGroupsForUser", + "iam:ListUserPolicies", + "iam:ListUserTags", + "iam:ListUsers", + "iam:UntagUser", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } } -data "aws_iam_policy_document" "iam_policy_document" { +data "aws_iam_policy_document" "enos_scenario" { provider = aws.us_east_1 + statement { effect = "Allow" actions = [ @@ -58,25 +95,42 @@ data "aws_iam_policy_document" "iam_policy_document" { "ec2:AttachInternetGateway", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotFleetRequests", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateEgressOnlyInternetGateway", "ec2:CreateInternetGateway", "ec2:CreateKeyPair", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", + "ec2:CreateSpotDatafeedSubscription", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVolume", "ec2:CreateVPC", + "ec2:DeleteEgressOnlyInternetGateway", + "ec2:DeleteFleets", "ec2:DeleteInternetGateway", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", "ec2:DeleteKeyPair", + "ec2:DeleteRoute", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", + "ec2:DeleteSpotDatafeedSubscription", "ec2:DeleteSubnet", "ec2:DeleteTags", "ec2:DeleteVolume", "ec2:DeleteVPC", "ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", + "ec2:DescribeEgressOnlyInternetGateways", + "ec2:DescribeFleets", + "ec2:DescribeFleetHistory", + "ec2:DescribeFleetInstances", "ec2:DescribeImages", "ec2:DescribeInstanceAttribute", "ec2:DescribeInstanceCreditSpecifications", @@ -84,14 +138,22 @@ data "aws_iam_policy_document" "iam_policy_document" { "ec2:DescribeInstanceTypeOfferings", "ec2:DescribeInstanceTypes", "ec2:DescribeInternetGateways", - "ec2:DescribeInternetGateways", "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", "ec2:DescribeNatGateways", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaces", "ec2:DescribeRegions", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotDatafeedSubscription", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotFleetInstanceRequests", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotFleetRequestHistory", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", 
"ec2:DescribeTags", "ec2:DescribeVolumes", @@ -102,14 +164,22 @@ data "aws_iam_policy_document" "iam_policy_document" { "ec2:DescribeVpnGateways", "ec2:DetachInternetGateway", "ec2:DisassociateRouteTable", + "ec2:GetLaunchTemplateData", + "ec2:GetSpotPlacementScores", "ec2:ImportKeyPair", + "ec2:ModifyFleet", "ec2:ModifyInstanceAttribute", + "ec2:ModifyLaunchTemplate", + "ec2:ModifySpotFleetRequest", "ec2:ModifySubnetAttribute", "ec2:ModifyVPCAttribute", + "ec2:RequestSpotInstances", + "ec2:RequestSpotFleet", "ec2:ResetInstanceAttribute", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "ec2:RunInstances", + "ec2:SendSpotInstanceInterruptions", "ec2:TerminateInstances", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeTargetGroups", @@ -118,11 +188,10 @@ data "aws_iam_policy_document" "iam_policy_document" { "iam:CreateInstanceProfile", "iam:CreatePolicy", "iam:CreateRole", - "iam:CreateRole", + "iam:CreateServiceLinkedRole", "iam:DeleteInstanceProfile", "iam:DeletePolicy", "iam:DeleteRole", - "iam:DeleteRole", "iam:DeleteRolePolicy", "iam:DetachRolePolicy", "iam:GetInstanceProfile", @@ -135,7 +204,6 @@ data "aws_iam_policy_document" "iam_policy_document" { "iam:ListPolicies", "iam:ListRolePolicies", "iam:ListRoles", - "iam:ListRoles", "iam:PassRole", "iam:PutRolePolicy", "iam:RemoveRoleFromInstanceProfile", @@ -151,8 +219,10 @@ data "aws_iam_policy_document" "iam_policy_document" { "kms:ListKeys", "kms:ListResourceTags", "kms:ScheduleKeyDeletion", + "kms:TagResource", "servicequotas:ListServiceQuotas" ] + resources = ["*"] } } diff --git a/enos/ci/service-user-iam/outputs.tf b/enos/ci/service-user-iam/outputs.tf index ba980d59d076..348696b4d723 100644 --- a/enos/ci/service-user-iam/outputs.tf +++ b/enos/ci/service-user-iam/outputs.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 output "ci_role" { value = { diff --git a/enos/ci/service-user-iam/providers.tf b/enos/ci/service-user-iam/providers.tf index 7baba3344006..cf2d21e20296 100644 --- a/enos/ci/service-user-iam/providers.tf +++ b/enos/ci/service-user-iam/providers.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 provider "aws" { region = "us-east-1" diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf index 57ab9aa415b2..676bbb0a3a53 100644 --- a/enos/ci/service-user-iam/service-quotas.tf +++ b/enos/ci/service-user-iam/service-quotas.tf @@ -1,36 +1,65 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 locals { // This is the code of the service quota to request a change for. Each adjustable limit has a // unique code. 
See, https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code - subnets_per_vps_quota = "L-F678F1CE" + subnets_per_vpcs_quota = "L-F678F1CE" + standard_spot_instance_requests_quota = "L-34B43A08" } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { - provider = aws.us_east_2 - quota_code = local.subnets_per_vps_quota + provider = aws.us_east_1 + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { provider = aws.us_east_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { provider = aws.us_west_1 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { provider = aws.us_west_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 } diff --git a/enos/ci/service-user-iam/variables.tf b/enos/ci/service-user-iam/variables.tf index a80d83ca98c3..b69c07b81fe9 100644 --- a/enos/ci/service-user-iam/variables.tf +++ b/enos/ci/service-user-iam/variables.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "repository" { description = "The GitHub repository, either vault or vault-enterprise" diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl new file mode 100644 index 000000000000..6ec60150ecc8 --- /dev/null +++ b/enos/enos-descriptions.hcl @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +globals { + description = { + build_vault = <<-EOF + Determine which Vault artifact we want to use for the scenario. Depending on the + 'artifact_source' variant we'll either build Vault from the local branch, fetch a candidate + build from Artifactory, or use a local artifact that was built in CI via CRT. + EOF + + create_backend_cluster = <<-EOF + Create a storage backend cluster if necessary. When configured to use Consul it will + install, configure, and start the Consul cluster on the target hosts and wait for the Consul + cluster to become healthy. When using integrated raft storage this step is a no-op as the + Vault cluster nodes will provide their own integrated storage. 
+    EOF
+
+    create_seal_key = <<-EOF
+      Create the necessary seal key infrastructure for Vault's auto-unseal functionality. Depending
+      on the 'seal' variant this step will perform different actions. When using 'shamir' the step
+      is a no-op as we won't require an external seal mechanism. When using 'pkcs11' this step will
+      create a SoftHSM slot and associated token which can be distributed to all target nodes. When
+      using 'awskms' a new AWSKMS key will be created. The necessary security groups and policies
+      for Vault target nodes to access the AWSKMS key are handled in the target modules.
+    EOF
+
+    create_vault_cluster = <<-EOF
+      Create the Vault cluster. In this module we'll install, configure, start, initialize, and
+      unseal all the nodes in the Vault cluster. After initialization it also enables various
+      audit devices.
+    EOF
+
+    create_vault_cluster_backend_targets = <<-EOF
+      Create the target machines that we'll install Consul onto when using Consul for storage. We
+      also handle creating AWS instance profiles and security groups that allow for auto-discovery
+      via the retry_join functionality in Consul. The security group firewall rules will
+      automatically allow SSH access from the host external IP address of the machine executing
+      Enos, in addition to all of the required ports for Consul to function and be accessible in the
+      VPC.
+    EOF
+
+    create_vault_cluster_targets = <<-EOF
+      Create the target machines that we'll install Vault onto. We also handle creating AWS instance
+      profiles and security groups that allow for auto-discovery via the retry_join functionality in
+      Consul. The security group firewall rules will automatically allow SSH access from the host
+      external IP address of the machine executing Enos, in addition to all of the required ports
+      for Vault to function and be accessible in the VPC.
+    EOF
+
+    create_vpc = <<-EOF
+      Create an AWS VPC, internet gateway, default security group, and default subnet that allows
+      egress traffic via the internet gateway.
+    EOF
+
+    ec2_info = <<-EOF
+      Query various endpoints in AWS Ec2 to gather metadata we'll use later in our run when creating
+      infrastructure for the Vault cluster. This metadata includes:
+        - AMI IDs for different Linux distributions and platform architectures
+        - Available Ec2 Regions
+        - Availability Zones for our desired machine instance types
+    EOF
+
+    enable_multiseal = <<-EOF
+      Configure the Vault cluster with 'enable_multiseal' and up to three auto-unseal methods
+      via individual, prioritized 'seal' stanzas.
+    EOF
+
+    get_local_metadata = <<-EOF
+      Performs several Vault quality verifications that are dynamically modified based on the Vault
+      binary version, commit SHA, build-date (commit SHA date), and edition metadata. When we're
+      testing existing artifacts this expected metadata is passed in via Enos variables. When we're
+      building a local artifact by using the 'artifact_source:local' variant, this step executes and
+      populates the expected metadata with that of our branch so that we don't have to update the
+      Enos variables on each commit.
+    EOF
+
+    get_vault_cluster_ip_addresses = <<-EOF
+      Map the public and private IP addresses of the Vault cluster nodes and segregate them by
+      their leader status. This allows us to easily determine the public IP addresses of the leader
+      and follower nodes.
+    EOF
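The leader/follower segregation described in get_vault_cluster_ip_addresses above boils down to asking each node about its own leader status. An illustrative Go equivalent using the unauthenticated sys/leader endpoint (the node addresses are placeholders, and the Enos module's actual implementation may differ):

```go
// Illustration of the leader/follower segregation described above, using the
// /v1/sys/leader API via the Vault Go client. Addresses are hypothetical.
package example

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func segregateByLeader(nodeAddrs []string) (leader string, followers []string, err error) {
	for _, addr := range nodeAddrs {
		cfg := api.DefaultConfig()
		cfg.Address = addr
		client, err := api.NewClient(cfg)
		if err != nil {
			return "", nil, err
		}
		// sys/leader is unauthenticated and reports whether the queried node
		// is currently the active (leader) node.
		status, err := client.Sys().Leader()
		if err != nil {
			return "", nil, err
		}
		if status.IsSelf {
			leader = addr
		} else {
			followers = append(followers, addr)
		}
	}
	if leader == "" {
		return "", nil, fmt.Errorf("no leader found among %v", nodeAddrs)
	}
	return leader, followers, nil
}
```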
+
+    read_backend_license = <<-EOF
+      When using Consul Enterprise as a storage backend, ensure that a Consul Enterprise license is
+      present on disk and read its contents so that we can utilize it when configuring the storage
+      cluster. Must have the 'backend:consul' and 'consul_edition:ent' variants.
+    EOF
+
+    read_vault_license = <<-EOF
+      When deploying Vault Enterprise, ensure a Vault Enterprise license is present on disk and
+      read its contents so that we can utilize it when configuring the Vault Enterprise cluster.
+      Must have the 'edition' variant set to any Enterprise edition.
+    EOF
+
+    shutdown_nodes = <<-EOF
+      Shut down the nodes to ensure that they are no longer operating software as part of the
+      cluster.
+    EOF
+
+    start_vault_agent = <<-EOF
+      Create an agent approle in the auth engine, generate a Vault Agent configuration file, and
+      start the Vault agent.
+    EOF
+
+    stop_vault = <<-EOF
+      Stop the Vault cluster by stopping the vault service via systemctl.
+    EOF
+
+    vault_leader_step_down = <<-EOF
+      Force the Vault cluster leader to step down, which forces the Vault cluster to perform a leader
+      election.
+    EOF
+
+    verify_agent_output = <<-EOF
+      Vault running in Agent mode uses templates to create log output.
+    EOF
+
+    verify_raft_cluster_all_nodes_are_voters = <<-EOF
+      When configured with a 'backend:raft' variant, verify that all nodes in the cluster are
+      healthy and are voters.
+    EOF
+
+    verify_autopilot_idle_state = <<-EOF
+      Wait for the Autopilot to upgrade the entire Vault cluster and ensure that the target version
+      matches the candidate version. Ensure that the cluster reaches an upgrade state of
+      'await-server-removal'.
+    EOF
+
+    verify_replication_status = <<-EOF
+      Verify that the default replication status is correct depending on the edition of Vault that
+      has been deployed. When testing a Community Edition of Vault we'll ensure that replication is
+      not enabled. When testing any Enterprise edition of Vault we'll ensure that Performance and
+      Disaster Recovery replication are available.
+    EOF
+
+    verify_seal_rewrap_entries_processed_eq_entries_succeeded_post_rewrap = <<-EOF
+      Verify that the v1/sys/sealwrap/rewrap Vault API returns the rewrap data and
+      'entries.processed' equals 'entries.succeeded' after the rewrap has completed.
+    EOF
+
+    verify_seal_rewrap_entries_processed_is_gt_zero_post_rewrap = <<-EOF
+      Verify that the v1/sys/sealwrap/rewrap Vault API returns the rewrap data and
+      'entries.processed' shows at least one processed entry after the rewrap has completed.
+    EOF
+
+    verify_seal_rewrap_is_running_false_post_rewrap = <<-EOF
+      Verify that the v1/sys/sealwrap/rewrap Vault API returns the rewrap data and 'is_running' is set to
+      'false' after a rewrap has completed.
+    EOF
+
+    verify_seal_rewrap_no_entries_fail_during_rewrap = <<-EOF
+      Verify that the v1/sys/sealwrap/rewrap Vault API returns the rewrap data and 'entries.failed' is '0'
+      after the rewrap has completed.
+    EOF
+
+    verify_seal_type = <<-EOF
+      Vault's reported seal type matches our configuration.
+    EOF
+
+    verify_secrets_engines_create = <<-EOF
+      Verify that Vault is capable of mounting, configuring, and using various secrets engines and auth
+      methods. These currently include:
+        - v1/auth/userpass/*
+        - v1/identity/*
+        - v1/kv/*
+        - v1/sys/policy/*
+    EOF
+
+    verify_secrets_engines_read = <<-EOF
+      Verify that data that we've created previously is still valid, consistent, and durable.
+      This includes:
+        - v1/auth/userpass/*
+        - v1/identity/*
+        - v1/kv/*
+        - v1/sys/policy/*
+    EOF
+
+    verify_ui = <<-EOF
+      The Vault UI assets are embedded in the Vault binary and available when running.
+    EOF
+
+    verify_vault_unsealed = <<-EOF
+      Verify that the Vault cluster has successfully unsealed.
+    EOF
+
+    verify_vault_version = <<-EOF
+      Verify that the Vault CLI has the correct embedded version metadata and that the Vault cluster
+      version history includes our expected version. The CLI metadata that is validated includes
+      the Vault version, edition, build date, and any special prerelease metadata.
+    EOF
+
+    wait_for_cluster_to_have_leader = <<-EOF
+      Wait for a leader election to occur before we proceed with any further quality verification.
+    EOF
+
+    wait_for_seal_rewrap = <<-EOF
+      Wait for the Vault cluster seal rewrap process to complete.
+    EOF
+
+    verify_billing_start_date = <<-EOF
+      Verify that the billing start date has successfully rolled over to the latest billing year if needed.
+    EOF
+
+  }
+}
diff --git a/enos/enos-dev-scenario-pr-replication.hcl b/enos/enos-dev-scenario-pr-replication.hcl
new file mode 100644
index 000000000000..a22ab0641102
--- /dev/null
+++ b/enos/enos-dev-scenario-pr-replication.hcl
@@ -0,0 +1,935 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+scenario "dev_pr_replication" {
+  description = <<-EOF
+    This scenario spins up two Vault clusters with either an external Consul cluster or
+    integrated Raft for storage. The secondary cluster is configured with performance replication
+    from the primary cluster. None of our test verification is included in this scenario in order
+    to improve end-to-end speed. If you wish to perform such verification you'll need to use a
+    non-dev scenario.
+
+    The scenario supports finding and installing any released 'linux/amd64' or 'linux/arm64' Vault
+    artifact as long as its version is >= 1.8. You can also use the 'artifact:local' variant to
+    build and deploy the current branch!
+
+    In order to execute this scenario you'll need to install the enos CLI:
+      - $ brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+
+    You'll also need access to an AWS account via Doormat; follow the guide here:
+      https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#authenticate-with-doormat
+
+    Follow this guide to get an SSH keypair set up in the AWS account:
+      https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#set-your-aws-key-pair-name-and-private-key
+
+    Please note that this scenario requires several input variables to be set in order to function
+    properly. While not all variants will require all variables, it's suggested that you look over
+    the scenario outline to determine which variables affect which steps and which have inputs that
+    you should set. You can use the following command to get a textual outline of the entire
+    scenario:
+      enos scenario outline dev_pr_replication
+
+    You can also create an HTML version that is suitable for viewing in web browsers:
+      enos scenario outline dev_pr_replication --format html > index.html
+      open index.html
+
+    To configure the required variables you have a couple of choices. You can create an
+    'enos-local.vars' file in the same 'enos' directory where this scenario is defined. In it you
+    declare your desired variable values. For example, you could copy the following content and
+    then set the values as necessary:
+
+      artifactory_username = "username@hashicorp.com"
+      artifactory_token = "
+      aws_region = "us-west-2"
+      aws_ssh_keypair_name = ""
+      aws_ssh_keypair_key_path = "/path/to/your/private/key.pem"
+      dev_build_local_ui = false
+      dev_consul_version = "1.18.1"
+      vault_license_path = "./support/vault.hclic"
+      vault_product_version = "1.16.2"
+
+    Alternatively, you can set them in your environment:
+      export ENOS_VAR_aws_region="us-west-2"
+      export ENOS_VAR_vault_license_path="./support/vault.hclic"
+
+    After you've configured your inputs you can list and filter the available scenarios and then
+    subsequently launch and destroy them.
+      enos scenario list --help
+      enos scenario launch --help
+      enos scenario list dev_pr_replication
+      enos scenario launch dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+
+    When the scenario is finished launching you can refer to the scenario outputs to see information
+    related to your cluster. You can use this information to SSH into nodes and/or to interact
+    with Vault.
+      enos scenario output dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+      ssh -i /path/to/your/private/key.pem
+      vault status
+
+    After you've finished, you can tear down the cluster:
+      enos scenario destroy dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+  EOF
+
+  // The matrix is where we define all the baseline combinations that enos can utilize to customize
+  // your scenario. By default enos attempts to perform your command on the entire product of these
+  // possible combinations! Most of the time you'll want to reduce that by passing in a filter.
+  // Run 'enos scenario list --help' to see more about how filtering scenarios works in enos.
+  matrix {
+    arch              = ["amd64", "arm64"]
+    artifact          = ["local", "deb", "rpm", "zip"]
+    distro            = ["amzn", "leap", "rhel", "sles", "ubuntu"]
+    edition           = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+    primary_backend   = ["consul", "raft"]
+    primary_seal      = ["awskms", "pkcs11", "shamir"]
+    secondary_backend = ["consul", "raft"]
+    secondary_seal    = ["awskms", "pkcs11", "shamir"]
+
+    exclude {
+      edition = ["ent.hsm", "ent.fips1402", "ent.hsm.fips1402"]
+      arch    = ["arm64"]
+    }
+
+    exclude {
+      artifact = ["rpm"]
+      distro   = ["ubuntu"]
+    }
+
+    exclude {
+      artifact = ["deb"]
+      distro   = ["rhel"]
+    }
+
+    exclude {
+      primary_seal = ["pkcs11"]
+      edition      = ["ce", "ent", "ent.fips1402"]
+    }
+
+    exclude {
+      secondary_seal = ["pkcs11"]
+      edition        = ["ce", "ent", "ent.fips1402"]
+    }
+  }
+
+  // Specify which Terraform configs and providers to use in this scenario. Most of the time you'll
+  // never need to change this! If you wanted to test with different terraform or terraform CLI
+  // settings you can define them and assign them here.
+  terraform_cli = terraform_cli.default
+  terraform     = terraform.default
+
+  // Here we declare all of the providers that we might need for our scenario.
+  providers = [
+    provider.aws.default,
+    provider.enos.ec2_user,
+    provider.enos.ubuntu
+  ]
+
+  // These are variable values that are local to our scenario. They are evaluated after external
+  // variables and scenario matrices but before any of our steps.
+  locals {
+    // The enos provider uses different ssh transport configs for different distros (as
+    // specified in enos-providers.hcl), and we need to be able to access both of those here.
+    enos_provider = {
+      amzn   = provider.enos.ec2_user
+      leap   = provider.enos.ec2_user
+      rhel   = provider.enos.ec2_user
+      sles   = provider.enos.ec2_user
+      ubuntu = provider.enos.ubuntu
+    }
+    // We install vault packages from artifactory. If you wish to use one of these variants you'll
+    // need to configure your artifactory credentials.
+    use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
+    // The IP version to use for the Vault listener and associated things.
+    ip_version = 4
+    // Zip bundles and local builds don't come with systemd units or any associated configuration.
+    // When this is true we'll let enos handle this for us.
+    manage_service = matrix.artifact == "zip" || matrix.artifact == "local"
+    // If you are using an ent edition, you will need a Vault license. Common convention
+    // is to store it at ./support/vault.hclic, but you may change this path according
+    // to your own preference.
+    vault_install_dir = matrix.artifact == "zip" || matrix.artifact == "local" ? global.vault_install_dir["bundle"] : global.vault_install_dir["package"]
+  }
+
+  // Begin scenario steps. These are the steps we'll perform to get your cluster up and running.
+  step "build_or_find_vault_artifact" {
+    description = <<-EOF
+      Depending on how we intend to get our Vault artifact, this step either builds Vault from our
+      current branch or finds Debian or RedHat packages in Artifactory. If we're using a zip bundle
+      we'll get it from releases.hashicorp.com and skip this step entirely. Please note that if you
+      wish to use a deb or rpm artifact you'll have to configure your artifactory credentials!
+
+      Variables that are used in this step:
+
+      artifactory_host:
+        The artifactory host to search. It's very unlikely that you'll want to change this. The
+        default value is the HashiCorp Artifactory instance.
+      artifactory_repo:
+        The artifactory repository to search. It's very unlikely that you'll want to change this.
+        The default value is where CRT will publish packages.
+      artifactory_username:
+        The artifactory username associated with your token. You'll need this if you wish to use
+        deb or rpm artifacts! You can request access via Okta.
+      artifactory_token:
+        The artifactory token associated with your username. You'll need this if you wish to use
+        deb or rpm artifacts! You can create a token by logging into Artifactory via Okta.
+      vault_product_version:
+        When using the artifact:rpm or artifact:deb variants we'll use this variable to determine
+        which version of the Vault package we should fetch from Artifactory.
+      vault_artifact_path:
+        When using the artifact:local variant we'll utilize this variable to determine where
+        to create the vault.zip archive from the local branch. Default: /tmp/vault.zip.
+      vault_local_build_tags:
+        When using the artifact:local variant we'll use this variable to inject custom build
+        tags. If left unset we'll automatically use the build tags that correspond to the edition
+        variant.
+    EOF
+    module = matrix.artifact == "local" ? "build_local" : local.use_artifactory ? "build_artifactory_package" : "build_crt"
+
+    variables {
+      // Used for all modules
+      arch            = matrix.arch
+      edition         = matrix.edition
+      product_version = var.vault_product_version
+      // Required for the local build which will always result in using a local zip bundle
+      artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+      build_ui      = var.dev_build_local_ui
+      build_tags    = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
+      goarch        = matrix.arch
+      goos          = "linux"
+      // Required when using an RPM or Deb package
+      // Some of these variables don't have default values so we'll only set them if they are
+      // required.
+      artifactory_host     = local.use_artifactory ? var.artifactory_host : null
+      artifactory_repo     = local.use_artifactory ? var.artifactory_repo : null
+      artifactory_username = local.use_artifactory ? var.artifactory_username : null
+      artifactory_token    = local.use_artifactory ? var.artifactory_token : null
+      distro               = matrix.distro
+    }
+  }
+
+  step "ec2_info" {
+    description = "This discovers useful metadata in EC2, like AWS AMI IDs, that we use in later modules."
+    module      = module.ec2_info
+  }
+
+  step "create_vpc" {
+    description = <<-EOF
+      Create the VPC resources required for our scenario.
+
+      Variables that are used in this step:
+      tags:
+        If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+        and they'll be added to resources when possible.
+    EOF
+    module      = module.create_vpc
+    depends_on  = [step.ec2_info]
+
+    variables {
+      common_tags = global.tags
+    }
+  }
+
+  step "read_backend_license" {
+    description = <<-EOF
+      Read the contents of the backend license if we're using a Consul backend for either cluster
+      and the backend_edition variable is set to "ent".
+
+      Variables that are used in this step:
+      backend_edition:
+        The edition of Consul to use. If left unset it will default to CE.
+      backend_license_path:
+        If this variable is set we'll use it to determine the local path on disk that contains a
+        Consul Enterprise license. If it is not set we'll attempt to load it from
+        ./support/consul.hclic.
+    EOF
+    skip_step = (var.backend_edition == "ce" || var.backend_edition == "oss") || (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft")
+    module    = module.read_license
+
+    variables {
+      file_name = global.backend_license_path
+    }
+  }
+
+  step "read_vault_license" {
+    description = <<-EOF
+      Validates and reads into memory the contents of a local Vault Enterprise license if we're
+      using an Enterprise edition. This step does not run when using a community edition of Vault.
+
+      Variables that are used in this step:
+      vault_license_path:
+        If this variable is set we'll use it to determine the local path on disk that contains a
+        Vault Enterprise license. If it is not set we'll attempt to load it from
+        ./support/vault.hclic.
+    EOF
+    module = module.read_license
+
+    variables {
+      file_name = global.vault_license_path
+    }
+  }
+
+  step "create_primary_seal_key" {
+    description = <<-EOF
+      Create the necessary seal keys depending on our configured seal.
+
+      Variables that are used in this step:
+      tags:
+        If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+        and they'll be added to resources when possible.
+    EOF
+    module     = "seal_${matrix.primary_seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id   = step.create_vpc.id
+      cluster_meta = "primary"
+      common_tags  = global.tags
+    }
+  }
+
+  step "create_secondary_seal_key" {
+    description = <<-EOF
+      Create the necessary seal keys depending on our configured seal.
+
+      Variables that are used in this step:
+      tags:
+        If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+        and they'll be added to resources when possible.
+    EOF
+    module     = "seal_${matrix.secondary_seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id      = step.create_vpc.id
+      cluster_meta    = "secondary"
+      common_tags     = global.tags
+      other_resources = step.create_primary_seal_key.resource_names
+    }
+  }
+
+  step "create_primary_cluster_targets" {
+    description = <<-EOF
+      Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure
+      that the firewall is configured to allow the necessary Vault and Consul traffic and SSH
+      from the machine executing the Enos scenario.
+
+      Variables that are used in this step:
+      aws_ssh_keypair_name:
+        The AWS SSH Keypair name to use for target machines.
+      project_name:
+        The project name is used for additional tag metadata on resources.
+      tags:
+        If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+        and they'll be added to resources when possible.
+      vault_instance_count:
+        How many instances to provision for the Vault cluster. If left unset it will use a default
+        of three.
+    EOF
+    module     = module.target_ec2_instances
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+      cluster_tag_key = global.vault_tag_key
+      common_tags     = global.tags
+      instance_count  = try(var.vault_instance_count, 3)
+      seal_key_names  = step.create_primary_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
+
+  step "create_primary_cluster_backend_targets" {
+    description = <<-EOF
+      Creates the necessary machine infrastructure targets for the backend Consul storage cluster.
+      We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH
+      from the machine executing the Enos scenario. When using integrated storage this step is a
+      no-op.
+
+      Variables that are used in this step:
+      tags:
+        If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+        and they'll be added to resources when possible.
+      project_name:
+        The project name is used for additional tag metadata on resources.
+      aws_ssh_keypair_name:
+        The AWS SSH Keypair name to use for target machines.
+    EOF
+    module     = matrix.primary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]]
+      cluster_tag_key = global.backend_tag_key
+      common_tags     = global.tags
+      seal_key_names  = step.create_primary_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
+
+  step "create_secondary_cluster_targets" {
+    description = <<-EOF
+      Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure
+      that the firewall is configured to allow the necessary Vault and Consul traffic and SSH
+      from the machine executing the Enos scenario.
+    EOF
+    module     = module.target_ec2_instances
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+      cluster_tag_key = global.vault_tag_key
+      common_tags     = global.tags
+      seal_key_names  = step.create_secondary_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
+
+  step "create_secondary_cluster_backend_targets" {
+    description = <<-EOF
+      Creates the necessary machine infrastructure targets for the backend Consul storage cluster.
+      We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH
+      from the machine executing the Enos scenario. When using integrated storage this step is a
+      no-op.
+    EOF
+
+    module     = matrix.secondary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]]
+      cluster_tag_key = global.backend_tag_key
+      common_tags     = global.tags
+      seal_key_names  = step.create_secondary_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
+
+  step "create_primary_backend_cluster" {
+    description = <<-EOF
+      Install, configure, and start the backend Consul storage cluster for the primary Vault
+      cluster. When we are using the raft storage variant this step is a no-op.
+
+      Variables that are used in this step:
+      backend_edition:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will
+        also need a valid license configured for the read_backend_license step. Default: ce.
+      dev_consul_version:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        the version of Consul to use for the cluster.
+    EOF
+    module = "backend_${matrix.primary_backend}"
+    depends_on = [
+      step.create_primary_cluster_backend_targets
+    ]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_name    = step.create_primary_cluster_backend_targets.cluster_name
+      cluster_tag_key = global.backend_tag_key
+      hosts           = step.create_primary_cluster_backend_targets.hosts
+      license         = matrix.primary_backend == "consul" ? step.read_backend_license.license : null
+      release = {
+        edition = var.backend_edition
+        version = var.dev_consul_version
+      }
+    }
+  }
+
+  step "create_primary_cluster" {
+    description = <<-EOF
+      Install, configure, start, initialize, and unseal the primary Vault cluster on the specified
+      target instances.
+
+      Variables that are used in this step:
+      backend_edition:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        which version of the consul client to install on each node for Consul storage. Note that
+        if you set it to 'ent' you will also need a valid license configured for the
+        read_backend_license step. If left unset we'll use an unlicensed CE version.
+      dev_config_mode:
+        You can set this variable to instruct enos on how to primarily configure Vault when starting
+        the service. Options are 'file' and 'env' for configuration file or environment variables.
+        If left unset we'll use the default value.
+      dev_consul_version:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        which version of Consul to install. If left unset we'll utilize the default value.
+      vault_artifact_path:
+        When using the artifact:local variant this variable is utilized to specify where on
+        the local disk the vault.zip file we've built is located. It can be left unset to use
+        the default value.
+      vault_enable_audit_devices:
+        Whether or not to enable various audit devices after unsealing the Vault cluster. By default
+        we'll configure syslog, socket, and file auditing.
+      vault_product_version:
+        When using the artifact:zip variant this variable is utilized to specify the version of
+        Vault to download from releases.hashicorp.com.
+    EOF
+    module = module.vault_cluster
+    depends_on = [
+      step.create_primary_backend_cluster,
+      step.create_primary_cluster_targets,
+      step.build_or_find_vault_artifact,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory.
+      // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com
+      // We only set one or the other, never both.
+      artifactory_release     = local.use_artifactory ? step.build_or_find_vault_artifact.release : null
+      backend_cluster_name    = step.create_primary_cluster_backend_targets.cluster_name
+      backend_cluster_tag_key = global.backend_tag_key
+      cluster_name            = step.create_primary_cluster_targets.cluster_name
+      config_mode             = var.dev_config_mode
+      consul_license          = matrix.primary_backend == "consul" ? step.read_backend_license.license : null
+      consul_release = matrix.primary_backend == "consul" ? {
+        edition = var.backend_edition
+        version = var.dev_consul_version
+      } : null
+      enable_audit_devices = var.vault_enable_audit_devices
+      hosts                = step.create_primary_cluster_targets.hosts
+      install_dir          = local.vault_install_dir
+      ip_version           = local.ip_version
+      license              = step.read_vault_license.license
+      local_artifact_path  = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+      manage_service       = local.manage_service
+      packages             = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]])
+      release              = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null
+      seal_attributes      = step.create_primary_seal_key.attributes
+      seal_type            = matrix.primary_seal
+      storage_backend      = matrix.primary_backend
+    }
+  }
+
+  step "create_secondary_backend_cluster" {
+    description = <<-EOF
+      Install, configure, and start the backend Consul storage cluster for the secondary Vault
+      cluster. When we are using the raft storage variant this step is a no-op.
+
+      Variables that are used in this step:
+      backend_edition:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will
+        also need a valid license configured for the read_backend_license step. Default: ce.
+      dev_consul_version:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        the version of Consul to use for the cluster.
+    EOF
+    module = "backend_${matrix.secondary_backend}"
+    depends_on = [
+      step.create_secondary_cluster_backend_targets
+    ]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_name    = step.create_secondary_cluster_backend_targets.cluster_name
+      cluster_tag_key = global.backend_tag_key
+      hosts           = step.create_secondary_cluster_backend_targets.hosts
+      license         = matrix.secondary_backend == "consul" ?
step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = var.dev_consul_version + } + } + } + + step "create_secondary_cluster" { + description = <<-EOF + Install, configure, start, initialize and unseal the secondary Vault cluster on the specified + target instances. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of the consul client to install on each node for Consul storage. Note that + if you set it to 'ent' you will also need a valid license configured for the + read_backend_license step. If left unset we'll use an unlicensed CE version. + dev_config_mode: + You can set this variable to instruct enos on how to primarily configure Vault when starting + the service. Options are 'file' and 'env' for configuration file or environment variables. + If left unset we'll use the default value. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of Consul to install. If left unset we'll utilize the default value. + vault_artifact_path: + When using the artifact:local variant this variable is utilized to specify where on + the local disk the vault.zip file we've built is located. It can be left unset to use + the default value. + vault_enable_audit_devices: + Whether or not to enable various audit devices after unsealing the Vault cluster. By default + we'll configure syslog, socket, and file auditing. + vault_product_version: + When using the artifact:zip variant this variable is utilized to specify the version of + Vault to download from releases.hashicorp.com. + EOF + module = module.vault_cluster + depends_on = [ + step.create_secondary_backend_cluster, + step.create_secondary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory. + // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com + // We only set one or the other, never both. + artifactory_release = local.use_artifactory ? step.build_or_find_vault_artifact.release : null + backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_secondary_cluster_targets.cluster_name + config_mode = var.dev_config_mode + consul_license = matrix.secondary_backend == "consul" ? step.read_backend_license.license : null + consul_release = matrix.secondary_backend == "consul" ? { + edition = var.backend_edition + version = var.dev_consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_secondary_cluster_targets.hosts + install_dir = local.vault_install_dir + ip_version = local.ip_version + license = step.read_vault_license.license + local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + release = matrix.artifact == "zip" ? 
{ version = var.vault_product_version, edition = matrix.edition } : null
+      seal_attributes = step.create_secondary_seal_key.attributes
+      seal_type       = matrix.secondary_seal
+      storage_backend = matrix.secondary_backend
+    }
+  }
+
+  step "verify_that_vault_primary_cluster_is_unsealed" {
+    description = <<-EOF
+      Wait for the primary cluster to unseal and reach a healthy state.
+    EOF
+    module = module.vault_wait_for_cluster_unsealed
+    depends_on = [
+      step.create_primary_cluster
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_primary_cluster_targets.hosts
+      vault_addr        = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir = local.vault_install_dir
+    }
+  }
+
+  step "verify_that_vault_secondary_cluster_is_unsealed" {
+    description = <<-EOF
+      Wait for the secondary cluster to unseal and reach a healthy state.
+    EOF
+    module = module.vault_wait_for_cluster_unsealed
+    depends_on = [
+      step.create_secondary_cluster
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_secondary_cluster_targets.hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = local.vault_install_dir
+    }
+  }
+
+  step "get_primary_cluster_ips" {
+    description = <<-EOF
+      Determine which node is the primary and which are followers and map their private IP address
+      to their public IP address. We'll use this information so that we can enable performance
+      replication on the leader.
+    EOF
+    module     = module.vault_get_cluster_ips
+    depends_on = [step.verify_that_vault_primary_cluster_is_unsealed]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_primary_cluster_targets.hosts
+      ip_version        = local.ip_version
+      vault_addr        = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir = local.vault_install_dir
+      vault_root_token  = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "get_secondary_cluster_ips" {
+    description = <<-EOF
+      Determine which node is the primary and which are followers and map their private IP address
+      to their public IP address. We'll use this information so that we can enable performance
+      replication on the leader.
+    EOF
+    module     = module.vault_get_cluster_ips
+    depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_secondary_cluster_targets.hosts
+      ip_version        = local.ip_version
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = local.vault_install_dir
+      vault_root_token  = step.create_secondary_cluster.root_token
+    }
+  }
+
+  step "setup_userpass_for_replication_auth" {
+    description = <<-EOF
+      Enable the auth userpass method and create a new user.
+    EOF
+    module     = module.vault_verify_secrets_engines_create
+    depends_on = [step.get_primary_cluster_ips]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_primary_cluster_targets.hosts
+      leader_host       = step.get_primary_cluster_ips.leader_host
+      vault_addr        = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir = local.vault_install_dir
+      vault_root_token  = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "configure_performance_replication_primary" {
+    description = <<-EOF
+      Create a superuser policy and write it for our new user. Activate performance replication on
+      the primary.
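+
+      Roughly speaking (an illustrative sketch, not necessarily the module's exact invocation),
+      activating performance replication on the primary leader amounts to:
+        vault write -f sys/replication/performance/primary/enable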
+ EOF + module = module.vault_setup_perf_primary + depends_on = [ + step.get_primary_cluster_ips, + step.get_secondary_cluster_ips, + step.setup_userpass_for_replication_auth, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ip_version = local.ip_version + primary_leader_host = step.get_primary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "generate_secondary_token" { + description = <<-EOF + Create a random token and write it to sys/replication/performance/primary/secondary-token on + the primary. + EOF + module = module.generate_secondary_token + depends_on = [step.configure_performance_replication_primary] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ip_version = local.ip_version + primary_leader_host = step.get_primary_cluster_ips.leader_host + replication_type = "performance" + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_secondary" { + description = <<-EOF + Enable performance replication on the secondary using the new shared token. + EOF + module = module.vault_setup_replication_secondary + depends_on = [step.generate_secondary_token] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ip_version = local.ip_version + secondary_leader_host = step.get_secondary_cluster_ips.leader_host + replication_type = "performance" + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + wrapping_token = step.generate_secondary_token.secondary_token + } + } + + step "unseal_secondary_followers" { + description = <<-EOF + After replication is enabled we need to unseal the followers on the secondary cluster. + Depending on how we're configured we'll pass the unseal keys according to this guide: + https://developer.hashicorp.com/vault/docs/enterprise/replication#seals + EOF + module = module.vault_unseal_replication_followers + depends_on = [ + step.create_primary_cluster, + step.create_secondary_cluster, + step.get_secondary_cluster_ips, + step.configure_performance_replication_secondary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.get_secondary_cluster_ips.follower_hosts + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex + vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal + } + } + + step "verify_secondary_cluster_is_unsealed_after_enabling_replication" { + description = <<-EOF + Verify that the secondary cluster is unsealed after we enable PR replication. 
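+
+      Conceptually this amounts to polling each node's seal status until it reports unsealed,
+      e.g. (illustrative only):
+        vault status -format=json | jq '.sealed'   # expect: false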
+    EOF
+    module = module.vault_wait_for_cluster_unsealed
+    depends_on = [
+      step.unseal_secondary_followers
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_secondary_cluster_targets.hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = local.vault_install_dir
+    }
+  }
+
+  step "verify_performance_replication" {
+    description = <<-EOF
+      Check sys/replication/performance/status and ensure that all nodes are in the correct state
+      after enabling performance replication.
+    EOF
+    module     = module.vault_verify_performance_replication
+    depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ip_version            = local.ip_version
+      primary_leader_host   = step.get_primary_cluster_ips.leader_host
+      secondary_leader_host = step.get_secondary_cluster_ips.leader_host
+      vault_addr            = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir     = local.vault_install_dir
+    }
+  }
+
+  // When using a Consul backend, these output values will be for the Consul backend.
+  // When using a Raft backend, these output values will be null.
+  output "audit_device_file_path" {
+    description = "The file path for the file audit device, if enabled"
+    value       = step.create_primary_cluster.audit_device_file_path
+  }
+
+  output "primary_cluster_hosts" {
+    description = "The Vault primary cluster target hosts"
+    value       = step.create_primary_cluster_targets.hosts
+  }
+
+  output "primary_cluster_root_token" {
+    description = "The Vault primary cluster root token"
+    value       = step.create_primary_cluster.root_token
+  }
+
+  output "primary_cluster_unseal_keys_b64" {
+    description = "The Vault primary cluster unseal keys"
+    value       = step.create_primary_cluster.unseal_keys_b64
+  }
+
+  output "primary_cluster_unseal_keys_hex" {
+    description = "The Vault primary cluster unseal keys hex"
+    value       = step.create_primary_cluster.unseal_keys_hex
+  }
+
+  output "primary_cluster_recovery_key_shares" {
+    description = "The Vault primary cluster recovery key shares"
+    value       = step.create_primary_cluster.recovery_key_shares
+  }
+
+  output "primary_cluster_recovery_keys_b64" {
+    description = "The Vault primary cluster recovery keys b64"
+    value       = step.create_primary_cluster.recovery_keys_b64
+  }
+
+  output "primary_cluster_recovery_keys_hex" {
+    description = "The Vault primary cluster recovery keys hex"
+    value       = step.create_primary_cluster.recovery_keys_hex
+  }
+
+  output "secondary_cluster_hosts" {
+    description = "The Vault secondary cluster public IPs"
+    value       = step.create_secondary_cluster_targets.hosts
+  }
+
+  output "secondary_cluster_root_token" {
+    description = "The Vault secondary cluster root token"
+    value       = step.create_secondary_cluster.root_token
+  }
+
+  output "performance_secondary_token" {
+    description = "The performance secondary replication token"
+    value       = step.generate_secondary_token.secondary_token
+  }
+}
diff --git a/enos/enos-dev-scenario-single-cluster.hcl b/enos/enos-dev-scenario-single-cluster.hcl
new file mode 100644
index 000000000000..1f413f05d123
--- /dev/null
+++ b/enos/enos-dev-scenario-single-cluster.hcl
@@ -0,0 +1,517 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+scenario "dev_single_cluster" {
+  description = <<-EOF
+    This scenario spins up a single Vault cluster with either an external Consul cluster or
+    integrated Raft for storage.
+    None of our test verification is included in this scenario in order
+    to improve end-to-end speed. If you wish to perform such verification you'll need to use a
+    non-dev scenario instead.
+
+    The scenario supports finding and installing any released 'linux/amd64' or 'linux/arm64' Vault
+    artifact as long as its version is >= 1.8. You can also use the 'artifact:local' variant to
+    build and deploy the current branch!
+
+    In order to execute this scenario you'll need to install the enos CLI:
+      brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+
+    You'll also need access to an AWS account with an SSH keypair.
+    Perform the steps here to get AWS access with Doormat https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#authenticate-with-doormat
+    Perform the steps here to get an AWS keypair set up: https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#set-your-aws-key-pair-name-and-private-key
+
+    Please note that this scenario requires several input variables to be set in order to function
+    properly. While not all variants will require all variables, it's suggested that you look over
+    the scenario outline to determine which variables affect which steps and which have inputs that
+    you should set. You can use the following command to get a textual outline of the entire
+    scenario:
+      enos scenario outline dev_single_cluster
+
+    You can also create an HTML version that is suitable for viewing in web browsers:
+      enos scenario outline dev_single_cluster --format html > index.html
+      open index.html
+
+    To configure the required variables you have a couple of choices. You can create an
+    'enos-local.vars' file in the same 'enos' directory where this scenario is defined. In it you
+    declare your desired variable values. For example, you could copy the following content and
+    then set the values as necessary:
+
+      artifactory_username     = "username@hashicorp.com"
+      artifactory_token        = ""
+      aws_region               = "us-west-2"
+      aws_ssh_keypair_name     = ""
+      aws_ssh_keypair_key_path = "/path/to/your/private/key.pem"
+      dev_build_local_ui       = false
+      dev_consul_version       = "1.18.1"
+      vault_license_path       = "./support/vault.hclic"
+      vault_product_version    = "1.16.2"
+
+    Alternatively, you can set them in your environment:
+      export ENOS_VAR_aws_region="us-west-2"
+      export ENOS_VAR_vault_license_path="./support/vault.hclic"
+
+    After you've configured your inputs you can list and filter the available scenarios and then
+    subsequently launch and destroy them.
+      enos scenario list --help
+      enos scenario launch --help
+      enos scenario list dev_single_cluster
+      enos scenario launch dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms
+
+    When the scenario is finished launching you can refer to the scenario outputs to see information
+    related to your cluster. You can use this information to SSH into nodes and/or to interact
+    with vault.
+      enos scenario output dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms
+      ssh -i /path/to/your/private/key.pem
+      vault status
+
+    After you've finished you can tear down the cluster.
+      enos scenario destroy dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms
+  EOF
+
+  // The matrix is where we define all the baseline combinations that enos can utilize to customize
+  // your scenario. By default enos attempts to perform your command on the entire product of these
+  // possible combinations!
+  // Most of the time you'll want to reduce that by passing in a filter.
+  // Run 'enos scenario list --help' to see more about how filtering scenarios works in enos.
+  matrix {
+    arch     = ["amd64", "arm64"]
+    artifact = ["local", "deb", "rpm", "zip"]
+    backend  = ["consul", "raft"]
+    distro   = ["amzn", "leap", "rhel", "sles", "ubuntu"]
+    edition  = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+    seal     = ["awskms", "pkcs11", "shamir"]
+
+    exclude {
+      edition = ["ent.hsm", "ent.fips1402", "ent.hsm.fips1402"]
+      arch    = ["arm64"]
+    }
+
+    exclude {
+      artifact = ["rpm"]
+      distro   = ["ubuntu"]
+    }
+
+    exclude {
+      artifact = ["deb"]
+      distro   = ["rhel"]
+    }
+
+    exclude {
+      seal    = ["pkcs11"]
+      edition = ["ce", "ent", "ent.fips1402"]
+    }
+  }
+
+  // Specify which Terraform configs and providers to use in this scenario. Most of the time you'll
+  // never need to change this! If you wanted to test with different terraform or terraform CLI
+  // settings you can define them and assign them here.
+  terraform_cli = terraform_cli.default
+  terraform     = terraform.default
+
+  // Here we declare all of the providers that we might need for our scenario.
+  // There are two different configurations for the Enos provider, each specifying
+  // SSH transport configs for different Linux distros.
+  providers = [
+    provider.aws.default,
+    provider.enos.ec2_user,
+    provider.enos.ubuntu
+  ]
+
+  // These are variable values that are local to our scenario. They are evaluated after external
+  // variables and scenario matrices but before any of our steps.
+  locals {
+    // The enos provider uses different ssh transport configs for different distros (as
+    // specified in enos-providers.hcl), and we need to be able to access both of those here.
+    enos_provider = {
+      amzn   = provider.enos.ec2_user
+      leap   = provider.enos.ec2_user
+      rhel   = provider.enos.ec2_user
+      sles   = provider.enos.ec2_user
+      ubuntu = provider.enos.ubuntu
+    }
+    // We install vault packages from artifactory. If you wish to use one of these variants you'll
+    // need to configure your artifactory credentials.
+    use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
+    // The IP version to use for the Vault listener and associated things.
+    ip_version = 4
+    // Zip bundles and local builds don't come with systemd units or any associated configuration.
+    // When this is true we'll let enos handle this for us.
+    manage_service = matrix.artifact == "zip" || matrix.artifact == "local"
+    // If you are using an ent edition, you will need a Vault license. Common convention
+    // is to store it at ./support/vault.hclic, but you may change this path according
+    // to your own preference. The Vault install directory itself depends on the artifact
+    // type: zip bundles and local builds are unpacked into the bundle path, while deb and
+    // rpm packages are installed into the package path.
+    vault_install_dir = matrix.artifact == "zip" || matrix.artifact == "local" ? global.vault_install_dir["bundle"] : global.vault_install_dir["package"]
+  }
+
+  // Begin scenario steps. These are the steps we'll perform to get your cluster up and running.
+  step "build_or_find_vault_artifact" {
+    description = <<-EOF
+      Depending on how we intend to get our Vault artifact, this step either builds vault from our
+      current branch or finds debian or redhat packages in Artifactory. If we're using a zip bundle
+      we'll get it from releases.hashicorp.com and skip this step entirely. Please note that if you
+      wish to use a deb or rpm artifact you'll have to configure your artifactory credentials!
+
+      Variables that are used in this step:
+
+        artifactory_host:
+          The artifactory host to search. It's very unlikely that you'll want to change this.
+          The default value is the HashiCorp Artifactory instance.
+        artifactory_repo:
+          The artifactory repo to search. It's very unlikely that you'll want to change this. The
+          default value is where CRT will publish packages.
+        artifactory_username:
+          The artifactory username associated with your token. You'll need this if you wish to use
+          deb or rpm artifacts! You can request access via Okta.
+        artifactory_token:
+          The artifactory token associated with your username. You'll need this if you wish to use
+          deb or rpm artifacts! You can create a token by logging into Artifactory via Okta.
+        vault_product_version:
+          When using the artifact:rpm or artifact:deb variants we'll use this variable to determine
+          which version of the Vault package we should fetch from Artifactory.
+        vault_artifact_path:
+          When using the artifact:local variant we'll utilize this variable to determine where
+          to create the vault.zip archive from the local branch. Default: /tmp/vault.zip.
+        vault_local_build_tags:
+          When using the artifact:local variant we'll use this variable to inject custom build
+          tags. If left unset we'll automatically use the build tags that correspond to the edition
+          variant.
+    EOF
+    module = matrix.artifact == "local" ? "build_local" : local.use_artifactory ? "build_artifactory_package" : "build_crt"
+
+    variables {
+      // Used for all modules
+      arch            = matrix.arch
+      edition         = matrix.edition
+      product_version = var.vault_product_version
+      // Required for the local build which will always result in using a local zip bundle
+      artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+      build_tags    = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
+      build_ui      = var.dev_build_local_ui
+      goarch        = matrix.arch
+      goos          = "linux"
+      // Required when using a RPM or Deb package
+      // Some of these variables don't have default values so we'll only set them if they are
+      // required.
+      artifactory_host     = local.use_artifactory ? var.artifactory_host : null
+      artifactory_repo     = local.use_artifactory ? var.artifactory_repo : null
+      artifactory_username = local.use_artifactory ? var.artifactory_username : null
+      artifactory_token    = local.use_artifactory ? var.artifactory_token : null
+      distro               = matrix.distro
+    }
+  }
+
+  step "ec2_info" {
+    description = "This discovers useful metadata in EC2, like the AWS AMI IDs that we use in later modules."
+    module      = module.ec2_info
+  }
+
+  step "create_vpc" {
+    description = <<-EOF
+      Create the VPC resources required for our scenario.
+
+      Variables that are used in this step:
+        tags:
+          If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+          and they'll be added to resources when possible.
+    EOF
+    module     = module.create_vpc
+    depends_on = [step.ec2_info]
+
+    variables {
+      common_tags = global.tags
+    }
+  }
+
+  step "read_backend_license" {
+    description = <<-EOF
+      Read the contents of the backend license if we're using a Consul backend and the edition is "ent".
+
+      Variables that are used in this step:
+        backend_edition:
+          The edition of Consul to use. If left unset it will default to CE.
+        backend_license_path:
+          If this variable is set we'll use it to determine the local path on disk that contains a
+          Consul Enterprise license. If it is not set we'll attempt to load it from
+          ./support/consul.hclic.
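+
+      For example, in an enos-local.vars file (illustrative value, matching the default path):
+        backend_license_path = "./support/consul.hclic"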
+    EOF
+    skip_step = matrix.backend == "raft" || var.backend_edition == "oss" || var.backend_edition == "ce"
+    module    = module.read_license
+
+    variables {
+      file_name = global.backend_license_path
+    }
+  }
+
+  step "read_vault_license" {
+    description = <<-EOF
+      Validates and reads into memory the contents of a local Vault Enterprise license if we're
+      using an Enterprise edition. This step does not run when using a community edition of Vault.
+
+      Variables that are used in this step:
+        vault_license_path:
+          If this variable is set we'll use it to determine the local path on disk that contains a
+          Vault Enterprise license. If it is not set we'll attempt to load it from
+          ./support/vault.hclic.
+    EOF
+    skip_step = matrix.edition == "ce"
+    module    = module.read_license
+
+    variables {
+      file_name = global.vault_license_path
+    }
+  }
+
+  step "create_seal_key" {
+    description = <<-EOF
+      Create the necessary seal keys depending on our configured seal.
+
+      Variables that are used in this step:
+        tags:
+          If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+          and they'll be added to resources when possible.
+    EOF
+    module     = "seal_${matrix.seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
+  step "create_vault_cluster_targets" {
+    description = <<-EOF
+      Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure
+      that the firewall is configured to allow the necessary Vault and Consul traffic and SSH
+      from the machine executing the Enos scenario.
+
+      Variables that are used in this step:
+        aws_ssh_keypair_name:
+          The AWS SSH Keypair name to use for target machines.
+        project_name:
+          The project name is used for additional tag metadata on resources.
+        tags:
+          If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+          and they'll be added to resources when possible.
+        vault_instance_count:
+          How many instances to provision for the Vault cluster. If left unset it will use a default
+          of three.
+    EOF
+    module     = module.target_ec2_instances
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+      instance_count  = try(var.vault_instance_count, 3)
+      cluster_tag_key = global.vault_tag_key
+      common_tags     = global.tags
+      seal_key_names  = step.create_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
+
+  step "create_vault_cluster_backend_targets" {
+    description = <<-EOF
+      Creates the necessary machine infrastructure targets for the backend Consul storage cluster.
+      We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH
+      from the machine executing the Enos scenario. When using integrated storage this step is a
+      no-op.
+
+      Variables that are used in this step:
+        tags:
+          If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable
+          and they'll be added to resources when possible.
+        project_name:
+          The project name is used for additional tag metadata on resources.
+        aws_ssh_keypair_name:
+          The AWS SSH Keypair name to use for target machines.
+    EOF
+
+    module     = matrix.backend == "consul" ?
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + description = <<-EOF + Install, configure, and start the backend Consul storage cluster. When we are using the raft + storage variant this step is a no-op. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will + also need a valid license configured for the read_backend_license step. Default: ce. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + the version of Consul to use for the cluster. + EOF + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = var.dev_consul_version + } + } + } + + step "create_vault_cluster" { + description = <<-EOF + Install, configure, start, initialize and unseal the Vault cluster on the specified target + instances. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of the consul client to install on each node for Consul storage. Note that + if you set it to 'ent' you will also need a valid license configured for the + read_backend_license step. If left unset we'll use an unlicensed CE version. + dev_config_mode: + You can set this variable to instruct enos on how to primarily configure Vault when starting + the service. Options are 'file' and 'env' for configuration file or environment variables. + If left unset we'll use the default value. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of Consul to install. If left unset we'll utilize the default value. + vault_artifact_path: + When using the artifact:local variant this variable is utilized to specify where on + the local disk the vault.zip file we've built is located. It can be left unset to use + the default value. + vault_enable_audit_devices: + Whether or not to enable various audit devices after unsealing the Vault cluster. By default + we'll configure syslog, socket, and file auditing. + vault_product_version: + When using the artifact:zip variant this variable is utilized to specify the version of + Vault to download from releases.hashicorp.com. 
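+
+      For example, to deploy a released zip bundle you might set (illustrative values):
+        export ENOS_VAR_vault_product_version="1.16.2"
+        export ENOS_VAR_vault_enable_audit_devices=true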
+ EOF + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.create_vault_cluster_targets, + step.build_or_find_vault_artifact, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory. + // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com + // We only set one or the other, never both. + artifactory_release = local.use_artifactory ? step.build_or_find_vault_artifact.release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = var.dev_config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = var.dev_consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = local.vault_install_dir + ip_version = local.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + release = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + // When using a Consul backend, these output values will be for the Consul backend. + // When using a Raft backend, these output values will be null. 
+ output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-dev-variables.hcl b/enos/enos-dev-variables.hcl new file mode 100644 index 000000000000..ed7ab2407690 --- /dev/null +++ b/enos/enos-dev-variables.hcl @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +variable "dev_build_local_ui" { + type = bool + description = "Whether or not to build the web UI when using the local builder var. If the assets have already been built we'll still include them" + default = false +} + +variable "dev_config_mode" { + type = string + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" // or "env" +} + +variable "dev_consul_version" { + type = string + description = "The version of Consul to use when using Consul for storage!" + default = "1.18.1" + // NOTE: You can also set the "backend_edition" if you want to use Consul Enterprise +} diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl new file mode 100644 index 000000000000..a17bc357f7e7 --- /dev/null +++ b/enos/enos-globals.hcl @@ -0,0 +1,181 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +globals { + archs = ["amd64", "arm64"] + artifact_sources = ["local", "crt", "artifactory"] + artifact_types = ["bundle", "package"] + backends = ["consul", "raft"] + backend_license_path = abspath(var.backend_license_path != null ? 
var.backend_license_path : joinpath(path.root, "./support/consul.hclic"))
+  backend_tag_key = "VaultStorage"
+  build_tags = {
+    "ce"               = ["ui"]
+    "ent"              = ["ui", "enterprise", "ent"]
+    "ent.fips1402"     = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"]
+    "ent.hsm"          = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
+    "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
+  }
+  config_modes    = ["env", "file"]
+  consul_editions = ["ce", "ent"]
+  consul_versions = ["1.14.11", "1.15.7", "1.16.3", "1.17.0"]
+  distros         = ["amzn", "leap", "rhel", "sles", "ubuntu"]
+  // Different distros may require different packages, or use different aliases for the same package
+  distro_packages = {
+    amzn = {
+      "2"    = ["nc"]
+      "2023" = ["nc"]
+    }
+    leap = {
+      "15.6" = ["netcat", "openssl"]
+    }
+    rhel = {
+      "8.10" = ["nc"]
+      "9.4"  = ["nc"]
+    }
+    sles = {
+      // When installing Vault RPM packages on a SLES AMI, the openssl package provided
+      // isn't named "openssl", which rpm doesn't know how to handle. Therefore we add the
+      // "correctly" named one in our package installation before installing Vault.
+      "15.6" = ["netcat-openbsd", "openssl"]
+    }
+    ubuntu = {
+      "20.04" = ["netcat"]
+      "22.04" = ["netcat"]
+      "24.04" = ["netcat-openbsd"]
+    }
+  }
+  distro_version = {
+    amzn   = var.distro_version_amzn
+    leap   = var.distro_version_leap
+    rhel   = var.distro_version_rhel
+    sles   = var.distro_version_sles
+    ubuntu = var.distro_version_ubuntu
+  }
+  editions            = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+  enterprise_editions = [for e in global.editions : e if e != "ce"]
+  ip_versions         = ["4", "6"]
+  package_manager = {
+    "amzn"   = "yum"
+    "leap"   = "zypper"
+    "rhel"   = "yum"
+    "sles"   = "zypper"
+    "ubuntu" = "apt"
+  }
+  packages = ["jq"]
+  // Ports that we'll open up for ingress in the security group for all target machines.
+ // Port protocol maps to the IpProtocol schema: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html + ports = { + ssh : { + description = "SSH" + port = 22 + protocol = "tcp" + }, + vault_agent : { + description = "Vault Agent" + port = 8100 + protocol = "tcp" + }, + vault_proxy : { + description = "Vault Proxy" + port = 8101 + protocol = "tcp" + }, + vault_listener : { + description = "Vault Addr listener" + port = 8200 + protocol = "tcp" + }, + vault_cluster : { + description = "Vault Cluster listener" + port = 8201 + protocol = "tcp" + }, + consul_rpc : { + description = "Consul internal communication" + port = 8300 + protocol = "tcp" + }, + consul_serf_lan_tcp : { + description = "Consul Serf LAN TCP" + port = 8301 + protocol = "tcp" + }, + consul_serf_lan_udp : { + description = "Consul Serf LAN UDP" + port = 8301 + protocol = "udp" + }, + consul_serf_wan_tcp : { + description = "Consul Serf WAN TCP" + port = 8302 + protocol = "tcp" + }, + consul_serf_wan_udp : { + description = "Consul Serf WAN UDP" + port = 8302 + protocol = "udp" + }, + consul_http : { + description = "Consul HTTP API" + port = 8500 + protocol = "tcp" + }, + consul_https : { + description = "Consul HTTPS API" + port = 8501 + protocol = "tcp" + }, + consul_grpc : { + description = "Consul gRPC API" + port = 8502 + protocol = "tcp" + }, + consul_grpc_tls : { + description = "Consul gRPC TLS API" + port = 8503 + protocol = "tcp" + }, + consul_dns_tcp : { + description = "Consul TCP DNS Server" + port = 8600 + protocol = "tcp" + }, + consul_dns_udp : { + description = "Consul UDP DNS Server" + port = 8600 + protocol = "udp" + }, + } + sample_attributes = { + aws_region = ["us-east-1", "us-west-2"] + distro_version_amzn = ["2023"] + distro_version_leap = ["15.6"] + distro_version_rhel = ["8.10", "9.4"] + distro_version_sles = ["15.6"] + distro_version_ubuntu = ["20.04", "24.04"] + } + seals = ["awskms", "pkcs11", "shamir"] + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + // This reads the VERSION file, strips any pre-release metadata, and selects only initial + // versions that are less than our current version. E.g. A VERSION file containing 1.17.0-beta2 + // would render: semverconstraint(v, "<1.17.0-0") + upgrade_version_stripped = join("-", [split("-", chomp(file("../version/VERSION")))[0], "0"]) + // NOTE: when backporting, make sure that our initial versions are less than that + // release branch's version. Also beware if adding versions below 1.11.x. Some scenarios + // that use this global might not work as expected with earlier versions. Below 1.8.x is + // not supported in any way. + upgrade_all_initial_versions_ce = ["1.8.12", "1.9.10", "1.10.11", "1.11.12", "1.12.11", "1.13.13", "1.14.10", "1.15.6", "1.16.3", "1.17.0"] + upgrade_all_initial_versions_ent = ["1.8.12", "1.9.10", "1.10.11", "1.11.12", "1.12.11", "1.13.13", "1.14.13", "1.15.10", "1.16.4", "1.17.0"] + upgrade_initial_versions_ce = [for v in global.upgrade_all_initial_versions_ce : v if semverconstraint(v, "<${global.upgrade_version_stripped}")] + upgrade_initial_versions_ent = [for v in global.upgrade_all_initial_versions_ent : v if semverconstraint(v, "<${global.upgrade_version_stripped}")] + vault_install_dir = { + bundle = "/opt/vault/bin" + package = "/usr/bin" + } + vault_license_path = abspath(var.vault_license_path != null ? 
var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "vault-cluster" +} diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index bfb69a2bdac0..9a11042d5f7b 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -1,65 +1,118 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 module "autopilot_upgrade_storageconfig" { source = "./modules/autopilot_upgrade_storageconfig" } -module "az_finder" { - source = "./modules/az_finder" -} - module "backend_consul" { - source = "app.terraform.io/hashicorp-qti/aws-consul/enos" - - project_name = var.project_name - environment = "ci" - common_tags = var.tags - ssh_aws_keypair = var.aws_ssh_keypair_name + source = "./modules/backend_consul" - # Set this to a real license vault if using an Enterprise edition of Consul - consul_license = var.backend_license_path == null ? "none" : file(abspath(var.backend_license_path)) + license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + log_level = var.backend_log_level } module "backend_raft" { source = "./modules/backend_raft" } +// Find any artifact in Artifactory. Requires the version, revision, and edition. +module "build_artifactory" { + source = "./modules/build_artifactory_artifact" +} + +// Find any released RPM or Deb in Artifactory. Requires the version, edition, distro, and distro +// version. +module "build_artifactory_package" { + source = "./modules/build_artifactory_package" +} + +// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle +// from releases.hashicorp.com. When using a local pre-built artifact it requires the local +// artifact path. When using a release zip it does nothing as you'll need to configure the +// vault_cluster module with release info instead. module "build_crt" { source = "./modules/build_crt" } +// Build the local branch and package it into a zip artifact. Requires the goarch, goos, build tags, +// and bundle path. 
module "build_local" { source = "./modules/build_local" } -module "build_artifactory" { - source = "./modules/vault_artifactory_artifact" -} - module "create_vpc" { - source = "app.terraform.io/hashicorp-qti/aws-infra/enos" + source = "./modules/create_vpc" + + environment = "ci" + common_tags = var.tags +} - project_name = var.project_name - environment = "ci" - common_tags = var.tags - ami_architectures = ["amd64", "arm64"] +module "ec2_info" { + source = "./modules/ec2_info" } module "get_local_metadata" { source = "./modules/get_local_metadata" } +module "generate_dr_operation_token" { + source = "./modules/generate_dr_operation_token" + + vault_install_dir = var.vault_install_dir +} + +module "generate_failover_secondary_token" { + source = "./modules/generate_failover_secondary_token" + + vault_install_dir = var.vault_install_dir +} + +module "generate_secondary_public_key" { + source = "./modules/generate_secondary_public_key" + + vault_install_dir = var.vault_install_dir +} + module "generate_secondary_token" { source = "./modules/generate_secondary_token" vault_install_dir = var.vault_install_dir } +module "install_packages" { + source = "./modules/install_packages" +} + module "read_license" { source = "./modules/read_license" } +module "replication_data" { + source = "./modules/replication_data" +} + +module "seal_awskms" { + source = "./modules/seal_awskms" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_shamir" { + source = "./modules/seal_shamir" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_pkcs11" { + source = "./modules/seal_pkcs11" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + module "shutdown_node" { source = "./modules/shutdown_node" } @@ -68,30 +121,80 @@ module "shutdown_multiple_nodes" { source = "./modules/shutdown_multiple_nodes" } +module "start_vault" { + source = "./modules/start_vault" + + install_dir = var.vault_install_dir + log_level = var.vault_log_level +} + +module "stop_vault" { + source = "./modules/stop_vault" +} + +// create target instances using ec2:CreateFleet +module "target_ec2_fleet" { + source = "./modules/target_ec2_fleet" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// create target instances using ec2:RunInstances +module "target_ec2_instances" { + source = "./modules/target_ec2_instances" + + common_tags = var.tags + ports_ingress = values(global.ports) + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// don't create instances but satisfy the module interface +module "target_ec2_shim" { + source = "./modules/target_ec2_shim" + + common_tags = var.tags + ports_ingress = values(global.ports) + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// create target instances using ec2:RequestSpotFleet +module "target_ec2_spot_fleet" { + source = "./modules/target_ec2_spot_fleet" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + module "vault_agent" { source = "./modules/vault_agent" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir + vault_agent_port = global.ports["vault_agent"]["port"] } +module "vault_proxy" { + source = "./modules/vault_proxy" + + vault_install_dir = var.vault_install_dir + vault_proxy_port = global.ports["vault_proxy"]["port"] +} 
module "vault_verify_agent_output" { source = "./modules/vault_verify_agent_output" - - vault_instance_count = var.vault_instance_count } module "vault_cluster" { - source = "app.terraform.io/hashicorp-qti/aws-vault/enos" - # source = "../../terraform-enos-aws-vault" + source = "./modules/vault_cluster" - common_tags = var.tags - environment = "ci" - instance_count = var.vault_instance_count - project_name = var.project_name - ssh_aws_keypair = var.aws_ssh_keypair_name - vault_install_dir = var.vault_install_dir + install_dir = var.vault_install_dir + consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + cluster_tag_key = global.vault_tag_key + log_level = var.vault_log_level } module "vault_get_cluster_ips" { @@ -100,18 +203,69 @@ module "vault_get_cluster_ips" { vault_install_dir = var.vault_install_dir } -module "vault_unseal_nodes" { - source = "./modules/vault_unseal_nodes" +module "vault_failover_demote_dr_primary" { + source = "./modules/vault_failover_demote_dr_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_failover_promote_dr_secondary" { + source = "./modules/vault_failover_promote_dr_secondary" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir +} + +module "vault_failover_update_dr_primary" { + source = "./modules/vault_failover_update_dr_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_raft_remove_peer" { + source = "./modules/vault_raft_remove_peer" + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_dr_primary" { + source = "./modules/vault_setup_dr_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_perf_primary" { + source = "./modules/vault_setup_perf_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_replication_secondary" { + source = "./modules/vault_setup_replication_secondary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_step_down" { + source = "./modules/vault_step_down" + + vault_install_dir = var.vault_install_dir +} + +module "vault_test_ui" { + source = "./modules/vault_test_ui" + + ui_run_tests = var.ui_run_tests +} + +module "vault_unseal_replication_followers" { + source = "./modules/vault_unseal_replication_followers" + + vault_install_dir = var.vault_install_dir } module "vault_upgrade" { source = "./modules/vault_upgrade" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } module "vault_verify_autopilot" { @@ -119,65 +273,61 @@ module "vault_verify_autopilot" { vault_autopilot_upgrade_status = "await-server-removal" vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count } -module "vault_verify_raft_auto_join_voter" { - source = "./modules/vault_verify_raft_auto_join_voter" +module "vault_verify_dr_replication" { + source = "./modules/vault_verify_dr_replication" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module "vault_verify_undo_logs" { - source = "./modules/vault_verify_undo_logs" +module "vault_verify_secrets_engines_create" { + source = "./modules/verify_secrets_engines/modules/create" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module 
"vault_verify_replication" { - source = "./modules/vault_verify_replication" +module "vault_verify_secrets_engines_read" { + source = "./modules/verify_secrets_engines/modules/read" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module "vault_verify_ui" { - source = "./modules/vault_verify_ui" +module "vault_verify_default_lcq" { + source = "./modules/vault_verify_default_lcq" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_autopilot_default_max_leases = "300000" } -module "vault_verify_unsealed" { - source = "./modules/vault_verify_unsealed" +module "vault_verify_performance_replication" { + source = "./modules/vault_verify_performance_replication" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module "vault_setup_perf_primary" { - source = "./modules/vault_setup_perf_primary" +module "vault_verify_raft_auto_join_voter" { + source = "./modules/vault_verify_raft_auto_join_voter" - vault_install_dir = var.vault_install_dir + vault_install_dir = var.vault_install_dir + vault_cluster_addr_port = global.ports["vault_cluster"]["port"] } -module "vault_setup_perf_secondary" { - source = "./modules/vault_setup_perf_secondary" +module "vault_verify_replication" { + source = "./modules/vault_verify_replication" +} - vault_install_dir = var.vault_install_dir +module "vault_verify_ui" { + source = "./modules/vault_verify_ui" } -module "vault_verify_read_data" { - source = "./modules/vault_verify_read_data" +module "vault_verify_undo_logs" { + source = "./modules/vault_verify_undo_logs" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module "vault_verify_performance_replication" { - source = "./modules/vault_verify_performance_replication" +module "vault_wait_for_cluster_unsealed" { + source = "./modules/vault_wait_for_cluster_unsealed" vault_install_dir = var.vault_install_dir } @@ -185,24 +335,32 @@ module "vault_verify_performance_replication" { module "vault_verify_version" { source = "./modules/vault_verify_version" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module "vault_verify_write_data" { - source = "./modules/vault_verify_write_data" +module "vault_wait_for_leader" { + source = "./modules/vault_wait_for_leader" - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count + vault_install_dir = var.vault_install_dir } -module "vault_raft_remove_peer" { - source = "./modules/vault_raft_remove_peer" +module "vault_wait_for_seal_rewrap" { + source = "./modules/vault_wait_for_seal_rewrap" + vault_install_dir = var.vault_install_dir } -module "vault_test_ui" { - source = "./modules/vault_test_ui" +module "verify_seal_type" { + source = "./modules/verify_seal_type" - ui_run_tests = var.ui_run_tests + vault_install_dir = var.vault_install_dir } + +module "vault_verify_billing_start_date" { + source = "./modules/vault_verify_billing_start_date" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count + vault_cluster_addr_port = global.ports["vault_cluster"]["port"] +} + diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl index f277c57e29f0..89c79bd1005d 100644 --- a/enos/enos-providers.hcl +++ 
b/enos/enos-providers.hcl
@@ -1,11 +1,12 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
 
 provider "aws" "default" {
   region = var.aws_region
 }
 
-provider "enos" "rhel" {
+// This default SSH user is used in RHEL, Amazon Linux, SUSE, and Leap distros
+provider "enos" "ec2_user" {
   transport = {
     ssh = {
       user = "ec2-user"
@@ -14,6 +15,7 @@ provider "enos" "rhel" {
   }
 }
 
+// This default SSH user is used in the Ubuntu distro
 provider "enos" "ubuntu" {
   transport = {
     ssh = {
diff --git a/enos/enos-qualities.hcl b/enos/enos-qualities.hcl
new file mode 100644
index 000000000000..698ef6a57bc0
--- /dev/null
+++ b/enos/enos-qualities.hcl
@@ -0,0 +1,629 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+quality "consul_api_agent_host_read" {
+  description = "The /v1/agent/host Consul API returns host info for each node in the cluster"
+}
+
+quality "consul_api_health_node_read" {
+  description = <<-EOF
+    The /v1/health/node/ Consul API returns health info for each node in the cluster
+  EOF
+}
+
+quality "consul_api_operator_raft_config_read" {
+  description = "The /v1/operator/raft/configuration Consul API returns raft info for the cluster"
+}
+
+quality "consul_autojoin_aws" {
+  description = "The Consul cluster auto-joins with AWS tag discovery"
+}
+
+quality "consul_cli_validate" {
+  description = "The 'consul validate' command validates the Consul configuration"
+}
+
+quality "consul_config_file" {
+  description = "Consul starts when configured with a configuration file"
+}
+
+quality "consul_ha_leader_election" {
+  description = "The Consul cluster elects a leader node on start up"
+}
+
+quality "consul_health_state_passing_read_nodes_minimum" {
+  description = <<-EOF
+    The Consul cluster meets the minimum number of healthy nodes according to the
+    /v1/health/state/passing Consul API
+  EOF
+}
+
+quality "consul_operator_raft_configuration_read_voters_minimum" {
+  description = <<-EOF
+    The Consul cluster meets the minimum number of raft voters according to the
+    /v1/operator/raft/configuration Consul API
+  EOF
+}
+
+quality "consul_service_start_client" {
+  description = "The Consul service starts in client mode"
+}
+
+quality "consul_service_start_server" {
+  description = "The Consul service starts in server mode"
+}
+
+quality "consul_service_systemd_notified" {
+  description = "The Consul binary notifies systemd when the service is active"
+}
+
+quality "consul_service_systemd_unit" {
+  description = "The 'consul.service' systemd unit starts the service"
+}
+
+quality "vault_agent_auto_auth_approle" {
+  description = <<-EOF
+    Vault running in Agent mode utilizes the approle auth method to do auto-auth via a role and
+    read secrets from a file source
+  EOF
+}
+
+quality "vault_agent_log_template" {
+  description = global.description.verify_agent_output
+}
+
+quality "vault_api_auth_userpass_login_write" {
+  description = "The v1/auth/userpass/login/ Vault API creates a token for a user"
+}
+
+quality "vault_api_auth_userpass_user_write" {
+  description = "The v1/auth/userpass/users/ Vault API associates a policy with a user"
+}
+
+quality "vault_api_identity_entity_read" {
+  description = <<-EOF
+    The v1/identity/entity Vault API returns an identity entity, has the correct metadata, and is
+    associated with the expected entity-alias, groups, and policies
+  EOF
+}
+
+quality "vault_api_identity_entity_write" {
+  description = "The v1/identity/entity Vault API creates an identity entity"
identity entity" +} + +quality "vault_api_identity_entity_alias_write" { + description = "The v1/identity/entity-alias Vault API creates an identity entity alias" +} + +quality "vault_api_identity_group_write" { + description = "The v1/identity/group/ Vault API creates an identity group" +} + +quality "vault_api_identity_oidc_config_read" { + description = <<-EOF + The v1/identity/oidc/config Vault API returns the built-in identity secrets engine configuration + EOF +} + +quality "vault_api_identity_oidc_config_write" { + description = "The v1/identity/oidc/config Vault API configures the built-in identity secrets engine" +} + +quality "vault_api_identity_oidc_introspect_write" { + description = "The v1/identity/oidc/introspect Vault API verifies the active state of a signed OIDC token" +} + +quality "vault_api_identity_oidc_key_read" { + description = <<-EOF + The v1/identity/oidc/key Vault API returns the OIDC signing key and verifies the key's algorithm, + rotation_period, and verification_ttl are correct + EOF +} + +quality "vault_api_identity_oidc_key_write" { + description = "The v1/identity/oidc/key Vault API creates an OIDC signing key" +} + +quality "vault_api_identity_oidc_key_rotate_write" { + description = "The v1/identity/oidc/key//rotate Vault API rotates an OIDC signing key and applies a new verification TTL" +} + +quality "vault_api_identity_oidc_role_read" { + description = <<-EOF + The v1/identity/oidc/role Vault API returns the OIDC role and verifies that the role's key and + ttl are correct. + EOF +} + +quality "vault_api_identity_oidc_role_write" { + description = "The v1/identity/oidc/role Vault API creates an OIDC role associated with a key and clients" +} + +quality "vault_api_identity_oidc_token_read" { + description = "The v1/identity/oidc/token Vault API creates an OIDC token associated with a role" +} + +quality "vault_api_sys_auth_userpass_user_write" { + description = "The v1/sys/auth/userpass/users/ Vault API associates a superuser policy with a user" +} + +quality "vault_api_sys_config_read" { + description = <<-EOF + The v1/sys/config/sanitized Vault API returns sanitized configuration which matches our given + configuration + EOF +} + +quality "vault_api_sys_ha_status_read" { + description = "The v1/sys/ha-status Vault API returns the HA status of the cluster" +} + +quality "vault_api_sys_health_read" { + description = <<-EOF + The v1/sys/health Vault API returns the correct codes depending on the replication and + 'seal-status' of the cluster + EOF +} + +quality "vault_api_sys_host_info_read" { + description = "The v1/sys/host-info Vault API returns the host info for each node in the cluster" +} + +quality "vault_api_sys_leader_read" { + description = "The v1/sys/leader Vault API returns the cluster leader info" +} + +quality "vault_api_sys_metrics_vault_core_replication_write_undo_logs_enabled" { + description = <<-EOF + The v1/sys/metrics Vault API returns metrics and verifies that + 'Gauges[vault.core.replication.write_undo_logs]' is enabled + EOF +} + +quality "vault_api_sys_policy_write" { + description = "The v1/sys/policy Vault API writes a policy" +} + +quality "vault_api_sys_quotas_lease_count_read_max_leases_default" { + description = <<-EOF + The v1/sys/quotas/lease-count/default Vault API returns the lease 'count' and verifies that + 'max_leases' is set to 300,000 + EOF +} + +quality "vault_api_sys_replication_dr_primary_enable_write" { + description = <<-EOF + The v1/sys/replication/dr/primary/enable Vault API enables DR replication + EOF
+} + +quality "vault_api_sys_replication_dr_primary_secondary_token_write" { + description = <<-EOF + The v1/sys/replication/dr/primary/secondary-token Vault API configures the DR replication + secondary token + EOF +} + +quality "vault_api_sys_replication_dr_secondary_enable_write" { + description = <<-EOF + The v1/sys/replication/dr/secondary/enable Vault API enables DR replication + EOF +} + +quality "vault_api_sys_replication_dr_read_connection_status_connected" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns status info and the + 'connection_status' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_dr_status_known_primary_cluster_addrs" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status and + 'known_primary_cluster_address' is the expected primary cluster leader + EOF +} + +quality "vault_api_sys_replication_dr_status_read" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status + EOF +} + +quality "vault_api_sys_replication_dr_status_read_cluster_address" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status + and the '{primaries,secondaries}[*].cluster_address' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_dr_status_read_state_not_idle" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status + and the state is not idle + EOF +} + +quality "vault_api_sys_replication_performance_primary_enable_write" { + description = <<-EOF + The v1/sys/replication/performance/primary/enable Vault API enables performance replication + EOF +} + +quality "vault_api_sys_replication_performance_primary_secondary_token_write" { + description = <<-EOF + The v1/sys/replication/performance/primary/secondary-token Vault API configures the replication + token + EOF +} + +quality "vault_api_sys_replication_performance_secondary_enable_write" { + description = <<-EOF + The v1/sys/replication/performance/secondary/enable Vault API enables performance replication + EOF +} + +quality "vault_api_sys_replication_performance_read_connection_status_connected" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns status info and the + 'connection_status' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_performance_status_known_primary_cluster_addrs" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the replication status and + 'known_primary_cluster_address' is the expected primary cluster leader + EOF +} + +quality "vault_api_sys_replication_performance_status_read" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the performance replication status + EOF +} + +quality "vault_api_sys_replication_performance_status_read_cluster_address" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the performance replication status + and the '{primaries,secondaries}[*].cluster_address' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_performance_status_read_state_not_idle" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the performance replication status + and the state is not idle + EOF +} + +quality "vault_api_sys_replication_status_read" { + description = <<-EOF + The v1/sys/replication/status Vault API returns 
the performance replication status of the + cluster + EOF +} + +quality "vault_api_sys_seal_status_api_read_matches_sys_health" { + description = <<-EOF + The v1/sys/seal-status Vault API and v1/sys/health Vault API agree on the health of each node + and the cluster + EOF +} + +quality "vault_api_sys_sealwrap_rewrap_read_entries_processed_eq_entries_succeeded_post_rewrap" { + description = global.description.verify_seal_rewrap_entries_processed_eq_entries_succeeded_post_rewrap +} + +quality "vault_api_sys_sealwrap_rewrap_read_entries_processed_gt_zero_post_rewrap" { + description = global.description.verify_seal_rewrap_entries_processed_is_gt_zero_post_rewrap +} + +quality "vault_api_sys_sealwrap_rewrap_read_is_running_false_post_rewrap" { + description = global.description.verify_seal_rewrap_is_running_false_post_rewrap +} + +quality "vault_api_sys_sealwrap_rewrap_read_no_entries_fail_during_rewrap" { + description = global.description.verify_seal_rewrap_no_entries_fail_during_rewrap +} + +quality "vault_api_sys_step_down_steps_down" { + description = <<-EOF + The v1/sys/step-down Vault API forces the cluster leader to step down and initiates a new leader + election + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_configuration_read" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/configuration Vault API returns the autopilot configuration of + the cluster + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_state_read" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state of the + cluster + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_upgrade_info_read_status_matches" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state and the + 'upgrade_info.status' matches our expected state + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_upgrade_info_target_version_read_matches_candidate" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state and the + 'upgrade_info.target_version' matches the candidate version + EOF +} + +quality "vault_api_sys_storage_raft_configuration_read" { + description = <<-EOF + The v1/sys/storage/raft/configuration Vault API returns the raft configuration of the cluster + EOF +} + +quality "vault_api_sys_storage_raft_remove_peer_write_removes_peer" { + description = <<-EOF + The v1/sys/storage/raft/remove-peer Vault API removes the desired node from the raft sub-system + EOF +} + +quality "vault_api_sys_version_history_keys" { + description = <<-EOF + The v1/sys/version-history Vault API returns the cluster version history and the 'keys' data + includes our target version + EOF +} + +quality "vault_api_sys_version_history_key_info" { + description = <<-EOF + The v1/sys/version-history Vault API returns the cluster version history and the + 'key_info["$expected_version"]' data is present for the expected version and the 'build_date' + matches the expected build_date.
+ EOF +} + +quality "vault_artifact_bundle" { + description = "The candidate binary packaged as a zip bundle is used for testing" +} + +quality "vault_artifact_deb" { + description = "The candidate binary packaged as a deb package is used for testing" +} + +quality "vault_artifact_rpm" { + description = "The candidate binary packaged as an rpm package is used for testing" +} + +quality "vault_audit_log" { + description = "The Vault audit sub-system is enabled with the log device and writes to a log" +} + +quality "vault_audit_socket" { + description = "The Vault audit sub-system is enabled with the socket device and writes to a socket" +} + +quality "vault_audit_syslog" { + description = "The Vault audit sub-system is enabled with the syslog device and writes to syslog" +} + +quality "vault_auto_unseals_after_autopilot_upgrade" { + description = "Vault auto-unseals after upgrading the cluster with autopilot" +} + +quality "vault_autojoins_new_nodes_into_initialized_cluster" { + description = "Vault successfully auto-joins new nodes into an existing cluster" +} + +quality "vault_autojoin_aws" { + description = "Vault auto-joins nodes using AWS tag discovery" +} + +quality "vault_autopilot_upgrade_leader_election" { + description = <<-EOF + Vault elects a new leader after upgrading the cluster with autopilot + EOF +} + +quality "vault_cli_audit_enable" { + description = "The 'vault audit enable' command enables audit devices" +} + +quality "vault_cli_auth_enable_approle" { + description = "The 'vault auth enable approle' command enables the approle auth method" +} + +quality "vault_cli_operator_members" { + description = "The 'vault operator members' command returns the expected list of members" +} + +quality "vault_cli_operator_raft_remove_peer" { + description = "The 'vault operator raft remove-peer' command removes the desired node" +} + +quality "vault_cli_operator_step_down" { + description = "The 'vault operator step-down' command forces the cluster leader to step down" +} + +quality "vault_cli_policy_write" { + description = "The 'vault policy write' command writes a policy" +} + +quality "vault_cli_status_exit_code" { + description = <<-EOF + The 'vault status' command exits with the correct code depending on expected seal status + EOF +} + +quality "vault_cluster_upgrade_in_place" { + description = <<-EOF + Vault starts with existing data and configuration in place and migrates the data + EOF +} + +quality "vault_config_env_variables" { + description = "Vault starts when configured primarily with environment variables" +} + +quality "vault_config_file" { + description = "Vault starts when configured primarily with a configuration file" +} + +quality "vault_config_log_level" { + description = "The 'log_level' config stanza modifies its log level" +} + +quality "vault_config_multiseal_is_toggleable" { + description = <<-EOF + The Vault Cluster can be configured with a single unseal method regardless of the + 'enable_multiseal' config value + EOF +} + +quality "vault_init" { + description = "Vault initializes the cluster with the given seal parameters" +} + +quality "vault_license_required_ent" { + description = "Vault Enterprise requires a license in order to start" +} + +quality "vault_listener_ipv4" { + description = "Vault operates on ipv4 TCP listeners" +} + +quality "vault_listener_ipv6" { + description = "Vault operates on ipv6 TCP listeners" +} + +quality "vault_mount_auth" { + description = "Vault mounts the auth engine" +} + +quality "vault_mount_identity" { + description = "Vault mounts the identity
engine" +} + +quality "vault_mount_kv" { + description = "Vault mounts the kv engine" +} + +quality "vault_multiseal_enable" { + description = <<-EOF + The Vault Cluster starts with 'enable_multiseal' and multiple auto-unseal methods. + EOF +} + +quality "vault_proxy_auto_auth_approle" { + description = <<-EOF + Vault Proxy utilizes the approle auth method to auto-auth via a role and read secrets from a file source. + EOF +} + +quality "vault_proxy_cli_access" { + description = <<-EOF + The Vault CLI accesses tokens through the Vault proxy without a VAULT_TOKEN available + EOF +} + +quality "vault_raft_voters" { + description = global.description.verify_raft_cluster_all_nodes_are_voters +} + +quality "vault_replication_ce_disabled" { + description = "Replication is not enabled for CE editions" +} + +quality "vault_replication_ent_dr_available" { + description = "DR replication is available on Enterprise" +} + +quality "vault_replication_ent_pr_available" { + description = "PR replication is available on Enterprise" +} + +quality "vault_seal_awskms" { + description = "Vault auto-unseals with the awskms seal" +} + +quality "vault_seal_shamir" { + description = <<-EOF + Vault manually unseals with the shamir seal when given the expected number of 'key_shares' + EOF +} + +quality "vault_seal_pkcs11" { + description = "Vault auto-unseals with the pkcs11 seal" +} + +quality "vault_secrets_kv_read" { + description = "Vault kv secrets engine data is readable" +} + +quality "vault_secrets_kv_write" { + description = "Vault kv secrets engine data is writable" +} + +quality "vault_service_restart" { + description = "Vault restarts with existing configuration" +} + +quality "vault_service_start" { + description = "Vault starts with the configuration" +} + +quality "vault_service_systemd_notified" { + description = "The Vault binary notifies systemd when the service is active" +} + +quality "vault_service_systemd_unit" { + description = "The 'vault.service' systemd unit starts the service" +} + +quality "vault_status_seal_type" { + description = global.description.verify_seal_type +} + +quality "vault_storage_backend_consul" { + description = "Vault operates using Consul for storage" +} + +quality "vault_storage_backend_raft" { + description = "Vault operates using integrated Raft storage" +} + +quality "vault_ui_assets" { + description = global.description.verify_ui +} + +quality "vault_ui_test" { + description = <<-EOF + The Vault Web UI test suite runs against a live Vault server with the embedded static assets + EOF +} + +quality "vault_unseal_ha_leader_election" { + description = "Vault performs a leader election after it is unsealed" +} + +quality "vault_version_build_date" { + description = "Vault's reported build date matches our expectations" +} + +quality "vault_version_edition" { + description = "Vault's reported edition matches our expectations" +} + +quality "vault_version_release" { + description = "Vault's reported release version matches our expectations" +} + +quality "vault_billing_start_date" { + description = "Vault's billing start date has adjusted to the latest billing year" +} diff --git a/enos/enos-samples-ce-build.hcl b/enos/enos-samples-ce-build.hcl new file mode 100644 index 000000000000..981c2486690b --- /dev/null +++ b/enos/enos-samples-ce-build.hcl @@ -0,0 +1,272 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +sample "build_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. 
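+ // (Note: an 'exclude' block removes every matrix combination that matches all of + // the attribute values it lists, so only upgrade vectors starting from the + // releases below are dropped from this subset.)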
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } +} diff --git a/enos/enos-samples-ce-release.hcl b/enos/enos-samples-ce-release.hcl new file mode 100644 index 000000000000..634b4faeed45 --- /dev/null +++ b/enos/enos-samples-ce-release.hcl @@ -0,0 +1,266 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +sample "release_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "rhel", "sles"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["amzn", "leap", "rhel", "sles"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + 
distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + distro = ["amzn", "ubuntu"] + edition = ["ce"] + } + } +} diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index 6049fec23573..0f362a085f08 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -1,160 +1,347 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 scenario "agent" { + description = <<-EOF + The agent scenario verifies Vault when running in Agent mode. The build can be a local branch, + any CRT built Vault artifact saved to the local machine, or any CRT built Vault artifact in the + stable channel in Artifactory. + + The scenario creates a new Vault Cluster using the candidate build and then runs the same Vault + build in Agent mode and verifies behavior against the Vault cluster. The scenario also performs + standard baseline verification that is not specific to the Agent mode deployment. + + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. To verify that your account has agreed, sign in to your AWS account through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF + + matrix { - arch = ["amd64", "arm64"] - artifact_source = ["local", "crt", "artifactory"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_edition = global.consul_editions + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles.
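+ // (The pkcs11 seal verification presumably relies on a softhsm-backed token being + // available on the target host, hence the distro exclusion below.)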
+ exclude { + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPV6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + backend = ["consul"] + } } terraform_cli = terraform_cli.default terraform = terraform.default providers = [ provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel + provider.enos.ec2_user, + provider.enos.ubuntu ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { - rhel = provider.enos.rhel + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } - install_artifactory_artifact = local.bundle_path == null - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) } - step "get_local_metadata" { - skip_step = matrix.artifact_source != "local" - module = module.get_local_metadata + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info } - step "build_vault" { - module = "build_${matrix.artifact_source}" + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc variables { - build_tags = try(var.vault_local_build_tags, local.build_tags[matrix.edition]) - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null - artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null - artifactory_username = matrix.artifact_source == "artifactory" ? 
var.artifactory_username : null - artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null - arch = matrix.artifact_source == "artifactory" ? matrix.arch : null - vault_product_version = var.vault_product_version - artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null - distro = matrix.artifact_source == "artifactory" ? matrix.distro : null - edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null - revision = var.vault_revision + common_tags = global.tags + ip_version = matrix.ip_version } } - step "find_azs" { - module = module.az_finder + // This step reads the contents of the backend license if we're using a Consul backend and + // an "ent" Consul edition. + step "read_backend_license" { + description = global.description.read_backend_license + skip_step = matrix.backend == "raft" || matrix.consul_edition == "ce" + module = module.read_license variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type - ] + file_name = global.backend_license_path } } - step "create_vpc" { - module = module.create_vpc + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license variables { - ami_architectures = distinct([matrix.arch, "amd64"]) - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = global.vault_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" - module = module.read_license + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } variables { - file_name = local.vault_license_path + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id } } step "create_backend_cluster" { - module = "backend_raft" - depends_on = [step.create_vpc] + description = global.description.create_backend_cluster + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] providers = { enos = provider.enos.ubuntu } + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_unit, + quality.consul_service_systemd_notified, + ] + variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } } } step "create_vault_cluster" { - module = module.vault_cluster + description = global.description.create_vault_cluster + module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_syslog, + quality.vault_audit_socket, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_log_level, + quality.vault_config_file, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_init, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_api_sys_replication_status_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_unit, + quality.vault_service_systemd_notified, + ] + variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = 
step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" - unseal_method = "shamir" - vault_local_artifact_path = local.bundle_path - vault_artifactory_release = local.install_artifactory_artifact ? step.build_vault.vault_artifactory_release : null - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } step "start_vault_agent" { - module = "vault_agent" + description = global.description.start_vault_agent + module = module.vault_agent depends_on = [ - step.create_backend_cluster, step.build_vault, step.create_vault_cluster, + step.wait_for_leader, + ] + + verifies = [ + quality.vault_agent_auto_auth_approle, + quality.vault_cli_auth_enable_approle, ] providers = { @@ -162,8 +349,11 @@ scenario "agent" { } variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token vault_agent_template_destination = "/tmp/agent_output.txt" 
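+ // The template below uses consul-template syntax: it reads the agent's own token + // via 'auth/token/lookup-self' and renders its 'orphan' and 'display_name' fields, + // which the agent output verification step expects to equal + // "orphan=true display_name=approle".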
vault_agent_template_contents = "{{ with secret \\\"auth/token/lookup-self\\\" }}orphan={{ .Data.orphan }} display_name={{ .Data.display_name }}{{ end }}" } @@ -174,56 +364,285 @@ scenario "agent" { depends_on = [ step.create_vault_cluster, step.start_vault_agent, + step.wait_for_leader, ] + verifies = quality.vault_agent_log_template + providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.create_vault_cluster.vault_instances + hosts = step.create_vault_cluster_targets.hosts vault_agent_template_destination = "/tmp/agent_output.txt" vault_agent_expected_output = "orphan=true display_name=approle" } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } } - output "vault_cluster_priv_ips" { + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_secrets_engines_create" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + description = global.description.verify_replication_status + module = module.vault_verify_replication + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_replication_ce_disabled, + quality.vault_replication_ent_dr_available, + quality.vault_replication_ent_pr_available, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + } + } + + step "verify_secrets_engines_read" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read + depends_on = [ + step.verify_secrets_engines_create, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + quality.vault_secrets_kv_read + ] + + variables { + create_state = step.verify_secrets_engines_create.state + hosts = step.get_vault_cluster_ips.follower_hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_ui" { + description = 
global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_create.state + } + + output "seal_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index 1f46c92c9502..036814cecb6f 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -1,19 +1,70 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 scenario "autopilot" { + description = <<-EOF + The autopilot scenario verifies autopilot upgrades from previously released versions of + Vault Enterprise to another candidate build. The build can be a local branch, any CRT built + Vault Enterprise artifact saved to the local machine, or any CRT built Vault Enterprise artifact + in the stable channel in Artifactory.
+ + The scenario creates a new Vault Cluster with a previously released version of Vault, mounts + various engines and creates data, then performs an Autopilot upgrade with any candidate build. + The scenario also performs standard baseline verification that is not specific to the autopilot + upgrade. + + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. To verify that your account has agreed, sign in to your AWS account through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF + + matrix { - arch = ["amd64", "arm64"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - distro = ["ubuntu", "rhel"] - edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - seal = ["awskms", "shamir"] - - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + config_mode = global.config_modes + distro = global.distros + edition = global.enterprise_editions + initial_version = global.upgrade_initial_versions_ent + ip_version = global.ip_versions + seal = global.seals + + // Autopilot wasn't available before 1.11.x + exclude { + initial_version = [for e in matrix.initial_version : e if semverconstraint(e, "<1.11.0-0")] + } + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // There are no published versions of these artifacts yet. We'll update this to exclude older + // versions after our initial publication of these editions for arm64. + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. exclude { - edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPV6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + backend = ["consul"] + } } @@ -21,48 +72,31 @@ scenario "autopilot" { terraform = terraform.default providers = [ provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel + provider.enos.ec2_user, + provider.enos.ubuntu ] locals { - build_tags = { - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ?
abspath(var.vault_artifact_path) : null enos_provider = { - rhel = provider.enos.rhel + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_autopilot_default_max_leases = semverconstraint(matrix.initial_version, ">=1.16.0-0") ? "300000" : "" } step "build_vault" { - module = "build_${matrix.artifact_source}" + description = global.description.build_vault + module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -74,95 +108,221 @@ scenario "autopilot" { artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? 
local.vault_instance_type : null
       revision             = var.vault_revision
     }
   }

-  step "find_azs" {
-    module = module.az_finder
+  step "ec2_info" {
+    description = global.description.ec2_info
+    module      = module.ec2_info
+  }
+
+  step "create_vpc" {
+    description = global.description.create_vpc
+    module      = module.create_vpc

     variables {
-      instance_type = [
-        local.vault_instance_type
-      ]
+      common_tags = global.tags
+      ip_version  = matrix.ip_version
     }
   }

-  step "create_vpc" {
-    module     = module.create_vpc
-    depends_on = [step.find_azs]
+  step "read_license" {
+    description = global.description.read_vault_license
+    module      = module.read_license

     variables {
-      ami_architectures  = [matrix.arch]
-      availability_zones = step.find_azs.availability_zones
-      common_tags        = local.tags
+      file_name = global.vault_license_path
     }
   }

-  step "read_license" {
-    module = module.read_license
+  step "create_seal_key" {
+    description = global.description.create_seal_key
+    module      = "seal_${matrix.seal}"
+    depends_on  = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }

     variables {
-      file_name = local.vault_license_path
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
+  step "create_vault_cluster_targets" {
+    description = global.description.create_vault_cluster_targets
+    module      = module.target_ec2_instances
+    depends_on  = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+      cluster_tag_key = global.vault_tag_key
+      common_tags     = global.tags
+      instance_count  = 3
+      seal_key_names  = step.create_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
+
+  step "create_vault_cluster_upgrade_targets" {
+    description = global.description.create_vault_cluster_targets
+    module      = module.target_ec2_instances
+    depends_on  = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id          = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+      common_tags     = global.tags
+      cluster_name    = step.create_vault_cluster_targets.cluster_name
+      cluster_tag_key = global.vault_tag_key
+      instance_count  = 3
+      seal_key_names  = step.create_seal_key.resource_names
+      vpc_id          = step.create_vpc.id
+    }
+  }
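Note that create_vault_cluster_upgrade_targets reuses cluster_name from the original target pool, so both instance sets carry the same cluster tag; that shared tag is what lets the candidate-build nodes discover and join the existing raft cluster via cloud auto-join (verified later as quality.vault_autojoin_aws). A hedged sketch of the kind of raft retry_join stanza this tag-based join corresponds to in Vault's own server configuration; the path, node_id, tag key, and tag value are illustrative stand-ins, not values read from the modules:

```
# Sketch only: AWS auto-join discovers peers by a shared instance tag, which
# is why old and new target pools must use the same cluster_name/tag pair.
storage "raft" {
  path    = "/opt/vault/data"
  node_id = "upgrade_node_1"

  retry_join {
    auto_join        = "provider=aws tag_key=VaultCluster tag_value=<cluster_name>"
    auto_join_scheme = "https"
  }
}
```

-  # This step creates a Vault cluster using a bundle downloaded from
-  # releases.hashicorp.com, with the version specified in var.vault_autopilot_initial_release
   step "create_vault_cluster" {
+    description = <<-EOF
+      ${global.description.create_vault_cluster} In this instance we'll create a Vault Cluster with
+      an older version and use Autopilot to upgrade it.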
+ EOF + module = module.vault_cluster depends_on = [ - step.create_vpc, step.build_vault, + step.create_vault_cluster_targets ] + providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_api_sys_replication_status_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" - storage_backend_addl_config = { - autopilot_upgrade_version = var.vault_autopilot_initial_release.version + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = local.vault_install_dir + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_license.license : null + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + release = { + edition = matrix.edition + version = matrix.initial_version } - unseal_method = matrix.seal - vault_install_dir = local.vault_install_dir - vault_release = var.vault_autopilot_initial_release - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = "raft" + storage_backend_addl_config = { + autopilot_upgrade_version = matrix.initial_version } } } step "get_local_metadata" { - skip_step = matrix.artifact_source != "local" - module = module.get_local_metadata + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } } step "get_vault_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [step.create_vault_cluster] + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.wait_for_leader, + ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances + hosts = step.create_vault_cluster.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } - step "verify_write_test_data" { - module = module.vault_verify_write_data + step "verify_secrets_engines_create" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create depends_on = [ step.create_vault_cluster, step.get_vault_cluster_ips @@ -172,32 +332,53 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + 
quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + variables { - leader_public_ip = step.get_vault_cluster_ips.leader_public_ip - leader_private_ip = step.get_vault_cluster_ips.leader_private_ip - vault_instances = step.create_vault_cluster.vault_instances + hosts = step.create_vault_cluster.hosts + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } step "create_autopilot_upgrade_storageconfig" { - module = module.autopilot_upgrade_storageconfig + description = <<-EOF + An arithmetic module used to dynamically create autopilot storage configuration depending on + whether or not we're testing a local build or a candidate build. + EOF + module = module.autopilot_upgrade_storageconfig variables { vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version } } - # This step creates a new Vault cluster using a bundle or package - # from the matrix.artifact_source, with the var.vault_product_version step "upgrade_vault_cluster_with_autopilot" { module = module.vault_cluster depends_on = [ step.build_vault, step.create_vault_cluster, step.create_autopilot_upgrade_storageconfig, - step.verify_write_test_data + step.verify_secrets_engines_create ] providers = { @@ -205,35 +386,36 @@ scenario "autopilot" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + force_unseal = matrix.seal == "shamir" + hosts = step.create_vault_cluster_upgrade_targets.hosts + initialize_cluster = false + install_dir = local.vault_install_dir + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_license.license : null + local_artifact_path = local.artifact_path + log_level = var.vault_log_level + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + root_token = step.create_vault_cluster.root_token + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + shamir_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null storage_backend = "raft" storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config - unseal_method = matrix.seal - vault_cluster_tag = step.create_vault_cluster.vault_cluster_tag - vault_init = false - vault_install_dir = local.vault_install_dir - vault_license = step.read_license.license - vault_local_artifact_path = local.bundle_path - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_node_prefix = "upgrade_node" - vault_root_token = step.create_vault_cluster.vault_root_token - vault_unseal_when_no_init = matrix.seal == "shamir" - vault_unseal_keys = matrix.seal == "shamir" ? 
step.create_vault_cluster.vault_unseal_keys_hex : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + storage_node_prefix = "upgrade_node" } } step "verify_vault_unsealed" { - module = module.vault_verify_unsealed + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed depends_on = [ step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, ] @@ -241,14 +423,23 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_auto_unseals_after_autopilot_upgrade, + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances } } step "verify_raft_auto_join_voter" { - module = module.vault_verify_raft_auto_join_voter + description = global.description.verify_raft_cluster_all_nodes_are_voters + module = module.vault_verify_raft_auto_join_voter depends_on = [ step.upgrade_vault_cluster_with_autopilot, step.verify_vault_unsealed @@ -258,16 +449,22 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = quality.vault_raft_voters + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + ip_version = matrix.ip_version + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.upgrade_vault_cluster_with_autopilot.vault_root_token + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token } } step "verify_autopilot_await_server_removal_state" { - module = module.vault_verify_autopilot + description = global.description.verify_autopilot_idle_state + module = module.vault_verify_autopilot depends_on = [ + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, step.verify_raft_auto_join_voter ] @@ -276,19 +473,27 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_storage_raft_autopilot_upgrade_info_read_status_matches, + quality.vault_api_sys_storage_raft_autopilot_upgrade_info_target_version_read_matches_candidate, + ] + variables { + hosts = step.create_vault_cluster.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version vault_autopilot_upgrade_status = "await-server-removal" vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token } } - step "get_updated_vault_cluster_ips" { - module = module.vault_get_cluster_ips + step "wait_for_leader_in_upgrade_targets" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader depends_on = [ step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, step.get_vault_cluster_ips, step.upgrade_vault_cluster_with_autopilot ] @@ -297,20 +502,57 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_autopilot_upgrade_leader_election, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - added_vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token - node_public_ip = step.get_vault_cluster_ips.leader_public_ip + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.get_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot, + step.wait_for_leader_in_upgrade_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } - step "verify_read_test_data" { - module = module.vault_verify_read_data + step "verify_secrets_engines_read" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read depends_on = [ step.get_updated_vault_cluster_ips, - step.verify_write_test_data, + step.verify_secrets_engines_create, step.upgrade_vault_cluster_with_autopilot, step.verify_raft_auto_join_voter ] @@ -319,16 +561,30 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + quality.vault_secrets_kv_read + ] + variables { - node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips - vault_instance_count = 6 - vault_install_dir = local.vault_install_dir + create_state = step.verify_secrets_engines_create.state + hosts = step.get_updated_vault_cluster_ips.follower_hosts + vault_addr = 
step.upgrade_vault_cluster_with_autopilot.api_addr_localhost + vault_install_dir = local.vault_install_dir } } step "raft_remove_peers" { - module = module.vault_raft_remove_peer + description = <<-EOF + Remove the nodes that were running the prior version of Vault from the raft cluster + EOF + module = module.vault_raft_remove_peer depends_on = [ + step.create_vault_cluster_upgrade_targets, step.get_updated_vault_cluster_ips, step.upgrade_vault_cluster_with_autopilot, step.verify_autopilot_await_server_removal_state @@ -338,17 +594,25 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_storage_raft_remove_peer_write_removes_peer, + quality.vault_cli_operator_raft_remove_peer, + ] + variables { - vault_install_dir = local.vault_install_dir - operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip - remove_vault_instances = step.create_vault_cluster.vault_instances - vault_instance_count = 3 - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster.hosts + ip_version = matrix.ip_version + operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost + vault_cluster_addr_port = step.upgrade_vault_cluster_with_autopilot.cluster_port + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } step "remove_old_nodes" { - module = module.shutdown_multiple_nodes + description = global.description.shutdown_nodes + module = module.shutdown_multiple_nodes depends_on = [ step.create_vault_cluster, step.raft_remove_peers @@ -359,14 +623,15 @@ scenario "autopilot" { } variables { - old_vault_instances = step.create_vault_cluster.vault_instances - vault_instance_count = 3 + old_hosts = step.create_vault_cluster.hosts } } step "verify_autopilot_idle_state" { - module = module.vault_verify_autopilot + description = global.description.verify_autopilot_idle_state + module = module.vault_verify_autopilot depends_on = [ + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, step.verify_raft_auto_join_voter, step.remove_old_nodes @@ -376,102 +641,259 @@ scenario "autopilot" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_storage_raft_autopilot_upgrade_info_read_status_matches, + quality.vault_api_sys_storage_raft_autopilot_upgrade_info_target_version_read_matches_candidate, + ] + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version vault_autopilot_upgrade_status = "idle" vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + description = global.description.verify_replication_status + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_replication_ce_disabled, + quality.vault_replication_ent_dr_available, + quality.vault_replication_ent_pr_available, + ] + + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost + vault_edition = matrix.edition + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_ui" { + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost } } - step "verify_undo_logs_status" { - skip_step = try(semverconstraint(var.vault_product_version, "<1.13.0-0"), true) - module = module.vault_verify_undo_logs + step "verify_undo_logs_enabled_on_primary" { + skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") + module = module.vault_verify_undo_logs + description = <<-EOF + Verifies that undo logs is correctly enabled on newly upgraded target hosts. For this it will + query the metrics system backend for the vault.core.replication.write_undo_logs gauge. 
+ EOF + depends_on = [ + step.create_vault_cluster_upgrade_targets, step.remove_old_nodes, step.upgrade_vault_cluster_with_autopilot, step.verify_autopilot_idle_state ] + verifies = quality.vault_api_sys_metrics_vault_core_replication_write_undo_logs_enabled + providers = { enos = local.enos_provider[matrix.distro] } variables { + expected_state = 1 # Enabled + hosts = step.get_updated_vault_cluster_ips.leader_hosts + timeout = 180 # Seconds + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + step "verify_undo_logs_disabled_on_followers" { + skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") + module = module.vault_verify_undo_logs + depends_on = [step.verify_undo_logs_enabled_on_primary] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + expected_state = 0 # Disabled + hosts = step.get_updated_vault_cluster_ips.follower_hosts + timeout = 10 # Seconds + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + // Verify that upgrading from a version <1.16.0 does not introduce Default LCQ + step "verify_default_lcq" { + description = <<-EOF + Verify that the default max lease count is 300,000 when the upgraded nodes are running + Vault >= 1.16.0. 
+ EOF + module = module.vault_verify_default_lcq + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.remove_old_nodes, + step.upgrade_vault_cluster_with_autopilot, + step.verify_autopilot_idle_state + ] + + verifies = quality.vault_api_sys_quotas_lease_count_read_max_leases_default + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.upgrade_vault_cluster_with_autopilot.hosts + vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + vault_autopilot_default_max_leases = local.vault_autopilot_default_max_leases + } } - output "vault_cluster_priv_ips" { - description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name } - output "vault_cluster_root_token" { - description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_recovery_key_shares" { + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { description = "The Vault cluster recovery key shares" - value = step.create_vault_cluster.vault_recovery_key_shares + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_recovery_keys_b64" { + output "recovery_keys_b64" { description = "The Vault cluster recovery keys b64" - value = step.create_vault_cluster.vault_recovery_keys_b64 + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_recovery_keys_hex" { + output "recovery_keys_hex" { description = "The Vault cluster recovery keys hex" - value = step.create_vault_cluster.vault_recovery_keys_hex + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_create.state } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "seal_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes } - output "upgraded_vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.upgrade_vault_cluster_with_autopilot.instance_ids + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = 
step.create_vault_cluster.unseal_keys_b64
   }

-  output "upgraded_vault_cluster_pub_ips" {
-    description = "The Vault cluster public IPs"
-    value       = step.upgrade_vault_cluster_with_autopilot.instance_public_ips
+  output "unseal_keys_hex" {
+    description = "The Vault cluster unseal keys hex"
+    value       = step.create_vault_cluster.unseal_keys_hex
+  }
+
+  output "upgrade_hosts" {
+    description = "The Vault cluster target hosts"
+    value       = step.upgrade_vault_cluster_with_autopilot.hosts
   }

-  output "upgraded_vault_cluster_priv_ips" {
+  output "upgrade_private_ips" {
     description = "The Vault cluster private IPs"
-    value       = step.upgrade_vault_cluster_with_autopilot.instance_private_ips
+    value       = step.upgrade_vault_cluster_with_autopilot.private_ips
+  }
+
+  output "upgrade_public_ips" {
+    description = "The Vault cluster public IPs"
+    value       = step.upgrade_vault_cluster_with_autopilot.public_ips
   }
 }
diff --git a/enos/enos-scenario-dr-replication.hcl b/enos/enos-scenario-dr-replication.hcl
new file mode 100644
index 000000000000..20b8836dcacb
--- /dev/null
+++ b/enos/enos-scenario-dr-replication.hcl
@@ -0,0 +1,1368 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+scenario "dr_replication" {
+  description = <<-EOF
+    The DR replication scenario configures disaster recovery replication between two Vault
+    clusters and verifies behavior and failure tolerance. The build can be a local branch, any
+    CRT built Vault Enterprise artifact saved to the local machine, or any CRT built Vault
+    Enterprise artifact in the stable channel in Artifactory.
+
+    The scenario deploys two Vault Enterprise clusters and establishes disaster recovery
+    replication between the primary cluster and the disaster recovery replication secondary
+    cluster. Next, we write test data to the primary cluster and verify that the data is
+    replicated to the secondary cluster. We then promote the secondary cluster to be the primary
+    cluster and demote the primary cluster to be the secondary cluster. We then update the
+    secondary cluster to connect to the new primary cluster. Finally, we verify that the
+    secondary cluster is unsealed after enabling replication and verify the disaster recovery
+    replication status between the primary and secondary clusters.
+
+    If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS
+    account. To verify that your account has agreed, sign in to your AWS account through Doormat,
+    and visit the following links to verify your subscription or subscribe:
+      arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9
+      amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849
+  EOF
+
+  matrix {
+    arch              = global.archs
+    artifact_source   = global.artifact_sources
+    artifact_type     = global.artifact_types
+    config_mode       = global.config_modes
+    consul_edition    = global.consul_editions
+    consul_version    = global.consul_versions
+    distro            = global.distros
+    edition           = global.enterprise_editions
+    ip_version        = global.ip_versions
+    primary_backend   = global.backends
+    primary_seal      = global.seals
+    secondary_backend = global.backends
+    secondary_seal    = global.seals
+
+    // Our local builder always creates bundles
+    exclude {
+      artifact_source = ["local"]
+      artifact_type   = ["package"]
+    }
+
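Because this matrix declares independent primary and secondary seal dimensions, each seal-specific constraint has to be excluded once per dimension, as the next pair of excludes does for PKCS#11. As a sketch with concrete values (the edition list is the one from the old autopilot matrix above), the strcontains comprehension evaluates like this:

```
// Sketch of how the hsm-only excludes evaluate: given editions
// ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"], the comprehension
// keeps the two editions without "hsm" in their name, so the pkcs11 seal is
// excluded for exactly those variants.
exclude {
  primary_seal = ["pkcs11"]
  edition      = [for e in matrix.edition : e if !strcontains(e, "hsm")] // => ["ent", "ent.fips1402"]
}
```

+    // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.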
+ exclude { + primary_seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + exclude { + secondary_seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. + exclude { + primary_seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + exclude { + secondary_seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPV6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + primary_backend = ["consul"] + } + + exclude { + ip_version = ["6"] + secondary_backend = ["consul"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir[matrix.artifact_type] + } + + step "build_vault" { + description = global.description.build_vault + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // an "ent" Consul edition. 
+ step "read_backend_license" { + description = global.description.read_backend_license + skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || matrix.consul_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + module = module.read_license + + variables { + file_name = abspath(joinpath(path.root, "./support/vault.hclic")) + } + } + + step "create_primary_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.primary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.secondary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + // Create all of our instances for both primary and secondary clusters + step "create_primary_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.primary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.secondary_backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_backend_cluster" { + description = global.description.create_backend_cluster + module = "backend_${matrix.primary_backend}" + depends_on = [ + step.create_primary_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + + variables { + cluster_name = step.create_primary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_primary_cluster_backend_targets.hosts + license = (matrix.primary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } + } + } + + step "create_primary_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.create_primary_backend_cluster, + step.build_vault, + step.create_primary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + config_mode = matrix.config_mode + consul_license = (matrix.primary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_primary_cluster_targets.cluster_name + consul_release = matrix.primary_backend == "consul" ? { + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_primary_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_primary_seal_key.attributes + seal_type = matrix.primary_seal + storage_backend = matrix.primary_backend + } + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "wait_for_primary_cluster_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_primary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "create_secondary_backend_cluster" { + description = global.description.create_backend_cluster + module = "backend_${matrix.secondary_backend}" + depends_on = [ + step.create_secondary_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_secondary_cluster_backend_targets.hosts + license = (matrix.secondary_backend == "consul" && matrix.consul_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } + } + } + + step "create_secondary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_secondary_backend_cluster, + step.build_vault, + step.create_secondary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_client, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + config_mode = matrix.config_mode + consul_license = (matrix.secondary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_secondary_cluster_targets.cluster_name + consul_release = matrix.secondary_backend == "consul" ? { + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_secondary_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_secondary_seal_key.attributes + seal_type = matrix.secondary_seal + storage_backend = matrix.secondary_backend + } + } + + step "wait_for_secondary_cluster_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_secondary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "verify_that_vault_primary_cluster_is_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [ + step.create_primary_cluster, + step.wait_for_primary_cluster_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_auto_unseals_after_autopilot_upgrade, + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_that_vault_secondary_cluster_is_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [ + step.create_secondary_cluster, + step.wait_for_secondary_cluster_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_auto_unseals_after_autopilot_upgrade, + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "get_primary_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.verify_that_vault_primary_cluster_is_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "get_secondary_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + 
quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.get_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_ui" { + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.get_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + vault_addr = step.create_primary_cluster.api_addr_localhost + hosts = step.create_primary_cluster_targets.hosts + } + } + + step "verify_secrets_engines_on_primary" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create + depends_on = [step.get_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + leader_host = step.get_primary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_primary_cluster.root_token + } + } + + # ================================================ + # DISASTER RECOVERY (DR) REPLICATION SETUP + # ================================================ + # - Wait for seal rewrap to complete on both clusters. + # - Configure DR primary replication on cluster A. + # - Generate secondary token on cluster A. + # - Configure DR secondary replication on cluster B. 
+  # - Confirm replication status on both clusters.
+
+  // Wait for our seals to finish any inflight rewraps before we enable DR replication, as we
+  // don't want to accidentally swap seal info on the secondary before it has finished.
+  step "configure_dr_replication_primary" {
+    description = <<-EOF
+      Create the superuser auth policy required for DR replication, assign it to our previously
+      created test user, and enable DR replication on the primary cluster.
+    EOF
+    module      = module.vault_setup_dr_primary
+    depends_on = [
+      step.get_primary_cluster_ips,
+      step.get_secondary_cluster_ips,
+      step.verify_secrets_engines_on_primary,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_sys_auth_userpass_user_write,
+      quality.vault_api_sys_policy_write,
+      quality.vault_api_sys_replication_dr_primary_enable_write,
+      quality.vault_cli_policy_write,
+    ]
+
+    variables {
+      ip_version          = matrix.ip_version
+      primary_leader_host = step.get_primary_cluster_ips.leader_host
+      vault_addr          = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir   = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token    = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "generate_secondary_token" {
+    description = <<-EOF
+      Generate a secondary token on the primary cluster and export the wrapping token so that
+      secondary clusters can use it when enabling DR replication.
+    EOF
+    module     = module.generate_secondary_token
+    depends_on = [step.configure_dr_replication_primary]
+
+    verifies = quality.vault_api_sys_replication_dr_primary_secondary_token_write
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ip_version          = matrix.ip_version
+      primary_leader_host = step.get_primary_cluster_ips.leader_host
+      replication_type    = "dr"
+      vault_addr          = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir   = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token    = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "wait_for_primary_seal_rewrap" {
+    module = module.vault_wait_for_seal_rewrap
+    depends_on = [
+      step.generate_secondary_token,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_primary_cluster.hosts
+      vault_addr        = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token  = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "wait_for_secondary_seal_rewrap" {
+    module = module.vault_wait_for_seal_rewrap
+    depends_on = [
+      step.wait_for_primary_seal_rewrap,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.create_secondary_cluster.hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token  = step.create_secondary_cluster.root_token
+    }
+  }
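The wrapping token that generate_secondary_token hands to the secondary comes from Vault's documented sys/replication/dr/primary/secondary-token endpoint. A rough sketch, under assumptions, of how a module like generate_secondary_token could drive that endpoint with the enos provider's remote-exec resource; the resource body, variable names, and transport wiring here are illustrative rather than the module's actual implementation:

```
# Sketch only: run the token write on the primary leader over SSH. The
# endpoint returns a response-wrapped secondary activation token, which
# -field=wrapping_token extracts.
resource "enos_remote_exec" "secondary_token" {
  environment = {
    VAULT_ADDR  = var.vault_addr
    VAULT_TOKEN = var.vault_root_token
  }

  inline = ["vault write -field=wrapping_token sys/replication/dr/primary/secondary-token id=secondary"]

  transport = {
    ssh = {
      host = var.primary_leader_host.public_ip
    }
  }
}
```

+
+  step "configure_dr_replication_secondary" {
+    description = <<-EOF
+      Enable DR replication on the secondary cluster with the wrapping token created by
+      the primary cluster.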
+    EOF
+    module      = module.vault_setup_replication_secondary
+    depends_on = [
+      step.wait_for_secondary_seal_rewrap,
+      step.generate_secondary_token,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_sys_leader_read,
+      quality.vault_api_sys_replication_dr_secondary_enable_write,
+      quality.vault_api_sys_replication_dr_status_read,
+    ]
+
+    variables {
+      ip_version            = matrix.ip_version
+      secondary_leader_host = step.get_secondary_cluster_ips.leader_host
+      replication_type      = "dr"
+      vault_addr            = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir     = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token      = step.create_secondary_cluster.root_token
+      wrapping_token        = step.generate_secondary_token.secondary_token
+    }
+  }
+
+  step "unseal_secondary_followers" {
+    description = <<-EOF
+      After replication is enabled the secondary cluster followers need to be unsealed.
+      Secondary unseal keys are passed differently depending on the primary and secondary seal
+      type combination. See the guide for more information:
+      https://developer.hashicorp.com/vault/docs/enterprise/replication#seals
+    EOF
+    module = module.vault_unseal_replication_followers
+    depends_on = [
+      step.configure_dr_replication_secondary
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.get_secondary_cluster_ips.follower_hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+      vault_seal_type   = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
+      vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
+    }
+  }
+
+  step "verify_secondary_cluster_is_unsealed_after_enabling_replication" {
+    description = global.description.verify_vault_unsealed
+    module      = module.vault_wait_for_cluster_unsealed
+    depends_on = [
+      step.unseal_secondary_followers
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_auto_unseals_after_autopilot_upgrade,
+      quality.vault_seal_awskms,
+      quality.vault_seal_pkcs11,
+      quality.vault_seal_shamir,
+    ]
+
+    variables {
+      hosts             = step.create_secondary_cluster_targets.hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
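The conditionals above encode the seal-combination rule from the linked guide: once DR replication is enabled, the secondary's barrier is protected by the primary's keys, so a shamir primary means the followers unseal with the primary's shamir unseal keys, while an auto-seal primary means the primary's recovery keys apply. Restated as a standalone sketch with illustrative local names:

```
// Condensed sketch of the unseal-key selection used by
// unseal_secondary_followers above; the local names are hypothetical.
locals {
  follower_unseal_method = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
  follower_unseal_keys   = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
}
```

+
+  step "verify_dr_replication" {
+    description = <<-EOF
+      Verify that the DR replication status meets our expectations after enabling replication
+      and ensuring that all secondary nodes are unsealed.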
+ EOF + module = module.vault_verify_dr_replication + depends_on = [ + step.configure_dr_replication_secondary, + step.unseal_secondary_followers, + step.verify_secondary_cluster_is_unsealed_after_enabling_replication, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_replication_dr_read_connection_status_connected, + quality.vault_api_sys_replication_dr_status_read, + quality.vault_api_sys_replication_dr_status_read_cluster_address, + quality.vault_api_sys_replication_dr_status_read_state_not_idle, + quality.vault_api_sys_replication_dr_status_known_primary_cluster_addrs, + ] + + variables { + ip_version = matrix.ip_version + primary_leader_host = step.get_primary_cluster_ips.leader_host + secondary_leader_host = step.get_secondary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + # ============================== + # FAILOVER SCENARIO STEPS + # ============================== + # 1. Generate a batch DR operation token. + # 2. Promote the current secondary cluster B to become the new primary cluster. + # 3. Demote cluster A to secondary status. + # 4. Test access to Vault data on the new primary cluster B. + # 5. Point demoted cluster A to the new primary cluster B (Multistep process). + # 6. Verify that the data is replicated to the new primary cluster B. + + step "generate_batch_dr_operation_token" { + description = <<-EOF + Generate a batch DR operation token that you can use to promote and demote clusters as needed. + EOF + module = module.generate_dr_operation_token + depends_on = [step.verify_dr_replication] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + ip_version = matrix.ip_version + primary_leader_host = step.get_primary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_root_token = step.create_primary_cluster.root_token + storage_backend = matrix.primary_backend + } + } + + step "vault_failover_promote_dr_secondary_cluster" { + description = <<-EOF + Promote the secondary cluster to be the primary cluster. This step will also + generate a new DR operation token for the secondary cluster to connect to the new + primary cluster. 
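+
+      As a sketch (assuming the module wraps the documented promotion endpoint), the
+      promotion is roughly:
+
+        vault write sys/replication/dr/secondary/promote dr_operation_token=<batch_token>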
+ EOF + module = module.vault_failover_promote_dr_secondary + depends_on = [step.generate_batch_dr_operation_token] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_addr = step.create_secondary_cluster.api_addr_localhost + ip_version = matrix.ip_version + secondary_leader_host = step.get_secondary_cluster_ips.leader_host + vault_root_token = step.create_secondary_cluster.root_token + dr_operation_token = step.generate_batch_dr_operation_token.dr_operation_token + } + } + + step "wait_for_promoted_cluster_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_failover_promote_dr_secondary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "vault_failover_demote_dr_primary_cluster" { + description = <<-EOF + Demote the primary cluster to be the secondary cluster. This step will also + generate a new DR operation token for the secondary cluster to connect to the new + primary cluster. + EOF + module = module.vault_failover_demote_dr_primary + depends_on = [step.wait_for_promoted_cluster_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ip_version = matrix.ip_version + primary_leader_host = step.get_primary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "wait_for_demoted_cluster_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_failover_demote_dr_primary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_new_primary_cluster_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [ + step.wait_for_demoted_cluster_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_auto_unseals_after_autopilot_upgrade, + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + timeout = 120 // seconds + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_replicated_data_during_failover" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read + depends_on = [ + 
step.wait_for_demoted_cluster_leader,
+      step.verify_new_primary_cluster_unsealed,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_auth_userpass_login_write,
+      quality.vault_api_identity_entity_read,
+      quality.vault_api_identity_oidc_config_read,
+      quality.vault_api_identity_oidc_key_read,
+      quality.vault_api_identity_oidc_role_read,
+      quality.vault_secrets_kv_read
+    ]
+
+    variables {
+      create_state      = step.verify_secrets_engines_on_primary.state
+      hosts             = step.get_secondary_cluster_ips.follower_hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  step "generate_demoted_secondary_public_key" {
+    description = <<-EOF
+      Generate a secondary public key on the demoted cluster. The new primary cluster will use
+      it to encrypt the new secondary activation token for the demoted cluster.
+    EOF
+    module = module.generate_secondary_public_key
+    depends_on = [
+      step.verify_replicated_data_during_failover,
+    ]
+
+    verifies = quality.vault_api_sys_replication_dr_primary_secondary_token_write
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ip_version          = matrix.ip_version
+      primary_leader_host = step.get_primary_cluster_ips.leader_host
+      vault_addr          = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir   = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token    = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "generate_demoted_secondary_token" {
+    description = <<-EOF
+      Generate a new secondary activation token on the new primary cluster using the demoted
+      cluster's public key, and export the wrapping token so that the demoted cluster can use
+      it to rejoin replication as a DR secondary.
+    EOF
+    module     = module.generate_failover_secondary_token
+    depends_on = [step.generate_demoted_secondary_public_key]
+
+    verifies = quality.vault_api_sys_replication_dr_primary_secondary_token_write
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ip_version           = matrix.ip_version
+      primary_leader_host  = step.get_secondary_cluster_ips.leader_host
+      secondary_public_key = step.generate_demoted_secondary_public_key.secondary_public_key
+      vault_addr           = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir    = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token     = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "vault_failover_update_dr_primary_cluster" {
+    description = <<-EOF
+      Update the demoted cluster to connect to the new primary cluster as a DR secondary.
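+
+      A sketch of the underlying call (assuming the module wraps the documented API):
+
+        vault write sys/replication/dr/secondary/update-primary \
+          dr_operation_token=<batch_token> token=<wrapping_token>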
+    EOF
+    module = module.vault_failover_update_dr_primary
+    depends_on = [
+      step.generate_demoted_secondary_token,
+      step.vault_failover_demote_dr_primary_cluster
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      vault_install_dir     = local.vault_install_dir
+      ip_version            = matrix.ip_version
+      secondary_leader_host = step.get_primary_cluster_ips.leader_host
+      vault_addr            = step.create_primary_cluster.api_addr_localhost
+      vault_root_token      = step.create_primary_cluster.root_token
+      dr_operation_token    = step.generate_batch_dr_operation_token.dr_operation_token
+      wrapping_token        = step.generate_demoted_secondary_token.secondary_token
+    }
+  }
+
+  step "verify_failover_dr_replication" {
+    description = <<-EOF
+      Verify that the DR replication status meets our expectations after the failover, once the
+      demoted cluster has been pointed at the new primary and all secondary nodes are unsealed.
+    EOF
+    module     = module.vault_verify_dr_replication
+    depends_on = [step.vault_failover_update_dr_primary_cluster]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_sys_replication_dr_read_connection_status_connected,
+      quality.vault_api_sys_replication_dr_status_read,
+      quality.vault_api_sys_replication_dr_status_read_cluster_address,
+      quality.vault_api_sys_replication_dr_status_read_state_not_idle,
+      quality.vault_api_sys_replication_dr_status_known_primary_cluster_addrs,
+    ]
+
+    variables {
+      ip_version            = matrix.ip_version
+      primary_leader_host   = step.get_secondary_cluster_ips.leader_host
+      secondary_leader_host = step.get_primary_cluster_ips.leader_host
+      vault_addr            = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir     = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  step "verify_failover_replicated_data" {
+    description = global.description.verify_secrets_engines_read
+    module      = module.vault_verify_secrets_engines_read
+    depends_on = [
+      step.verify_dr_replication,
+      step.get_secondary_cluster_ips,
+      step.verify_secrets_engines_on_primary,
+      step.verify_failover_dr_replication
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_auth_userpass_login_write,
+      quality.vault_api_identity_entity_read,
+      quality.vault_api_identity_oidc_config_read,
+      quality.vault_api_identity_oidc_key_read,
+      quality.vault_api_identity_oidc_role_read,
+      quality.vault_secrets_kv_read
+    ]
+
+    variables {
+      create_state      = step.verify_secrets_engines_on_primary.state
+      hosts             = step.get_secondary_cluster_ips.follower_hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  // Output the results of the scenario.
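+  // These are useful when debugging a failed run; assuming the enos CLI, they can be
+  // inspected after a run with `enos scenario output` (see the enos documentation for the
+  // exact invocation).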
+ + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_primary_cluster.audit_device_file_path + } + + output "primary_cluster_hosts" { + description = "The Vault primary cluster target hosts" + value = step.create_primary_cluster_targets.hosts + } + + output "primary_cluster_root_token" { + description = "The Vault primary cluster root token" + value = step.create_primary_cluster.root_token + } + + output "primary_cluster_unseal_keys_b64" { + description = "The Vault primary cluster unseal keys" + value = step.create_primary_cluster.unseal_keys_b64 + } + + output "primary_cluster_unseal_keys_hex" { + description = "The Vault primary cluster unseal keys hex" + value = step.create_primary_cluster.unseal_keys_hex + } + + output "primary_cluster_recovery_key_shares" { + description = "The Vault primary cluster recovery key shares" + value = step.create_primary_cluster.recovery_key_shares + } + + output "primary_cluster_recovery_keys_b64" { + description = "The Vault primary cluster recovery keys b64" + value = step.create_primary_cluster.recovery_keys_b64 + } + + output "primary_cluster_recovery_keys_hex" { + description = "The Vault primary cluster recovery keys hex" + value = step.create_primary_cluster.recovery_keys_hex + } + + output "secondary_cluster_hosts" { + description = "The Vault secondary cluster public IPs" + value = step.create_secondary_cluster_targets.hosts + } + + output "secondary_cluster_root_token" { + description = "The Vault secondary cluster root token" + value = step.create_secondary_cluster.root_token + } + + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_on_primary.state + } + + output "dr_secondary_token" { + description = "The dr secondary replication token" + value = step.generate_secondary_token.secondary_token + } + + output "batch_dr_operation_token" { + description = "The dr primary replication token" + value = step.generate_batch_dr_operation_token.dr_operation_token + } + + output "demoted_secondary_public_key" { + description = "The dr secondary public key" + value = step.generate_demoted_secondary_public_key.secondary_public_key + } + + output "demoted_secondary_token" { + description = "The dr secondary public key" + value = step.generate_demoted_secondary_token.secondary_token + } + + output "initial_primary_replication_status" { + description = "The Vault primary cluster dr replication status" + value = step.verify_dr_replication.primary_replication_status + } + + output "initial_known_primary_cluster_addresses" { + description = "The Vault primary cluster known primary cluster addresses" + value = step.verify_dr_replication.known_primary_cluster_addrs + } + + output "initial_secondary_dr_replication_status" { + description = "The Vault secondary cluster dr replication status" + value = step.verify_dr_replication.secondary_replication_status + } + + output "intial_primary_replication_data_secondaries" { + description = "The Vault primary cluster secondaries connection status" + value = step.verify_dr_replication.primary_replication_data_secondaries + } + + output "initial_secondary_replication_data_primaries" { + description = "The Vault secondary cluster primaries connection status" + value = step.verify_dr_replication.secondary_replication_data_primaries + } + + output "get_primary_cluster_ips_leader" { + description = "The Vault updated primary cluster dr replication status" + value = 
step.get_primary_cluster_ips.leader_public_ip + } + + output "get_secondary_cluster_ips_leader" { + description = "The Vault updated primary cluster dr replication status" + value = step.get_secondary_cluster_ips.leader_public_ip + } + + output "failover_primary_replication_status" { + description = "The Vault updated primary cluster dr replication status" + value = step.verify_failover_dr_replication.primary_replication_status + } + + output "failover_known_primary_cluster_addresses" { + description = "The Vault secondary cluster dr replication status" + value = step.verify_failover_dr_replication.known_primary_cluster_addrs + } + + output "failover_secondary_replication_status" { + description = "The Vault updated secondary cluster dr replication status" + value = step.verify_failover_dr_replication.secondary_replication_status + } + + output "failover_primary_replication_data_secondaries" { + description = "The Vault updated primary cluster secondaries connection status" + value = step.verify_failover_dr_replication.primary_replication_data_secondaries + } + + output "failover_secondary_replication_data_primaries" { + description = "The Vault updated secondary cluster primaries connection status" + value = step.verify_failover_dr_replication.secondary_replication_data_primaries + } +} diff --git a/enos/enos-scenario-pr-replication.hcl b/enos/enos-scenario-pr-replication.hcl new file mode 100644 index 000000000000..1f8aa8682f01 --- /dev/null +++ b/enos/enos-scenario-pr-replication.hcl @@ -0,0 +1,1330 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +scenario "pr_replication" { + description = <<-EOF + The PR replication scenario configures performance replication between two Vault clusters and + verifies behavior and failure tolerance. The build can be a local branch, any CRT built Vault + Enterprise artifact saved to the local machine, or any CRT built Vault Enterprise artifact in + the stable channel in Artifactory. + + The scenario deploys two Vault Enterprise clusters and establishes performance replication + between the primary cluster and the performance replication secondary cluster. Next, we simulate + a catastrophic failure event whereby the primary leader and a primary follower as ungracefully + removed from the cluster while running. This forces a leader election in the primary cluster + and requires the secondary cluster to recover replication and establish replication to the new + primary leader. The scenario also performs standard baseline verification that is not specific + to performance replication. + + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. 
To verify that your account has agreed, sign-in to your AWS through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + config_mode = global.config_modes + consul_edition = global.consul_editions + consul_version = global.consul_versions + distro = global.distros + edition = global.enterprise_editions + ip_version = global.ip_versions + primary_backend = global.backends + primary_seal = global.seals + secondary_backend = global.backends + secondary_seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + primary_seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + exclude { + secondary_seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. + exclude { + primary_seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + exclude { + secondary_seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPV6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + primary_backend = ["consul"] + } + + exclude { + ip_version = ["6"] + secondary_backend = ["consul"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir[matrix.artifact_type] + } + + step "build_vault" { + description = global.description.build_vault + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // an "ent" Consul edition. + step "read_backend_license" { + description = global.description.read_backend_license + skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || matrix.consul_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + module = module.read_license + + variables { + file_name = abspath(joinpath(path.root, "./support/vault.hclic")) + } + } + + step "create_primary_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.primary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.secondary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + // Create all of our instances for both primary and secondary clusters + step "create_primary_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.primary_backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_additional_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + step.create_primary_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_name = step.create_primary_cluster_targets.cluster_name + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.secondary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_backend_cluster" { + description = global.description.create_backend_cluster + module = "backend_${matrix.primary_backend}" + depends_on = [ + step.create_primary_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + + variables { + cluster_name = step.create_primary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_primary_cluster_backend_targets.hosts + license = (matrix.primary_backend == "consul" && matrix.consul_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } + } + } + + step "create_primary_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.create_primary_backend_cluster, + step.build_vault, + step.create_primary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + config_mode = matrix.config_mode + consul_license = (matrix.primary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_primary_cluster_targets.cluster_name + consul_release = matrix.primary_backend == "consul" ? { + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_primary_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_primary_seal_key.attributes + seal_type = matrix.primary_seal + storage_backend = matrix.primary_backend + } + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "wait_for_primary_cluster_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_primary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "create_secondary_backend_cluster" { + description = global.description.create_backend_cluster + module = "backend_${matrix.secondary_backend}" + depends_on = [ + step.create_secondary_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_secondary_cluster_backend_targets.hosts + license = (matrix.secondary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } + } + } + + step "create_secondary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_secondary_backend_cluster, + step.build_vault, + step.create_secondary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_client, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + config_mode = matrix.config_mode + consul_license = (matrix.secondary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_secondary_cluster_targets.cluster_name + consul_release = matrix.secondary_backend == "consul" ? 
{ + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_secondary_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_secondary_seal_key.attributes + seal_type = matrix.secondary_seal + storage_backend = matrix.secondary_backend + } + } + + step "wait_for_secondary_cluster_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_secondary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "verify_that_vault_primary_cluster_is_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [ + step.create_primary_cluster, + step.wait_for_primary_cluster_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_auto_unseals_after_autopilot_upgrade, + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_that_vault_secondary_cluster_is_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [ + step.create_secondary_cluster, + step.wait_for_secondary_cluster_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_auto_unseals_after_autopilot_upgrade, + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "get_primary_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.verify_that_vault_primary_cluster_is_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "get_secondary_cluster_ips" { + description = 
global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_secondary_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_secondary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.get_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_ui" { + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.get_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + vault_addr = step.create_primary_cluster.api_addr_localhost + hosts = step.create_primary_cluster_targets.hosts + } + } + + step "verify_secrets_engines_on_primary" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create + depends_on = [step.get_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + + variables { + hosts = step.create_primary_cluster_targets.hosts + leader_host = step.get_primary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_primary" { + description = 
<<-EOF
+      Create the superuser auth policy necessary for performance replication, assign it to
+      our previously created test user, and enable performance replication on the primary
+      cluster.
+    EOF
+    module = module.vault_setup_perf_primary
+    depends_on = [
+      // Wait for both clusters to be up and healthy...
+      step.get_primary_cluster_ips,
+      step.get_secondary_cluster_ips,
+      step.verify_secrets_engines_on_primary,
+      // Wait for base verification to complete...
+      step.verify_vault_version,
+      step.verify_ui,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_auth_userpass_login_write,
+      quality.vault_api_auth_userpass_user_write,
+      quality.vault_api_identity_entity_write,
+      quality.vault_api_identity_entity_alias_write,
+      quality.vault_api_identity_group_write,
+      quality.vault_api_identity_oidc_config_write,
+      quality.vault_api_identity_oidc_introspect_write,
+      quality.vault_api_identity_oidc_key_write,
+      quality.vault_api_identity_oidc_key_rotate_write,
+      quality.vault_api_identity_oidc_role_write,
+      quality.vault_api_identity_oidc_token_read,
+      quality.vault_api_sys_auth_userpass_user_write,
+      quality.vault_api_sys_policy_write,
+      quality.vault_mount_auth,
+      quality.vault_mount_kv,
+      quality.vault_secrets_kv_write,
+    ]
+
+    variables {
+      ip_version          = matrix.ip_version
+      primary_leader_host = step.get_primary_cluster_ips.leader_host
+      vault_addr          = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir   = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token    = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "generate_secondary_token" {
+    description = <<-EOF
+      Generate a secondary activation token on the performance replication primary and export
+      the resulting wrapping token so that the secondary cluster can use it to enable replication.
+    EOF
+    module     = module.generate_secondary_token
+    depends_on = [step.configure_performance_replication_primary]
+
+    verifies = quality.vault_api_sys_replication_performance_primary_secondary_token_write
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ip_version          = matrix.ip_version
+      primary_leader_host = step.get_primary_cluster_ips.leader_host
+      replication_type    = "performance"
+      vault_addr          = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir   = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token    = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "configure_performance_replication_secondary" {
+    description = <<-EOF
+      Enable performance replication on the secondary cluster with the wrapping token created by
+      the primary cluster.
+    EOF
+    module     = module.vault_setup_replication_secondary
+    depends_on = [step.generate_secondary_token]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = quality.vault_api_sys_replication_performance_secondary_enable_write
+
+    variables {
+      ip_version            = matrix.ip_version
+      secondary_leader_host = step.get_secondary_cluster_ips.leader_host
+      replication_type      = "performance"
+      vault_addr            = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir     = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token      = step.create_secondary_cluster.root_token
+      wrapping_token        = step.generate_secondary_token.secondary_token
+    }
+  }
+
+  step "unseal_secondary_followers" {
+    description = <<-EOF
+      After replication is enabled, the secondary cluster followers need to be unsealed.
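+      (When the primary uses a Shamir seal, for example, the secondary's followers are
+      unsealed with the primary cluster's unseal keys, roughly `vault operator unseal <key>`
+      per follower; auto-seal types unseal themselves.)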
+      Secondary unseal keys are passed differently depending on the primary and secondary seal
+      type combinations. See the guide for more information:
+      https://developer.hashicorp.com/vault/docs/enterprise/replication#seals
+    EOF
+    module = module.vault_unseal_replication_followers
+    depends_on = [
+      step.create_primary_cluster,
+      step.create_secondary_cluster,
+      step.get_secondary_cluster_ips,
+      step.configure_performance_replication_secondary
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      hosts             = step.get_secondary_cluster_ips.follower_hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+      vault_seal_type   = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
+      vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
+    }
+  }
+
+  step "verify_secondary_cluster_is_unsealed_after_enabling_replication" {
+    description = global.description.verify_vault_unsealed
+    module      = module.vault_wait_for_cluster_unsealed
+    depends_on = [
+      step.unseal_secondary_followers
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_auto_unseals_after_autopilot_upgrade,
+      quality.vault_seal_awskms,
+      quality.vault_seal_pkcs11,
+      quality.vault_seal_shamir,
+    ]
+
+    variables {
+      hosts             = step.create_secondary_cluster_targets.hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  step "verify_performance_replication" {
+    description = <<-EOF
+      Verify that the performance replication status meets our expectations after enabling
+      replication and ensuring that all secondary nodes are unsealed.
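+
+      As a rough sketch (an assumption about the module's checks, not a transcript), this is
+      equivalent to reading the status endpoint on both leaders and asserting on its fields:
+
+        vault read -format=json sys/replication/performance/status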
+    EOF
+    module     = module.vault_verify_performance_replication
+    depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_sys_replication_performance_read_connection_status_connected,
+      quality.vault_api_sys_replication_performance_status_read,
+      quality.vault_api_sys_replication_performance_status_read_cluster_address,
+      quality.vault_api_sys_replication_performance_status_read_state_not_idle,
+      quality.vault_api_sys_replication_performance_status_known_primary_cluster_addrs,
+    ]
+
+    variables {
+      ip_version            = matrix.ip_version
+      primary_leader_host   = step.get_primary_cluster_ips.leader_host
+      secondary_leader_host = step.get_secondary_cluster_ips.leader_host
+      vault_addr            = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir     = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  step "verify_replicated_data" {
+    description = global.description.verify_secrets_engines_read
+    module      = module.vault_verify_secrets_engines_read
+    depends_on = [
+      step.verify_performance_replication,
+      step.get_secondary_cluster_ips,
+      step.verify_secrets_engines_on_primary
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_api_auth_userpass_login_write,
+      quality.vault_api_identity_entity_read,
+      quality.vault_api_identity_oidc_config_read,
+      quality.vault_api_identity_oidc_key_read,
+      quality.vault_api_identity_oidc_role_read,
+      quality.vault_secrets_kv_read
+    ]
+
+    variables {
+      create_state      = step.verify_secrets_engines_on_primary.state
+      hosts             = step.get_secondary_cluster_ips.follower_hosts
+      vault_addr        = step.create_secondary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  step "add_additional_nodes_to_primary_cluster" {
+    description = <<-EOF
+      Add additional nodes to the Vault cluster to prepare for our catastrophic failure simulation.
+      These nodes will use a different storage_node_prefix.
+    EOF
+    module = module.vault_cluster
+    depends_on = [
+      step.create_vpc,
+      step.create_primary_backend_cluster,
+      step.create_primary_cluster,
+      step.verify_replicated_data,
+      step.create_primary_cluster_additional_targets
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      // unique to this invocation of the module
+      quality.vault_autojoins_new_nodes_into_initialized_cluster,
+      // verified in modules
+      quality.vault_artifact_bundle,
+      quality.vault_artifact_deb,
+      quality.vault_artifact_rpm,
+      quality.vault_audit_log,
+      quality.vault_audit_socket,
+      quality.vault_audit_syslog,
+      quality.vault_autojoin_aws,
+      quality.vault_config_env_variables,
+      quality.vault_config_file,
+      quality.vault_config_log_level,
+      quality.vault_init,
+      quality.vault_license_required_ent,
+      quality.vault_listener_ipv4,
+      quality.vault_listener_ipv6,
+      quality.vault_service_start,
+      quality.vault_storage_backend_consul,
+      quality.vault_storage_backend_raft,
+      // verified in enos_vault_start resource
+      quality.vault_api_sys_config_read,
+      quality.vault_api_sys_ha_status_read,
+      quality.vault_api_sys_health_read,
+      quality.vault_api_sys_host_info_read,
+      quality.vault_api_sys_replication_status_read,
+      quality.vault_api_sys_seal_status_api_read_matches_sys_health,
+      quality.vault_api_sys_storage_raft_configuration_read,
+      quality.vault_api_sys_storage_raft_autopilot_configuration_read,
+      quality.vault_api_sys_storage_raft_autopilot_state_read,
+      quality.vault_service_systemd_notified,
+      quality.vault_service_systemd_unit,
+      quality.vault_cli_status_exit_code,
+    ]
+
+    variables {
+      artifactory_release     = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
+      backend_cluster_name    = step.create_primary_cluster_backend_targets.cluster_name
+      backend_cluster_tag_key = global.backend_tag_key
+      cluster_name            = step.create_primary_cluster_targets.cluster_name
+      config_mode             = matrix.config_mode
+      consul_license          = (matrix.primary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
+      consul_release = matrix.primary_backend == "consul" ? {
+        edition = matrix.consul_edition
+        version = matrix.consul_version
+      } : null
+      enable_audit_devices = var.vault_enable_audit_devices
+      force_unseal         = matrix.primary_seal == "shamir"
+      hosts                = step.create_primary_cluster_additional_targets.hosts
+      // Don't init when adding nodes into the cluster.
+      initialize_cluster  = false
+      install_dir         = global.vault_install_dir[matrix.artifact_type]
+      ip_version          = matrix.ip_version
+      license             = matrix.edition != "ce" ? step.read_vault_license.license : null
+      local_artifact_path = local.artifact_path
+      manage_service      = local.manage_service
+      packages            = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]])
+      root_token          = step.create_primary_cluster.root_token
+      seal_attributes     = step.create_primary_seal_key.attributes
+      seal_type           = matrix.primary_seal
+      shamir_unseal_keys  = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : null
+      storage_backend     = matrix.primary_backend
+      storage_node_prefix = "newprimary_node"
+    }
+  }
+
+  step "verify_additional_primary_nodes_are_unsealed" {
+    description = global.description.verify_vault_unsealed
+    module      = module.vault_wait_for_cluster_unsealed
+    depends_on  = [step.add_additional_nodes_to_primary_cluster]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = [
+      quality.vault_auto_unseals_after_autopilot_upgrade,
+      quality.vault_seal_awskms,
+      quality.vault_seal_pkcs11,
+      quality.vault_seal_shamir,
+    ]
+
+    variables {
+      hosts             = step.create_primary_cluster_additional_targets.hosts
+      vault_addr        = step.add_additional_nodes_to_primary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+    }
+  }
+
+  step "verify_raft_auto_join_voter" {
+    description = global.description.verify_raft_cluster_all_nodes_are_voters
+    skip_step   = matrix.primary_backend != "raft"
+    module      = module.vault_verify_raft_auto_join_voter
+    depends_on = [
+      step.add_additional_nodes_to_primary_cluster,
+      step.create_primary_cluster,
+      step.verify_additional_primary_nodes_are_unsealed
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    verifies = quality.vault_raft_voters
+
+    variables {
+      hosts             = step.create_primary_cluster_additional_targets.hosts
+      ip_version        = matrix.ip_version
+      vault_addr        = step.create_primary_cluster.api_addr_localhost
+      vault_install_dir = global.vault_install_dir[matrix.artifact_type]
+      vault_root_token  = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "remove_primary_follower_1" {
+    description = <<-EOF
+      Simulate a catastrophic failure by forcefully removing a follower node from the Vault
+      cluster.
+    EOF
+    module = module.shutdown_node
+    depends_on = [
+      step.verify_additional_primary_nodes_are_unsealed,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      host = step.get_primary_cluster_ips.follower_hosts["0"]
+    }
+  }
+
+  step "remove_primary_leader" {
+    description = <<-EOF
+      Simulate a catastrophic failure by forcefully removing the primary leader node from the
+      Vault cluster without allowing a graceful shutdown.
+    EOF
+    module = module.shutdown_node
+    depends_on = [
+      step.get_primary_cluster_ips,
+      step.remove_primary_follower_1
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      host = step.get_primary_cluster_ips.leader_host
+    }
+  }
+
+  step "get_remaining_hosts_replication_data" {
+    description = <<-EOF
+      An arithmetic module that we use to determine various metadata about the leader and
+      follower nodes of the primary cluster so that we can correctly enable performance replication.
+
+      We execute this to determine information about our hosts after having forced the leader
+      and a follower from the cluster.
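+
+      Conceptually (a sketch of the arithmetic, not the module's literal source):
+
+        remaining_hosts = (initial_hosts + added_hosts)
+                          - removed_primary_host - removed_follower_host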
+ EOF + + module = module.replication_data + depends_on = [ + step.get_primary_cluster_ips, + step.remove_primary_leader, + ] + + variables { + added_hosts = step.create_primary_cluster_additional_targets.hosts + initial_hosts = step.create_primary_cluster_targets.hosts + removed_follower_host = step.get_primary_cluster_ips.follower_hosts["0"] + removed_primary_host = step.get_primary_cluster_ips.leader_host + } + } + + step "wait_for_leader_in_remaining_hosts" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [ + step.remove_primary_leader, + step.get_remaining_hosts_replication_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.get_remaining_hosts_replication_data.remaining_hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "get_updated_primary_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.get_remaining_hosts_replication_data.remaining_hosts + ip_version = matrix.ip_version + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_updated_performance_replication" { + description = <<-EOF + Verify that the performance replication status meets our expectations after the new leader + election. 
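+
+      In particular (assuming the module's checks mirror the quality verifications listed
+      below), known_primary_cluster_addrs on the secondary should now include the address of
+      the newly elected primary leader, e.g. via:
+
+        vault read -format=json sys/replication/performance/status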
+ EOF + + module = module.vault_verify_performance_replication + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + step.get_updated_primary_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_replication_performance_read_connection_status_connected, + quality.vault_api_sys_replication_performance_status_known_primary_cluster_addrs, + quality.vault_api_sys_replication_performance_status_read, + quality.vault_api_sys_replication_performance_status_read_state_not_idle, + quality.vault_api_sys_replication_performance_status_read_cluster_address, + ] + + variables { + ip_version = matrix.ip_version + primary_leader_host = step.get_updated_primary_cluster_ips.leader_host + secondary_leader_host = step.get_secondary_cluster_ips.leader_host + vault_addr = step.create_primary_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_primary_cluster.audit_device_file_path + } + + output "primary_cluster_hosts" { + description = "The Vault primary cluster target hosts" + value = step.create_primary_cluster_targets.hosts + } + + output "primary_cluster_additional_hosts" { + description = "The Vault added new node on primary cluster target hosts" + value = step.create_primary_cluster_additional_targets.hosts + } + + output "primary_cluster_root_token" { + description = "The Vault primary cluster root token" + value = step.create_primary_cluster.root_token + } + + output "primary_cluster_unseal_keys_b64" { + description = "The Vault primary cluster unseal keys" + value = step.create_primary_cluster.unseal_keys_b64 + } + + output "primary_cluster_unseal_keys_hex" { + description = "The Vault primary cluster unseal keys hex" + value = step.create_primary_cluster.unseal_keys_hex + } + + output "primary_cluster_recovery_key_shares" { + description = "The Vault primary cluster recovery key shares" + value = step.create_primary_cluster.recovery_key_shares + } + + output "primary_cluster_recovery_keys_b64" { + description = "The Vault primary cluster recovery keys b64" + value = step.create_primary_cluster.recovery_keys_b64 + } + + output "primary_cluster_recovery_keys_hex" { + description = "The Vault primary cluster recovery keys hex" + value = step.create_primary_cluster.recovery_keys_hex + } + + output "secondary_cluster_hosts" { + description = "The Vault secondary cluster public IPs" + value = step.create_secondary_cluster_targets.hosts + } + + output "secondary_cluster_root_token" { + description = "The Vault secondary cluster root token" + value = step.create_secondary_cluster.root_token + } + + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_on_primary.state + } + + output "performance_secondary_token" { + description = "The performance secondary replication token" + value = step.generate_secondary_token.secondary_token + } + + output "remaining_hosts" { + description = "The Vault cluster primary hosts after removing the leader and follower" + value = step.get_remaining_hosts_replication_data.remaining_hosts + } + + output "initial_primary_replication_status" { + description = "The Vault primary cluster performance replication status" + value = step.verify_performance_replication.primary_replication_status + } + + output 
"initial_known_primary_cluster_addresses" { + description = "The initial known Vault primary cluster addresses" + value = step.verify_performance_replication.known_primary_cluster_addrs + } + + output "initial_secondary_performance_replication_status" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.secondary_replication_status + } + + output "intial_primary_replication_data_secondaries" { + description = "The Vault primary cluster secondaries connection status" + value = step.verify_performance_replication.primary_replication_data_secondaries + } + + output "initial_secondary_replication_data_primaries" { + description = "The Vault secondary cluster primaries connection status" + value = step.verify_performance_replication.secondary_replication_data_primaries + } + + output "updated_primary_replication_status" { + description = "The Vault updated primary cluster performance replication status" + value = step.verify_updated_performance_replication.primary_replication_status + } + + output "updated_known_primary_cluster_addresses" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_updated_performance_replication.known_primary_cluster_addrs + } + + output "updated_secondary_replication_status" { + description = "The Vault updated secondary cluster performance replication status" + value = step.verify_updated_performance_replication.secondary_replication_status + } + + output "updated_primary_replication_data_secondaries" { + description = "The Vault updated primary cluster secondaries connection status" + value = step.verify_updated_performance_replication.primary_replication_data_secondaries + } + + output "updated_secondary_replication_data_primaries" { + description = "The Vault updated secondary cluster primaries connection status" + value = step.verify_updated_performance_replication.secondary_replication_data_primaries + } +} diff --git a/enos/enos-scenario-proxy.hcl b/enos/enos-scenario-proxy.hcl new file mode 100644 index 000000000000..6865d1b62146 --- /dev/null +++ b/enos/enos-scenario-proxy.hcl @@ -0,0 +1,625 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +scenario "proxy" { + description = <<-EOF + The agent scenario verifies Vault when running in Proxy mode. The build can be a local branch, + any CRT built Vault artifact saved to the local machine, or any CRT built Vault artifact in the + stable channel in Artifactory. + + The scenario creates a new Vault Cluster using the candidate build and then runs the same Vault + build in Proxy mode and verifies behavior against the Vault cluster. The scenario also performs + standard baseline verification that is not specific to the Proxy mode deployment. + + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. 
To verify that your account has agreed, sign in to your AWS account through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_edition = global.consul_editions + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. + exclude { + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPv6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + backend = ["consul"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + description = global.description.build_vault + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // an "ent" Consul edition.
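+ // Editorial aside: steps like the two license readers below are gated with `skip_step`,
+ // which Enos evaluates per matrix variant before the step runs. A minimal sketch of the
+ // same pattern, with a hypothetical step name and license path (the `skip_step` condition
+ // and module are taken from this scenario):
+ //
+ //   step "read_example_license" {
+ //     skip_step = matrix.edition == "ce" // CE variants need no license
+ //     module    = module.read_license
+ //
+ //     variables {
+ //       file_name = "/tmp/example.hclic" // hypothetical path
+ //     }
+ //   }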
+ step "read_backend_license" { + description = global.description.read_backend_license + skip_step = matrix.backend == "raft" || matrix.consul_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + description = global.description.create_backend_cluster + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + quality.vault_cli_status_exit_code, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "start_vault_proxy" { + module = module.vault_proxy + depends_on = [ + step.build_vault, + step.create_vault_cluster, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_cli_auth_enable_approle, + quality.vault_proxy_auto_auth_approle, + quality.vault_proxy_cli_access, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + 
vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_secrets_engines_create" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + description = global.description.verify_replication_status + module = module.vault_verify_replication + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_replication_ce_disabled, + quality.vault_replication_ent_dr_available, + quality.vault_replication_ent_pr_available, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + } + } + + step "verify_secrets_engines_read" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read + depends_on = [ + step.verify_secrets_engines_create, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + 
quality.vault_secrets_kv_read + ] + + variables { + create_state = step.verify_secrets_engines_create.state + hosts = step.get_vault_cluster_ips.follower_hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_ui" { + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_create.state + } + + output "seal_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl deleted file mode 100644 index 610ecf3cf7db..000000000000 --- a/enos/enos-scenario-replication.hcl +++ /dev/null @@ -1,687 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -// The replication scenario configures performance replication between two Vault clusters and verifies -// known_primary_cluster_addrs are updated on secondary Vault cluster with the IP addresses of replaced -// nodes on primary Vault cluster -scenario "replication" { - matrix { - arch = ["amd64", "arm64"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] - distro = ["ubuntu", "rhel"] - edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - primary_backend = ["raft", "consul"] - primary_seal = ["awskms", "shamir"] - secondary_backend = ["raft", "consul"] - secondary_seal = ["awskms", "shamir"] - - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions - exclude { - edition = ["ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] - } - } - - terraform_cli = terraform_cli.default - terraform = terraform.default - providers = [ - provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel - ] - - locals { - build_tags = { - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] - enos_provider = { - rhel = provider.enos.rhel - ubuntu = provider.enos.ubuntu - } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] - } - - step "build_vault" { - module = "build_${matrix.artifact_source}" - - variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null - artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null - artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null - artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null - arch = matrix.artifact_source == "artifactory" ? matrix.arch : null - product_version = var.vault_product_version - artifact_type = matrix.artifact_type - distro = matrix.artifact_source == "artifactory" ? matrix.distro : null - edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? 
local.vault_instance_type : null - revision = var.vault_revision - } - } - - step "find_azs" { - module = module.az_finder - variables { - instance_type = [ - local.vault_instance_type - ] - } - } - - step "create_vpc" { - module = module.create_vpc - depends_on = [step.find_azs] - - variables { - ami_architectures = [matrix.arch] - availability_zones = step.find_azs.availability_zones - common_tags = local.tags - } - } - - step "read_license" { - module = module.read_license - - variables { - file_name = abspath(joinpath(path.root, "./support/vault.hclic")) - } - } - - step "create_primary_backend_cluster" { - module = "backend_${matrix.primary_backend}" - depends_on = [step.create_vpc] - - providers = { - enos = provider.enos.ubuntu - } - - variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { - edition = var.backend_edition - version = matrix.consul_version - } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id - } - } - - step "create_vault_primary_cluster" { - module = module.vault_cluster - depends_on = [ - step.create_primary_backend_cluster, - step.build_vault, - ] - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag - consul_release = matrix.primary_backend == "consul" ? { - edition = var.backend_edition - version = matrix.consul_version - } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.primary_backend - unseal_method = matrix.primary_seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } - } - } - - step "create_secondary_backend_cluster" { - module = "backend_${matrix.secondary_backend}" - depends_on = [step.create_vpc] - - providers = { - enos = provider.enos.ubuntu - } - - variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { - edition = var.backend_edition - version = matrix.consul_version - } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id - } - } - - step "create_vault_secondary_cluster" { - module = module.vault_cluster - depends_on = [ - step.create_secondary_backend_cluster, - step.build_vault, - ] - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_secondary_backend_cluster.consul_cluster_tag - consul_release = matrix.secondary_backend == "consul" ? 
{ - edition = var.backend_edition - version = matrix.consul_version - } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.secondary_backend - unseal_method = matrix.secondary_seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } - } - } - - step "verify_vault_primary_unsealed" { - module = module.vault_verify_unsealed - depends_on = [ - step.create_vault_primary_cluster - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_primary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - } - } - - step "verify_vault_secondary_unsealed" { - module = module.vault_verify_unsealed - depends_on = [ - step.create_vault_secondary_cluster - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_secondary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - } - } - - step "get_primary_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [step.verify_vault_primary_unsealed] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_primary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token - } - } - - step "get_secondary_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [step.verify_vault_secondary_unsealed] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_secondary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_secondary_cluster.vault_root_token - } - } - - step "verify_vault_primary_write_data" { - module = module.vault_verify_write_data - depends_on = [step.get_primary_cluster_ips] - - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - leader_public_ip = step.get_primary_cluster_ips.leader_public_ip - leader_private_ip = step.get_primary_cluster_ips.leader_private_ip - vault_instances = step.create_vault_primary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token - } - } - - step "configure_performance_replication_primary" { - module = module.vault_setup_perf_primary - depends_on = [ - step.get_primary_cluster_ips, - step.verify_vault_primary_write_data - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip - primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token - } - } - - step "generate_secondary_token" { - module = module.generate_secondary_token - depends_on = [step.configure_performance_replication_primary] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - primary_leader_public_ip = 
step.get_primary_cluster_ips.leader_public_ip - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token - } - } - - step "configure_performance_replication_secondary" { - module = module.vault_setup_perf_secondary - depends_on = [step.generate_secondary_token] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip - secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_secondary_cluster.vault_root_token - wrapping_token = step.generate_secondary_token.secondary_token - } - } - - // After replication is enabled, the secondary cluster followers need to be unsealed - // Secondary unseal keys are passed using the guide https://developer.hashicorp.com/vault/docs/enterprise/replication#seals - step "unseal_secondary_followers" { - module = module.vault_unseal_nodes - depends_on = [ - step.create_vault_primary_cluster, - step.create_vault_secondary_cluster, - step.get_secondary_cluster_ips, - step.configure_performance_replication_secondary - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips - vault_install_dir = local.vault_install_dir - vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_vault_primary_cluster.vault_unseal_keys_hex : step.create_vault_primary_cluster.vault_recovery_keys_hex - vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal - } - } - - step "verify_vault_secondary_unsealed_after_replication" { - module = module.vault_verify_unsealed - depends_on = [ - step.unseal_secondary_followers - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_secondary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - } - } - - step "verify_performance_replication" { - module = module.vault_verify_performance_replication - depends_on = [step.verify_vault_secondary_unsealed_after_replication] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip - primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip - secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip - secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip - vault_install_dir = local.vault_install_dir - } - } - - step "verify_replicated_data" { - module = module.vault_verify_read_data - depends_on = [ - step.verify_performance_replication, - step.get_secondary_cluster_ips, - step.verify_vault_primary_write_data - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - node_public_ips = step.get_secondary_cluster_ips.follower_public_ips - vault_install_dir = local.vault_install_dir - } - } - - step "add_primary_cluster_nodes" { - module = module.vault_cluster - depends_on = [ - step.create_vpc, - step.create_primary_backend_cluster, - step.create_vault_primary_cluster, - step.verify_replicated_data - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = 
step.create_primary_backend_cluster.consul_cluster_tag - consul_release = matrix.primary_backend == "consul" ? { - edition = var.backend_edition - version = matrix.consul_version - } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.primary_backend - unseal_method = matrix.primary_seal - vault_cluster_tag = step.create_vault_primary_cluster.vault_cluster_tag - vault_init = false - vault_license = step.read_license.license - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_node_prefix = "newprimary_node" - vault_root_token = step.create_vault_primary_cluster.vault_root_token - vault_unseal_when_no_init = matrix.primary_seal == "shamir" - vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_vault_primary_cluster.vault_unseal_keys_hex : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } - } - } - - step "verify_add_node_unsealed" { - module = module.vault_verify_unsealed - depends_on = [step.add_primary_cluster_nodes] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.add_primary_cluster_nodes.vault_instances - vault_install_dir = local.vault_install_dir - } - } - - step "verify_raft_auto_join_voter" { - skip_step = matrix.primary_backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [ - step.add_primary_cluster_nodes, - step.create_vault_primary_cluster, - step.verify_add_node_unsealed - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.add_primary_cluster_nodes.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token - } - } - - step "remove_primary_follower_1" { - module = module.shutdown_node - depends_on = [ - step.get_primary_cluster_ips, - step.verify_add_node_unsealed - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - node_public_ip = step.get_primary_cluster_ips.follower_public_ip_1 - } - } - - step "remove_primary_leader" { - module = module.shutdown_node - depends_on = [ - step.get_primary_cluster_ips, - step.remove_primary_follower_1 - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - node_public_ip = step.get_primary_cluster_ips.leader_public_ip - } - } - - step "get_updated_primary_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [ - step.add_primary_cluster_nodes, - step.remove_primary_follower_1, - step.remove_primary_leader - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_primary_cluster.vault_instances - vault_install_dir = local.vault_install_dir - added_vault_instances = step.add_primary_cluster_nodes.vault_instances - vault_root_token = step.create_vault_primary_cluster.vault_root_token - node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2 - } - } - - step "verify_updated_performance_replication" { - module = module.vault_verify_performance_replication - depends_on = [step.get_updated_primary_cluster_ips] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - 
primary_leader_public_ip = step.get_updated_primary_cluster_ips.leader_public_ip - primary_leader_private_ip = step.get_updated_primary_cluster_ips.leader_private_ip - secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip - secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip - vault_install_dir = local.vault_install_dir - } - } - - output "vault_primary_cluster_pub_ips" { - description = "The Vault primary cluster public IPs" - value = step.create_vault_primary_cluster.instance_public_ips - } - - output "vault_primary_cluster_priv_ips" { - description = "The Vault primary cluster private IPs" - value = step.create_vault_primary_cluster.instance_private_ips - } - - output "vault_primary_newnode_pub_ip" { - description = "The Vault added new node on primary cluster public IP" - value = step.add_primary_cluster_nodes.instance_public_ips - } - - output "vault_primary_newnode_priv_ip" { - description = "The Vault added new node on primary cluster private IP" - value = step.add_primary_cluster_nodes.instance_private_ips - } - - output "vault_primary_cluster_root_token" { - description = "The Vault primary cluster root token" - value = step.create_vault_primary_cluster.vault_root_token - } - - output "vault_primary_cluster_unseal_keys_b64" { - description = "The Vault primary cluster unseal keys" - value = step.create_vault_primary_cluster.vault_unseal_keys_b64 - } - - output "vault_primary_cluster_unseal_keys_hex" { - description = "The Vault primary cluster unseal keys hex" - value = step.create_vault_primary_cluster.vault_unseal_keys_hex - } - - output "vault_primary_cluster_recovery_key_shares" { - description = "The Vault primary cluster recovery key shares" - value = step.create_vault_primary_cluster.vault_recovery_key_shares - } - - output "vault_primary_cluster_recovery_keys_b64" { - description = "The Vault primary cluster recovery keys b64" - value = step.create_vault_primary_cluster.vault_recovery_keys_b64 - } - - output "vault_primary_cluster_recovery_keys_hex" { - description = "The Vault primary cluster recovery keys hex" - value = step.create_vault_primary_cluster.vault_recovery_keys_hex - } - - output "vault_secondary_cluster_pub_ips" { - description = "The Vault secondary cluster public IPs" - value = step.create_vault_secondary_cluster.instance_public_ips - } - - output "vault_secondary_cluster_priv_ips" { - description = "The Vault secondary cluster private IPs" - value = step.create_vault_secondary_cluster.instance_private_ips - } - - output "vault_primary_performance_replication_status" { - description = "The Vault primary cluster performance replication status" - value = step.verify_performance_replication.primary_replication_status - } - - output "vault_replication_known_primary_cluster_addrs" { - description = "The Vault secondary cluster performance replication status" - value = step.verify_performance_replication.known_primary_cluster_addrs - } - - output "vault_secondary_performance_replication_status" { - description = "The Vault secondary cluster performance replication status" - value = step.verify_performance_replication.secondary_replication_status - } - - output "vault_primary_updated_performance_replication_status" { - description = "The Vault updated primary cluster performance replication status" - value = step.verify_updated_performance_replication.primary_replication_status - } - - output "vault_updated_replication_known_primary_cluster_addrs" { - description = "The Vault secondary cluster performance 
replication status" - value = step.verify_updated_performance_replication.known_primary_cluster_addrs - } - - output "verify_secondary_updated_performance_replication_status" { - description = "The Vault updated secondary cluster performance replication status" - value = step.verify_updated_performance_replication.secondary_replication_status - } - - output "primary_replication_data_secondaries" { - description = "The Vault primary cluster secondaries connection status" - value = step.verify_performance_replication.primary_replication_data_secondaries - } - - output "secondary_replication_data_primaries" { - description = "The Vault secondary cluster primaries connection status" - value = step.verify_performance_replication.secondary_replication_data_primaries - } - - output "primary_updated_replication_data_secondaries" { - description = "The Vault updated primary cluster secondaries connection status" - value = step.verify_updated_performance_replication.primary_replication_data_secondaries - } - - output "secondary_updated_replication_data_primaries" { - description = "The Vault updated secondary cluster primaries connection status" - value = step.verify_updated_performance_replication.secondary_replication_data_primaries - } -} diff --git a/enos/enos-scenario-seal-ha.hcl b/enos/enos-scenario-seal-ha.hcl new file mode 100644 index 000000000000..5478a6a99842 --- /dev/null +++ b/enos/enos-scenario-seal-ha.hcl @@ -0,0 +1,1095 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +scenario "seal_ha" { + description = <<-EOF + The seal_ha scenario verifies Vault Enterprises seal HA capabilities. The build can be a local + branch, any CRT built Vault Enterprise artifact saved to the local machine, or any CRT built + Vault Enterprise artifact in the stable channel in Artifactory. + + The scenario deploys a Vault Enterprise cluster with the candidate build and enables a single + primary seal, mounts various engines and writes data, then establishes seal HA with a secondary + seal, the removes the primary and verifies data integrity and seal data migration. It also + verifies that the cluster is able to recover from a forced leader election after the initial + seal rewrap. The scenario also performs standard baseline verification that is not specific to + seal_ha. + + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. To verify that your account has agreed, sign-in to your AWS through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_edition = global.consul_editions + consul_version = global.consul_versions + distro = global.distros + edition = global.enterprise_editions + ip_version = global.ip_versions + // Seal HA is only supported with auto-unseal devices. + primary_seal = ["awskms", "pkcs11"] + secondary_seal = ["awskms", "pkcs11"] + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. 
+ exclude { + primary_seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + exclude { + secondary_seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. + exclude { + primary_seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + exclude { + secondary_seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPv6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + backend = ["consul"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + description = global.description.build_vault + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent".
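+ // Editorial aside: downstream steps guard references to this step's output with the same
+ // condition, so the value resolves to null for variants where the step is skipped. The
+ // exact expression, as used in the create_backend_cluster step below:
+ //
+ //   license = (matrix.backend == "consul" && matrix.consul_edition == "ent")
+ //     ? step.read_backend_license.license
+ //     : null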
+ step "read_backend_license" { + description = global.description.read_backend_license + skip_step = matrix.backend == "raft" || matrix.consul_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_primary_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.primary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.secondary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + description = global.description.create_backend_cluster + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = matrix.consul_edition + version = matrix.consul_version + } + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_health_read, + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = matrix.consul_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + // Only configure our primary seal during our initial cluster setup + seal_attributes = step.create_primary_seal_key.attributes + seal_type = matrix.primary_seal + storage_backend = matrix.backend + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + // Write some test data before we create the new seal + step "verify_secrets_engines_create" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips, + step.verify_vault_unsealed, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + + variables { + hosts = 
step.create_vault_cluster_targets.hosts + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for the initial seal rewrap to complete before we add our HA seal. + step "wait_for_initial_seal_rewrap" { + description = global.description.wait_for_seal_rewrap + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.verify_secrets_engines_create, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_sealwrap_rewrap_read_entries_processed_eq_entries_succeeded_post_rewrap, + quality.vault_api_sys_sealwrap_rewrap_read_entries_processed_gt_zero_post_rewrap, + quality.vault_api_sys_sealwrap_rewrap_read_is_running_false_post_rewrap, + quality.vault_api_sys_sealwrap_rewrap_read_no_entries_fail_during_rewrap, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "stop_vault" { + description = "${global.description.stop_vault}. We do this to write new seal configuration." + module = module.stop_vault + depends_on = [ + step.create_vault_cluster, + step.verify_secrets_engines_create, + step.wait_for_initial_seal_rewrap, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + } + } + + // Add the secondary seal to the cluster + step "add_ha_seal_to_cluster" { + description = global.description.enable_multiseal + module = module.start_vault + depends_on = [step.stop_vault] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_multiseal_enable + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + manage_service = local.manage_service + seal_attributes = step.create_primary_seal_key.attributes + seal_attributes_secondary = step.create_secondary_seal_key.attributes + seal_type = matrix.primary_seal + seal_type_secondary = matrix.secondary_seal + storage_backend = matrix.backend + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader_election" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.add_ha_seal_to_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_leader_ip_for_step_down" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_election] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + description = global.description.vault_leader_step_down + module = module.vault_step_down + depends_on = [step.get_leader_ip_for_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_step_down_steps_down, + quality.vault_cli_operator_step_down, + ] + + variables { + leader_host = step.get_leader_ip_for_step_down.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = 
step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed_with_new_seal" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + // Wait for the seal rewrap to complete and verify that no entries failed + step "wait_for_seal_rewrap" { + description = global.description.wait_for_seal_rewrap + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.add_ha_seal_to_cluster, + step.verify_vault_unsealed_with_new_seal, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_sealwrap_rewrap_read_entries_processed_eq_entries_succeeded_post_rewrap, + quality.vault_api_sys_sealwrap_rewrap_read_entries_processed_gt_zero_post_rewrap, + quality.vault_api_sys_sealwrap_rewrap_read_is_running_false_post_rewrap, + quality.vault_api_sys_sealwrap_rewrap_read_no_entries_fail_during_rewrap, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Perform all of our standard verifications after we've enabled multiseal + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + description = global.description.verify_replication_status + module = module.vault_verify_replication + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_replication_ce_disabled, + quality.vault_replication_ent_dr_available, + quality.vault_replication_ent_pr_available, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + } + } + + // Make sure our data is still available + step "verify_secrets_engines_read" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + quality.vault_secrets_kv_read + ] + + variables { + create_state = step.verify_secrets_engines_create.state + hosts = step.get_updated_cluster_ips.follower_hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_ui" { + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + } + } + + step "verify_seal_type" { + description = "${global.description.verify_seal_type} In this case we expect to have 'multiseal'." + // Don't run this on versions less than 1.16.0-beta1 until VAULT-21053 is fixed on prior branches. + skip_step = semverconstraint(var.vault_product_version, "< 1.16.0-beta1") + module = module.verify_seal_type + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_status_seal_type + + variables { + hosts = step.create_vault_cluster_targets.hosts + seal_type = "multiseal" + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + // Now we'll migrate away from our initial seal to our secondary seal + + // Stop the vault service on all nodes before we restart with new seal config + step "stop_vault_for_migration" { + description = "${global.description.stop_vault}. 
We do this to remove the old primary seal." + module = module.stop_vault + depends_on = [ + step.wait_for_seal_rewrap, + step.verify_secrets_engines_read, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + } + } + + // Remove the "primary" seal from the cluster. Set our "secondary" seal to priority 1. We do this + // by restarting vault with the correct config. + step "remove_primary_seal" { + description = <<-EOF + Reconfigure the vault cluster seal configuration with only our secondary seal config which + will force a seal migration to a single seal. + EOF + module = module.start_vault + depends_on = [step.stop_vault_for_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_config_multiseal_is_toggleable + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + manage_service = local.manage_service + seal_alias = "secondary" + seal_attributes = step.create_secondary_seal_key.attributes + seal_type = matrix.secondary_seal + storage_backend = matrix.backend + } + } + + // Wait for our cluster to elect a leader after restarting vault with a new primary seal + step "wait_for_leader_after_migration" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.remove_primary_seal] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Since we've restarted our cluster we might have a new leader and followers. Get the new IPs. 
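+ // As a sketch of what this step yields (using only attributes this scenario consumes elsewhere): + // the module exposes leader_host and follower_hosts, and the post-migration read check below + // consumes step.get_cluster_ips_after_migration.follower_hosts.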
+ step "get_cluster_ips_after_migration" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Make sure we unsealed + step "verify_vault_unsealed_after_migration" { + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_leader_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + // Wait for the seal rewrap to complete and verify that no entries failed + step "wait_for_seal_rewrap_after_migration" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.wait_for_leader_after_migration, + step.verify_vault_unsealed_after_migration, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Make sure our data is still available after migration + step "verify_secrets_engines_read_after_migration" { + module = module.vault_verify_secrets_engines_read + depends_on = [step.wait_for_seal_rewrap_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + quality.vault_secrets_kv_read + ] + + variables { + create_state = step.verify_secrets_engines_create.state + hosts = step.get_cluster_ips_after_migration.follower_hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + // Make sure we have our secondary seal type after migration + step "verify_seal_type_after_migration" { + // Don't run this on versions less than 1.16.0-beta1 until VAULT-21053 is fixed on prior branches. 
+ skip_step = semverconstraint(var.vault_product_version, "< 1.16.0-beta1") + module = module.verify_seal_type + depends_on = [step.wait_for_seal_rewrap_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + seal_type = matrix.secondary_seal + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "initial_seal_rewrap" { + description = "The initial seal rewrap status" + value = step.wait_for_initial_seal_rewrap.stdout + } + + output "post_migration_seal_rewrap" { + description = "The seal rewrap status after migrating the primary seal" + value = step.wait_for_seal_rewrap_after_migration.stdout + } + + output "primary_seal_attributes" { + description = "The Vault cluster primary seal attributes" + value = step.create_primary_seal_key.attributes + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "secondary_seal_attributes" { + description = "The Vault cluster secondary seal attributes" + value = step.create_secondary_seal_key.attributes + } + + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_create.state + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index f2b5e9b5ef61..cfbfe3a07ca2 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -1,21 +1,57 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 scenario "smoke" { + description = <<-EOF + The smoke scenario verifies a Vault cluster in a fresh installation. The build can be a local + branch, any CRT built Vault artifact saved to the local machine, or any CRT built Vault artifact + in the stable channel in Artifactory. + + The scenario deploys a Vault cluster with the candidate build and performs an extended set of + baseline verification. 
+ + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. To verify that your account has agreed, sign in to AWS through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF + matrix { - arch = ["amd64", "arm64"] - backend = ["consul", "raft"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - seal = ["awskms", "shamir"] - - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_edition = global.consul_editions + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages are not available for leap/sles. + exclude { - edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPv6 mode is currently implemented for integrated Raft storage only + exclude { + ip_version = ["6"] + backend = ["consul"] + } } @@ -23,53 +59,29 @@ scenario "smoke" { terraform = terraform.default providers = [ provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel + provider.enos.ec2_user, + provider.enos.ubuntu ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { - rhel = provider.enos.rhel + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? 
var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] - } - - step "get_local_metadata" { - skip_step = matrix.artifact_source != "local" - module = module.get_local_metadata + manage_service = matrix.artifact_type == "bundle" } step "build_vault" { - module = "build_${matrix.artifact_source}" + description = global.description.build_vault + module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -81,200 +93,472 @@ scenario "smoke" { artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null revision = var.vault_revision } } - step "find_azs" { - module = module.az_finder + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type - ] + common_tags = global.tags + ip_version = matrix.ip_version } } - step "create_vpc" { - module = module.create_vpc + step "read_backend_license" { + description = global.description.read_backend_license + module = module.read_license + skip_step = matrix.backend == "raft" || matrix.consul_edition == "ce" variables { - ami_architectures = distinct([matrix.arch, "amd64"]) - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = global.backend_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" - module = module.read_license + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id } } step "create_backend_cluster" { - module = "backend_${matrix.backend}" - depends_on = [step.create_vpc] + description = global.description.create_backend_cluster + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] providers = { enos = provider.enos.ubuntu } + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { - edition = var.backend_edition + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = matrix.consul_edition version = matrix.consul_version } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id } } step "create_vault_cluster" { - module = module.vault_cluster + description = global.description.create_vault_cluster + module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets, ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + variables { - ami_id = 
step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null consul_release = matrix.backend == "consul" ? { - edition = var.backend_edition + edition = matrix.consul_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend } } - step "get_vault_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [step.create_vault_cluster] + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } - step "verify_vault_version" { - module = module.vault_verify_version - depends_on = [step.create_vault_cluster] + step "get_leader_ip_for_step_down" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_ha_status_read, + 
quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_edition = matrix.edition - vault_install_dir = local.vault_install_dir - vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version - vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision - vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + description = global.description.vault_leader_step_down + module = module.vault_step_down + depends_on = [step.get_leader_ip_for_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_step_down_steps_down, + quality.vault_cli_operator_step_down, + ] + + variables { + leader_host = step.get_leader_ip_for_step_down.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_step_down, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } step "verify_vault_unsealed" { - module = module.vault_verify_unsealed - depends_on = [step.create_vault_cluster] + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_leader] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + variables { - vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances + hosts = 
step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] } } - step "verify_write_test_data" { - module = module.vault_verify_write_data - depends_on = [ - step.create_vault_cluster, - step.get_vault_cluster_ips + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, ] + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_secrets_engines_create" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create + depends_on = [step.verify_vault_unsealed] + providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + variables { - leader_public_ip = step.get_vault_cluster_ips.leader_public_ip - leader_private_ip = step.get_vault_cluster_ips.leader_private_ip - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } step "verify_raft_auto_join_voter" { - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [step.create_vault_cluster] + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.verify_vault_unsealed] providers = { enos = local.enos_provider[matrix.distro] } + verifies = quality.vault_raft_voters + variables { - vault_install_dir = local.vault_install_dir - 
vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } step "verify_replication" { - module = module.vault_verify_replication - depends_on = [step.create_vault_cluster] + description = global.description.verify_replication_status + module = module.vault_verify_replication + depends_on = [step.verify_vault_unsealed] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_replication_ce_disabled, + quality.vault_replication_ent_dr_available, + quality.vault_replication_ent_pr_available, + ] + variables { - vault_edition = matrix.edition - vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition } } - step "verify_read_test_data" { - module = module.vault_verify_read_data + step "verify_secrets_engines_read" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read depends_on = [ - step.verify_write_test_data, + step.verify_secrets_engines_create, step.verify_replication ] @@ -282,78 +566,102 @@ scenario "smoke" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + quality.vault_secrets_kv_read + ] + variables { - node_public_ips = step.get_vault_cluster_ips.follower_public_ips - vault_install_dir = local.vault_install_dir + create_state = step.verify_secrets_engines_create.state + hosts = step.get_vault_cluster_ips.follower_hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] } } step "verify_ui" { - module = module.vault_verify_ui - depends_on = [step.create_vault_cluster] + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.verify_vault_unsealed] providers = { enos = local.enos_provider[matrix.distro] } + verifies = quality.vault_ui_assets + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name } - output "vault_cluster_priv_ips" { + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { description = "The Vault 
cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_recovery_key_shares" { + output "recovery_key_shares" { description = "The Vault cluster recovery key shares" - value = step.create_vault_cluster.vault_recovery_key_shares + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_recovery_keys_b64" { + output "recovery_keys_b64" { description = "The Vault cluster recovery keys b64" - value = step.create_vault_cluster.vault_recovery_keys_b64 + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_recovery_keys_hex" { + output "recovery_keys_hex" { description = "The Vault cluster recovery keys hex" - value = step.create_vault_cluster.vault_recovery_keys_hex + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_create.state } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl index 6fba448e2d5b..0e1ee47cedac 100644 --- a/enos/enos-scenario-ui.hcl +++ b/enos/enos-scenario-ui.hcl @@ -1,10 +1,21 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 scenario "ui" { + description = <<-EOF + The UI scenario is designed to create a new cluster and run the existing Ember test suite + against a live Vault cluster instead of a binary in dev mode. + + The UI scenario verifies the Vault ember test suite against a Vault cluster. The build can be a + local branch, any CRT built Vault artifact saved to the local machine, or any CRT built Vault + artifact in the stable channel in Artifactory. + + The scenario deploys a Vault cluster with the candidate build and executes the ember test suite. 
+ EOF matrix { - edition = ["oss", "ent"] - backend = ["consul", "raft"] + backend = global.backends + consul_edition = global.consul_editions + edition = ["ce", "ent"] } terraform_cli = terraform_cli.default @@ -15,45 +26,37 @@ scenario "ui" { ] locals { - arch = "amd64" - distro = "ubuntu" - seal = "awskms" - artifact_type = "bundle" - consul_version = "1.14.2" + arch = "amd64" + artifact_type = "bundle" + backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) + backend_tag_key = "VaultStorage" build_tags = { - "oss" = ["ui"] + "ce" = ["ui"] "ent" = ["ui", "enterprise", "ent"] } - bundle_path = abspath(var.vault_bundle_path) + artifact_path = abspath(var.vault_artifact_path) + distro = "ubuntu" + consul_version = "1.17.0" + ip_version = 4 + seal = "awskms" tags = merge({ "Project Name" : var.project_name "Project" : "Enos", "Environment" : "ci" }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[local.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = var.vault_install_dir - ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "oss") ? "!enterprise" : null - } - - step "get_local_metadata" { - module = module.get_local_metadata + vault_install_dir = var.vault_install_dir + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "ce") ? "!enterprise" : null } step "build_vault" { - module = module.build_local + description = global.description.build_vault + module = module.build_local variables { build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + artifact_path = local.artifact_path goarch = local.arch goos = "linux" product_version = var.vault_product_version @@ -62,136 +65,297 @@ scenario "ui" { } } - step "find_azs" { - module = module.az_finder + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type - ] + common_tags = local.tags + ip_version = local.ip_version } } - step "create_vpc" { - module = module.create_vpc + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". 
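+ // For example, either matrix.backend == "raft" or matrix.consul_edition == "ce" makes the + // skip_step expression below evaluate to true, so a license file is only read when the + // backend is Consul Enterprise.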
+ step "read_backend_license" { + description = global.description.read_backend_license + skip_step = matrix.backend == "raft" || matrix.consul_edition == "ce" + module = module.read_license variables { - ami_architectures = [local.arch] - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = local.backend_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" - module = module.read_license + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license variables { file_name = local.vault_license_path } } + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${local.seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = local.vault_tag_key + common_tags = local.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = local.backend_tag_key + common_tags = local.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + step "create_backend_cluster" { - module = "backend_${matrix.backend}" - depends_on = [step.create_vpc] + description = global.description.create_backend_cluster + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets, + ] providers = { enos = provider.enos.ubuntu } + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { - edition = var.backend_edition + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = local.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = matrix.consul_edition version = local.consul_version } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id } } step "create_vault_cluster" { - module = module.vault_cluster + description = global.description.create_vault_cluster + module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { enos = provider.enos.ubuntu } + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_consul, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + variables { - ami_id = step.create_vpc.ami_ids[local.distro][local.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = local.seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = local.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = "file" + consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = matrix.consul_edition + version = local.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = local.vault_install_dir + ip_version = local.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + packages = concat(global.packages, global.distro_packages["ubuntu"][global.distro_version["ubuntu"]]) + seal_attributes = step.create_seal_key.attributes + seal_type = local.seal + storage_backend = matrix.backend + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = provider.enos.ubuntu + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = local.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } step "test_ui" { - module = module.vault_test_ui + description = <<-EOF + Verify that the Vault Web UI test suite can run against a live cluster with the compiled + assets. + EOF + module = module.vault_test_ui + depends_on = [step.wait_for_leader] + + verifies = quality.vault_ui_test variables { - vault_addr = step.create_vault_cluster.instance_public_ips[0] - vault_root_token = step.create_vault_cluster.vault_root_token - vault_unseal_keys = step.create_vault_cluster.vault_recovery_keys_b64 - vault_recovery_threshold = step.create_vault_cluster.vault_recovery_threshold + vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip + vault_root_token = step.create_vault_cluster.root_token + vault_unseal_keys = step.create_vault_cluster.recovery_keys_b64 + vault_recovery_threshold = step.create_vault_cluster.recovery_threshold ui_test_filter = local.ui_test_filter } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts } - output "vault_cluster_priv_ips" { + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { - description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = 
step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "seal_name" { + description = "The Vault cluster seal key name" + value = step.create_seal_key.resource_name } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "ui_test_environment" { + value = step.test_ui.ui_test_environment + description = "The environment variables that are required in order to run the test:enos yarn target" } output "ui_test_stderr" { @@ -204,8 +368,13 @@ scenario "ui" { value = step.test_ui.ui_test_stdout } - output "ui_test_environment" { - value = step.test_ui.ui_test_environment - description = "The environment variables that are required in order to run the test:enos yarn target" + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index 3b576ed70087..bc746ac12790 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -1,21 +1,80 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 scenario "upgrade" { + description = <<-EOF + The upgrade scenario verifies in-place upgrades from previously released versions of Vault + to a candidate build. The build can be a local branch, any CRT built Vault artifact + saved to the local machine, or any CRT built Vault artifact in the stable channel in + Artifactory. + + The scenario will first create a new Vault Cluster with a previously released version of Vault, + mount engines and create data, then perform an in-place upgrade to the candidate build and + perform quality verification. + + If you want to use the 'distro:leap' variant you must first accept SUSE's terms for the AWS + account. To verify that your account has agreed, sign in to your AWS account through Doormat, + and visit the following links to verify your subscription or subscribe: + arm64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 + amd64 AMI: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + EOF +
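+ // Illustrative usage only (the variant values below are hypothetical examples): a single + // variant of this scenario can be exercised locally by passing a matrix filter to the enos + // CLI, along the lines of 'enos scenario run upgrade arch:amd64 artifact_source:local + // backend:raft distro:ubuntu edition:ce seal:awskms'. Dimensions left unpinned expand to + // all of their values, so tighter filters run fewer variants.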
matrix { - arch = ["amd64", "arm64"] - backend = ["consul", "raft"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - seal = ["awskms", "shamir"] - - # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_edition = global.consul_editions + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + initial_version = global.upgrade_initial_versions_ce + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + // Don't upgrade from super-ancient versions in CI because there are known reliability issues + // in those versions that have already been fixed. + exclude { + initial_version = [for e in matrix.initial_version : e if semverconstraint(e, "<1.11.0-0")] + } + + // FIPS 140-2 editions were not supported until 1.11.x, even though there are 1.10.x binaries + // published. + exclude { + edition = ["ent.fips1402", "ent.hsm.fips1402"] + initial_version = [for e in matrix.initial_version : e if semverconstraint(e, "<1.11.0-0")] + } + + // There are no published versions of these artifacts yet. We'll update this to exclude older + // versions after our initial publication of these editions for arm64. + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + // PKCS#11 can only be used with hsm editions + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages are not available for leap/sles. + exclude { + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + + // Testing in IPv6 mode is currently implemented for integrated Raft storage only exclude { - edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] - artifact_type = ["package"] + ip_version = ["6"] + backend = ["consul"] } } @@ -23,49 +82,29 @@ scenario "upgrade" { terraform = terraform.default providers = [ provider.aws.default, - provider.enos.ubuntu, - provider.enos.rhel + provider.enos.ec2_user, + provider.enos.ubuntu ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null enos_provider = { - rhel = provider.enos.rhel + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] + manage_service = matrix.artifact_type == "bundle" } - # This step gets/builds the upgrade artifact that we will upgrade to step "build_vault" { - module = "build_${matrix.artifact_source}" + description = global.description.build_vault + module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -77,165 +116,351 @@ scenario "upgrade" { artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null revision = var.vault_revision } } - step "find_azs" { - module = module.az_finder + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // an "ent" Consul edition. 
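+ // For example (illustrative): when matrix.backend is "raft" or matrix.consul_edition is + // "ce", the skip_step expression below evaluates to true and the step does not run, which + // is why later references to step.read_backend_license.license are guarded by the same + // backend/edition conditional.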
+ step "read_backend_license" { + description = global.description.read_backend_license + skip_step = matrix.backend == "raft" || matrix.consul_edition == "ce" + module = module.read_license variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type, - ] + file_name = global.backend_license_path } } - step "create_vpc" { - module = module.create_vpc + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license variables { - ami_architectures = distinct([matrix.arch, "amd64"]) - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = global.vault_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" - module = module.read_license + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } variables { - file_name = local.vault_license_path + cluster_id = step.create_vpc.id + common_tags = global.tags } } - step "get_local_metadata" { - skip_step = matrix.artifact_source != "local" - module = module.get_local_metadata + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = global.description.create_vault_cluster_targets + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"][global.distro_version["ubuntu"]] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } } step "create_backend_cluster" { - module = "backend_${matrix.backend}" - depends_on = [step.create_vpc] + description = global.description.create_backend_cluster + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets, + ] providers = { enos = provider.enos.ubuntu } + verifies = [ + // verified in modules + quality.consul_autojoin_aws, + quality.consul_config_file, + quality.consul_ha_leader_election, + quality.consul_service_start_server, + // verified in enos_consul_start resource + quality.consul_api_agent_host_read, + quality.consul_api_health_node_read, + quality.consul_api_operator_raft_config_read, + quality.consul_cli_validate, + quality.consul_health_state_passing_read_nodes_minimum, + quality.consul_operator_raft_configuration_read_voters_minimum, + quality.consul_service_systemd_notified, + quality.consul_service_systemd_unit, + ] + variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { - edition = var.backend_edition + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + hosts = step.create_vault_cluster_backend_targets.hosts + license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = matrix.consul_edition version = matrix.consul_version } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id } } - # This step creates a Vault cluster using a bundle downloaded from - # releases.hashicorp.com, with the version specified in var.vault_autopilot_initial_release step "create_vault_cluster" { - module = module.vault_cluster + description = global.description.create_vault_cluster + module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + // verified in modules + quality.consul_service_start_client, + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_storage_backend_consul, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + 
quality.vault_cli_status_exit_code, + quality.vault_service_systemd_unit, + quality.vault_service_systemd_notified, + ] + variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null consul_release = matrix.backend == "consul" ? { - edition = var.backend_edition + edition = matrix.consul_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_install_dir = local.vault_install_dir - vault_release = var.vault_upgrade_initial_release - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + manage_service = true # always handle systemd for released bundles + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + release = { + edition = matrix.edition + version = matrix.initial_version } + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + // Use the install dir for our initial version, which always comes from a zip bundle + vault_install_dir = global.vault_install_dir["bundle"] + vault_root_token = step.create_vault_cluster.root_token } } step "get_vault_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [step.create_vault_cluster] + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + ip_version = 
matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + // Use the install dir for our initial version, which always comes from a zip bundle + vault_install_dir = global.vault_install_dir["bundle"] + vault_root_token = step.create_vault_cluster.root_token } } - step "verify_write_test_data" { - module = module.vault_verify_write_data + step "verify_secrets_engines_create" { + description = global.description.verify_secrets_engines_create + module = module.vault_verify_secrets_engines_create depends_on = [ step.create_vault_cluster, - step.get_vault_cluster_ips + step.get_vault_cluster_ips, ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_auth_userpass_user_write, + quality.vault_api_identity_entity_write, + quality.vault_api_identity_entity_alias_write, + quality.vault_api_identity_group_write, + quality.vault_api_identity_oidc_config_write, + quality.vault_api_identity_oidc_introspect_write, + quality.vault_api_identity_oidc_key_write, + quality.vault_api_identity_oidc_key_rotate_write, + quality.vault_api_identity_oidc_role_write, + quality.vault_api_identity_oidc_token_read, + quality.vault_api_sys_auth_userpass_user_write, + quality.vault_api_sys_policy_write, + quality.vault_mount_auth, + quality.vault_mount_kv, + quality.vault_secrets_kv_write, + ] + variables { - leader_public_ip = step.get_vault_cluster_ips.leader_public_ip - leader_private_ip = step.get_vault_cluster_ips.leader_private_ip - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + // Use the install dir for our initial version, which always comes from a zip bundle + vault_install_dir = global.vault_install_dir["bundle"] + vault_root_token = step.create_vault_cluster.root_token } } - # This step upgrades the Vault cluster to the var.vault_product_version - # by getting a bundle or package of that version from the matrix.artifact_source + // This step upgrades the Vault cluster to the var.vault_product_version + // by getting a bundle or package of that version from the matrix.artifact_source step "upgrade_vault" { - module = module.vault_upgrade + description = <<-EOF + Perform an in-place upgrade of the Vault Cluster nodes by first installing a new version + of Vault on the cluster node machines and restarting the service. + EOF + module = module.vault_upgrade depends_on = [ step.create_vault_cluster, + step.verify_secrets_engines_create, ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_cluster_upgrade_in_place, + quality.vault_service_restart, + ] + variables { - vault_api_addr = "http://localhost:8200" - vault_instances = step.create_vault_cluster.vault_instances - vault_local_artifact_path = local.bundle_path + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_install_dir = local.vault_install_dir - vault_unseal_keys = matrix.seal == "shamir" ? 
step.create_vault_cluster.vault_unseal_keys_hex : null + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_local_artifact_path = local.artifact_path + vault_root_token = step.create_vault_cluster.root_token vault_seal_type = matrix.seal + vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null } } - step "verify_vault_version" { - module = module.vault_verify_version + // Wait for our upgraded cluster to elect a leader + step "wait_for_leader_after_upgrade" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader depends_on = [ - step.create_backend_cluster, + step.create_vault_cluster, step.upgrade_vault, ] @@ -243,58 +468,177 @@ scenario "upgrade" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_edition = matrix.edition - vault_install_dir = local.vault_install_dir - vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version - vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision - vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_leader_ip_for_step_down" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_after_upgrade] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + description = global.description.vault_leader_step_down + module = module.vault_step_down + depends_on = [step.get_leader_ip_for_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_step_down_steps_down, + quality.vault_cli_operator_step_down, + ] + + variables { + leader_host = step.get_leader_ip_for_step_down.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader_after_stepdown" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + 
quality.vault_unseal_ha_leader_election, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + timeout = 120 // seconds + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } step "get_updated_vault_cluster_ips" { - module = module.vault_get_cluster_ips + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips depends_on = [ - step.create_vault_cluster, - step.upgrade_vault + step.wait_for_leader_after_stepdown, ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } step "verify_vault_unsealed" { - module = module.vault_verify_unsealed + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed depends_on = [ - step.create_vault_cluster, step.get_updated_vault_cluster_ips, - step.upgrade_vault, ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token } } - step "verify_read_test_data" { - module = module.vault_verify_read_data + step "verify_secrets_engines_read" { + description = global.description.verify_secrets_engines_read + module = module.vault_verify_secrets_engines_read depends_on = [ - step.get_updated_vault_cluster_ips, - step.verify_write_test_data, + step.verify_secrets_engines_create, step.verify_vault_unsealed ] @@ -302,83 +646,171 @@ scenario "upgrade" { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_api_auth_userpass_login_write, + quality.vault_api_identity_entity_read, + quality.vault_api_identity_oidc_config_read, + quality.vault_api_identity_oidc_key_read, + quality.vault_api_identity_oidc_role_read, + quality.vault_secrets_kv_read + ] + variables { - node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips - vault_install_dir = local.vault_install_dir + create_state = step.verify_secrets_engines_create.state + hosts = step.get_updated_vault_cluster_ips.follower_hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] } } step "verify_raft_auto_join_voter" { - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + description = global.description.verify_replication_status + module = module.vault_verify_replication + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_replication_ce_disabled, + quality.vault_replication_ent_dr_available, + quality.vault_replication_ent_pr_available, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + } + } + + step "verify_billing_start_date" { + description = global.description.verify_billing_start_date + skip_step = semverconstraint(var.vault_product_version, "<=1.16.6-0 || >=1.17.0-0 <=1.17.2-0") + module = module.vault_verify_billing_start_date depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, + step.get_updated_vault_cluster_ips, + step.verify_vault_unsealed, + step.verify_secrets_engines_read, ] providers = { enos = local.enos_provider[matrix.distro] } + verifies = [ + quality.vault_billing_start_date, + ] + variables { - vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token } } - output "vault_cluster_instance_ids" 
{ - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + step "verify_ui" { + description = global.description.verify_ui + module = module.vault_verify_ui + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_ui_assets + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + } } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path } - output "vault_cluster_priv_ips" { + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_recovery_key_shares" { + output "recovery_key_shares" { description = "The Vault cluster recovery key shares" - value = step.create_vault_cluster.vault_recovery_key_shares + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_recovery_keys_b64" { + output "recovery_keys_b64" { description = "The Vault cluster recovery keys b64" - value = step.create_vault_cluster.vault_recovery_keys_b64 + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_recovery_keys_hex" { + output "recovery_keys_hex" { description = "The Vault cluster recovery keys hex" - value = step.create_vault_cluster.vault_recovery_keys_hex + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "seal_name" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "secrets_engines_state" { + description = "The state of configured secrets engines" + value = step.verify_secrets_engines_create.state } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl index b03b4b48027e..a8f82f96accb 100644 --- a/enos/enos-terraform.hcl +++ 
b/enos/enos-terraform.hcl @@ -1,21 +1,19 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 terraform_cli "default" { plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null +} - credentials "app.terraform.io" { - token = var.tfc_api_token - } +terraform_cli "dev" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null - /* provider_installation { dev_overrides = { - "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider") + "registry.terraform.io/hashicorp-forge/enos" = try(abspath("../../terraform-provider-enos/dist"), null) } direct {} } - */ } terraform "default" { @@ -27,7 +25,8 @@ terraform "default" { } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" } } } diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index 65a43f891b5b..08909e844df2 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -1,22 +1,16 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -variable "artifact_path" { - type = string - description = "The local path for dev artifact to test" - default = null -} +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 variable "artifactory_username" { type = string - description = "The username to use when connecting to artifactory" + description = "The username to use when testing an artifact from artifactory" default = null sensitive = true } variable "artifactory_token" { type = string - description = "The token to use when connecting to artifactory" + description = "The token to use when authenticating to artifactory" default = null sensitive = true } @@ -36,7 +30,7 @@ variable "artifactory_repo" { variable "aws_region" { description = "The AWS region where we'll create infrastructure" type = string - default = "us-west-1" + default = "us-east-1" } variable "aws_ssh_keypair_name" { @@ -54,13 +48,13 @@ variable "aws_ssh_private_key_path" { variable "backend_edition" { description = "The backend release edition if applicable" type = string - default = "oss" + default = "ce" // or "ent" } variable "backend_instance_type" { - description = "The instance type to use for the Vault backend" + description = "The instance type to use for the Vault backend. Must be arm64/nitro compatible" type = string - default = "t3.small" + default = "t4g.small" } variable "backend_license_path" { @@ -69,12 +63,48 @@ variable "backend_license_path" { default = null } +variable "backend_log_level" { + description = "The server log level for the backend. 
Supported values include 'trace', 'debug', 'info', 'warn', 'error'" + type = string + default = "trace" +} + variable "project_name" { description = "The description of the project" type = string default = "vault-enos-integration" } +variable "distro_version_amzn" { + description = "The version of Amazon Linux to use" + type = string + default = "2023" // or "2", though pkcs11 has not been tested with 2 +} + +variable "distro_version_leap" { + description = "The version of openSUSE Leap to use" + type = string + default = "15.6" +} + +variable "distro_version_rhel" { + description = "The version of RHEL to use" + type = string + default = "9.4" // or "8.10" +} + +variable "distro_version_sles" { + description = "The version of SUSE SLES to use" + type = string + default = "15.6" +} + +variable "distro_version_ubuntu" { + description = "The version of Ubuntu to use" + type = string + default = "24.04" // or "20.04", "22.04" +} + variable "tags" { description = "Tags that will be applied to infrastructure resources that support tagging" type = map(string) @@ -87,41 +117,45 @@ variable "terraform_plugin_cache_dir" { default = null } -variable "tfc_api_token" { - description = "The Terraform Cloud QTI Organization API token." +variable "ui_test_filter" { type = string - sensitive = true + description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"\"'" + default = null }
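+ +// Illustrative example (the filter value is hypothetical): setting ui_test_filter = "secrets" +// would append '-f="secrets"' to the ember test command in the ui scenario, limiting the run +// to matching tests.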
-variable "vault_artifact_type" { - description = "The Vault artifact type package or bundle" - default = "bundle" +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" + default = true } -variable "vault_autopilot_initial_release" { - description = "The Vault release to deploy before upgrading with autopilot" - default = { - edition = "ent" - version = "1.11.0" - } +variable "vault_artifact_type" { + description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles" + default = "bundle" } -variable "vault_bundle_path" { +variable "vault_artifact_path" { description = "Path to CRT generated or local vault.zip bundle" type = string default = "/tmp/vault.zip" } -variable "vault_install_dir" { +variable "vault_build_date" { + description = "The build date for the Vault artifact" type = string - description = "The directory where the Vault binary will be installed" - default = "/opt/vault/bin" + default = "" } -variable "vault_instance_type" { - description = "The instance type to use for the Vault backend" +variable "vault_enable_audit_devices" { + description = "If true every audit device will be enabled" + type = bool + default = true +} + +variable "vault_install_dir" { type = string - default = null + description = "The directory where the Vault binary will be installed" + default = "/opt/vault/bin" } variable "vault_instance_count" { @@ -131,7 +165,7 @@ variable "vault_instance_count" { } variable "vault_license_path" { - description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions" + description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions" type = string default = null } @@ -145,13 +179,7 @@ variable "vault_local_build_tags" { variable "vault_log_level" { description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." type = string - default = "info" -} - -variable "vault_build_date" { - description = "The build date for Vault artifact" - type = string - default = "" + default = "trace" } variable "vault_product_version" { @@ -169,34 +197,8 @@ variable "vault_revision" { variable "vault_upgrade_initial_release" { description = "The Vault release to deploy before upgrading" default = { - edition = "oss" + edition = "ce" // Vault 1.10.5 has a known issue with retry_join. version = "1.10.4" } } - -variable "operator_instance" { - type = string - description = "The ip address of the operator (Voter) node" -} - -variable "remove_vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The old vault nodes to be removed" -} - - -variable "ui_test_filter" { - type = string - description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"\"'" - default = null -} - -variable "ui_run_tests" { - type = bool - description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" - default = true -} diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl index d63af4452c80..1d60711806d1 100644 --- a/enos/enos.vars.hcl +++ b/enos/enos.vars.hcl @@ -1,51 +1,121 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 -# aws_region is the AWS region where we'll create infrastructure -# for the smoke scenario -# aws_region = "us-west-1" +// artifactory_username is the username to use when testing an artifact stored in artifactory. +// artifactory_username = "yourname@hashicorp.com" -# aws_ssh_keypair_name is the AWS keypair to use for SSH -# aws_ssh_keypair_name = "enos-ci-ssh-key" +// artifactory_token is the token to use when authenticating to artifactory. +// artifactory_token = "yourtoken" -# aws_ssh_private_key_path is the path to the AWS keypair private key -# aws_ssh_private_key_path = "./support/private_key.pem" +// artifactory_host is the artifactory host to search for vault artifacts. +// artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" -# backend_instance_type is the instance type to use for the Vault backend -# backend_instance_type = "t3.small" +// artifactory_repo is the artifactory repo to search for vault artifacts. +// artifactory_repo = "hashicorp-crt-stable-local*" -# tags are a map of tags that will be applied to infrastructure resources that -# support tagging. -# tags = { "Project Name" : "Vault", "Something Cool" : "Value" } +// aws_region is the AWS region where we'll create infrastructure +// for the smoke scenario +// aws_region = "us-east-1" -# terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. -# It must exist. -# terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir +// aws_ssh_keypair_name is the AWS keypair to use for SSH +// aws_ssh_keypair_name = "enos-ci-ssh-key" -# tfc_api_token is the Terraform Cloud QTI Organization API token. We need this -# to download the enos Terraform provider and the enos Terraform modules. -# tfc_api_token = "XXXXX.atlasv1.XXXXX..." +// aws_ssh_private_key_path is the path to the AWS keypair private key +// aws_ssh_private_key_path = "./support/private_key.pem" -# vault_bundle_path is the path to CRT generated or local vault.zip bundle. 
When -# using the "builder:local" variant a bundle will be built from the current branch. -# In CI it will use the output of the build workflow. -# vault_bundle_path = "./dist/vault.zip" +// backend_license_path is the license for the backend if applicable (Consul Enterprise). +// backend_license_path = "./support/consul.hclic" -# vault_install_dir is the directory where the vault binary will be installed on -# the remote machines. -# vault_install_dir = "/opt/vault/bin" +// backend_log_level is the server log level for the backend. Supported values include 'trace', +// 'debug', 'info', 'warn', 'error' +// backend_log_level = "trace" -# vault_local_binary_path is the path of the local binary that we're upgrading to. -# vault_local_binary_path = "./support/vault" +// backend_instance_type is the instance type to use for the Vault backend. Must support arm64 +// backend_instance_type = "t4g.small" -# vault_instance_type is the instance type to use for the Vault backend -# vault_instance_type = "t3.small" +// project_name is the name of the project. It will often be used to tag infrastructure +// resources. +// project_name = "vault-enos-integration" -# vault_instance_count is how many instances to create for the Vault cluster. -# vault_instance_count = 3 +// distro_version_amzn is the version of Amazon Linux to use for "distro:amzn" variants +// distro_version_amzn = "2" -# vault_license_path is the path to a valid Vault enterprise edition license. -# This is only required for non-oss editions" -# vault_license_path = "./support/vault.hclic" +// distro_version_leap is the version of openSUSE Leap to use for "distro:leap" variants +// distro_version_leap = "15.5" -# vault_upgrade_initial_release is the Vault release to deploy before upgrading. +// distro_version_rhel is the version of RHEL to use for "distro:rhel" variants. +// distro_version_rhel = "9.3" // or "8.9" + +// distro_version_sles is the version of SUSE SLES to use for "distro:sles" variants. +// distro_version_sles = "v15_sp5_standard" + +// distro_version_ubuntu is the version of Ubuntu to use for "distro:ubuntu" variants +// distro_version_ubuntu = "22.04" // or "20.04" + +// tags are a map of tags that will be applied to infrastructure resources that +// support tagging. +// tags = { "Project Name" : "Vault", "Something Cool" : "Value" } + +// terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. +// It must exist. +// terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir" + +// ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will +// be appended to the ember test command as '-f=\"\"'. +// ui_test_filter = "sometest" + +// ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a +// cluster will be created but no tests will be run. +// ui_run_tests = true + +// vault_artifact_path is the path to CRT generated or local vault.zip bundle. When +// using the "builder:local" variant a bundle will be built from the current branch. +// In CI it will use the output of the build workflow. +// vault_artifact_path = "./dist/vault.zip" + +// vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. +// It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles +// vault_artifact_type = "bundle" + +// vault_build_date is the build date for the Vault artifact. 
Some validations will require the binary build +// date to match +// vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example + +// vault_enable_audit_devices sets whether or not to enable every audit device. If true +// a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog +// audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090 +// will be enabled. The netcat program is run in listening mode to provide an endpoint +// that the socket audit device can connect to. +// vault_enable_audit_devices = true + +// vault_install_dir is the directory where the vault binary will be installed on +// the remote machines. +// vault_install_dir = "/opt/vault/bin" + +// vault_local_binary_path is the path of the local binary that we're upgrading to. +// vault_local_binary_path = "./support/vault" + +// vault_instance_type is the instance type to use for the Vault backend +// vault_instance_type = "t3.small" + +// vault_instance_count is how many instances to create for the Vault cluster. +// vault_instance_count = 3 + +// vault_license_path is the path to a valid Vault enterprise edition license. +// This is only required for non-ce editions +// vault_license_path = "./support/vault.hclic" + +// vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. +// vault_local_build_tags = ["ui", "ent"] + +// vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are +// trace, debug, info, warn, and err. +// vault_log_level = "trace" + +// vault_product_version is the version of Vault we are testing. Some validations will expect the vault +// binary and cluster to report this version. +// vault_product_version = "1.15.0" + +// vault_revision is the git sha of the Vault artifact we are testing. Some validations will expect the vault +// binary and cluster to report this revision. +// vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
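+ +// Putting the examples above together, a minimal vars file for a local run might contain values +// like the following (all illustrative; substitute your own keypair names and paths): +// aws_region = "us-east-1" +// aws_ssh_keypair_name = "mykeypair" +// aws_ssh_private_key_path = "./support/private_key.pem" +// vault_artifact_path = "./dist/vault.zip" +// vault_license_path = "./support/vault.hclic" // non-ce editions only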
diff --git a/enos/k8s/enos-modules-k8s.hcl b/enos/k8s/enos-modules-k8s.hcl index 17f565a7443d..3350535016b8 100644 --- a/enos/k8s/enos-modules-k8s.hcl +++ b/enos/k8s/enos-modules-k8s.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 module "create_kind_cluster" { source = "../modules/local_kind_cluster" @@ -12,39 +12,39 @@ module "load_docker_image" { module "k8s_deploy_vault" { source = "../modules/k8s_deploy_vault" - vault_instance_count = var.vault_instance_count + vault_instance_count = var.instance_count } module "k8s_verify_build_date" { source = "../modules/k8s_vault_verify_build_date" - vault_instance_count = var.vault_instance_count + vault_instance_count = var.instance_count } module "k8s_verify_replication" { source = "../modules/k8s_vault_verify_replication" - vault_instance_count = var.vault_instance_count + vault_instance_count = var.instance_count } module "k8s_verify_ui" { source = "../modules/k8s_vault_verify_ui" - vault_instance_count = var.vault_instance_count + vault_instance_count = var.instance_count } module "k8s_verify_version" { source = "../modules/k8s_vault_verify_version" - vault_instance_count = var.vault_instance_count - vault_product_version = var.vault_product_version - vault_product_revision = var.vault_product_revision + vault_instance_count = var.instance_count + vault_product_version = var.vault_version + vault_product_revision = var.vault_revision } module "k8s_verify_write_data" { source = "../modules/k8s_vault_verify_write_data" - vault_instance_count = var.vault_instance_count + vault_instance_count = var.instance_count } module "read_license" { diff --git a/enos/k8s/enos-providers-k8s.hcl b/enos/k8s/enos-providers-k8s.hcl index 7e3d7a774315..e11092c22c53 100644 --- a/enos/k8s/enos-providers-k8s.hcl +++ b/enos/k8s/enos-providers-k8s.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 provider "enos" "default" {} diff --git a/enos/k8s/enos-qualities.hcl b/enos/k8s/enos-qualities.hcl new file mode 100644 index 000000000000..2dfe81f97ae7 --- /dev/null +++ b/enos/k8s/enos-qualities.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +quality "vault_artifact_container_alpine" { + description = "The candidate binary packaged as an Alpine package is used for testing" +} + +quality "vault_artifact_container_ubi" { + description = "The candidate binary packaged as a UBI package is used for testing" +} + +quality "vault_artifact_container_tags" { + description = "The candidate binary has the expected tags" +} diff --git a/enos/k8s/enos-samples-ce.hcl b/enos/k8s/enos-samples-ce.hcl new file mode 100644 index 000000000000..7839b5a52177 --- /dev/null +++ b/enos/k8s/enos-samples-ce.hcl @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +sample "ce_default_linux_amd64_ce_docker" { + subset "k8s" { + matrix { + repo = ["docker", "ecr"] + edition = ["ce"] + } + } +} + +sample "ce_default_linux_arm64_ce_docker" { + subset "k8s" { + matrix { + repo = ["docker", "ecr"] + edition = ["ce"] + } + } +} + +sample "ce_ubi_linux_amd64_ce_redhat" { + subset "k8s" { + matrix { + repo = ["quay"] + edition = ["ce"] + } + } +} + +sample "ce_ubi_linux_arm64_ce_redhat" { + subset "k8s" { + matrix { + repo = ["quay"] + edition = ["ce"] + } + } +} diff --git a/enos/k8s/enos-scenario-k8s.hcl b/enos/k8s/enos-scenario-k8s.hcl index a3c254fcc5c0..7ba9be2c0355 100644 --- a/enos/k8s/enos-scenario-k8s.hcl +++ b/enos/k8s/enos-scenario-k8s.hcl @@ -1,9 +1,18 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 scenario "k8s" { + description = <<-EOF + The k8s scenario verifies Vault when running in Kubernetes mode. The build can be a container + in a remote repository or a local container archive tarball. + + The scenario creates a new kind Kubernetes cluster in Docker and creates a Vault Cluster using + the candidate artifact and verifies behavior against the Vault cluster. + EOF + matrix { - edition = ["oss", "ent"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + repo = ["docker", "ecr", "quay"] } terraform_cli = terraform_cli.default @@ -15,19 +24,110 @@ scenario "k8s" { ] locals { - image_path = abspath(var.vault_docker_image_archive) - - image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "oss" ? "hashicorp/vault" : "hashicorp/vault-enterprise" - image_tag = replace(var.vault_product_version, "+ent", "-ent") - + // For now this works as the vault_version includes metadata. If we ever get to the point that + // vault_version excludes metadata we'll have to include the matrix.edition here as well. + tag_version = replace(var.vault_version, "+ent", "-ent") + tag_version_ubi = "${local.tag_version}-ubi" + // When we load candidate images into our k8s cluster we verify that the archive's embedded + // repository and tag match our expectations. This is the source of truth for what we _expect_ + // various artifacts to have. The source of truth for what we use when building is defined in + // .github/actions/containerize. If you are modifying these expectations you likely need to + // modify the source of truth there. + repo_metadata = { + "ce" = { + docker = { + // https://hub.docker.com/r/hashicorp/vault + repo = "hashicorp/vault" + tag = local.tag_version + } + ecr = { + // https://gallery.ecr.aws/hashicorp/vault + repo = "public.ecr.aws/hashicorp/vault" + tag = local.tag_version + } + quay = { + // https://catalog.redhat.com/software/containers/hashicorp/vault/5fda55bd2937386820429e0c + repo = "quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a" + tag = local.tag_version_ubi + } + }, + "ent" = { + docker = { + // https://hub.docker.com/r/hashicorp/vault-enterprise + repo = "hashicorp/vault-enterprise" + tag = local.tag_version + } + ecr = { + // https://gallery.ecr.aws/hashicorp/vault-enterprise + repo = "public.ecr.aws/hashicorp/vault-enterprise" + tag = local.tag_version + } + quay = { + // https://catalog.redhat.com/software/containers/hashicorp/vault-enterprise/5fda5633ac3db90370a26443 + repo = "quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2" + tag = local.tag_version_ubi + } + }, + "ent.fips1402" = { + docker = { + // https://hub.docker.com/r/hashicorp/vault-enterprise-fips + repo = "hashicorp/vault-enterprise-fips" + tag = local.tag_version + } + ecr = { + // https://gallery.ecr.aws/hashicorp/vault-enterprise-fips + repo = "public.ecr.aws/hashicorp/vault-enterprise-fips" + tag = local.tag_version + } + quay = { + // https://catalog.redhat.com/software/containers/hashicorp/vault-enterprise-fips/628d50e37ff70c66a88517ea + repo = "quay.io/redhat-isv-containers/6283f645d02c6b16d9caeb8e" + tag = local.tag_version_ubi + } + }, + "ent.hsm" = { + docker = { + // https://hub.docker.com/r/hashicorp/vault-enterprise + repo = "hashicorp/vault-enterprise" + tag = local.tag_version + } + ecr = { + // https://gallery.ecr.aws/hashicorp/vault-enterprise + repo = "public.ecr.aws/hashicorp/vault-enterprise" + tag = local.tag_version + } + quay = { + // https://catalog.redhat.com/software/containers/hashicorp/vault-enterprise/5fda5633ac3db90370a26443 + repo = "quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2" + tag = local.tag_version_ubi + } + }, + "ent.hsm.fips1402" = { + docker = { + // https://hub.docker.com/r/hashicorp/vault-enterprise + repo = "hashicorp/vault-enterprise" + tag = local.tag_version + } + ecr = { + // https://gallery.ecr.aws/hashicorp/vault-enterprise + repo = "public.ecr.aws/hashicorp/vault-enterprise" + tag = local.tag_version + } + quay = { + // https://catalog.redhat.com/software/containers/hashicorp/vault-enterprise/5fda5633ac3db90370a26443 + repo = "quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2" + tag = local.tag_version_ubi + } + }, + } // The additional '-0' is required in the constraint since without it, the semver function will // only compare the non-pre-release parts (Major.Minor.Patch) of the version and the constraint, // which can lead to unexpected results. - version_includes_build_date = semverconstraint(var.vault_product_version, ">=1.11.0-0") + version_includes_build_date = semverconstraint(var.vault_version, ">=1.11.0-0")
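+ // For example (illustrative): with a pre-release version such as "1.16.0-beta1", the + // ">=1.11.0-0" constraint matches, whereas a bare ">=1.11.0" may not, since many semver + // implementations treat pre-release versions specially; the '-0' makes the handling of + // pre-releases explicit.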
https://catalog.redhat.com/software/containers/hashicorp/vault-enterprise/5fda5633ac3db90370a26443 + repo = "quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2" + tag = local.tag_version_ubi + } + }, + "ent.hsm.fips1402" = { + docker = { + // https://hub.docker.com/r/hashicorp/vault-enterprise + repo = "hashicorp/vault-enterprise" + tag = local.tag_version + } + ecr = { + // https://gallery.ecr.aws/hashicorp/vault-enterprise + repo = "public.ecr.aws/hashicorp/vault-enterprise" + tag = local.tag_version + } + quay = { + // https://catalog.redhat.com/software/containers/hashicorp/vault-enterprise/5fda5633ac3db90370a26443 + repo = "quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2" + tag = local.tag_version_ubi + } + }, + } // The additional '-0' is required in the constraint since without it, the semver function will // only compare the non-pre-release parts (Major.Minor.Patch) of the version and the constraint, // which can lead to unexpected results. - version_includes_build_date = semverconstraint(var.vault_product_version, ">=1.11.0-0") + version_includes_build_date = semverconstraint(var.vault_version, ">=1.11.0-0") } step "read_license" { - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -44,20 +144,34 @@ scenario "k8s" { } step "load_docker_image" { - module = module.load_docker_image + description = <<-EOF + Load a Vault container image into the kind k8s cluster and verify its tags. If no + var.container_image_archive has been set, it will attempt to load an image matching the + var.vault_version from the matrix.repo. + EOF + module = module.load_docker_image + depends_on = [step.create_kind_cluster] + + verifies = [ + quality.vault_artifact_container_alpine, + quality.vault_artifact_container_ubi, + quality.vault_artifact_container_tags, + ] variables { cluster_name = step.create_kind_cluster.cluster_name - image = local.image_repo - tag = local.image_tag - archive = var.vault_docker_image_archive + image = local.repo_metadata[matrix.edition][matrix.repo].repo + tag = local.repo_metadata[matrix.edition][matrix.repo].tag + archive = var.container_image_archive } - - depends_on = [step.create_kind_cluster] } step "deploy_vault" { module = module.k8s_deploy_vault + depends_on = [ + step.load_docker_image, + step.create_kind_cluster, + ] variables { image_tag = step.load_docker_image.tag @@ -65,55 +179,26 @@ scenario "k8s" { image_repository = step.load_docker_image.repository kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 vault_edition = matrix.edition - vault_log_level = var.vault_log_level - ent_license = matrix.edition != "oss" ? step.read_license.license : null + vault_log_level = var.log_level + ent_license = matrix.edition != "ce" ?
step.read_license.license : null } - - depends_on = [step.load_docker_image, step.create_kind_cluster] - } - - step "verify_build_date" { - skip_step = !local.version_includes_build_date - module = module.k8s_verify_build_date - - variables { - vault_pods = step.deploy_vault.vault_pods - vault_root_token = step.deploy_vault.vault_root_token - kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 - context_name = step.create_kind_cluster.context_name - } - - depends_on = [step.deploy_vault] } step "verify_replication" { - module = module.k8s_verify_replication - - variables { - vault_pods = step.deploy_vault.vault_pods - vault_edition = matrix.edition - kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 - context_name = step.create_kind_cluster.context_name - } - + module = module.k8s_verify_replication depends_on = [step.deploy_vault] - } - - step "verify_ui" { - module = module.k8s_verify_ui - skip_step = matrix.edition == "oss" variables { vault_pods = step.deploy_vault.vault_pods + vault_edition = matrix.edition kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 context_name = step.create_kind_cluster.context_name } - - depends_on = [step.deploy_vault] } step "verify_version" { - module = module.k8s_verify_version + module = module.k8s_verify_version + depends_on = [step.deploy_vault] variables { vault_pods = step.deploy_vault.vault_pods @@ -124,12 +209,11 @@ scenario "k8s" { check_build_date = local.version_includes_build_date vault_build_date = var.vault_build_date } - - depends_on = [step.deploy_vault] } step "verify_write_data" { - module = module.k8s_verify_write_data + module = module.k8s_verify_write_data + depends_on = [step.deploy_vault] variables { vault_pods = step.deploy_vault.vault_pods @@ -137,7 +221,5 @@ scenario "k8s" { kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 context_name = step.create_kind_cluster.context_name } - - depends_on = [step.deploy_vault] } } diff --git a/enos/k8s/enos-terraform-k8s.hcl b/enos/k8s/enos-terraform-k8s.hcl index ad9215e33b12..d7a14538c5e6 100644 --- a/enos/k8s/enos-terraform-k8s.hcl +++ b/enos/k8s/enos-terraform-k8s.hcl @@ -1,23 +1,20 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform "k8s" { required_version = ">= 1.2.0" + required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } helm = { - source = "hashicorp/helm" + source = "hashicorp/helm" } } } terraform_cli "default" { plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null - - credentials "app.terraform.io" { - token = var.tfc_api_token - } } diff --git a/enos/k8s/enos-variables-k8s.hcl b/enos/k8s/enos-variables-k8s.hcl index 86bf9d5e3642..26ea3d0ce8ec 100644 --- a/enos/k8s/enos-variables-k8s.hcl +++ b/enos/k8s/enos-variables-k8s.hcl @@ -1,37 +1,19 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 -variable "vault_image_repository" { - description = "The repository for the docker image to load, i.e. hashicorp/vault" +variable "container_image_archive" { + description = "The path to the location of the container image archive to test" type = string - default = null + default = null # If none is given we'll simply load a container from a repo } -variable "vault_log_level" { +variable "log_level" { description = "The server log level for Vault logs. 
Supported values (in order of detail) are trace, debug, info, warn, and err." type = string - default = "info" -} - -variable "vault_product_version" { - description = "The vault product version to test" - type = string - default = null -} - -variable "vault_product_revision" { - type = string - description = "The vault product revision to test" - default = null -} - -variable "vault_docker_image_archive" { - description = "The path to the location of the docker image archive to test" - type = string - default = null + default = "trace" } -variable "vault_instance_count" { +variable "instance_count" { description = "How many instances to create for the Vault cluster" type = number default = 3 @@ -43,13 +25,18 @@ variable "terraform_plugin_cache_dir" { default = null } -variable "tfc_api_token" { - description = "The Terraform Cloud QTI Organization API token." +variable "vault_build_date" { + description = "The expected vault build date" + type = string + default = "" +} + +variable "vault_revision" { type = string + description = "The expected vault revision" } -variable "vault_build_date" { - description = "The build date for the vault docker image" +variable "vault_version" { + description = "The expected vault version" type = string - default = "" } diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf index 68f47d19dd71..3fcb77a7067c 100644 --- a/enos/modules/autopilot_upgrade_storageconfig/main.tf +++ b/enos/modules/autopilot_upgrade_storageconfig/main.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "vault_product_version" {} diff --git a/enos/modules/az_finder/main.tf b/enos/modules/az_finder/main.tf deleted file mode 100644 index 3508ff0cc39c..000000000000 --- a/enos/modules/az_finder/main.tf +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - } -} - -variable "instance_type" { - default = ["t3.small"] - type = list(string) -} - -data "aws_ec2_instance_type_offerings" "infra" { - filter { - name = "instance-type" - values = var.instance_type - } - - location_type = "availability-zone" -} - -output "availability_zones" { - value = data.aws_ec2_instance_type_offerings.infra.locations -} diff --git a/enos/modules/backend_consul/main.tf b/enos/modules/backend_consul/main.tf new file mode 100644 index 000000000000..1d0a514e0ec9 --- /dev/null +++ b/enos/modules/backend_consul/main.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. 
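A quick illustration of the tag arithmetic the k8s scenario above relies on. This is a standalone sketch, not part of the diff, and the input version is an assumed example:

```hcl
// Standalone sketch of the k8s scenario's tag derivation; "1.15.2+ent" is an
// assumed example input, not a pinned version.
locals {
  example_version = "1.15.2+ent"
  tag_version     = replace(local.example_version, "+ent", "-ent") // => "1.15.2-ent"
  tag_version_ubi = "${local.tag_version}-ubi"                     // => "1.15.2-ent-ubi"
}
// The scenario then indexes repo_metadata[matrix.edition][matrix.repo] to pick
// the repository and its matching tag; only the quay entries use the -ubi tag.
```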
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.4" + } + } +} + +locals { + bin_path = "${var.install_dir}/consul" +} + +resource "enos_bundle_install" "consul" { + for_each = var.hosts + + destination = var.install_dir + release = merge(var.release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.bin_path + data_dir = var.data_dir + config_dir = var.config_dir + config = { + data_dir = var.data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"] + server = true + bootstrap_expect = length(var.hosts) + log_level = var.log_level + log_file = var.log_dir + } + license = var.license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.hosts[each.key].public_ip + } + } +} diff --git a/enos/modules/backend_consul/outputs.tf b/enos/modules/backend_consul/outputs.tf new file mode 100644 index 000000000000..5f78e3f85092 --- /dev/null +++ b/enos/modules/backend_consul/outputs.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "private_ips" { + description = "Consul cluster target host private_ips" + value = [for host in var.hosts : host.private_ip] +} + +output "public_ips" { + description = "Consul cluster target host public_ips" + value = [for host in var.hosts : host.public_ip] +} + +output "hosts" { + description = "The Consul cluster instances that were created" + + value = var.hosts +} diff --git a/enos/modules/backend_consul/variables.tf b/enos/modules/backend_consul/variables.tf new file mode 100644 index 000000000000..c404c0ff5ed5 --- /dev/null +++ b/enos/modules/backend_consul/variables.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "cluster_name" { + type = string + description = "The name of the Consul cluster" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The tag key for searching for Consul nodes" + default = null +} + +variable "config_dir" { + type = string + description = "The directory where the consul will write config files" + default = "/etc/consul.d" +} + +variable "data_dir" { + type = string + description = "The directory where the consul will store data" + default = "/opt/consul/data" +} + +variable "hosts" { + description = "The target machines host addresses to use for the consul cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "install_dir" { + type = string + description = "The directory where the consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The consul enterprise license" + default = null +} + +variable "log_dir" { + type = string + description = "The directory where the consul will write log files" + default = "/var/log/consul.d" +} + +variable "log_level" { + type = string + description = "The consul service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." 
+ } +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.3" + edition = "ce" + } +} diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf index 2e6afa215e9e..415b058a3be5 100644 --- a/enos/modules/backend_raft/main.tf +++ b/enos/modules/backend_raft/main.tf @@ -1,49 +1,70 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 -// Shim module to handle the fact that Vault doesn't actually need a backend module +// Shim module to handle the fact that Vault doesn't actually need a backend module when we use raft. terraform { + required_version = ">= 1.2.0" + required_providers { - aws = { - source = "hashicorp/aws" - } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" } } } -variable "ami_id" { +variable "cluster_name" { default = null } -variable "common_tags" { + +variable "cluster_tag_key" { default = null } -variable "consul_license" { + +variable "config_dir" { default = null } -variable "consul_release" { + +variable "consul_log_level" { default = null } -variable "environment" { + +variable "data_dir" { + default = null +} + +variable "install_dir" { default = null } -variable "instance_type" { + +variable "license" { default = null } -variable "kms_key_arn" { + +variable "log_dir" { default = null } -variable "project_name" { + +variable "log_level" { default = null } -variable "ssh_aws_keypair" { + +variable "release" { default = null } -variable "vpc_id" { + +variable "hosts" { default = null } -output "consul_cluster_tag" { - value = null +output "private_ips" { + value = [for host in var.hosts : host.private_ip] +} + +output "public_ips" { + value = [for host in var.hosts : host.public_ip] +} + +output "hosts" { + value = var.hosts } diff --git a/enos/modules/build_artifactory_artifact/locals.tf b/enos/modules/build_artifactory_artifact/locals.tf new file mode 100644 index 000000000000..97a3ab689cb9 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/locals.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. 
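For readers wiring the new backend_consul module into a scenario, a hypothetical invocation might look like the following; the source path, host addresses, and release pin are placeholder assumptions, not values from this change:

```hcl
// Hypothetical wiring of the backend_consul module; all values are placeholders.
module "backend" {
  source = "../backend_consul" // assumed relative path

  cluster_name    = "vault-ci-consul"
  cluster_tag_key = "consul-cluster"
  license         = null // CE Consul requires no license
  log_level       = "debug"
  release         = { version = "1.15.3", edition = "ce" }

  hosts = {
    "0" = { ipv6 = "", private_ip = "10.13.0.10", public_ip = "203.0.113.10" }
    "1" = { ipv6 = "", private_ip = "10.13.0.11", public_ip = "203.0.113.11" }
    "2" = { ipv6 = "", private_ip = "10.13.0.12", public_ip = "203.0.113.12" }
  }
}
```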
+# SPDX-License-Identifier: BUSL-1.1 + +locals { + + // file name extensions for the install packages of vault for the various architectures, distributions and editions + package_extensions = { + amd64 = { + amzn = "-1.x86_64.rpm" + leap = "-1.x86_64.rpm" + rhel = "-1.x86_64.rpm" + sles = "-1.x86_64.rpm" + ubuntu = "-1_amd64.deb" + } + arm64 = { + amzn = "-1.aarch64.rpm" + leap = "-1.aarch64.rpm" + rhel = "-1.aarch64.rpm" + sles = "-1.aarch64.rpm" + ubuntu = "-1_arm64.deb" + } + } + + // product_version --> artifact_version + artifact_version = replace(var.product_version, var.edition, "ent") + + // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) + artifact_package_release_names = { + amzn = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + }, + leap = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + }, + rhel = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + }, + sles = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + } + ubuntu = { + "ce" = "vault_" + "ent" = "vault-enterprise_", + "ent.fips1402" = "vault-enterprise-fips1402_", + "ent.hsm" = "vault-enterprise-hsm_", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", + } + } + + # Prefix for the artifact name. Ex: vault_, vault-, vault-enterprise_, vault-enterprise-hsm-fips1402-, etc + artifact_name_prefix = var.artifact_type == "package" ? local.artifact_package_release_names[var.distro][var.edition] : "vault_" + # Suffix and extension for the artifact name. Ex: _linux_.zip, + artifact_name_extension = var.artifact_type == "package" ? local.package_extensions[var.arch][var.distro] : "_linux_${var.arch}.zip" + # Combine prefix/suffix/extension together to form the artifact name + artifact_name = var.artifact_type == "package" ? "${local.artifact_name_prefix}${replace(local.artifact_version, "-", "~")}${local.artifact_name_extension}" : "${local.artifact_name_prefix}${var.product_version}${local.artifact_name_extension}" +} diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf new file mode 100644 index 000000000000..fb8e4c0e1d8f --- /dev/null +++ b/enos/modules/build_artifactory_artifact/main.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.2.3" + } + } +} + +data "enos_artifactory_item" "vault" { + username = var.artifactory_username + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = var.edition == "ce" ? "vault/*" : "vault-enterprise/*" + properties = tomap({ + "commit" = var.revision + "product-name" = var.edition == "ce" ? 
"vault" : "vault-enterprise" + "product-version" = local.artifact_version + }) +} diff --git a/enos/modules/build_artifactory_artifact/outputs.tf b/enos/modules/build_artifactory_artifact/outputs.tf new file mode 100644 index 000000000000..d05b5bf7959a --- /dev/null +++ b/enos/modules/build_artifactory_artifact/outputs.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +output "url" { + value = data.enos_artifactory_item.vault.results[0].url + description = "The artifactory download url for the artifact" +} + +output "sha256" { + value = data.enos_artifactory_item.vault.results[0].sha256 + description = "The sha256 checksum for the artifact" +} + +output "size" { + value = data.enos_artifactory_item.vault.results[0].size + description = "The size in bytes of the artifact" +} + +output "name" { + value = data.enos_artifactory_item.vault.results[0].name + description = "The name of the artifact" +} + +output "vault_artifactory_release" { + value = { + url = data.enos_artifactory_item.vault.results[0].url + sha256 = data.enos_artifactory_item.vault.results[0].sha256 + username = var.artifactory_username + token = var.artifactory_token + } +} diff --git a/enos/modules/build_artifactory_artifact/variables.tf b/enos/modules/build_artifactory_artifact/variables.tf new file mode 100644 index 000000000000..a2d9042af535 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/variables.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 +variable "artifactory_username" { + type = string + description = "The username to use when connecting to artifactory" + default = null +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-crt-stable-local*" +} +variable "arch" {} +variable "artifact_type" {} +variable "artifact_path" {} +variable "distro" {} +variable "edition" {} +variable "revision" {} +variable "product_version" {} +variable "build_tags" { default = null } +variable "bundle_path" { default = null } +variable "goarch" { default = null } +variable "goos" { default = null } diff --git a/enos/modules/build_artifactory_package/main.tf b/enos/modules/build_artifactory_package/main.tf new file mode 100644 index 000000000000..1e7d0826d22f --- /dev/null +++ b/enos/modules/build_artifactory_package/main.tf @@ -0,0 +1,160 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "arch" { + type = string + description = "The architecture for the desired artifact" +} + +variable "artifactory_username" { + type = string + description = "The username to use when connecting to Artifactory" +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to Artifactory" + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The Artifactory host to search for Vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "distro" { + type = string + description = "The distro for the desired artifact (ubuntu or rhel)" +} + +variable "distro_version" { + type = string + description = "The RHEL version for .rpm packages" + default = "9" +} + +variable "edition" { + type = string + description = "The edition of Vault to use" +} + +variable "product_version" { + type = string + description = "The version of Vault to use" +} + +// Shim variables that we don't use but include to satisfy the build module "interface" +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "artifactory_repo" { default = null } +variable "build_tags" { default = null } +variable "build_ui" { default = null } +variable "bundle_path" { default = null } +variable "goarch" { default = null } +variable "goos" { default = null } +variable "revision" { default = null } + +locals { + // File name prefixes for the various distributions and editions + artifact_prefix = { + ubuntu = { + "ce" = "vault_" + "ent" = "vault-enterprise_", + "ent.hsm" = "vault-enterprise-hsm_", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", + "oss" = "vault_" + }, + rhel = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + "oss" = "vault-" + } + } + + // Format the version and edition to use in the artifact name + artifact_version = { + "ce" = "${var.product_version}" + "ent" = "${var.product_version}+ent" + "ent.hsm" = "${var.product_version}+ent" + "ent.hsm.fips1402" = "${var.product_version}+ent" + "oss" = "${var.product_version}" + } + + // File name extensions for the various architectures and distributions + artifact_extension = { + amd64 = { + ubuntu = "-1_amd64.deb" + rhel = "-1.x86_64.rpm" + } + arm64 = { + ubuntu = "-1_arm64.deb" + rhel = "-1.aarch64.rpm" + } + } + + // Use the above variables to construct the artifact name to look up in Artifactory. + // Will look something like: + // vault_1.12.2-1_arm64.deb + // vault-enterprise_1.12.2+ent-1_amd64.deb + // vault-enterprise-hsm-1.12.2+ent-1.x86_64.rpm + artifact_name = "${local.artifact_prefix[var.distro][var.edition]}${local.artifact_version[var.edition]}${local.artifact_extension[var.arch][var.distro]}" + + // The path within the Artifactory repo that corresponds to the appropriate architecture + artifactory_repo_path_dir = { + "amd64" = "x86_64" + "arm64" = "aarch64" + } +} + +data "enos_artifactory_item" "vault_package" { + username = var.artifactory_username + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.distro == "rhel" ? "hashicorp-rpm-release-local*" : "hashicorp-apt-release-local*" + path = var.distro == "rhel" ? 
"RHEL/${var.distro_version}/${local.artifactory_repo_path_dir[var.arch]}/stable" : "pool/${var.arch}/main" +} + +output "results" { + value = data.enos_artifactory_item.vault_package.results +} + +output "url" { + value = data.enos_artifactory_item.vault_package.results[0].url + description = "The artifactory download url for the artifact" +} + +output "sha256" { + value = data.enos_artifactory_item.vault_package.results[0].sha256 + description = "The sha256 checksum for the artifact" +} + +output "size" { + value = data.enos_artifactory_item.vault_package.results[0].size + description = "The size in bytes of the artifact" +} + +output "name" { + value = data.enos_artifactory_item.vault_package.results[0].name + description = "The name of the artifact" +} + +output "release" { + value = { + url = data.enos_artifactory_item.vault_package.results[0].url + sha256 = data.enos_artifactory_item.vault_package.results[0].sha256 + username = var.artifactory_username + token = var.artifactory_token + } +} diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf index 1e125763a16d..d113c9cbe05e 100644 --- a/enos/modules/build_crt/main.tf +++ b/enos/modules/build_crt/main.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # Shim module since CRT provided things will use the crt_bundle_path variable variable "bundle_path" { @@ -26,27 +26,12 @@ variable "artifactory_host" { default = null } variable "artifactory_repo" { default = null } variable "artifactory_username" { default = null } variable "artifactory_token" { default = null } -variable "arch" { - default = null -} -variable "artifact_path" { - default = null -} -variable "artifact_type" { - default = null -} -variable "distro" { - default = null -} -variable "edition" { - default = null -} -variable "instance_type" { - default = null -} -variable "revision" { - default = null -} -variable "product_version" { - default = null -} +variable "arch" { default = null } +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "build_ui" { default = null } +variable "distro" { default = null } +variable "distro_version" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf index c7ee686e3823..1ad1338bff91 100644 --- a/enos/modules/build_local/main.tf +++ b/enos/modules/build_local/main.tf @@ -1,17 +1,16 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "bundle_path" { - type = string - default = "/tmp/vault.zip" +variable "artifact_path" { + description = "Where to create the zip bundle of the Vault build" } variable "build_tags" { @@ -19,6 +18,12 @@ variable "build_tags" { description = "The build tags to pass to the Go compiler" } +variable "build_ui" { + type = bool + description = "Whether or not we should build the UI when creating the local build" + default = true +} + variable "goarch" { type = string description = "The Go architecture target" @@ -35,38 +40,30 @@ variable "artifactory_host" { default = null } variable "artifactory_repo" { default = null } variable "artifactory_username" { default = null } variable "artifactory_token" { default = null } -variable "arch" { - default = null -} -variable "artifact_path" { - default = null -} -variable "artifact_type" { - default = null -} -variable "distro" { - default = null -} -variable "edition" { - default = null -} -variable "instance_type" { - default = null -} -variable "revision" { - default = null -} -variable "product_version" { - default = null +variable "arch" { default = null } +variable "artifact_type" { default = null } +variable "distro" { default = null } +variable "distro_version" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } + +module "local_metadata" { + source = "../get_local_metadata" } resource "enos_local_exec" "build" { - scripts = ["${path.module}/scripts/build.sh"] + scripts = [abspath("${path.module}/scripts/build.sh")] environment = { - BUNDLE_PATH = var.bundle_path, - GO_TAGS = join(" ", var.build_tags) - GOARCH = var.goarch - GOOS = var.goos + BASE_VERSION = module.local_metadata.version_base + BIN_PATH = abspath("${path.module}/../../../dist") + BUILD_UI = tostring(var.build_ui) + BUNDLE_PATH = abspath(var.artifact_path) + GO_TAGS = join(" ", var.build_tags) + GOARCH = var.goarch + GOOS = var.goos + PRERELEASE_VERSION = module.local_metadata.version_pre + VERSION_METADATA = module.local_metadata.version_meta } } diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh index 64f1bbd8d7d6..06fc03f39462 100755 --- a/enos/modules/build_local/scripts/build.sh +++ b/enos/modules/build_local/scripts/build.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -eux -o pipefail @@ -11,5 +11,14 @@ export CGO_ENABLED=0 root_dir="$(git rev-parse --show-toplevel)" pushd "$root_dir" > /dev/null -make ci-build-ui ci-build ci-bundle + +if [ -n "$BUILD_UI" ] && [ "$BUILD_UI" = "true" ]; then + make ci-build-ui +fi + +make ci-build + popd > /dev/null + +echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH" +zip -r -j "$BUNDLE_PATH" "$BIN_PATH/" diff --git a/enos/modules/create_vpc/main.tf b/enos/modules/create_vpc/main.tf new file mode 100644 index 000000000000..55cbf0165b26 --- /dev/null +++ b/enos/modules/create_vpc/main.tf @@ -0,0 +1,114 @@ +# Copyright (c) HashiCorp, Inc. 
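And the analogous local-build path via build_local; the tag set and output path are assumptions for illustration:

```hcl
// Hypothetical local-build invocation; tags and path are example values.
module "build_vault_local" {
  source = "../build_local" // assumed relative path

  artifact_path = "/tmp/vault-local.zip"
  build_tags    = ["ui"] // assumed tag set
  build_ui      = true
  goarch        = "arm64"
  goos          = "linux"
}
```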
+# SPDX-License-Identifier: BUSL-1.1 + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +resource "random_string" "cluster_id" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_vpc" "vpc" { + // Always set the ipv4 cidr block as it's required in "dual-stack" VPCs which we create. + cidr_block = var.ipv4_cidr + enable_dns_hostnames = true + enable_dns_support = true + assign_generated_ipv6_cidr_block = var.ip_version == 6 + + tags = merge( + var.common_tags, + { + "Name" = var.name + }, + ) +} + +resource "aws_subnet" "subnet" { + count = length(data.aws_availability_zones.available.names) + vpc_id = aws_vpc.vpc.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + // IPV4, but since we need to support ipv4 connections from the machine running enos, we're + // always going to need ipv4 available. + map_public_ip_on_launch = true + cidr_block = cidrsubnet(var.ipv4_cidr, 8, count.index) + + // IPV6, only set these when we want to run in ipv6 mode. + assign_ipv6_address_on_creation = var.ip_version == 6 + ipv6_cidr_block = var.ip_version == 6 ? cidrsubnet(aws_vpc.vpc.ipv6_cidr_block, 4, count.index) : null + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-subnet-${data.aws_availability_zones.available.names[count.index]}" + }, + ) +} + +resource "aws_internet_gateway" "ipv4" { + vpc_id = aws_vpc.vpc.id + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-igw" + }, + ) +} + +resource "aws_egress_only_internet_gateway" "ipv6" { + count = var.ip_version == 6 ? 1 : 0 + vpc_id = aws_vpc.vpc.id +} + +resource "aws_route" "igw_ipv4" { + route_table_id = aws_vpc.vpc.default_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.ipv4.id +} + +resource "aws_route" "igw_ipv6" { + count = var.ip_version == 6 ? 1 : 0 + route_table_id = aws_vpc.vpc.default_route_table_id + destination_ipv6_cidr_block = "::/0" + egress_only_gateway_id = aws_egress_only_internet_gateway.ipv6[0].id +} + +resource "aws_security_group" "default" { + vpc_id = aws_vpc.vpc.id + + ingress { + description = "allow_ingress_from_all" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = var.ip_version == 6 ? ["::/0"] : null + } + + egress { + description = "allow_egress_from_all" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = var.ip_version == 6 ? ["::/0"] : null + } + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-default" + }, + ) +} diff --git a/enos/modules/create_vpc/outputs.tf b/enos/modules/create_vpc/outputs.tf new file mode 100644 index 000000000000..d54fbd8131c0 --- /dev/null +++ b/enos/modules/create_vpc/outputs.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
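The per-AZ subnet carving in create_vpc is easiest to see with concrete numbers; a minimal sketch using the module's default CIDR:

```hcl
// Standalone sketch of the subnet math above: one /24 per availability zone,
// carved from the default /16 (indices stand in for AZ positions).
locals {
  ipv4_cidr  = "10.13.0.0/16"
  az_subnets = [for i in range(3) : cidrsubnet(local.ipv4_cidr, 8, i)]
  // => ["10.13.0.0/24", "10.13.1.0/24", "10.13.2.0/24"]
}
```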
+# SPDX-License-Identifier: BUSL-1.1 + +output "id" { + description = "Created VPC ID" + value = aws_vpc.vpc.id +} + +output "ipv4_cidr" { + description = "The VPC subnet CIDR for ipv4 mode" + value = var.ipv4_cidr +} + +output "ipv6_cidr" { + description = "The VPC subnet CIDR for ipv6 mode" + value = aws_vpc.vpc.ipv6_cidr_block +} + +output "cluster_id" { + description = "A unique string associated with the VPC" + value = random_string.cluster_id.result +} diff --git a/enos/modules/create_vpc/variables.tf b/enos/modules/create_vpc/variables.tf new file mode 100644 index 000000000000..80c64ea3c1d3 --- /dev/null +++ b/enos/modules/create_vpc/variables.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "name" { + type = string + default = "vault-ci" + description = "The name of the VPC" +} + +variable "ip_version" { + type = number + default = 4 + description = "The IP version to use for the default subnet" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "ipv4_cidr" { + type = string + default = "10.13.0.0/16" + description = "The CIDR block for the VPC when using IPV4 mode" +} + +variable "environment" { + description = "Name of the environment." + type = string + default = "vault-ci" +} + +variable "common_tags" { + description = "Tags to set for all resources" + type = map(string) + default = { "Project" : "vault-ci" } +} diff --git a/enos/modules/disable_selinux/main.tf b/enos/modules/disable_selinux/main.tf new file mode 100644 index 000000000000..7ed2f5263773 --- /dev/null +++ b/enos/modules/disable_selinux/main.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts to install packages on" +} + +resource "enos_remote_exec" "make_selinux_permissive" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/make-selinux-permissive.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh b/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh new file mode 100644 index 000000000000..cedc23d46d46 --- /dev/null +++ b/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +if ! type getenforce &> /dev/null; then + exit 0 +fi + +if sudo getenforce | grep Enforcing; then + sudo setenforce 0 +fi diff --git a/enos/modules/ec2_info/main.tf b/enos/modules/ec2_info/main.tf new file mode 100644 index 000000000000..1ca8d575f69e --- /dev/null +++ b/enos/modules/ec2_info/main.tf @@ -0,0 +1,266 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Note: in order to use the openSUSE Leap AMIs, the AWS account in use must "subscribe" +# and accept SUSE's terms of use. You can do this at the links below. If the AWS account +# you are using is already subscribed, this confirmation will be displayed on each page. 
+# openSUSE Leap arm64 subscription: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 +# openSUSE Leap amd64 subscription: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + +locals { + architectures = toset(["arm64", "x86_64"]) + amazon_owner_id = "591542846629" + canonical_owner_id = "099720109477" + suse_owner_id = "013907871322" + opensuse_owner_id = "679593333241" + redhat_owner_id = "309956199498" + ids = { + // NOTE: If you modify these versions, you'll probably also need to update the `softhsm_install` + // module to match. + "arm64" = { + "amzn" = { + "2" = data.aws_ami.amzn_2["arm64"].id + "2023" = data.aws_ami.amzn_2023["arm64"].id + } + "leap" = { + "15.6" = data.aws_ami.leap_15["arm64"].id + } + "rhel" = { + "8.10" = data.aws_ami.rhel_8["arm64"].id + "9.4" = data.aws_ami.rhel_9["arm64"].id + } + "sles" = { + "15.6" = data.aws_ami.sles_15["arm64"].id + } + "ubuntu" = { + "20.04" = data.aws_ami.ubuntu_2004["arm64"].id + "22.04" = data.aws_ami.ubuntu_2204["arm64"].id + "24.04" = data.aws_ami.ubuntu_2404["arm64"].id + } + } + "amd64" = { + "amzn" = { + "2" = data.aws_ami.amzn_2["x86_64"].id + "2023" = data.aws_ami.amzn_2023["x86_64"].id + } + "leap" = { + "15.6" = data.aws_ami.leap_15["x86_64"].id + } + "rhel" = { + "8.10" = data.aws_ami.rhel_8["x86_64"].id + "9.4" = data.aws_ami.rhel_9["x86_64"].id + } + "sles" = { + "15.6" = data.aws_ami.sles_15["x86_64"].id + } + "ubuntu" = { + "20.04" = data.aws_ami.ubuntu_2004["x86_64"].id + "22.04" = data.aws_ami.ubuntu_2204["x86_64"].id + "24.04" = data.aws_ami.ubuntu_2404["x86_64"].id + } + } + } +} + +data "aws_ami" "amzn_2" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["amzn2-ami-ecs-hvm-2.0*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.amazon_owner_id] +} + +data "aws_ami" "amzn_2023" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["al2023-ami-ecs-hvm*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.amazon_owner_id] +} + +data "aws_ami" "leap_15" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["openSUSE-Leap-15-6*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.opensuse_owner_id] +} + +data "aws_ami" "rhel_8" { + most_recent = true + for_each = local.architectures + + # Currently the latest point release - 1 + filter { + name = "name" + values = ["RHEL-8.10*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.redhat_owner_id] +} + +data "aws_ami" "rhel_9" { + most_recent = true + for_each = local.architectures + + # Currently the latest point release - 1 + filter { + name = "name" + values = ["RHEL-9.4*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.redhat_owner_id] +} + +data "aws_ami" "sles_15" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["suse-sles-15-sp6-v*-hvm-*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.suse_owner_id] +} + +data "aws_ami" "ubuntu_2004" { + most_recent = true + for_each = local.architectures + + filter { + name
= "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-20.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2204" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-22.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2404" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_region" "current" {} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +output "ami_ids" { + value = local.ids +} + +output "current_region" { + value = data.aws_region.current +} + +output "availability_zones" { + value = data.aws_availability_zones.available +} diff --git a/enos/modules/generate_dr_operation_token/main.tf b/enos/modules/generate_dr_operation_token/main.tf new file mode 100644 index 000000000000..c582c0c73ab0 --- /dev/null +++ b/enos/modules/generate_dr_operation_token/main.tf @@ -0,0 +1,82 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "storage_backend" { + type = string + description = "The storage backend to use for the Vault cluster" +} + +locals { + token_id = random_uuid.token_id.id + dr_operation_token = enos_remote_exec.fetch_dr_operation_token.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_dr_operation_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + STORAGE_BACKEND = var.storage_backend + } + + scripts = [abspath("${path.module}/scripts/configure-vault-dr-primary.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "dr_operation_token" { + value = local.dr_operation_token +} diff --git a/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh b/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh new file mode 100755 index 
000000000000..eae9b105c24a --- /dev/null +++ b/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath="${VAULT_INSTALL_DIR}/vault" + +fail() { + echo "$1" >&2 + exit 1 +} + +# Check required environment variables +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$STORAGE_BACKEND" ]] && fail "STORAGE_BACKEND env variable has not been set" + +# Define the policy content +policy_content() { + cat << EOF +path "sys/replication/dr/secondary/promote" { + capabilities = [ "update" ] +} + +path "sys/replication/dr/secondary/update-primary" { + capabilities = [ "update" ] +} +EOF + if [ "$STORAGE_BACKEND" = "raft" ]; then + cat << EOF +path "sys/storage/raft/autopilot/state" { + capabilities = [ "update", "read" ] +} +EOF + fi +} + +# Write the policy +$binpath policy write dr-secondary-promotion - <<< "$(policy_content)" &> /dev/null + +# Configure the failover handler token role +$binpath write auth/token/roles/failover-handler \ + allowed_policies=dr-secondary-promotion \ + orphan=true \ + renewable=false \ + token_type=batch &> /dev/null + +# Create a token for the failover handler role and output the token only +$binpath token create -field=token -role=failover-handler -ttl=8h diff --git a/enos/modules/generate_failover_secondary_token/main.tf b/enos/modules/generate_failover_secondary_token/main.tf new file mode 100644 index 000000000000..537b0afeb7f9 --- /dev/null +++ b/enos/modules/generate_failover_secondary_token/main.tf @@ -0,0 +1,98 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "retry_interval" { + type = string + default = "2" + description = "How long to wait between retries" +} + +variable "secondary_public_key" { + type = string + description = "The secondary public key" +} + +variable "timeout" { + type = string + default = "15" + description = "How many seconds to wait before timing out" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + primary_leader_addr = var.ip_version == 6 ? 
var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + SECONDARY_PUBLIC_KEY = var.secondary_public_key + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/generate-failover-secondary-token.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "secondary_token" { + value = local.secondary_token +} diff --git a/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh b/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh new file mode 100644 index 000000000000..e8e0e3094b12 --- /dev/null +++ b/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# fail must be defined before these checks run, otherwise a missing variable +# would surface as "fail: command not found" instead of the intended message. +[[ -z "${VAULT_INSTALL_DIR}" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "${VAULT_ADDR}" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "${VAULT_TOKEN}" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "${SECONDARY_PUBLIC_KEY}" ]] && fail "SECONDARY_PUBLIC_KEY env variable has not been set" + +binpath="${VAULT_INSTALL_DIR}"/vault +test -x "${binpath}" || fail "unable to locate vault binary at ${binpath}" + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "${end_time}" ]; do + if secondary_token=$(${binpath} write -field token sys/replication/dr/primary/secondary-token id="${VAULT_TOKEN}" secondary_public_key="${SECONDARY_PUBLIC_KEY}"); then + echo "${secondary_token}" + exit 0 + fi + + sleep "${RETRY_INTERVAL}" +done + +fail "Timed out trying to generate secondary token" diff --git a/enos/modules/generate_secondary_public_key/main.tf b/enos/modules/generate_secondary_public_key/main.tf new file mode 100644 index 000000000000..761972dfad87 --- /dev/null +++ b/enos/modules/generate_secondary_public_key/main.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc.
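For reference, when STORAGE_BACKEND is raft, the configure-vault-dr-primary.sh script earlier in this diff renders the following Vault policy. This is simply the script's heredocs concatenated, shown as HCL (Vault policies are themselves HCL):

```hcl
path "sys/replication/dr/secondary/promote" {
  capabilities = ["update"]
}

path "sys/replication/dr/secondary/update-primary" {
  capabilities = ["update"]
}

path "sys/storage/raft/autopilot/state" {
  capabilities = ["update", "read"]
}
```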
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + token_id = random_uuid.token_id.id + secondary_public_key = enos_remote_exec.fetch_secondary_public_key.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_public_key" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write -field secondary_public_key -f sys/replication/dr/secondary/generate-public-key"] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "secondary_public_key" { + value = local.secondary_public_key +} diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf index 49a4a15e24c0..41b2774e3bc9 100644 --- a/enos/modules/generate_secondary_token/main.tf +++ b/enos/modules/generate_secondary_token/main.tf @@ -1,10 +1,10 @@ # Copyright (c) HashiCorp, Inc. 
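The two DR helper modules above are designed to be chained; a hypothetical wiring follows, where the locals holding host objects and tokens are placeholders, not names from this change:

```hcl
// Hypothetical chaining of the DR helper modules; locals are placeholders.
module "secondary_public_key" {
  source = "../generate_secondary_public_key" // assumed path

  ip_version          = 4
  primary_leader_host = local.dr_secondary_leader // placeholder host object
  vault_addr          = "http://127.0.0.1:8200"
  vault_install_dir   = "/opt/vault/bin"
  vault_root_token    = local.secondary_root_token // placeholder
}

module "failover_secondary_token" {
  source = "../generate_failover_secondary_token" // assumed path

  ip_version           = 4
  primary_leader_host  = local.dr_primary_leader // placeholder host object
  secondary_public_key = module.secondary_public_key.secondary_public_key
  vault_addr           = "http://127.0.0.1:8200"
  vault_install_dir    = "/opt/vault/bin"
  vault_root_token     = local.primary_root_token // placeholder
}
```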
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } random = { source = "hashicorp/random" @@ -13,14 +13,43 @@ terraform { } } -variable "vault_install_dir" { +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "replication_type" { type = string - description = "The directory where the Vault binary will be installed" + description = "The type of replication to perform" + + validation { + condition = contains(["dr", "performance"], var.replication_type) + error_message = "The replication_type must be either dr or performance" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" } -variable "primary_leader_public_ip" { +variable "vault_install_dir" { type = string - description = "Vault primary cluster leader Public IP address" + description = "The directory where the Vault binary will be installed" } variable "vault_root_token" { @@ -29,23 +58,25 @@ variable "vault_root_token" { } locals { - token_id = random_uuid.token_id.id - secondary_token = enos_remote_exec.fetch_secondary_token.stdout + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout } + resource "random_uuid" "token_id" {} resource "enos_remote_exec" "fetch_secondary_token" { depends_on = [random_uuid.token_id] environment = { - VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_ADDR = var.vault_addr VAULT_TOKEN = var.vault_root_token } - inline = ["${var.vault_install_dir}/vault write sys/replication/performance/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] + inline = ["${var.vault_install_dir}/vault write sys/replication/${var.replication_type}/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] transport = { ssh = { - host = var.primary_leader_public_ip + host = var.primary_leader_host.public_ip } } } diff --git a/enos/modules/get_local_metadata/main.tf b/enos/modules/get_local_metadata/main.tf index 29376aa86556..2b1ee6d45c1d 100644 --- a/enos/modules/get_local_metadata/main.tf +++ b/enos/modules/get_local_metadata/main.tf @@ -1,34 +1,58 @@ # Copyright (c) HashiCorp, Inc. 
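With the new replication_type variable, generate_secondary_token now serves both replication flavors; a hypothetical performance-replication call (host object and token are placeholders):

```hcl
// Hypothetical invocation; the host object and token are placeholders.
module "performance_secondary_token" {
  source = "../generate_secondary_token" // assumed path

  ip_version          = 4
  primary_leader_host = local.perf_primary_leader // placeholder host object
  replication_type    = "performance"
  vault_addr          = "http://127.0.0.1:8200"
  vault_install_dir   = "/opt/vault/bin"
  vault_root_token    = var.vault_root_token
}
```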
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } resource "enos_local_exec" "get_build_date" { - scripts = ["${path.module}/scripts/build_date.sh"] + scripts = [abspath("${path.module}/scripts/build_date.sh")] +} + +resource "enos_local_exec" "get_revision" { + inline = ["git rev-parse HEAD"] +} + +resource "enos_local_exec" "get_version" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version"] +} + +resource "enos_local_exec" "get_version_base" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-base"] +} + +resource "enos_local_exec" "get_version_pre" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-pre"] +} + +resource "enos_local_exec" "get_version_meta" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-meta"] } output "build_date" { value = trimspace(enos_local_exec.get_build_date.stdout) } -resource "enos_local_exec" "get_version" { - scripts = ["${path.module}/scripts/version.sh"] +output "revision" { + value = trimspace(enos_local_exec.get_revision.stdout) } output "version" { value = trimspace(enos_local_exec.get_version.stdout) } -resource "enos_local_exec" "get_revision" { - inline = ["git rev-parse HEAD"] +output "version_base" { + value = trimspace(enos_local_exec.get_version_base.stdout) } -output "revision" { - value = trimspace(enos_local_exec.get_revision.stdout) +output "version_pre" { + value = trimspace(enos_local_exec.get_version_pre.stdout) +} + +output "version_meta" { + value = trimspace(enos_local_exec.get_version_meta.stdout) } diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh index fc763fd4e91a..ea63c74d8ed3 100755 --- a/enos/modules/get_local_metadata/scripts/build_date.sh +++ b/enos/modules/get_local_metadata/scripts/build_date.sh @@ -1,6 +1,6 @@ -#!/bin/env bash +#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -eu -o pipefail diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh index 74a561f11bd8..ed1238b04733 100755 --- a/enos/modules/get_local_metadata/scripts/version.sh +++ b/enos/modules/get_local_metadata/scripts/version.sh @@ -1,9 +1,97 @@ -#!/bin/env bash +#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. 
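Consumers of get_local_metadata can now pull the version apart; a minimal sketch of reading the new outputs (the example version string assumes a VERSION file holding "1.15.0-beta1" with "ent" metadata):

```hcl
// Minimal sketch of consuming the new outputs; the module takes no inputs.
module "local_metadata" {
  source = "../get_local_metadata" // assumed relative path
}

output "vault_version_full" {
  // e.g. "1.15.0-beta1+ent" when VERSION holds "1.15.0-beta1" and the
  // metadata in version_base.go is "ent".
  value = module.local_metadata.version
}

output "vault_revision" {
  value = module.local_metadata.revision // git SHA of HEAD
}
```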
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 -set -eu -o pipefail +set -euo pipefail -pushd "$(git rev-parse --show-toplevel)" > /dev/null -make ci-get-version -popd > /dev/null +# Get the full version information, e.g. a base of 1.16.0, a prerelease of beta1, and metadata of +# ent combine into 1.16.0-beta1+ent. +# This is only needed for local enos builds in order to get the default version from version_base.go. +# It should match the default version that the binary has been built with. +# CRT release builds use the new static version from ./release/VERSION +function version() { + local version + local prerelease + local metadata + + version=$(version_base) + prerelease=$(version_pre) + metadata=$(version_metadata) + + if [ -n "$metadata" ] && [ -n "$prerelease" ]; then + echo "$version-$prerelease+$metadata" + elif [ -n "$metadata" ]; then + echo "$version+$metadata" + elif [ -n "$prerelease" ]; then + echo "$version-$prerelease" + else + echo "$version" + fi +} + +# Get the base version +function version_base() { + : "${VAULT_VERSION:=""}" + + if [ -n "$VAULT_VERSION" ]; then + echo "$VAULT_VERSION" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $1 }' < "$VERSION_FILE" +} + +# Get the version pre-release +function version_pre() { + : "${VAULT_PRERELEASE:=""}" + + if [ -n "$VAULT_PRERELEASE" ]; then + echo "$VAULT_PRERELEASE" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $2 }' < "$VERSION_FILE" +} + +# Get the version metadata, which is commonly the edition +function version_metadata() { + : "${VAULT_METADATA:=""}" + + if [ -n "$VAULT_METADATA" ]; then + echo "$VAULT_METADATA" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" +} + +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel +} + +# Dispatch to the requested version sub-command +function main() { + case $1 in + version) + version + ;; + version-base) + version_base + ;; + version-pre) + version_pre + ;; + version-meta) + version_metadata + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/enos/modules/install_packages/main.tf b/enos/modules/install_packages/main.tf new file mode 100644 index 000000000000..78ef41c803d9 --- /dev/null +++ b/enos/modules/install_packages/main.tf @@ -0,0 +1,136 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + arch = { + "amd64" = "x86_64" + "arm64" = "aarch64" + } + package_manager = { + "amzn" = "yum" + "opensuse-leap" = "zypper" + "rhel" = "dnf" + "sles" = "zypper" + "ubuntu" = "apt" + } + distro_repos = { + "sles" = { + "15.6" = "https://download.opensuse.org/repositories/network:utilities/SLE_15_SP6/network:utilities.repo" + } + "rhel" = { + "8.10" = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" + "9.4" = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm" + } + } +} + +variable "packages" { + type = list(string) + default = [] +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts to install packages on" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out. This is applied to each step so the total timeout will be longer."
+ default = 120 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Synchronize repositories on remote machines. This does not update packages but only ensures that +# the remote hosts are configured with default upstream repositories that have been refreshed to +# the latest metadata. +resource "enos_remote_exec" "synchronize_repos" { + for_each = var.hosts + + environment = { + DISTRO = enos_host_info.hosts[each.key].distro + PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/synchronize-repos.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Add any additional repositories. +resource "enos_remote_exec" "add_repos" { + for_each = var.hosts + depends_on = [enos_remote_exec.synchronize_repos] + + environment = { + DISTRO_REPOS = try(local.distro_repos[enos_host_info.hosts[each.key].distro][enos_host_info.hosts[each.key].distro_version], "__none") + PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/add-repos.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Install any required packages. +resource "enos_remote_exec" "install_packages" { + for_each = var.hosts + depends_on = [ + enos_remote_exec.synchronize_repos, + enos_remote_exec.add_repos, + ] + + environment = { + PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] + PACKAGES = length(var.packages) >= 1 ? join(" ", var.packages) : "__skip" + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/install-packages.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/install_packages/scripts/add-repos.sh b/enos/modules/install_packages/scripts/add-repos.sh new file mode 100644 index 000000000000..47f327960674 --- /dev/null +++ b/enos/modules/install_packages/scripts/add-repos.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +# Add any repositories that have been passed in +add_repos() { + # If we don't have any repos on the list for this distro, no action needed. + if [ -z "${DISTRO_REPOS:-}" ]; then + echo "DISTRO_REPOS is empty; no repos required for the packages for this Linux distro." + return 0 + fi + + case $PACKAGE_MANAGER in + apt) + # NOTE: We do not currently add any apt repositories in our scenarios. I suspect if that time + # comes we'll need to add support for apt-key here.
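+ # DISTRO_REPOS is a plain space-separated string rather than a bash array. The Terraform caller + # substitutes the literal sentinel "__none" when a distro needs no extra repos, which is why each + # loop below skips that value instead of treating it as a repo to add.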
+ for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + sudo add-apt-repository "${repo}" + done + ;; + dnf) + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + sudo dnf install -y "${repo}" + sudo dnf makecache -y + done + ;; + yum) + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + sudo yum install -y "${repo}" + sudo yum makecache -y + done + ;; + zypper) + # Add each repo + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + if sudo zypper lr "${repo}"; then + echo "A repo named ${repo} already exists, skipping..." + continue + fi + sudo zypper --gpg-auto-import-keys --non-interactive addrepo "${repo}" + done + sudo zypper --gpg-auto-import-keys ref + sudo zypper --gpg-auto-import-keys refs + ;; + *) + fail "Unsupported package manager: ${PACKAGE_MANAGER}" + ;; + esac +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if add_repos; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for distro repos to be set up" diff --git a/enos/modules/install_packages/scripts/install-packages.sh b/enos/modules/install_packages/scripts/install-packages.sh new file mode 100644 index 000000000000..6c6e5dd7dc0e --- /dev/null +++ b/enos/modules/install_packages/scripts/install-packages.sh @@ -0,0 +1,105 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "${PACKAGES}" ]] && fail "PACKAGES env variable has not been set" +[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" + +# Install packages based on the provided packages and package manager. We assume that the repositories +# have already been synchronized by the repo setup that is a prerequisite for this script. +install_packages() { + if [[ "${PACKAGES}" = "__skip" ]]; then + return 0 + fi + + set -x + echo "Installing Dependencies: ${PACKAGES}" + + # Use the default package manager of the current Linux distro to install packages + case $PACKAGE_MANAGER in + apt) + for package in ${PACKAGES}; do + if dpkg -s "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! output=$(sudo apt install -y "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + dnf) + for package in ${PACKAGES}; do + if rpm -q "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! output=$(sudo dnf -y install "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + yum) + for package in ${PACKAGES}; do + if rpm -q "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! 
output=$(sudo yum -y install "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + zypper) + for package in ${PACKAGES}; do + if rpm -q "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! output=$(sudo zypper --non-interactive install -y -l --force-resolution "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + *) + fail "No matching package manager provided." + ;; + esac +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [[ "$(date +%s)" -lt "${end_time}" ]]; do + if install_packages; then + exit 0 + fi + + sleep "${RETRY_INTERVAL}" +done + +fail "Timed out waiting for packages to install" diff --git a/enos/modules/install_packages/scripts/synchronize-repos.sh b/enos/modules/install_packages/scripts/synchronize-repos.sh new file mode 100644 index 000000000000..8ea2c50dbca4 --- /dev/null +++ b/enos/modules/install_packages/scripts/synchronize-repos.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +# The SLES AMIs do not come configured with Zypper repositories by default. To get them you +# have to run SUSEConnect to register the instance with SUSE. On the AMI this is handled +# automatically by a oneshot systemd unit called guestregister.service. This oneshot service needs +# to complete before any other repo or package steps are completed. At the time of writing it's very +# unreliable, so we have to verify ourselves that it has executed correctly, or restart it. We do this +# by checking whether the guestregister.service has reached the "inactive" state that we need. +# If it hasn't reached that state it's usually in some sort of active state, i.e. running, or it has +# failed. If it's in one of the active states we need to let it continue and check the status when +# it completes. If it has completed but failed, we'll restart the service to re-run the script that +# executes SUSEConnect. +sles_check_guestregister_service_and_restart_if_failed() { + local active_state + local failed_state + + # systemctl returns non-zero exit codes. We rely on output here because not all states have + # their own exit code. + set +e + active_state=$(sudo systemctl is-active guestregister.service) + failed_state=$(sudo systemctl is-failed guestregister.service) + set -e + + case "$active_state" in + active | activating | deactivating) + # It's running so we'll return 1 and get retried by the caller + echo "the guestregister.service is still in the ${active_state} state" 1>&2 + return 1 + ;; + *) + if [ "$active_state" == "inactive" ] && [ "$failed_state" == "inactive" ]; then + # The oneshot has completed and hasn't "failed" + echo "the guestregister.service is 'inactive' for both active and failed states" + return 0 + fi + + # Our service is stopped and failed, restart it and hope it works the next time + sudo systemctl restart --wait guestregister.service + ;; + esac +} + +# Check or restart the guestregister service if it has failed. 
If it passes, do another check to make +# sure that the zypper repositories list isn't empty. +sles_ensure_suseconnect() { + local health_output + if ! health_output=$(sles_check_guestregister_service_and_restart_if_failed); then + echo "the guestregister.service failed to reach a healthy state: ${health_output}" 1>&2 + return 1 + fi + + # Make sure Zypper has repositories. + if ! lr_output=$(zypper lr); then + echo "The guestregister.service failed. Unable to SUSEConnect and thus have no Zypper repositories: ${lr_output}: ${health_output}." 1>&2 + return 1 + fi + + return 0 +} + +# Synchronize our repositories so that further installation steps work with an updated cache +# and repo metadata. +synchronize_repos() { + case $PACKAGE_MANAGER in + apt) + sudo apt update + ;; + dnf) + sudo dnf makecache + ;; + yum) + sudo yum makecache + ;; + zypper) + if [ "$DISTRO" == "sles" ]; then + if ! sles_ensure_suseconnect; then + return 1 + fi + fi + sudo zypper --gpg-auto-import-keys --non-interactive ref + sudo zypper --gpg-auto-import-keys --non-interactive refs + ;; + *) + return 0 + ;; + esac +} + +# Before we start to modify repositories and install packages we'll wait for cloud-init to finish +# so it doesn't race with any of our package installations. +# We run as sudo because Amazon Linux 2 throws Python 2.7 errors when running `cloud-init status` as +# a non-root user (known bug). +sudo cloud-init status --wait + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if synchronize_repos; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for distro repos to be set up" diff --git a/enos/modules/k8s_deploy_vault/main.tf b/enos/modules/k8s_deploy_vault/main.tf index 72f4f4700354..a422be435caa 100644 --- a/enos/modules/k8s_deploy_vault/main.tf +++ b/enos/modules/k8s_deploy_vault/main.tf @@ -1,12 +1,12 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_version = ">= 1.0" required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } helm = { diff --git a/enos/modules/k8s_deploy_vault/raft-config.hcl b/enos/modules/k8s_deploy_vault/raft-config.hcl index b624dad80fbb..423390b2d12f 100644 --- a/enos/modules/k8s_deploy_vault/raft-config.hcl +++ b/enos/modules/k8s_deploy_vault/raft-config.hcl @@ -7,14 +7,6 @@ listener "tcp" { storage "raft" { path = "/vault/data" - autopilot { - cleanup_dead_servers = "true" - last_contact_threshold = "200ms" - last_contact_failure_threshold = "10m" - max_trailing_logs = 250000 - min_quorum = 5 - server_stabilization_time = "10s" - } } service_registration "kubernetes" {} diff --git a/enos/modules/k8s_deploy_vault/variables.tf b/enos/modules/k8s_deploy_vault/variables.tf index 55fa6f1da6b9..9730f87a7807 100644 --- a/enos/modules/k8s_deploy_vault/variables.tf +++ b/enos/modules/k8s_deploy_vault/variables.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "context_name" { type = string diff --git a/enos/modules/k8s_vault_verify_build_date/main.tf b/enos/modules/k8s_vault_verify_build_date/main.tf deleted file mode 100644 index 366497d08475..000000000000 --- a/enos/modules/k8s_vault_verify_build_date/main.tf +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -locals { - vault_instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) -} - -# Get the date from the vault status command - status_date -# Format the original status output with ISO-8601 - formatted_date -# Format the original status output with awk - awk_date -# Compare the formatted outputs - date_comparison -resource "enos_remote_exec" "status_date" { - for_each = local.vault_instances - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[each.key].name - namespace = var.vault_pods[each.key].namespace - } - } - - inline = ["${var.vault_bin_path} status -format=json | grep build_date | cut -d \\\" -f 4"] -} - -resource "enos_remote_exec" "formatted_date" { - for_each = local.vault_instances - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[each.key].name - namespace = var.vault_pods[each.key].namespace - } - } - - inline = ["date -d \"${enos_remote_exec.status_date[each.key].stdout}\" -D '%Y-%m-%dT%H:%M:%SZ' -I"] -} - -resource "enos_local_exec" "awk_date" { - for_each = local.vault_instances - - inline = ["echo ${enos_remote_exec.status_date[each.key].stdout} | awk -F\"T\" '{printf $1}'"] -} - -resource "enos_local_exec" "date_comparison" { - for_each = local.vault_instances - - inline = ["[[ ${enos_local_exec.awk_date[each.key].stdout} == ${enos_remote_exec.formatted_date[each.key].stdout} ]] && echo \"Verification for build date format ${enos_remote_exec.status_date[each.key].stdout} succeeded\" || \"invalid build_date, must be formatted as RFC 3339: ${enos_remote_exec.status_date[each.key].stdout}\""] -} diff --git a/enos/modules/k8s_vault_verify_build_date/variables.tf b/enos/modules/k8s_vault_verify_build_date/variables.tf deleted file mode 100644 index d960b7840e3e..000000000000 --- a/enos/modules/k8s_vault_verify_build_date/variables.tf +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_pods" { - type = list(object({ - name = string - namespace = string - })) - description = "The vault instances for the cluster to verify" -} - -variable "vault_bin_path" { - type = string - description = "The path to the vault binary" - default = "/bin/vault" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "kubeconfig_base64" { - type = string - description = "The base64 encoded version of the Kubernetes configuration file" -} - -variable "context_name" { - type = string - description = "The name of the k8s context for Vault" -} diff --git a/enos/modules/k8s_vault_verify_replication/main.tf b/enos/modules/k8s_vault_verify_replication/main.tf index 27824dc7676b..666067366a98 100644 --- a/enos/modules/k8s_vault_verify_replication/main.tf +++ b/enos/modules/k8s_vault_verify_replication/main.tf @@ -1,11 +1,11 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh index 5786502cbcdd..6987f7c688c0 100755 --- a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh +++ b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh @@ -1,7 +1,6 @@ -#!/usr/bin/env bash +#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 # The Vault replication smoke test, documented in # https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 @@ -9,14 +8,14 @@ set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } -# Replication STATUS endpoint should have data.mode disabled for OSS release -if [ "$VAULT_EDITION" == "oss" ]; then +# Replication STATUS endpoint should have data.mode disabled for CE release +if [ "$VAULT_EDITION" == "ce" ]; then if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then - fail "replication data mode is not disabled for OSS release!" + fail "replication data mode is not disabled for CE release!" fi else if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then diff --git a/enos/modules/k8s_vault_verify_replication/variables.tf b/enos/modules/k8s_vault_verify_replication/variables.tf index 7d4337801def..011ae9cf2b39 100644 --- a/enos/modules/k8s_vault_verify_replication/variables.tf +++ b/enos/modules/k8s_vault_verify_replication/variables.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "vault_instance_count" { type = number diff --git a/enos/modules/k8s_vault_verify_ui/main.tf b/enos/modules/k8s_vault_verify_ui/main.tf index ce5796096771..40132541658a 100644 --- a/enos/modules/k8s_vault_verify_ui/main.tf +++ b/enos/modules/k8s_vault_verify_ui/main.tf @@ -1,12 +1,12 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - version = ">= 0.1.17" - source = "app.terraform.io/hashicorp-qti/enos" + version = "> 0.4.0" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh index f6b8a278dce7..9964df2cdb65 100755 --- a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh +++ b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh @@ -1,13 +1,12 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then diff --git a/enos/modules/k8s_vault_verify_ui/variables.tf b/enos/modules/k8s_vault_verify_ui/variables.tf index c39f24e1fa2e..3f000c54f85c 100644 --- a/enos/modules/k8s_vault_verify_ui/variables.tf +++ b/enos/modules/k8s_vault_verify_ui/variables.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "vault_instance_count" { type = number diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf index 8decaaa77848..35746350443a 100644 --- a/enos/modules/k8s_vault_verify_version/main.tf +++ b/enos/modules/k8s_vault_verify_version/main.tf @@ -1,18 +1,18 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } locals { instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) - expected_version = var.vault_edition == "oss" ? var.vault_product_version : "${var.vault_product_version}-ent" + expected_version = var.vault_edition == "ce" ? var.vault_product_version : "${var.vault_product_version}-ent" } resource "enos_remote_exec" "release_info" { @@ -38,13 +38,13 @@ resource "enos_local_exec" "smoke-verify-version" { for_each = enos_remote_exec.release_info environment = { - VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) ACTUAL_VERSION = jsondecode(each.value.stdout).version + BUILD_DATE = var.vault_build_date + CHECK_BUILD_DATE = var.check_build_date EXPECTED_VERSION = var.vault_product_version, VAULT_EDITION = var.vault_edition, VAULT_REVISION = var.vault_product_revision, - CHECK_BUILD_DATE = var.check_build_date - BUILD_DATE = var.vault_build_date + VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) } scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] diff --git a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh index a799ebc66f3f..b68e0f69a666 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh @@ -1,7 +1,6 @@ #!/usr/bin/env sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh index 514969cf5098..895879a670cd 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh @@ -1,45 +1,45 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 # The Vault smoke test to verify the Vault version installed set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then expected_build_date="" else - build_date="${BUILD_DATE}" - if [[ "${build_date}" == "" ]]; then - build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) + cfg_build_date="${BUILD_DATE}" + if [[ "${cfg_build_date}" == "" ]]; then + cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) fi - expected_build_date=", built $build_date" + expected_build_date=", built $cfg_build_date" fi vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" case "${VAULT_EDITION}" in - oss) version_expected="${vault_expected_version}${expected_build_date}";; - ent) version_expected="${vault_expected_version}${expected_build_date}";; - ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; - ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - *) fail "(${VAULT_EDITION}) does not match any known Vault editions" + ce) version_expected="${vault_expected_version}${expected_build_date}" ;; + ent) version_expected="${vault_expected_version}${expected_build_date}" ;; + ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + *) fail "(${VAULT_EDITION}) does not match any known Vault editions" ;; esac version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then - echo "Version verification succeeded!" + echo "Version verification succeeded!" else - echo "CHECK_BUILD_DATE: ${CHECK_BUILD_DATE}" - echo "BUILD_DATE: ${BUILD_DATE}" - echo "build_date: ${build_date}" - fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" + echo "Build date checking enabled: ${CHECK_BUILD_DATE}" 1>&2 + echo "Given build date: ${BUILD_DATE}" 1>&2 + echo "Interpreted build date: ${cfg_build_date}" 1>&2 + + fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" fi diff --git a/enos/modules/k8s_vault_verify_version/variables.tf b/enos/modules/k8s_vault_verify_version/variables.tf index 58940a8551bd..05ca66082198 100644 --- a/enos/modules/k8s_vault_verify_version/variables.tf +++ b/enos/modules/k8s_vault_verify_version/variables.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "vault_instance_count" { type = number diff --git a/enos/modules/k8s_vault_verify_write_data/main.tf b/enos/modules/k8s_vault_verify_write_data/main.tf index 5606b8988352..52279718a2fb 100644 --- a/enos/modules/k8s_vault_verify_write_data/main.tf +++ b/enos/modules/k8s_vault_verify_write_data/main.tf @@ -1,11 +1,11 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_write_data/variables.tf b/enos/modules/k8s_vault_verify_write_data/variables.tf index d960b7840e3e..4e1754ebe9f1 100644 --- a/enos/modules/k8s_vault_verify_write_data/variables.tf +++ b/enos/modules/k8s_vault_verify_write_data/variables.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "vault_instance_count" { type = number diff --git a/enos/modules/load_docker_image/main.tf b/enos/modules/load_docker_image/main.tf index 4e5f293f9084..9f5e15c380ac 100644 --- a/enos/modules/load_docker_image/main.tf +++ b/enos/modules/load_docker_image/main.tf @@ -1,10 +1,10 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/local_kind_cluster/main.tf b/enos/modules/local_kind_cluster/main.tf index 5a351679b2e2..b21bfe61da03 100644 --- a/enos/modules/local_kind_cluster/main.tf +++ b/enos/modules/local_kind_cluster/main.tf @@ -1,10 +1,10 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } random = { source = "hashicorp/random" diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf index a1358b3e293d..823714f5d0b8 100644 --- a/enos/modules/read_license/main.tf +++ b/enos/modules/read_license/main.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "file_name" {} diff --git a/enos/modules/replication_data/main.tf b/enos/modules/replication_data/main.tf new file mode 100644 index 000000000000..91c89a4b08b2 --- /dev/null +++ b/enos/modules/replication_data/main.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// An arithmetic module for calculating inputs and outputs for various replication steps. 
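+// For example (illustrative values): given initial_hosts {"0" = a, "1" = b, "2" = c}, a +// removed_primary_host of b, and added_hosts {"0" = d}, remaining_hosts is the re-indexed map over +// the set {a, c, d}, ready to be passed to downstream steps as a hosts map.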
+ +variable "added_hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + default = {} +} + +variable "initial_hosts" { + description = "The initial set of Vault cluster hosts before removing and adding hosts" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + default = {} +} + +variable "removed_primary_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + default = null +} + +variable "removed_follower_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + default = null +} + +locals { + remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host]) + remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial)) + remaining_hosts = { for idx in range(length(local.remaining_hosts_list)) : idx => local.remaining_hosts_list[idx] } +} + +output "remaining_hosts" { + value = local.remaining_hosts +} diff --git a/enos/modules/seal_awskms/main.tf b/enos/modules/seal_awskms/main.tf new file mode 100644 index 000000000000..e8a1ad39cca2 --- /dev/null +++ b/enos/modules/seal_awskms/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string +} + +variable "cluster_meta" { + type = string + default = null +} + +variable "cluster_ssh_keypair" { + type = string + default = null +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +locals { + cluster_name = var.cluster_meta == null ? var.cluster_id : "${var.cluster_id}-${var.cluster_meta}" +} + +resource "aws_kms_key" "key" { + description = "auto-unseal-key-${local.cluster_name}" + deletion_window_in_days = 7 // 7 is the shortest allowed window + tags = var.common_tags +} + +resource "aws_kms_alias" "alias" { + name = "alias/auto-unseal-key-${local.cluster_name}" + target_key_id = aws_kms_key.key.key_id +} + +output "attributes" { + description = "Seal device specific attributes" + value = { + kms_key_id = aws_kms_key.key.arn + } +} + +// We output our resource name and a collection of those passed in to create a full list of key +// resources that might be required for instance roles that are associated with some unseal types. +output "resource_name" { + description = "The awskms key name" + value = aws_kms_key.key.arn +} + +output "resource_names" { + description = "The list of awskms key names to associate with a role" + value = compact(concat([aws_kms_key.key.arn], var.other_resources)) +} diff --git a/enos/modules/seal_pkcs11/main.tf b/enos/modules/seal_pkcs11/main.tf new file mode 100644 index 000000000000..084d364028cc --- /dev/null +++ b/enos/modules/seal_pkcs11/main.tf @@ -0,0 +1,133 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +/* + +A seal module that emulates using a real PKCS#11 HSM. For this we'll use softhsm2. You'll +need softhsm2 and opensc installed to get access to the userspace tools and dynamic library that +Vault Enterprise will use. Here we'll take in the vault hosts and use the one of the nodes +to generate the hsm slot and the tokens, and then we'll copy the softhsm tokens to the other nodes. 
+ +Using softhsm2 and opensc is a bit complicated but here's a cheat sheet for getting started. + +$ brew install softhsm opensc +or +$ sudo apt install softhsm2 opensc + +Create a softhsm slot. You can use anything you want for the pin and the supervisor pin. This will +output the slot identifier, which you'll use as the `slot` parameter in the seal config. +$ softhsm2-util --init-token --free --so-pin=1234 --pin=1234 --label="seal" | grep -oE '[0-9]+$' + +You can see the slots: +$ softhsm2-util --show-slots +Or use opensc's pkcs11-tool. Make sure to use your pin for the -p flag. The module that we refer +to is the location of the shared library that we need to provide to Vault Enterprise. Depending on +your platform or installation method this could be different. +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 -IL + +Find yours: +$ find /usr/local -type f -name libsofthsm2.so -print -quit + +Your tokens will be installed in the default directories.tokendir. See man softhsm2.conf(5) for +more details. On macOS from brew this is /usr/local/var/lib/softhsm/tokens/ + +Vault Enterprise supports creating the HSM keys, but for softhsm2 that would require us to +initialize with one node before copying the contents. So instead we'll create an HSM key and HMAC +key that we'll copy everywhere. + +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_hmac --id 1 --key-type GENERIC:32 --private --sensitive +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_aes --id 2 --key-type AES:32 --private --sensitive --usage-wrap + +Now you should be able to configure the Vault Enterprise seal stanza, as sketched above. +*/ + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string + description = "The VPC ID of the cluster" +} + +variable "cluster_meta" { + type = string + default = null + description = "Any metadata that needs to be passed in. If we're creating multiple softhsm tokens this value could be a prior KEYS_BASE64" +} + +variable "cluster_ssh_keypair" { + type = string + description = "The ssh keypair of the vault cluster. We need this to use the inherited provider for our target" +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +resource "random_string" "id" { + length = 8 + numeric = false + special = false + upper = false +} + +module "ec2_info" { + source = "../ec2_info" +} + +locals { + id = "${var.cluster_id}-${random_string.id.result}" +} + +module "target" { + source = "../target_ec2_instances" + ami_id = module.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = local.id + common_tags = var.common_tags + instance_count = 1 + instance_types = { + amd64 = "t3a.small" + arm64 = "t4g.small" + } + ports_ingress = [ + { + description = "SSH" + port = 22 + protocol = "tcp" + }, + ] + // Make sure it's not too long as we use this for aws resources that have size maximums that are easy + // to hit. 
+ project_name = substr("vault-ci-softhsm-${local.id}", 0, 32) + ssh_keypair = var.cluster_ssh_keypair + vpc_id = var.cluster_id +} + +module "create_vault_keys" { + source = "../softhsm_create_vault_keys" + + cluster_id = var.cluster_id + hosts = module.target.hosts +} + +// Our attributes contain all required keys for the seal stanza and our base64 encoded softhsm +// token and keys. +output "attributes" { + description = "Seal device specific attributes" + value = module.create_vault_keys.all_attributes +} + +// Shim for chaining seals that require IAM roles +output "resource_name" { value = null } +output "resource_names" { value = var.other_resources } diff --git a/enos/modules/seal_shamir/main.tf b/enos/modules/seal_shamir/main.tf new file mode 100644 index 000000000000..55e26d1547b6 --- /dev/null +++ b/enos/modules/seal_shamir/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# A shim seal module for shamir seals. For Shamir seals the enos_vault_init resource will take care +# of creating our seal. + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { default = null } +variable "cluster_meta" { default = null } +variable "cluster_ssh_keypair" { default = null } +variable "common_tags" { default = null } +variable "image_id" { default = null } +variable "other_resources" { + type = list(string) + default = [] +} + +output "resource_name" { value = null } +output "resource_names" { value = var.other_resources } +output "attributes" { value = null } diff --git a/enos/modules/shutdown_multiple_nodes/main.tf b/enos/modules/shutdown_multiple_nodes/main.tf index 86045db0bc25..c2781cd8c40a 100644 --- a/enos/modules/shutdown_multiple_nodes/main.tf +++ b/enos/modules/shutdown_multiple_nodes/main.tf @@ -1,20 +1,15 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "old_vault_instances" { +variable "old_hosts" { type = map(object({ private_ip = string public_ip = string @@ -22,18 +17,9 @@ variable "old_vault_instances" { description = "The vault cluster instances to be shutdown" } -locals { - public_ips = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.old_vault_instances)[idx].public_ip - private_ip = values(var.old_vault_instances)[idx].private_ip - } - } -} - resource "enos_remote_exec" "shutdown_multiple_nodes" { - for_each = local.public_ips - inline = ["sudo shutdown -H --no-wall; exit 0"] + for_each = var.old_hosts + inline = ["sudo shutdown -P --no-wall; exit 0"] transport = { ssh = { diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf index f27de68534e8..045857015cdb 100644 --- a/enos/modules/shutdown_node/main.tf +++ b/enos/modules/shutdown_node/main.tf @@ -1,25 +1,29 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "node_public_ip" { - type = string - description = "Node Public IP address" +variable "host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The node to shut down" } resource "enos_remote_exec" "shutdown_node" { - inline = ["sudo shutdown -H --no-wall; exit 0"] + inline = ["sudo shutdown -P --no-wall; exit 0"] transport = { ssh = { - host = var.node_public_ip + host = var.host.public_ip } } } diff --git a/enos/modules/softhsm_create_vault_keys/main.tf b/enos/modules/softhsm_create_vault_keys/main.tf new file mode 100644 index 000000000000..4132de8f2250 --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/main.tf @@ -0,0 +1,129 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts that will have access to the softhsm" +} + +locals { + pin = resource.random_string.pin.result + aes_label = "vault_hsm_aes_${local.pin}" + hmac_label = "vault_hsm_hmac_${local.pin}" + seal_attributes = jsondecode(resource.enos_remote_exec.create_keys.stdout) + target = tomap({ "0" = var.hosts[0] }) + token = "${var.cluster_id}_${local.pin}" +} + +resource "random_string" "pin" { + length = 5 + lower = true + upper = false + numeric = true + special = false +} + +module "install" { + source = "../softhsm_install" + + hosts = local.target + include_tools = true # make sure opensc is also installed as we need it to create keys +} + +module "initialize" { + source = "../softhsm_init" + depends_on = [module.install] + + hosts = local.target +} + +// Create our keys. Our stdout contains the required values for the pkcs11 seal stanza +// as JSON. https://developer.hashicorp.com/vault/docs/configuration/seal/pkcs11#pkcs11-parameters +resource "enos_remote_exec" "create_keys" { + depends_on = [ + module.install, + module.initialize, + ] + + environment = { + AES_LABEL = local.aes_label + HMAC_LABEL = local.hmac_label + PIN = resource.random_string.pin.result + TOKEN_DIR = module.initialize.token_dir + TOKEN_LABEL = local.token + SO_PIN = resource.random_string.pin.result + } + + scripts = [abspath("${path.module}/scripts/create-keys.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +// Get our softhsm token. Stdout is a base64 encoded gzipped tarball of the softhsm token dir. This +// allows us to pass around binary data inside of Terraform's type system. +resource "enos_remote_exec" "get_keys" { + depends_on = [enos_remote_exec.create_keys] + + environment = { + TOKEN_DIR = module.initialize.token_dir + } + + scripts = [abspath("${path.module}/scripts/get-keys.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +output "seal_attributes" { + description = "Seal device specific attributes. 
Contains all required keys for the seal stanza" + value = local.seal_attributes +} + +output "token_base64" { + description = "The softhsm token and keys gzipped tarball in base64" + value = enos_remote_exec.get_keys.stdout +} + +output "token_dir" { + description = "The softhsm directory where tokens and keys are stored" + value = module.initialize.token_dir +} + +output "token_label" { + description = "The HSM slot token label" + value = local.token +} + +output "all_attributes" { + description = "Seal device specific attributes" + value = merge( + local.seal_attributes, + { + token_base64 = enos_remote_exec.get_keys.stdout, + token_dir = module.initialize.token_dir + }, + ) +} diff --git a/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh new file mode 100644 index 000000000000..aa271cde1dd4 --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh @@ -0,0 +1,82 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AES_LABEL" ]] && fail "AES_LABEL env variable has not been set" +[[ -z "$HMAC_LABEL" ]] && fail "HMAC_LABEL env variable has not been set" +[[ -z "$PIN" ]] && fail "PIN env variable has not been set" +[[ -z "$SO_PIN" ]] && fail "SO_PIN env variable has not been set" +[[ -z "$TOKEN_LABEL" ]] && fail "TOKEN_LABEL env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +if ! type softhsm2-util &> /dev/null; then + fail "unable to locate softhsm2-util in PATH. Have you installed softhsm?" +fi + +if ! type pkcs11-tool &> /dev/null; then + fail "unable to locate pkcs11-tool in PATH. Have you installed opensc?" +fi + +# Create an HSM slot and return the slot number in decimal value. +create_slot() { + sudo softhsm2-util --init-token --free --so-pin="$SO_PIN" --pin="$PIN" --label="$TOKEN_LABEL" | grep -oE '[0-9]+$' +} + +# Find the location of our softhsm shared object. +find_softhsm_so() { + sudo find /usr -type f -name libsofthsm2.so -print -quit +} + +# Create a key in the slot. Args: module, key label, id number, key type +keygen() { + sudo pkcs11-tool --keygen --usage-sign --private --sensitive --usage-wrap \ + --module "$1" \ + -p "$PIN" \ + --token-label "$TOKEN_LABEL" \ + --label "$2" \ + --id "$3" \ + --key-type "$4" +} + +# Create our softhsm slot and keys +main() { + local slot + if ! slot=$(create_slot); then + fail "failed to create softhsm token slot" + fi + + local so + if ! so=$(find_softhsm_so); then + fail "unable to locate libsofthsm2.so shared object" + fi + + if ! keygen "$so" "$AES_LABEL" 1 'AES:32' 1>&2; then + fail "failed to create AES key" + fi + + if ! keygen "$so" "$HMAC_LABEL" 2 'GENERIC:32' 1>&2; then + fail "failed to create HMAC key" + fi + + # Return our seal configuration attributes as JSON + cat << EOF +{ + "lib": "${so}", + "slot": "${slot}", + "pin": "${PIN}", + "key_label": "${AES_LABEL}", + "hmac_key_label": "${HMAC_LABEL}", + "generate_key": "false" +} +EOF + exit 0 +} + +main diff --git a/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh new file mode 100644 index 000000000000..6409943f51fd --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +# Tar up our token. We have to do this as a superuser because the softhsm token directory is owned by root. +sudo tar -czf token.tgz -C "$TOKEN_DIR" . +me="$(whoami)" +sudo chown "$me:$me" token.tgz + +# Write the value to STDOUT as base64 so we can handle binary data as a string +base64 -i token.tgz diff --git a/enos/modules/softhsm_distribute_vault_keys/main.tf b/enos/modules/softhsm_distribute_vault_keys/main.tf new file mode 100644 index 000000000000..0ccebe1c71cc --- /dev/null +++ b/enos/modules/softhsm_distribute_vault_keys/main.tf @@ -0,0 +1,110 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.9" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts to which we'll distribute the softhsm tokens and keys" +} + +variable "token_base64" { + type = string + description = "The base64 encoded gzipped tarball of the softhsm token" +} + +locals { + // The user/group name for softhsm + softhsm_groups = { + "amzn" = "ods" + "rhel" = "ods" + "ubuntu" = "softhsm" + } + + // Determine if we should skip distribution. If we haven't been passed a base64 token tarball + // we should short circuit the rest of the module. + skip = var.token_base64 == null || var.token_base64 == "" ? true : false +} + +module "install" { + // TODO: Should packages take a string instead of array so we can plan with unknown values that could change? + source = "../softhsm_install" + + hosts = var.hosts + include_tools = false # we don't need opensc on machines that did not create the HSM. +} + +module "initialize" { + source = "../softhsm_init" + depends_on = [module.install] + + hosts = var.hosts + skip = local.skip +} + +# In order for the vault service to access our keys we need to deal with ownership of files. Make +# sure we have a vault user on the machine if it doesn't already exist. Our distribution script +# below will handle adding vault to the "softhsm" group and setting ownership of the tokens. +resource "enos_user" "vault" { + for_each = var.hosts + + name = "vault" + home_dir = "/etc/vault.d" + shell = "/bin/false" + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Get the host information so we can ensure that the correct user/group is used for softhsm. +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Distribute our softhsm token and keys to the given hosts. 
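+// The distribute-token.sh script decodes the base64 tarball produced by softhsm_create_vault_keys, +// unpacks it into the token directory laid down by softhsm_init, and then fixes group membership +// and ownership so the vault user created above can read the token.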
+resource "enos_remote_exec" "distribute_token" { + for_each = var.hosts + depends_on = [ + module.initialize, + enos_user.vault, + enos_host_info.hosts, + ] + + environment = { + TOKEN_BASE64 = var.token_base64 + TOKEN_DIR = module.initialize.token_dir + SOFTHSM_GROUP = local.softhsm_groups[enos_host_info.hosts[each.key].distro] + } + + scripts = [abspath("${path.module}/scripts/distribute-token.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "lib" { + value = module.install.lib +} diff --git a/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh new file mode 100644 index 000000000000..95f896c756d1 --- /dev/null +++ b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -ex + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# If we're not given keys we'll short circuit. This should only happen if we're skipping distribution +# because we haven't created a token or keys. +if [ -z "$TOKEN_BASE64" ]; then + echo "TOKEN_BASE64 environment variable was unset. Assuming we don't need to distribute our token" 1>&2 + exit 0 +fi + +[[ -z "$SOFTHSM_GROUP" ]] && fail "SOFTHSM_GROUP env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +# Convert our base64 encoded gzipped tarball of the softhsm token back into a tarball. +base64 --decode - > token.tgz <<< "$TOKEN_BASE64" + +# Expand it. We assume it was written with the correct directory metadata. Do this as a superuser +# because the token directory should be owned by root. +sudo tar -xvf token.tgz -C "$TOKEN_DIR" + +# Make sure the vault user is in the softhsm group to get access to the tokens. +sudo usermod -aG "$SOFTHSM_GROUP" vault +sudo chown -R "vault:$SOFTHSM_GROUP" "$TOKEN_DIR" diff --git a/enos/modules/softhsm_init/main.tf b/enos/modules/softhsm_init/main.tf new file mode 100644 index 000000000000..edadca849ae1 --- /dev/null +++ b/enos/modules/softhsm_init/main.tf @@ -0,0 +1,83 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.9" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts for whom default softhsm configuration will be applied" +} + +variable "skip" { + type = bool + default = false + description = "Whether or not to skip initializing softhsm" +} + +locals { + // The location on disk to write the softhsm tokens to + token_dir = "/var/lib/softhsm/tokens" + + // Where the default configuration is + config_paths = { + "amzn" = "/etc/softhsm2.conf" + "rhel" = "/etc/softhsm2.conf" + "ubuntu" = "/etc/softhsm/softhsm2.conf" + } + + host_key = element(keys(enos_host_info.hosts), 0) + config_path = local.config_paths[enos_host_info.hosts[local.host_key].distro] +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "init_softhsm" { + for_each = var.hosts + depends_on = [enos_host_info.hosts] + + environment = { + CONFIG_PATH = local.config_paths[enos_host_info.hosts[each.key].distro] + TOKEN_DIR = local.token_dir + SKIP = var.skip ? 
"true" : "false" + } + + scripts = [abspath("${path.module}/scripts/init-softhsm.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "config_path" { + // Technically this is actually just the first config path of our hosts. + value = local.config_path +} + +output "token_dir" { + value = local.token_dir +} + +output "skipped" { + value = var.skip +} diff --git a/enos/modules/softhsm_init/scripts/init-softhsm.sh b/enos/modules/softhsm_init/scripts/init-softhsm.sh new file mode 100644 index 000000000000..c36db5304306 --- /dev/null +++ b/enos/modules/softhsm_init/scripts/init-softhsm.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$CONFIG_PATH" ]] && fail "CONFIG_PATH env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" +[[ -z "$SKIP" ]] && fail "SKIP env variable has not been set" + +if [ "$SKIP" == "true" ]; then + exit 0 +fi + +cat << EOF | sudo tee "$CONFIG_PATH" +directories.tokendir = $TOKEN_DIR +objectstore.backend = file +log.level = DEBUG +slots.removable = false +slots.mechanisms = ALL +library.reset_on_fork = false +EOF + +sudo mkdir -p "$TOKEN_DIR" +sudo chmod 0770 "$TOKEN_DIR" diff --git a/enos/modules/softhsm_install/main.tf b/enos/modules/softhsm_install/main.tf new file mode 100644 index 000000000000..8659104d5bca --- /dev/null +++ b/enos/modules/softhsm_install/main.tf @@ -0,0 +1,116 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts that will have access to the softhsm. We assume they're all the same platform and architecture" +} + +variable "include_tools" { + type = bool + default = false + description = "Install opensc pkcs11-tools along with softhsm" +} + +variable "retry_interval" { + type = string + default = "2" + description = "How long to wait between retries" +} + +variable "timeout" { + type = string + default = "15" + description = "How many seconds to wait before timing out" +} + +locals { + packages = var.include_tools ? { + // These packages match the distros that are currently defined in the `ec2_info` module. 
+ amzn = { + "2023" = ["softhsm", "opensc"] + } + rhel = { + "8.10" = ["softhsm", "opensc"] + "9.4" = ["softhsm", "opensc"] + } + ubuntu = { + "20.04" = ["softhsm", "opensc"] + "22.04" = ["softhsm", "opensc"] + "24.04" = ["softhsm2", "opensc"] + } + } : { + amzn = { + "2023" = ["softhsm"] + } + rhel = { + "8.10" = ["softhsm"] + "9.4" = ["softhsm"] + } + ubuntu = { + "20.04" = ["softhsm"] + "22.04" = ["softhsm"] + "24.04" = ["softhsm2"] + } + } +} + +// Get the host information so we can ensure that we install the correct packages depending on the +// distro and distro version +resource "enos_host_info" "target" { + transport = { + ssh = { + host = var.hosts["0"].public_ip + } + } +} + +module "install_softhsm" { + source = "../install_packages" + + hosts = var.hosts + packages = local.packages[enos_host_info.target.distro][enos_host_info.target.distro_version] +} + +resource "enos_remote_exec" "find_shared_object" { + for_each = var.hosts + depends_on = [module.install_softhsm] + + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/find-shared-object.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +locals { + object_paths = compact(distinct(values(enos_remote_exec.find_shared_object)[*].stdout)) +} + +output "lib" { + value = local.object_paths[0] + + precondition { + condition = length(local.object_paths) == 1 + error_message = "SoftHSM targets cannot have different libsofthsm2.so shared object paths. Are they all the same Linux distro?" + } +} diff --git a/enos/modules/softhsm_install/scripts/find-shared-object.sh b/enos/modules/softhsm_install/scripts/find-shared-object.sh new file mode 100644 index 000000000000..4afaee8b16b2 --- /dev/null +++ b/enos/modules/softhsm_install/scripts/find-shared-object.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if so=$(sudo find /usr -type f -name libsofthsm2.so -print -quit) && [ -n "$so" ]; then # find exits 0 even with no match, so also require non-empty output + echo "$so" + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out trying to locate libsofthsm2.so shared object" diff --git a/enos/modules/start_vault/main.tf b/enos/modules/start_vault/main.tf new file mode 100644 index 000000000000..9e386e01e44a --- /dev/null +++ b/enos/modules/start_vault/main.tf @@ -0,0 +1,272 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.5.3" + } + } +} + +locals { + api_addr_localhost = var.ip_version == 4 ? 
"http://127.0.0.1:${var.listener_port}" : "http://[::1]:${var.listener_port}" + api_addrs = tolist([for h in var.hosts : { + 4 : "http://${h.public_ip}:${var.listener_port}", + 6 : "http://[${h.ipv6}]:${var.listener_port}", + }]) + api_addrs_internal = tolist([for h in var.hosts : { + 4 : "http://${h.private_ip}:${var.listener_port}", + 6 : "http://[${h.ipv6}]:${var.listener_port}", + }]) + bin_path = "${var.install_dir}/vault" + cluster_addrs = tolist([for h in var.hosts : { + 4 : "http://${h.public_ip}:${var.cluster_port}", + 6 : "http://[${h.ipv6}]:${var.cluster_port}", + }]) + cluster_addrs_internal = tolist([for h in var.hosts : { + 4 : "http://${h.private_ip}:${var.cluster_port}", + 6 : "http://[${h.ipv6}]:${var.cluster_port}", + }]) + // In order to get Terraform to plan we have to use collections with keys that are known at plan + // time. Here we're creating locals that keep track of index values that point to our target hosts. + followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.hosts)) : tostring(idx)] + leader = toset(slice(local.instances, 0, 1)) + listener_address = var.ip_version == 4 ? "0.0.0.0:${var.listener_port}" : "[::]:${var.listener_port}" + // Handle cases where we might have to distribute HSM tokens for the pkcs11 seal before starting + // vault. + token_base64 = try(lookup(var.seal_attributes, "token_base64", ""), "") + token_base64_secondary = try(lookup(var.seal_attributes_secondary, "token_base64", ""), "") + // This module currently supports up to two defined seals. Most of our locals logic here is for + // creating the correct seal configuration. + seals = { + primary = local.seal_primary + secondary = local.seal_secondary + } + seals_primary = { + awskms = { + type = "awskms" + attributes = merge( + { + name = var.seal_alias + priority = var.seal_priority + }, var.seal_attributes + ) + } + pkcs11 = { + type = "pkcs11" + attributes = merge( + { + name = var.seal_alias + priority = var.seal_priority + }, + // Strip out attributes that aren't supposed to be in seal stanza like our base64 encoded + // softhsm blob and the token directory. We'll also inject the shared object library + // location that we detect on the target machines. This allows use to create the token and + // keys on a machines that have different shared object locations. + merge( + try({ for key, val in var.seal_attributes : key => val if key != "token_base64" && key != "token_dir" }, {}), + # Note: the below reference has to point to a specific instance of the maybe_configure_hsm + # module (in this case [0]) due to the maybe_configure_hsm module call using `count` to control whether it runs or not. 
+ try({ lib = module.maybe_configure_hsm[0].lib }, {}) + ), + ) + } + shamir = { + type = "shamir" + attributes = null + } + } + seal_primary = local.seals_primary[var.seal_type] + seals_secondary = { + awskms = { + type = "awskms" + attributes = merge( + { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + }, var.seal_attributes_secondary + ) + } + pkcs11 = { + type = "pkcs11" + attributes = merge( + { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + }, + merge( + try({ for key, val in var.seal_attributes_secondary : key => val if key != "token_base64" && key != "token_dir" }, {}), + # Note: the below reference has to point to a specific instance of the maybe_configure_hsm_secondary + # module (in this case [0]) due to the maybe_configure_hsm_secondary module call using `count` to control whether it runs or not. + try({ lib = module.maybe_configure_hsm_secondary[0].lib }, {}) + ), + ) + } + none = { + type = "none" + attributes = null + } + } + seal_secondary = local.seals_secondary[var.seal_type_secondary] + storage_address = var.ip_version == 4 ? "0.0.0.0:${var.external_storage_port}" : "[::]:${var.external_storage_port}" + storage_attributes = [for idx, host in var.hosts : (var.storage_backend == "raft" ? + merge( + { + node_id = "${var.storage_node_prefix}_${idx}" + }, + var.storage_backend_attrs + ) : + { + address = local.storage_address + path = "vault" + }) + ] + storage_retry_join = { + "raft" : { + auto_join : "provider=aws addr_type=${var.ip_version == 4 ? "private_v4" : "public_v6"} tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}", + auto_join_scheme : "http", + }, + } +} + +# You might be wondering why our start_vault module, which supports shamir, awskms, and pkcs11 seal +# types, contains sub-modules that are only used for HSM. Well, each of those seal devices has +# different requirements and as such we have some seal-specific requirements before starting Vault. +# +# A Shamir seal key cannot exist until Vault has already started, so this module's responsibility for +# shamir seals is ensuring that the seal type is passed to the enos_vault_start resource. That's it. +# +# Auto-unseal with a KMS requires that we configure the enos_vault_start resource with the correct +# seal type and the attributes necessary to know which KMS key to use. Vault should automatically +# unseal if we've given it the correct configuration. As long as Vault is able to access the key +# in the KMS it should be able to start. That's normally done via roles associated with the target +# machines, which is outside the scope of this module. +# +# Auto-unseal with an HSM and PKCS#11 is more complicated because a shared object library, which is +# how we interface with the HSM, must be present on each node in order to start Vault. In the real +# world this means an actual HSM in the same rack or data center as every node in the Vault cluster, +# but in our case we're creating ephemeral infrastructure for these test scenarios and don't have a +# real HSM available. We could use CloudHSM or the like, but at the time of writing CloudHSM +# provisioning takes anywhere from 30 to 60 minutes and costs upwards of $2 an hour. That's +# far too long and expensive for scenarios we'll run fairly frequently. Instead, we test using a +# software HSM. Using a software HSM solves the cost and speed problems but creates a new set of +# problems. 
We need to ensure every node in the cluster has access to the same "HSM" and with +# softhsm that means the same software, configuration, tokens and keys. Our `seal_pkcs11` module +# takes care of creating the token and keys, but that's the end of the road for that module. It's +# our job to ensure that when we start Vault with a software HSM, the correct software, +# configuration, and data are available on the nodes. That's where the following two +# modules come in. They handle installing the required software, configuring it, and distributing +# the key data that was passed in via seal attributes. +module "maybe_configure_hsm" { + source = "../softhsm_distribute_vault_keys" + count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 1 : 0 + + hosts = var.hosts + token_base64 = local.token_base64 +} + +module "maybe_configure_hsm_secondary" { + source = "../softhsm_distribute_vault_keys" + depends_on = [module.maybe_configure_hsm] + count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 1 : 0 + + hosts = var.hosts + token_base64 = local.token_base64_secondary +} + +resource "enos_vault_start" "leader" { + for_each = local.leader + depends_on = [ + module.maybe_configure_hsm_secondary, + ] + + bin_path = local.bin_path + config_dir = var.config_dir + config_mode = var.config_mode + environment = var.environment + config = { + api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = local.listener_address + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = local.storage_attributes[each.key] + retry_join = try(local.storage_retry_join[var.storage_backend], null) + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +resource "enos_vault_start" "followers" { + depends_on = [ + enos_vault_start.leader, + ] + for_each = local.followers + + bin_path = local.bin_path + config_dir = var.config_dir + config_mode = var.config_mode + environment = var.environment + config = { + api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = local.listener_address + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = { for key, value in local.storage_attributes[each.key] : key => value } + retry_join = try(local.storage_retry_join[var.storage_backend], null) + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +output "token_base64" { + value = local.token_base64 +} + +output "token_base64_secondary" { + value = local.token_base64_secondary +} diff --git a/enos/modules/start_vault/outputs.tf b/enos/modules/start_vault/outputs.tf new file mode 100644 index 000000000000..c20e7b80168c --- /dev/null +++ b/enos/modules/start_vault/outputs.tf @@ -0,0 +1,63 @@ +# Copyright 
(c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "api_addr_localhost" { + description = "The localhost API address" + value = local.api_addr_localhost +} + +output "api_addrs" { + description = "The external API addresses of all nodes in the cluster" + value = local.api_addrs +} + +output "cluster_name" { + description = "The Vault cluster name" + value = var.cluster_name +} + +output "cluster_port" { + description = "The Vault cluster request forwarding listener port" + value = var.cluster_port +} + +output "external_storage_port" { + description = "The Vault cluster non-raft external storage port" + value = var.external_storage_port +} + +output "followers" { + description = "The follower enos_vault_start resources" + value = enos_vault_start.followers +} + +output "leader" { + description = "The leader enos_vault_start resource" + value = enos_vault_start.leader +} + +output "ipv6s" { + description = "Vault cluster target host ipv6s" + value = [for host in var.hosts : host.ipv6] +} + +output "listener_port" { + description = "The Vault cluster TCP listener port" + value = var.listener_port +} + +output "private_ips" { + description = "Vault cluster target host private_ips" + value = [for host in var.hosts : host.private_ip] +} + +output "public_ips" { + description = "Vault cluster target host public_ips" + value = [for host in var.hosts : host.public_ip] +} + +output "hosts" { + description = "The Vault cluster instances that were created" + + value = var.hosts +} diff --git a/enos/modules/start_vault/variables.tf b/enos/modules/start_vault/variables.tf new file mode 100644 index 000000000000..2571b0c2dda0 --- /dev/null +++ b/enos/modules/start_vault/variables.tf @@ -0,0 +1,187 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "cluster_name" { + type = string + description = "The Vault cluster name" +} + +variable "cluster_port" { + type = number + description = "The cluster port for Vault to listen on" + default = 8201 +} + +variable "cluster_tag_key" { + type = string + description = "The Vault cluster tag key" + default = "retry_join" +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_mode" { + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" + + validation { + condition = contains(["env", "file"], var.config_mode) + error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." 
+ } +} + +variable "environment" { + description = "Optional Vault configuration environment variables to set when starting Vault" + type = map(string) + default = null +} + +variable "external_storage_port" { + type = number + description = "The port to connect to when using external storage" + default = 8500 +} + +variable "hosts" { + description = "The target machines' host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "install_dir" { + type = string + description = "The directory where the vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "license" { + type = string + sensitive = true + description = "The value of the Vault license" + default = null +} + +variable "log_level" { + type = string + description = "The Vault service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "manage_service" { + type = bool + description = "Manage the Vault service users and systemd unit. Disable this to use configuration in RPM and Debian packages" + default = true +} + +variable "listener_port" { + type = number + description = "The port for Vault to listen on" + default = 8200 +} + +variable "seal_alias" { + type = string + description = "The primary seal alias name" + default = "primary" +} + +variable "seal_alias_secondary" { + type = string + description = "The secondary seal alias name" + default = "secondary" +} + +variable "seal_attributes" { + description = "The primary auto-unseal attributes" + default = null +} + +variable "seal_attributes_secondary" { + description = "The secondary auto-unseal attributes" + default = null +} + +variable "seal_priority" { + type = string + description = "The primary seal priority" + default = "1" +} + +variable "seal_priority_secondary" { + type = string + description = "The secondary seal priority" + default = "2" +} + +variable "seal_type" { + type = string + description = "The method by which to unseal the Vault cluster" + default = "awskms" + + validation { + condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) + error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." + } +} + +variable "seal_type_secondary" { + type = string + description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15" + default = "none" + + validation { + condition = contains(["awskms", "pkcs11", "none"], var.seal_type_secondary) + error_message = "The seal_type_secondary must be 'awskms', 'pkcs11', or 'none'. No other secondary seal types are supported." + } +} + +variable "service_username" { + type = string + description = "The host username to own the vault service" + default = "vault" +} + +variable "storage_backend" { + type = string + description = "The storage backend to use" + default = "raft" + + validation { + condition = contains(["raft", "consul"], var.storage_backend) + error_message = "The storage_backend must be either 'raft' or 'consul'. No other storage backends are supported." 
+ } +} + +variable "storage_backend_attrs" { + type = map(any) + description = "An optional set of key value pairs to inject into the storage block" + default = {} +} + +variable "storage_node_prefix" { + type = string + description = "A prefix to use for each node in the Vault storage configuration" + default = "node" +} diff --git a/enos/modules/stop_vault/main.tf b/enos/modules/stop_vault/main.tf new file mode 100644 index 000000000000..6dd477d4dda7 --- /dev/null +++ b/enos/modules/stop_vault/main.tf @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} + +variable "service_name" { + type = string + description = "The Vault systemd service name" + default = "vault" +} + +variable "hosts" { + description = "The target machines' host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +resource "enos_remote_exec" "shutdown_multiple_nodes" { + for_each = var.hosts + inline = ["sudo systemctl stop ${var.service_name}.service; sleep 5"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/target_ec2_fleet/main.tf b/enos/modules/target_ec2_fleet/main.tf new file mode 100644 index 000000000000..411d1744b12f --- /dev/null +++ b/enos/modules/target_ec2_fleet/main.tf @@ -0,0 +1,339 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:CreateFleet only allows up to 4 InstanceRequirements overrides so we can only ever request +// a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. 
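+// For example (hypothetical IDs): a VPC with six subnets [a, b, c, d, e, f] shuffles down to four, +// e.g. [c, f, a, e], and those four become the only subnets the fleet below is allowed to bid across.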
+resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + spot_allocation_strategy = "lowestPrice" + on_demand_allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + key_name 
= var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we would pay on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down, we ought to +# stick with that strategy. +resource "aws_ec2_fleet" "targets" { + replace_unhealthy_instances = false + terminate_instances = true // terminate instances when we "delete" the fleet + terminate_instances_with_expiration = false + tags = merge( + var.common_tags, + local.fleet_tags, + ) + type = "instant" // make a synchronous request for the entire fleet + + launch_template_config { + launch_template_specification { + launch_template_id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + dynamic "override" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = override.value + } + } + } + + on_demand_options { + allocation_strategy = local.on_demand_allocation_strategy + max_total_price = (var.max_price * var.instance_count) + min_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : null + // One of these has to be set to enforce our on-demand target capacity minimum + single_availability_zone = false + single_instance_type = true + } + + spot_options { + allocation_strategy = local.spot_allocation_strategy + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. When we are using that strategy we'll want to always set it + // to non-zero to avoid rebuilding the fleet on a re-run. For any other strategy + // leave it null, since the attribute only applies to lowestPrice. + instance_pools_to_use_count = local.spot_allocation_strategy == "lowestPrice" ? 1 : null + } + + // Try and provision only spot instances and fall back to on-demand. + target_capacity_specification { + default_target_capacity_type = var.capacity_type + spot_target_capacity = var.capacity_type == "spot" ? 
var.instance_count : 0 + on_demand_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : 0 + target_capacity_unit_type = "units" // units == instance count + total_target_capacity = var.instance_count + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_ec2_fleet.targets, + ] + for_each = local.instances + + instance_id = aws_ec2_fleet.targets.fleet_instance_set[0].instance_ids[each.key] + +} diff --git a/enos/modules/target_ec2_fleet/outputs.tf b/enos/modules/target_ec2_fleet/outputs.tf new file mode 100644 index 000000000000..505db0e4eb88 --- /dev/null +++ b/enos/modules/target_ec2_fleet/outputs.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null) + } } +} diff --git a/enos/modules/target_ec2_fleet/variables.tf b/enos/modules/target_ec2_fleet/variables.tf new file mode 100644 index 000000000000..f0eb87bf5d15 --- /dev/null +++ b/enos/modules/target_ec2_fleet/variables.tf @@ -0,0 +1,107 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "vault-ci" + } +} + +variable "disable_selinux" { + description = "Optionally disable SELinux for certain distros/versions" + type = bool + default = true +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPUs for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPUs for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "max_price" { + description = "The maximum hourly price to pay for each target instance" + type = string + default = "0.0416" +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. 
The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "capacity_type" { + description = "What capacity type to use for EC2 instances" + type = string + default = "on-demand" + + validation { + condition = contains(["on-demand", "spot"], var.capacity_type) + error_message = "The capacity_type must be either 'on-demand' or 'spot'." + } +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/target_ec2_instances/locals.tf b/enos/modules/target_ec2_instances/locals.tf new file mode 100644 index 000000000000..8831b7ec2696 --- /dev/null +++ b/enos/modules/target_ec2_instances/locals.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + hosts = { for idx in range(var.instance_count) : idx => { + ipv6 = try(aws_instance.targets[idx].ipv6_addresses[0], "") + public_ip = aws_instance.targets[idx].public_ip + private_ip = aws_instance.targets[idx].private_ip + } + } +} diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf new file mode 100644 index 000000000000..75d2bd55edc6 --- /dev/null +++ b/enos/modules/target_ec2_instances/main.tf @@ -0,0 +1,214 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_ami" "ami" { + filter { + name = "image-id" + values = [var.ami_id] + } +} + +data "aws_ec2_instance_type_offerings" "instance" { + filter { + name = "instance-type" + values = [local.instance_type] + } + + location_type = "availability-zone" +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = data.aws_ec2_instance_type_offerings.instance.locations + } +} + +data "aws_subnets" "vpc" { + filter { + name = "availability-zone" + values = data.aws_availability_zones.available.names + } + + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_instance_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +locals { + cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) + instance_type = local.instance_types[data.aws_ami.ami.architecture] + instance_types = { + "arm64" = var.instance_types["arm64"] + "x86_64" = var.instance_types["amd64"] + } + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" +} + +resource "random_string" "cluster_name" { + length = 8 + lower = true 
+ upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_iam_role" "target_instance_role" { + name = "${local.name_prefix}-instance-role" + assume_role_policy = data.aws_iam_policy_document.target_instance_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-instance-profile" + role = aws_iam_role.target_instance_role.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-role-policy" + role = aws_iam_role.target_instance_role.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-sg" + description = "Target instance security group" + vpc_id = var.vpc_id + + # External ingress + dynamic "ingress" { + for_each = var.ports_ingress + + content { + from_port = ingress.value.port + to_port = ingress.value.port + protocol = ingress.value.protocol + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + ipv6_cidr_blocks = data.aws_vpc.vpc.ipv6_cidr_block != "" ? [data.aws_vpc.vpc.ipv6_cidr_block] : null + } + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_instance" "targets" { + for_each = local.instances + + ami = var.ami_id + iam_instance_profile = aws_iam_instance_profile.target.name + // Some scenarios (autopilot, pr_replication) shut down instances to simulate failure. In those + // cases we should terminate the instance entirely rather than get stuck in stopped limbo. + instance_initiated_shutdown_behavior = "terminate" + instance_type = local.instance_type + key_name = var.ssh_keypair + subnet_id = data.aws_subnets.vpc.ids[tonumber(each.key) % length(data.aws_subnets.vpc.ids)] + vpc_security_group_ids = [aws_security_group.target.id] + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-${var.cluster_tag_key}-instance-target" + "${var.cluster_tag_key}" = local.cluster_name + }, + ) +} + +module "disable_selinux" { + depends_on = [aws_instance.targets] + source = "../disable_selinux" + count = var.disable_selinux == true ? 1 : 0 + + hosts = local.hosts +} diff --git a/enos/modules/target_ec2_instances/outputs.tf b/enos/modules/target_ec2_instances/outputs.tf new file mode 100644 index 000000000000..674c5cf7b1de --- /dev/null +++ b/enos/modules/target_ec2_instances/outputs.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 instance target hosts" + value = local.hosts +} diff --git a/enos/modules/target_ec2_instances/variables.tf b/enos/modules/target_ec2_instances/variables.tf new file mode 100644 index 000000000000..9718f2fdaea9 --- /dev/null +++ b/enos/modules/target_ec2_instances/variables.tf @@ -0,0 +1,85 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { "Project" : "vault-ci" } +} + +variable "ports_ingress" { + description = "Ports mappings to allow for ingress" + type = list(object({ + description = string + port = number + protocol = string + })) +} + +variable "disable_selinux" { + description = "Optionally disable SELinux for certain distros/versions" + type = bool + default = true +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "instance_types" { + description = "The instance types to use depending on architecture" + type = object({ + amd64 = string + arm64 = string + }) + default = { + amd64 = "t3a.medium" + arm64 = "t4g.medium" + } +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = [] +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/target_ec2_shim/main.tf b/enos/modules/target_ec2_shim/main.tf new file mode 100644 index 000000000000..c755668865c9 --- /dev/null +++ b/enos/modules/target_ec2_shim/main.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +variable "ami_id" { default = null } +variable "cluster_name" { default = null } +variable "cluster_tag_key" { default = null } +variable "common_tags" { default = null } +variable "disable_selinux" { default = true } +variable "instance_count" { default = 3 } +variable "instance_cpu_max" { default = null } +variable "instance_cpu_min" { default = null } +variable "instance_mem_max" { default = null } +variable "instance_mem_min" { default = null } +variable "instance_types" { default = null } +variable "max_price" { default = null } +variable "ports_ingress" { default = null } +variable "project_name" { default = null } +variable "seal_key_names" { default = null } +variable "ssh_allow_ips" { default = null } +variable "ssh_keypair" { default = null } +variable "vpc_id" { default = null } + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +output "cluster_name" { + value = coalesce(var.cluster_name, random_string.cluster_name.result) +} + +output "hosts" { + value = { for idx in range(var.instance_count) : idx => { + public_ip = "null-public-${idx}" + private_ip = "null-private-${idx}" + ipv6 = "null-ipv6-${idx}" + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf new file mode 100644 index 000000000000..4a762746e547 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/main.tf @@ -0,0 +1,466 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "fleet" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + ] + } + + statement { + effect = "Deny" + + resources = [ + "arn:aws:ec2:*:*:instance/*", + ] + + actions = [ + "ec2:RunInstances", + ] + + condition { + test = "StringNotEquals" + variable = "ec2:InstanceMarketType" + values = ["spot"] + } + } + + statement { + resources = ["*"] + + actions = [ + "iam:PassRole", + ] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = [ + "ec2.amazonaws.com", + ] + } + } 
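+  // Assumption worth flagging: our scenarios don't register fleet instances with a load balancer +  // today, but spot fleet roles conventionally carry the two ELB registration permissions below in +  // case a request is ever configured to attach its fulfilled instances to one.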
+ + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*", + ] + + actions = [ + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + ] + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:*/*" + ] + + actions = [ + "elasticloadbalancing:RegisterTargets" + ] + } +} + +data "aws_iam_policy_document" "fleet_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["spotfleet.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:RequestSpotFleet only allows up to 4 InstanceRequirements overrides so we can only ever +// request a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. +resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_iam_role" "fleet" { + name = "${local.name_prefix}-fleet-role" + assume_role_policy = data.aws_iam_policy_document.fleet_role.json +} + +resource "aws_iam_role_policy" "fleet" { + name = "${local.name_prefix}-fleet-policy" + role = aws_iam_role.fleet.id + policy = data.aws_iam_policy_document.fleet.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", 
data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + instance_type = null + key_name = var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we would pay on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down, we ought to +# stick with that strategy. +resource "aws_spot_fleet_request" "targets" { + allocation_strategy = local.allocation_strategy + fleet_type = "request" + iam_fleet_role = aws_iam_role.fleet.arn + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. When we are using that strategy we'll want to always set it + // to 1 to avoid rebuilding the fleet on a re-run. 
For any other strategy + // set it to zero to avoid rebuilding the fleet on a re-run. + instance_pools_to_use_count = local.allocation_strategy == "lowestPrice" ? 1 : 0 + spot_price = var.max_price + target_capacity = var.instance_count + terminate_instances_on_delete = true + wait_for_fulfillment = true + + launch_template_config { + launch_template_specification { + id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + // We cannot currently use more than one subnet[0]. Until the bug has been resolved + // we'll choose a random subnet. It would be ideal to bid across all subnets to get + // the absolute cheapest available at the time of bidding. + // + // [0] https://github.com/hashicorp/terraform-provider-aws/issues/30505 + + /* + dynamic "overrides" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = overrides.value + } + } + */ + + overrides { + subnet_id = random_shuffle.subnets.result[0] + } + } + + tags = merge( + var.common_tags, + local.fleet_tags, + ) +} + +resource "time_sleep" "wait_for_fulfillment" { + depends_on = [aws_spot_fleet_request.targets] + create_duration = "2s" +} + +data "aws_instances" "targets" { + depends_on = [ + time_sleep.wait_for_fulfillment, + aws_spot_fleet_request.targets, + ] + + instance_tags = local.fleet_tags + instance_state_names = [ + "pending", + "running", + ] + + filter { + name = "image-id" + values = [var.ami_id] + } + + filter { + name = "iam-instance-profile.arn" + values = [aws_iam_instance_profile.target.arn] + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_spot_fleet_request.targets, + data.aws_instances.targets + ] + for_each = local.instances + + instance_id = data.aws_instances.targets.ids[each.key] +} + +module "disable_selinux" { + source = "../disable_selinux" + count = var.disable_selinux == true ? 1 : 0 + + // These addresses must come from the data source: this module has no managed aws_instance + // resource because the spot fleet launches the instances for us. + hosts = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/outputs.tf b/enos/modules/target_ec2_spot_fleet/outputs.tf new file mode 100644 index 000000000000..505db0e4eb88 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/outputs.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null) + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/variables.tf b/enos/modules/target_ec2_spot_fleet/variables.tf new file mode 100644 index 000000000000..af6c0dc04f82 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/variables.tf @@ -0,0 +1,96 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "Vault" + } +} + +variable "disable_selinux" { + description = "Optionally disable SELinux for certain distros/versions" + type = bool + default = true +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPUs for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPUs for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "max_price" { + description = "The maximum hourly price to pay for each target instance" + type = string + default = "0.0416" +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf index cb112020b390..e5d19667c276 100644 --- a/enos/modules/vault_agent/main.tf +++ b/enos/modules/vault_agent/main.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { @@ -7,11 +7,32 @@ terraform { source = "hashicorp/aws" } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } +variable "ip_version" { + type = number + default = 4 + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_agent_port" { + type = number + description = "The listener port number for the Vault Agent" +} + variable "vault_agent_template_destination" { type = string description = "The destination of the template rendered by Agent" @@ -27,44 +48,44 @@ variable "vault_root_token" { description = "The Vault root token" } -variable "vault_instances" { +variable "hosts" { type = map(object({ + ipv6 = string private_ip = string public_ip = string })) description = "The Vault cluster instances that were created" } -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - variable "vault_install_dir" { type = string description = "The directory where the Vault binary will be installed" } locals { - vault_instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } + agent_listen_addr = "${var.ip_version == 4 ? "127.0.0.1" : "[::1]"}:${var.vault_agent_port}" } resource "enos_remote_exec" "set_up_approle_auth_and_agent" { - content = templatefile("${path.module}/templates/set-up-approle-and-agent.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_agent_template_destination = var.vault_agent_template_destination - vault_agent_template_contents = var.vault_agent_template_contents - }) + environment = { + AGENT_LISTEN_ADDR = local.agent_listen_addr, + VAULT_ADDR = var.vault_addr, + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination, + VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents, + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")] transport = { ssh = { - host = local.vault_instances[0].public_ip + host = var.hosts[0].public_ip } } } + +output "vault_agent_listen_addr" { + description = "The vault agent listen address" + value = local.agent_listen_addr +} diff --git a/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh new file mode 100644 index 000000000000..6af219ab1b87 --- /dev/null +++ b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +[[ -z "$AGENT_LISTEN_ADDR" ]] && fail "AGENT_LISTEN_ADDR env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_AGENT_TEMPLATE_CONTENTS" ]] && fail "VAULT_AGENT_TEMPLATE_CONTENTS env variable has not been set" +[[ -z "$VAULT_AGENT_TEMPLATE_DESTINATION" ]] && fail "VAULT_AGENT_TEMPLATE_DESTINATION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +cat > /tmp/vault-agent.hcl <<- EOM +pid_file = "/tmp/pidfile" + +vault { + address = "${VAULT_ADDR}" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +cache { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "${AGENT_LISTEN_ADDR}" + tls_disable = true +} + +template { + destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}" + contents = "${VAULT_AGENT_TEMPLATE_CONTENTS}" + exec { + command = "pkill -F /tmp/pidfile" + } +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Agent is still running from a previous run, kill it +pkill -F /tmp/pidfile || true + +# If the template file already exists, remove it +rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true + +# Run agent (it will kill itself when it finishes rendering the template) +if ! $binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1; then + fail "failed to run vault agent: $(cat /tmp/agent-logs.txt)" +fi diff --git a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh b/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh deleted file mode 100644 index 42a097641642..000000000000 --- a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) -$binpath auth disable approle || true - -approle_create_status=$($binpath auth enable approle) - -approle_status=$($binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) - -ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') - -if [[ "$ROLEID" == '' ]]; then - fail "expected ROLEID to be nonempty, but it is empty" -fi - -SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') - -if [[ "$SECRETID" == '' ]]; then - fail "expected SECRETID to be nonempty, but it is empty" -fi - -echo $ROLEID > /tmp/role-id -echo $SECRETID > /tmp/secret-id - -cat > /tmp/vault-agent.hcl <<- EOM -pid_file = "/tmp/pidfile" - -vault { - address = "http://127.0.0.1:8200" - tls_skip_verify = true - retry { - num_retries = 10 - } -} - -cache { - enforce_consistency = "always" - use_auto_auth_token = true -} - -listener "tcp" { - address = "127.0.0.1:8100" - tls_disable = true -} - -template { - destination = "${vault_agent_template_destination}" - contents = "${vault_agent_template_contents}" - exec { - command = "pkill -F /tmp/pidfile" - } -} - -auto_auth { - method { - type = "approle" - config = { - role_id_file_path = "/tmp/role-id" - secret_id_file_path = "/tmp/secret-id" - } - } - sink { - type = "file" - config = { - path = "/tmp/token" - } - } -} -EOM - -# If Agent is still running from a previous run, kill it -pkill -F /tmp/pidfile || true - -# If the template file already exists, remove it -rm ${vault_agent_template_destination} || true - -# Run agent (it will kill itself when it finishes rendering the template) -$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1 diff --git a/enos/modules/vault_artifactory_artifact/locals.tf b/enos/modules/vault_artifactory_artifact/locals.tf deleted file mode 100644 index 708813faa454..000000000000 --- a/enos/modules/vault_artifactory_artifact/locals.tf +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -locals { - - // file name extensions for the install packages of vault for the various architectures, distributions and editions - package_extensions = { - amd64 = { - ubuntu = { - # "oss" = "-1_amd64.deb" - "ent" = "-1_amd64.deb" - "ent.hsm" = "-1_amd64.deb" - } - rhel = { - # "oss" = "-1.x86_64.rpm" - "ent" = "-1.x86_64.rpm" - "ent.hsm" = "-1.x86_64.rpm" - } - } - arm64 = { - ubuntu = { - # "oss" = "-1_arm64.deb" - "ent" = "-1_arm64.deb" - } - rhel = { - # "oss" = "-1.aarch64.rpm" - "ent" = "-1.aarch64.rpm" - } - } - } - - // product_version --> artifact_version - artifact_version = replace(var.product_version, var.edition, "ent") - - // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) - artifact_package_release_names = { - ubuntu = { - "oss" = "vault_" - "ent" = "vault-enterprise_", - "ent.hsm" = "vault-enterprise-hsm_", - }, - rhel = { - "oss" = "vault-" - "ent" = "vault-enterprise-", - "ent.hsm" = "vault-enterprise-hsm-", - } - } - - artifact_types = ["package", "bundle"] - - // edition --> artifact name edition - artifact_name_edition = { - "oss" = "" - "ent" = "" - "ent.hsm" = ".hsm" - "ent.fips1402" = ".fips1402" - "ent.hsm.fips1402" = ".hsm.fips1402" - } - - artifact_name_prefix = var.artifact_type == "package" ? local.artifact_package_release_names[var.distro][var.edition] : "vault_" - artifact_name_extension = var.artifact_type == "package" ? local.package_extensions[var.arch][var.distro][var.edition] : "_linux_${var.arch}.zip" - artifact_name = var.artifact_type == "package" ? "${local.artifact_name_prefix}${replace(local.artifact_version, "-", "~")}${local.artifact_name_extension}" : "${local.artifact_name_prefix}${var.product_version}${local.artifact_name_extension}" -} diff --git a/enos/modules/vault_artifactory_artifact/main.tf b/enos/modules/vault_artifactory_artifact/main.tf deleted file mode 100644 index 0f0df3865c7a..000000000000 --- a/enos/modules/vault_artifactory_artifact/main.tf +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - version = ">= 0.2.3" - } - } -} - -data "enos_artifactory_item" "vault" { - username = var.artifactory_username - token = var.artifactory_token - name = local.artifact_name - host = var.artifactory_host - repo = var.artifactory_repo - path = var.edition == "oss" ? "vault/*" : "vault-enterprise/*" - properties = tomap({ - "commit" = var.revision - "product-name" = var.edition == "oss" ? "vault" : "vault-enterprise" - "product-version" = local.artifact_version - }) -} diff --git a/enos/modules/vault_artifactory_artifact/outputs.tf b/enos/modules/vault_artifactory_artifact/outputs.tf deleted file mode 100644 index c100c45ddd97..000000000000 --- a/enos/modules/vault_artifactory_artifact/outputs.tf +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0
-
-
-variable "artifactory_username" {
-  type        = string
-  description = "The username to use when connecting to artifactory"
-  default     = null
-}
-
-variable "artifactory_token" {
-  type        = string
-  description = "The token to use when connecting to artifactory"
-  default     = null
-  sensitive   = true
-}
-
-variable "artifactory_host" {
-  type        = string
-  description = "The artifactory host to search for vault artifacts"
-  default     = "https://artifactory.hashicorp.engineering/artifactory"
-}
-
-variable "artifactory_repo" {
-  type        = string
-  description = "The artifactory repo to search for vault artifacts"
-  default     = "hashicorp-crt-stable-local*"
-}
-variable "arch" {}
-variable "artifact_type" {}
-variable "distro" {}
-variable "edition" {}
-variable "instance_type" {}
-variable "revision" {}
-variable "product_version" {}
-variable "build_tags" { default = null }
-variable "bundle_path" { default = null }
-variable "goarch" { default = null }
-variable "goos" { default = null }
diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf
new file mode 100644
index 000000000000..1fbe31547a98
--- /dev/null
+++ b/enos/modules/vault_cluster/main.tf
@@ -0,0 +1,412 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    # We need to specify the provider source in each module until we publish it
+    # to the public registry
+    enos = {
+      source  = "registry.terraform.io/hashicorp-forge/enos"
+      version = ">= 0.4.0"
+    }
+  }
+}
+
+data "enos_environment" "localhost" {}
+
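Terraform can only plan `for_each` and `count` over collections whose keys are known at plan time, and the locals that open this module (below) derive stable string indexes for exactly that reason. A self-contained sketch of the pattern, assuming an illustrative `hosts` variable:

```
variable "hosts" {
  type = map(object({ public_ip = string }))
}

locals {
  # "0".."n-1" are known at plan time even though host attributes are not.
  instances = [for idx in range(length(var.hosts)) : tostring(idx)]
  leader    = toset(slice(local.instances, 0, 1))
  followers = toset(slice(local.instances, 1, length(local.instances)))
}
```

+locals {
+  audit_device_file_path = "/var/log/vault/vault_audit.log"
+  audit_socket_port      = "9090"
+  bin_path               = "${var.install_dir}/vault"
+  consul_bin_path        = "${var.consul_install_dir}/consul"
+  enable_audit_devices   = var.enable_audit_devices && var.initialize_cluster
+  // In order to get Terraform to plan, we have to use collections with keys
+  // that are known at plan time, so var.hosts must be a map with known keys.
+  // Here we're creating locals that keep track of index values that point to
+  // our target hosts.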
+ followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.hosts)) : tostring(idx)] + key_shares = { + "awskms" = null + "shamir" = 5 + "pkcs11" = null + } + key_threshold = { + "awskms" = null + "shamir" = 3 + "pkcs11" = null + } + leader = toset(slice(local.instances, 0, 1)) + netcat_command = { + amzn = "nc" + opensuse-leap = "netcat" + rhel = "nc" + sles = "nc" + ubuntu = "netcat" + } + recovery_shares = { + "awskms" = 5 + "shamir" = null + "pkcs11" = 5 + } + recovery_threshold = { + "awskms" = 3 + "shamir" = null + "pkcs11" = 3 + } + vault_service_user = "vault" +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_bundle_install" "consul" { + for_each = { + for idx, host in var.hosts : idx => var.hosts[idx] + if var.storage_backend == "consul" + } + + destination = var.consul_install_dir + release = merge(var.consul_release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# We run install_packages before we install Vault because for some combinations of +# certain Linux distros and artifact types (e.g. SLES and RPM packages), there may +# be packages that are required to perform Vault installation (e.g. openssl). +module "install_packages" { + source = "../install_packages" + + hosts = var.hosts + packages = var.packages +} + +resource "enos_bundle_install" "vault" { + for_each = var.hosts + depends_on = [ + module.install_packages, // Don't race for the package manager locks with install_packages + ] + + destination = var.install_dir + release = var.release == null ? var.release : merge({ product = "vault" }, var.release) + artifactory = var.artifactory_release + path = var.local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.consul_bin_path + data_dir = var.consul_data_dir + config = { + # GetPrivateInterfaces is a go-sockaddr template that helps Consul get the correct + # addr in all of our default cases. This is required in the case of Amazon Linux, + # because amzn has a default docker listener that will make Consul try to use the + # incorrect addr. 
+ bind_addr = "{{ GetPrivateInterfaces | include \"type\" \"IP\" | sort \"default\" | limit 1 | attr \"address\"}}" + data_dir = var.consul_data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.backend_cluster_tag_key} tag_value=${var.backend_cluster_name}"] + server = false + bootstrap_expect = 0 + license = var.consul_license + log_level = var.consul_log_level + log_file = var.consul_log_file + } + license = var.consul_license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.hosts[each.key].public_ip + } + } +} + +module "start_vault" { + source = "../start_vault" + + depends_on = [ + enos_consul_start.consul, + module.install_packages, + enos_bundle_install.vault, + ] + + cluster_name = var.cluster_name + cluster_port = var.cluster_port + cluster_tag_key = var.cluster_tag_key + config_dir = var.config_dir + config_mode = var.config_mode + external_storage_port = var.external_storage_port + hosts = var.hosts + install_dir = var.install_dir + ip_version = var.ip_version + license = var.license + listener_port = var.listener_port + log_level = var.log_level + manage_service = var.manage_service + seal_attributes = var.seal_attributes + seal_attributes_secondary = var.seal_attributes_secondary + seal_type = var.seal_type + seal_type_secondary = var.seal_type_secondary + service_username = local.vault_service_user + storage_backend = var.storage_backend + storage_backend_attrs = var.storage_backend_addl_config + storage_node_prefix = var.storage_node_prefix +} + +resource "enos_vault_init" "leader" { + depends_on = [ + module.start_vault, + ] + for_each = toset([ + for idx, leader in local.leader : leader + if var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[0].config.api_addr + + key_shares = local.key_shares[var.seal_type] + key_threshold = local.key_threshold[var.seal_type] + + recovery_shares = local.recovery_shares[var.seal_type] + recovery_threshold = local.recovery_threshold[var.seal_type] + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +resource "enos_vault_unseal" "leader" { + depends_on = [ + module.start_vault, + enos_vault_init.leader, + ] + for_each = enos_vault_init.leader // only unseal the leader if we initialized it + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[each.key].config.api_addr + seal_type = var.seal_type + unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.hosts[tolist(local.leader)[0]].public_ip + } + } +} + +resource "enos_vault_unseal" "followers" { + depends_on = [ + enos_vault_init.leader, + enos_vault_unseal.leader, + ] + // Only unseal followers if we're not using an auto-unseal method and we've + // initialized the cluster + for_each = toset([ + for idx, follower in local.followers : follower + if var.seal_type == "shamir" && var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.followers[each.key].config.api_addr + seal_type = var.seal_type + unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +// Force unseal the cluster. This is used if the vault-cluster module is used +// to add additional nodes to a cluster via auto-pilot, or some other means. 
+// When that happens we'll want to set initialize_cluster to false and
+// force_unseal to true.
+resource "enos_vault_unseal" "maybe_force_unseal" {
+  depends_on = [
+    module.start_vault.followers,
+  ]
+  for_each = {
+    for idx, host in var.hosts : idx => host
+    if var.force_unseal && !var.initialize_cluster
+  }
+
+  bin_path   = local.bin_path
+  vault_addr = module.start_vault.api_addr_localhost
+  seal_type  = var.seal_type
+  unseal_keys = coalesce(
+    var.shamir_unseal_keys,
+    try(enos_vault_init.leader[0].unseal_keys_hex, null),
+  )
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
+
+# Add the vault install location to the PATH and set up VAULT_ADDR and VAULT_TOKEN environment
+# variables in the login shell so we don't have to do it if/when we log in to a cluster node.
+resource "enos_remote_exec" "configure_login_shell_profile" {
+  depends_on = [
+    enos_vault_init.leader,
+    enos_vault_unseal.leader,
+  ]
+  for_each = var.hosts
+
+  environment = {
+    VAULT_ADDR        = module.start_vault.api_addr_localhost
+    VAULT_TOKEN       = var.root_token != null ? var.root_token : try(enos_vault_init.leader[0].root_token, "_")
+    VAULT_INSTALL_DIR = var.install_dir
+  }
+
+  scripts = [abspath("${path.module}/scripts/set-up-login-shell-profile.sh")]
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
+
+# Add a motd to assist people that might be logging in.
+resource "enos_file" "motd" {
+  depends_on = [
+    enos_remote_exec.configure_login_shell_profile
+  ]
+  for_each = var.hosts
+
+  destination = "/etc/motd"
+  content     = <<-EOT
[...]
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set"
+[[ -z "$SERVICE_USER" ]] && fail "SERVICE_USER env variable has not been set"
+
+LOG_DIR=$(dirname "$LOG_FILE_PATH")
+
+function retry {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    exit=$?
+    wait=10
+    count=$((count + 1))
+
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+    else
+      return "$exit"
+    fi
+  done
+
+  return 0
+}
+
+retry 7 id -a "$SERVICE_USER"
+
+sudo mkdir -p "$LOG_DIR"
+sudo chown -R "$SERVICE_USER":"$SERVICE_USER" "$LOG_DIR"
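The script below enables Vault's file, syslog, and socket audit devices. Once it has run, a quick way to confirm the result by hand, assuming VAULT_ADDR and VAULT_TOKEN are exported on the node, might look like:

```
# Illustrative check; expect file/, syslog/, and socket/ entries.
vault audit list -detailed
```

diff --git a/enos/modules/vault_cluster/scripts/enable-audit-devices.sh b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh
new file mode 100644
index 000000000000..a93bd5503b24
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -exo pipefail
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
+[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set"
+[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_BIN_PATH" ]] && fail "VAULT_BIN_PATH env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+enable_file_audit_device() {
+  $VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH"
+}
+
+enable_syslog_audit_device() {
+  $VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH"
+}
+
+enable_socket_audit_device() {
+  if [ "$IP_VERSION" = "4" ]; then
+    "$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT"
+  else
+    "$VAULT_BIN_PATH" audit enable socket address="[::1]:$SOCKET_PORT"
+  fi
+}
+
+main() {
+  if !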
enable_file_audit_device; then
+    fail "Failed to enable vault file audit device"
+  fi
+
+  if ! enable_syslog_audit_device; then
+    fail "Failed to enable vault syslog audit device"
+  fi
+
+  if ! enable_socket_audit_device; then
+    local log
+    log=$(cat /tmp/vault-socket.log)
+    fail "Failed to enable vault socket audit device: listener log: $log"
+  fi
+
+  return 0
+}
+
+main
diff --git a/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh
new file mode 100644
index 000000000000..f3a42d22a59b
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+# Determine the profile file we should write to. We only want to affect login shells, and Bash will
+# only read one of these, in order of precedence.
+determineProfileFile() {
+  if [ -f "$HOME/.bash_profile" ]; then
+    printf "%s/.bash_profile\n" "$HOME"
+    return 0
+  fi
+
+  if [ -f "$HOME/.bash_login" ]; then
+    printf "%s/.bash_login\n" "$HOME"
+    return 0
+  fi
+
+  printf "%s/.profile\n" "$HOME"
+}
+
+appendVaultProfileInformation() {
+  tee -a "$1" <<< "export PATH=$PATH:$VAULT_INSTALL_DIR
+export VAULT_ADDR=$VAULT_ADDR
+export VAULT_TOKEN=$VAULT_TOKEN"
+}
+
+main() {
+  local profile_file
+  if ! profile_file=$(determineProfileFile); then
+    fail "failed to determine login shell profile file location"
+  fi
+
+  # If vault_cluster is used more than once, e.g. autopilot or replication, this module can
+  # be called more than once. Short circuit here if our profile is already set up.
+  if grep VAULT_ADDR < "$profile_file"; then
+    exit 0
+  fi
+
+  if ! appendVaultProfileInformation "$profile_file"; then
+    fail "failed to write vault configuration to login shell profile"
+  fi
+
+  exit 0
+}
+
+main
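determineProfileFile above mirrors Bash's login-shell lookup: the first of ~/.bash_profile, ~/.bash_login, and ~/.profile that exists wins. The same precedence can be checked by hand with a small standalone loop:

```
# Illustrative: print the profile file a Bash login shell would read.
for f in "$HOME/.bash_profile" "$HOME/.bash_login" "$HOME/.profile"; do
  if [ -f "$f" ]; then echo "$f"; break; fi
done
```

diff --git a/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh
new file mode 100644
index 000000000000..9c714a335a21
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -exo pipefail
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
+[[ -z "$NETCAT_COMMAND" ]] && fail "NETCAT_COMMAND env variable has not been set"
+[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set"
+
+if [ "$IP_VERSION" = "4" ]; then
+  export SOCKET_ADDR="127.0.0.1"
+else
+  export SOCKET_ADDR="::1"
+fi
+
+socket_listener_procs() {
+  pgrep -x "${NETCAT_COMMAND}"
+}
+
+kill_socket_listener() {
+  pkill "${NETCAT_COMMAND}"
+}
+
+test_socket_listener() {
+  case $IP_VERSION in
+    4)
+      "${NETCAT_COMMAND}" -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null
+      ;;
+    6)
+      "${NETCAT_COMMAND}" -6 -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null
+      ;;
+    *)
+      fail "unknown IP_VERSION: $IP_VERSION"
+      ;;
+  esac
+}
+
+start_socket_listener() {
+  if socket_listener_procs; then
+    test_socket_listener
+    return $?
+  fi
+
+  # Run nc to listen on port 9090 for the socket auditor.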
We spawn nc + # with nohup to ensure that the listener doesn't expect a SIGHUP and + # thus block the SSH session from exiting or terminating on exit. + case $IP_VERSION in + 4) + nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & + ;; + 6) + nohup nc -6 -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac +} + +read_log() { + local f + f=/tmp/vault-socket.log + [[ -f "$f" ]] && cat "$f" +} + +main() { + if socket_listener_procs; then + # Clean up old nc's that might not be working + kill_socket_listener + fi + + if ! start_socket_listener; then + fail "Failed to start audit socket listener: socket listener log: $(read_log)" + fi + + # wait for nc to listen + sleep 1 + + if ! test_socket_listener; then + fail "Error testing socket listener: socket listener log: $(read_log)" + fi + + return 0 +} + +main diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf new file mode 100644 index 000000000000..1e4de12e53d9 --- /dev/null +++ b/enos/modules/vault_cluster/variables.tf @@ -0,0 +1,291 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "artifactory_release" { + type = object({ + username = string + token = string + url = string + sha256 = string + }) + description = "The Artifactory release information to install Vault artifacts from Artifactory" + default = null +} + +variable "backend_cluster_name" { + type = string + description = "The name of the backend cluster" + default = null +} + +variable "backend_cluster_tag_key" { + type = string + description = "The tag key for searching for backend nodes" + default = null +} + +variable "cluster_name" { + type = string + description = "The Vault cluster name" + default = null +} + +variable "cluster_port" { + type = number + description = "The cluster port for Vault to listen on" + default = 8201 +} + +variable "cluster_tag_key" { + type = string + description = "The Vault cluster tag key" + default = "retry_join" +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_mode" { + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" + + validation { + condition = contains(["env", "file"], var.config_mode) + error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." 
+  }
+}
+
+variable "config_env_vars" {
+  description = "Optional Vault configuration environment variables to set when starting Vault"
+  type        = map(string)
+  default     = null
+}
+
+variable "consul_data_dir" {
+  type        = string
+  description = "The directory where Consul will store data"
+  default     = "/opt/consul/data"
+}
+
+variable "consul_install_dir" {
+  type        = string
+  description = "The directory where the Consul binary will be installed"
+  default     = "/opt/consul/bin"
+}
+
+variable "consul_license" {
+  type        = string
+  sensitive   = true
+  description = "The Consul Enterprise license"
+  default     = null
+}
+
+variable "consul_log_file" {
+  type        = string
+  description = "The file where Consul will write log output"
+  default     = "/var/log/consul.log"
+}
+
+variable "consul_log_level" {
+  type        = string
+  description = "The Consul service log level"
+  default     = "info"
+
+  validation {
+    condition     = contains(["trace", "debug", "info", "warn", "error"], var.consul_log_level)
+    error_message = "The consul_log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'."
+  }
+}
+
+variable "consul_release" {
+  type = object({
+    version = string
+    edition = string
+  })
+  description = "Consul release version and edition to install from releases.hashicorp.com"
+  default = {
+    version = "1.15.1"
+    edition = "ce"
+  }
+}
+
+variable "distro_version" {
+  type        = string
+  description = "The Linux distro version"
+  default     = null
+}
+
+variable "enable_audit_devices" {
+  description = "If true, every audit device will be enabled"
+  type        = bool
+  default     = true
+}
+
+variable "external_storage_port" {
+  type        = number
+  description = "The port to connect to when using external storage"
+  default     = 8500
+}
+
+variable "force_unseal" {
+  type        = bool
+  description = "Always unseal the Vault cluster even if we're not initializing it"
+  default     = false
+}
+
+variable "hosts" {
+  description = "The target machine host addresses to use for the Vault cluster"
+  type = map(object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  }))
+}
+
+variable "initialize_cluster" {
+  type        = bool
+  description = "Initialize the Vault cluster"
+  default     = true
+}
+
+variable "install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+  default     = "/opt/vault/bin"
+}
+
+variable "ip_version" {
+  type        = number
+  description = "The IP version to use for the Vault TCP listeners"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
+}
+
+variable "license" {
+  type        = string
+  sensitive   = true
+  description = "The value of the Vault license"
+  default     = null
+}
+
+variable "listener_port" {
+  type        = number
+  description = "The port for Vault to listen on"
+  default     = 8200
+}
+
+variable "local_artifact_path" {
+  type        = string
+  description = "The path to a locally built vault artifact to install. It can be a zip archive, RPM, or Debian package"
+  default     = null
+}
+
+variable "log_level" {
+  type        = string
+  description = "The vault service log level"
+  default     = "info"
+
+  validation {
+    condition     = contains(["trace", "debug", "info", "warn", "error"], var.log_level)
+    error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'."
+  }
+}
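When `config_mode` is "env" (validated earlier in this file), settings with VAULT_-style environment equivalents are exported rather than rendered into the HCL file, and `config_env_vars` lets callers extend that environment. A hedged sketch of the shape such a map might take; the variables shown are standard Vault environment variables, not values taken from this change:

```
config_env_vars = {
  VAULT_LOG_LEVEL = "debug"
  VAULT_API_ADDR  = "http://127.0.0.1:8200"
}
```

+variable "manage_service" {
+  type        = bool
+  description = "Manage the Vault service users and systemd unit.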
Disable this to use configuration in RPM and Debian packages"
+  default     = true
+}
+
+variable "packages" {
+  type        = list(string)
+  description = "A list of packages to install via the target host package manager"
+  default     = []
+}
+
+variable "release" {
+  type = object({
+    version = string
+    edition = string
+  })
+  description = "Vault release version and edition to install from releases.hashicorp.com"
+  default     = null
+}
+
+variable "root_token" {
+  type        = string
+  description = "The Vault root token that we can use to initialize and configure the cluster"
+  default     = null
+}
+
+variable "seal_ha_beta" {
+  description = "Enable Seal HA on clusters that meet the minimum version requirements and are Enterprise editions"
+  default     = true
+}
+
+variable "seal_attributes" {
+  description = "The auto-unseal device attributes"
+  default     = null
+}
+
+variable "seal_attributes_secondary" {
+  description = "The secondary auto-unseal device attributes"
+  default     = null
+}
+
+variable "seal_type" {
+  type        = string
+  description = "The primary seal device type"
+  default     = "awskms"
+
+  validation {
+    condition     = contains(["awskms", "pkcs11", "shamir"], var.seal_type)
+    error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported."
+  }
+}
+
+variable "seal_type_secondary" {
+  type        = string
+  description = "A secondary HA seal device type. Only supported in Vault Enterprise >= 1.15"
+  default     = "none"
+
+  validation {
+    condition     = contains(["awskms", "none", "pkcs11"], var.seal_type_secondary)
+    error_message = "The seal_type_secondary must be 'awskms', 'none', or 'pkcs11'. No other secondary seal types are supported."
+  }
+}
+
+variable "shamir_unseal_keys" {
+  type        = list(string)
+  description = "Shamir unseal keys. Often only used when adding additional nodes to an already initialized cluster."
+  default     = null
+}
+
+variable "storage_backend" {
+  type        = string
+  description = "The storage backend to use"
+  default     = "raft"
+
+  validation {
+    condition     = contains(["raft", "consul"], var.storage_backend)
+    error_message = "The storage_backend must be either raft or consul. No other storage backends are supported."
+  }
+}
+
+variable "storage_backend_addl_config" {
+  type        = map(any)
+  description = "An optional set of key value pairs to inject into the storage block"
+  default     = {}
+}
+
+variable "storage_node_prefix" {
+  type        = string
+  description = "A prefix to use for each node in the Vault storage configuration"
+  default     = "node"
+}
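Because `storage_backend_addl_config` is injected verbatim into the generated storage block, callers can tune backend-specific settings without modifying the module. An illustrative raft example; the value is an assumption for illustration, not taken from this change:

```
storage_backend_addl_config = {
  max_entry_size = 2097152 # bytes; raft's documented default is 1048576
}
```

diff --git a/enos/modules/vault_failover_demote_dr_primary/main.tf b/enos/modules/vault_failover_demote_dr_primary/main.tf
new file mode 100644
index 000000000000..819337074f9a
--- /dev/null
+++ b/enos/modules/vault_failover_demote_dr_primary/main.tf
@@ -0,0 +1,63 @@
+# Copyright (c) HashiCorp, Inc.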
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    enos = {
+      source = "registry.terraform.io/hashicorp-forge/enos"
+    }
+  }
+}
+
+variable "ip_version" {
+  type        = number
+  description = "The IP version used for the Vault TCP listener"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
+}
+
+variable "primary_leader_host" {
+  type = object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  })
+  description = "The primary cluster leader host"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+locals {
+}
+
+resource "enos_remote_exec" "demote_dr_primary" {
+  environment = {
+    VAULT_ADDR  = var.vault_addr
+    VAULT_TOKEN = var.vault_root_token
+  }
+
+  inline = ["${var.vault_install_dir}/vault write -f sys/replication/dr/primary/demote"]
+
+  transport = {
+    ssh = {
+      host = var.primary_leader_host.public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_failover_promote_dr_secondary/main.tf b/enos/modules/vault_failover_promote_dr_secondary/main.tf
new file mode 100644
index 000000000000..85382536307d
--- /dev/null
+++ b/enos/modules/vault_failover_promote_dr_secondary/main.tf
@@ -0,0 +1,69 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    enos = {
+      source = "registry.terraform.io/hashicorp-forge/enos"
+    }
+  }
+}
+
+variable "ip_version" {
+  type        = number
+  description = "The IP version used for the Vault TCP listener"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
+}
+
+variable "secondary_leader_host" {
+  type = object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  })
+  description = "The secondary cluster leader host"
+}
+
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+variable "dr_operation_token" {
+  type        = string
+  description = "The DR operation token created on the primary cluster"
+}
+
+locals {
+  dr_operation_token = var.dr_operation_token
+}
+
+resource "enos_remote_exec" "promote_dr_secondary" {
+  environment = {
+    VAULT_ADDR  = var.vault_addr
+    VAULT_TOKEN = var.vault_root_token
+  }
+
+  inline = ["${var.vault_install_dir}/vault write -f sys/replication/dr/secondary/promote dr_operation_token=${local.dr_operation_token}"]
+
+  transport = {
+    ssh = {
+      host = var.secondary_leader_host.public_ip
+    }
+  }
+}
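Taken together, the demote and promote modules (and the update-primary module that follows) drive Vault's documented DR replication endpoints. The equivalent manual CLI sequence, sketched for orientation with placeholder token values:

```
# On the old primary: step down from the primary role.
vault write -f sys/replication/dr/primary/demote

# On the DR secondary: promote it using a DR operation token.
vault write -f sys/replication/dr/secondary/promote \
  dr_operation_token="$DR_OPERATION_TOKEN"
```

diff --git a/enos/modules/vault_failover_update_dr_primary/main.tf b/enos/modules/vault_failover_update_dr_primary/main.tf
new file mode 100644
index 000000000000..cc159f2ddd84
--- /dev/null
+++ b/enos/modules/vault_failover_update_dr_primary/main.tf
@@ -0,0 +1,76 @@
+# Copyright (c) HashiCorp, Inc.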
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    enos = {
+      source = "registry.terraform.io/hashicorp-forge/enos"
+    }
+  }
+}
+
+variable "ip_version" {
+  type        = number
+  description = "The IP version used for the Vault TCP listener"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
+}
+
+variable "secondary_leader_host" {
+  type = object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  })
+  description = "The secondary cluster leader host"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+variable "dr_operation_token" {
+  type        = string
+  description = "The DR operation token created on the primary cluster"
+}
+
+variable "wrapping_token" {
+  type        = string
+  description = "The wrapping token created on the primary cluster"
+}
+
+locals {
+  dr_operation_token = var.dr_operation_token
+  wrapping_token     = var.wrapping_token
+}
+
+resource "enos_remote_exec" "update_dr_primary" {
+  environment = {
+    VAULT_ADDR  = var.vault_addr
+    VAULT_TOKEN = var.vault_root_token
+  }
+
+  inline = ["${var.vault_install_dir}/vault write sys/replication/dr/secondary/update-primary dr_operation_token=${local.dr_operation_token} token=${local.wrapping_token}"]
+
+  transport = {
+    ssh = {
+      host = var.secondary_leader_host.public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf
index d627e2454028..ef3101896fc0 100644
--- a/enos/modules/vault_get_cluster_ips/main.tf
+++ b/enos/modules/vault_get_cluster_ips/main.tf
@@ -1,14 +1,45 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
+
+/*
+
+Given our expected hosts, determine which is currently the leader and verify that all expected
+nodes are either the leader or a follower.
+
+*/
 
 terraform {
   required_providers {
     enos = {
-      source = "app.terraform.io/hashicorp-qti/enos"
+      source = "registry.terraform.io/hashicorp-forge/enos"
     }
   }
 }
 
+variable "hosts" {
+  type = map(object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The Vault cluster hosts that are expected to be in the cluster"
+}
+
+variable "ip_version" {
+  type        = number
+  description = "The IP version used for the Vault TCP listener"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
+}
+
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
+}
+
 variable "vault_install_dir" {
   type        = string
   description = "The directory where the Vault binary will be installed"
@@ -19,124 +50,136 @@ variable "vault_root_token" {
   description = "The vault root token"
 }
 
-variable "node_public_ip" {
-  type        = string
-  description = "The primary node public ip"
-  default     = ""
+locals {
+  follower_hosts_list = [
+    for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ?
+ contains(tolist(local.follower_ipv6s), var.hosts[idx].ipv6) : + contains(tolist(local.follower_private_ips), var.hosts[idx].private_ip) + ] + follower_hosts = { + for idx in range(local.host_count - 1) : idx => try(local.follower_hosts_list[idx], null) + } + follower_ipv6s = jsondecode(enos_remote_exec.follower_ipv6s.stdout) + follower_private_ips = jsondecode(enos_remote_exec.follower_private_ipv4s.stdout) + follower_public_ips = [for host in local.follower_hosts : host.public_ip] + host_count = length(var.hosts) + ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])] + leader_host_list = [ + for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ? + var.hosts[idx].ipv6 == local.leader_ipv6 : + var.hosts[idx].private_ip == local.leader_private_ip + ] + leader_host = try(local.leader_host_list[0], null) + leader_ipv6 = trimspace(enos_remote_exec.leader_ipv6.stdout) + leader_private_ip = trimspace(enos_remote_exec.leader_private_ipv4.stdout) + leader_public_ip = try(local.leader_host.public_ip, null) + private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] } -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} +resource "enos_remote_exec" "leader_private_ipv4" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } -variable "added_vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were added" - default = {} -} + scripts = [abspath("${path.module}/scripts/get-leader-ipv4.sh")] -locals { - leftover_primary_instances = var.node_public_ip != "" ? { - for k, v in var.vault_instances : k => v if contains(values(v), trimspace(var.node_public_ip)) - } : null - all_instances = var.node_public_ip != "" ? merge(var.added_vault_instances, local.leftover_primary_instances) : var.vault_instances - updated_instance_count = length(local.all_instances) - updated_instances = { - for idx in range(local.updated_instance_count) : idx => { - public_ip = values(local.all_instances)[idx].public_ip - private_ip = values(local.all_instances)[idx].private_ip + transport = { + ssh = { + host = var.hosts[0].public_ip } } - node_ip = var.node_public_ip != "" ? 
var.node_public_ip : local.updated_instances[0].public_ip - instance_private_ips = [ - for k, v in values(tomap(local.updated_instances)) : - tostring(v["private_ip"]) - ] - follower_public_ips = [ - for k, v in values(tomap(local.updated_instances)) : - tostring(v["public_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout) - ] - follower_private_ips = [ - for k, v in values(tomap(local.updated_instances)) : - tostring(v["private_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout) - ] } -resource "enos_remote_exec" "get_leader_private_ip" { +resource "enos_remote_exec" "leader_ipv6" { environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.instance_private_ips) + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token } - scripts = ["${path.module}/scripts/get-leader-private-ip.sh"] + scripts = [abspath("${path.module}/scripts/get-leader-ipv6.sh")] transport = { ssh = { - host = local.node_ip + host = var.hosts[0].public_ip } } } -output "leftover_primary_instances" { - value = local.leftover_primary_instances -} +resource "enos_remote_exec" "follower_private_ipv4s" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LEADER_PRIVATE_IP = local.leader_private_ip + VAULT_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_TOKEN = var.vault_root_token + } -output "all_instances" { - value = local.all_instances -} + scripts = [abspath("${path.module}/scripts/get-follower-ipv4s.sh")] -output "updated_instance_count" { - value = local.updated_instance_count + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } } -output "updated_instances" { - value = local.updated_instances +resource "enos_remote_exec" "follower_ipv6s" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_IPV6S = jsonencode(local.ipv6s) + VAULT_LEADER_IPV6 = local.leader_ipv6 + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/get-follower-ipv6s.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } } -output "leader_private_ip" { - value = trimspace(enos_remote_exec.get_leader_private_ip.stdout) +output "follower_hosts" { + value = local.follower_hosts } -output "leader_public_ip" { - value = element([ - for k, v in values(tomap(local.all_instances)) : - tostring(v["public_ip"]) if v["private_ip"] == trimspace(enos_remote_exec.get_leader_private_ip.stdout) - ], 0) +output "follower_ipv6s" { + value = local.follower_ipv6s } -output "vault_instance_private_ips" { - value = jsonencode(local.instance_private_ips) +output "follower_private_ips" { + value = local.follower_private_ips } output "follower_public_ips" { value = local.follower_public_ips } -output "follower_public_ip_1" { - value = element(local.follower_public_ips, 0) +output "leader_host" { + value = local.leader_host } -output "follower_public_ip_2" { - value = element(local.follower_public_ips, 1) +output "leader_hosts" { + value = { 0 : local.leader_host } } -output "follower_private_ips" { - value = local.follower_private_ips +output "leader_ipv6" { + value = local.leader_ipv6 } -output "follower_private_ip_1" { - value = element(local.follower_private_ips, 0) +output 
"leader_private_ip" { + value = local.leader_private_ip } -output "follower_private_ip_2" { - value = element(local.follower_private_ips, 1) +output "leader_public_ip" { + value = local.leader_public_ip } diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh new file mode 100644 index 000000000000..51f3b76691d3 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +getFollowerPrivateIPsFromOperatorMembers() { + if members=$($binpath operator members -format json); then + if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then + # Make sure that we got all the followers + if jq -e --argjson expected "$VAULT_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then + echo "$followers" + return 0 + fi + fi + fi + + return 1 +} + +removeIP() { + local needle + local haystack + needle=$1 + haystack=$2 + if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then + if [[ -n "$remain" ]]; then + echo "$remain" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=10 +while :; do + case $IP_VERSION in + 4) + [[ -z "$VAULT_PRIVATE_IPS" ]] && fail "VAULT_PRIVATE_IPS env variable has not been set" + [[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set" + + # Vault >= 1.10.x has the operator members. If we have that then we'll use it. + if $binpath operator -h 2>&1 | grep members &> /dev/null; then + if followers=$(getFollowerPrivateIPsFromOperatorMembers); then + echo "$followers" + exit 0 + fi + else + removeIP "$VAULT_LEADER_PRIVATE_IP" "$VAULT_PRIVATE_IPS" + + return $? + fi + ;; + 6) + echo '[]' + exit 0 + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster followers" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh new file mode 100644 index 000000000000..f51247bb73fd --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +echo "$VAULT_IPV6S" > /tmp/vaultipv6s + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +getFollowerIPV6sFromOperatorMembers() { + if members=$($binpath operator members -format json); then + if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_IPV6S" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("\\[(.+)\\]") | .[0]) as $followers | $expected - ($expected - $followers)'); then + # Make sure that we got all the followers + if jq -e --argjson expected "$VAULT_IPV6S" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then + echo "$followers" + return 0 + fi + fi + fi + + return 1 +} + +removeIP() { + local needle + local haystack + needle=$1 + haystack=$2 + if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then + if [[ -n "$remain" ]]; then + echo "$remain" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=10 +while :; do + case $IP_VERSION in + 4) + echo "[]" + exit 0 + ;; + 6) + [[ -z "$VAULT_IPV6S" ]] && fail "VAULT_IPV6S env variable has not been set" + [[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set" + + # Vault >= 1.10.x has the operator members. If we have that then we'll use it. + if $binpath operator -h 2>&1 | grep members &> /dev/null; then + if followers=$(getFollowerIPV6sFromOperatorMembers); then + echo "$followers" + exit 0 + fi + else + [[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set" + removeIP "$VAULT_LEADER_IPV6" "$VAULT_IPV6S" + exit $? + fi + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster followers" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh new file mode 100644 index 000000000000..f5697a93e9e5 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +findLeaderPrivateIP() { + # Find the leader private IP address + if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + # Some older versions of vault don't support reading sys/leader. 
Try falling back to the cli status. + if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=5 +while :; do + case $IP_VERSION in + 4) + # Find the leader private IP address + if ip=$(findLeaderPrivateIP); then + echo "$ip" + exit 0 + fi + ;; + 6) + exit 0 + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster leader" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh new file mode 100644 index 000000000000..d5d5a4513b59 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +findLeaderIPV6() { + # Find the leader private IP address + if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status. + if ip=$($binpath status -format json | jq -r '.leader_address | scan("\\[(.+)\\]") | .[0]'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=5 +while :; do + # Find the leader private IP address + case $IP_VERSION in + 4) + exit 0 + ;; + 6) + if ip=$(findLeaderIPV6); then + echo "$ip" + exit 0 + fi + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster leader" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh deleted file mode 100644 index 98b2d21fdda6..000000000000 --- a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault -instance_ips=${VAULT_INSTANCE_PRIVATE_IPS} - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=5 -while :; do - # Find the leader private IP address - leader_private_ip=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - match_ip=$(echo $instance_ips |jq -r --argjson ip $leader_private_ip 'map(select(. 
== $ip))') - - if [[ "$leader_private_ip" != 'null' ]] && [[ "$match_ip" != '[]' ]]; then - echo "$leader_private_ip" | sed 's/\"//g' - exit 0 - fi - - wait=$((5 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "leader IP address $leader_private_ip was not found in $instance_ips" - fi -done diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf new file mode 100644 index 000000000000..b69b052c4b62 --- /dev/null +++ b/enos/modules/vault_proxy/main.tf @@ -0,0 +1,100 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_proxy_pidfile" { + type = string + description = "The filepath where the Vault Proxy pid file is kept" + default = "/tmp/pidfile" +} + +variable "vault_proxy_port" { + type = number + description = "The Vault Proxy listener port" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" +} + +locals { + vault_proxy_address = "${var.ip_version == 4 ? "127.0.0.1" : "[::1]"}:${var.vault_proxy_port}" +} + +resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_ADDRESS = local.vault_proxy_address + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +resource "enos_remote_exec" "use_proxy" { + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/use-proxy.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } + + depends_on = [ + enos_remote_exec.set_up_approle_auth_and_proxy + ] +} diff --git a/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh new file mode 100644 index 000000000000..a4be7e858fa7 --- /dev/null +++ b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
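For reference, the vault_proxy module's local above composes the proxy listener address differently for IPv4 and IPv6, because an IPv6 literal must be bracketed before a port can be appended. A minimal bash sketch with hypothetical values:

ip_version=6                # hypothetical value
vault_proxy_port=8100       # hypothetical port
if [ "$ip_version" -eq 4 ]; then
  vault_proxy_address="127.0.0.1:${vault_proxy_port}"
else
  vault_proxy_address="[::1]:${vault_proxy_port}"
fi
echo "$vault_proxy_address" # prints: [::1]:8100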
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+binpath=${VAULT_INSTALL_DIR}/vault
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+# If approle was already enabled, disable it first since we're about to re-enable it
+# (the || true keeps us from failing when it doesn't exist yet)
+$binpath auth disable approle || true
+
+$binpath auth enable approle
+
+$binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000
+
+ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id')
+
+if [[ "$ROLEID" == '' ]]; then
+  fail "expected ROLEID to be nonempty, but it is empty"
+fi
+
+SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id')
+
+if [[ "$SECRETID" == '' ]]; then
+  fail "vault write -f --format=json auth/approle/role/proxy-role/secret-id did not return a .data.secret_id"
+fi
+
+echo "$ROLEID" > /tmp/role-id
+echo "$SECRETID" > /tmp/secret-id
+
+# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl
+# The Proxy references the Vault server address passed in as $VAULT_ADDR
+# The Proxy itself listens at the address passed in as $VAULT_PROXY_ADDRESS
+cat > /tmp/vault-proxy.hcl <<- EOM
+pid_file = "${VAULT_PROXY_PIDFILE}"
+
+vault {
+  address = "${VAULT_ADDR}"
+  tls_skip_verify = true
+  retry {
+    num_retries = 10
+  }
+}
+
+api_proxy {
+  enforce_consistency = "always"
+  use_auto_auth_token = true
+}
+
+listener "tcp" {
+  address = "${VAULT_PROXY_ADDRESS}"
+  tls_disable = true
+}
+
+auto_auth {
+  method {
+    type = "approle"
+    config = {
+      role_id_file_path = "/tmp/role-id"
+      secret_id_file_path = "/tmp/secret-id"
+    }
+  }
+  sink {
+    type = "file"
+    config = {
+      path = "/tmp/token"
+    }
+  }
+}
+EOM
+
+# If Proxy is still running from a previous run, kill it
+pkill -F "${VAULT_PROXY_PIDFILE}" || true
+
+# Run proxy in the background
+$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 &
diff --git a/enos/modules/vault_proxy/scripts/use-proxy.sh b/enos/modules/vault_proxy/scripts/use-proxy.sh
new file mode 100644
index 000000000000..23a62e044c4b
--- /dev/null
+++ b/enos/modules/vault_proxy/scripts/use-proxy.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+[[ -z "$VAULT_PROXY_ADDRESS" ]] && fail "VAULT_PROXY_ADDRESS env variable has not been set"
+[[ -z "$VAULT_PROXY_PIDFILE" ]] && fail "VAULT_PROXY_PIDFILE env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+# Point the Vault CLI at the Vault Proxy, which is listening at the address
+# passed in as $VAULT_PROXY_ADDRESS.
+export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}"
+
+# Explicitly unset VAULT_TOKEN to make sure that the Vault Proxy's token
+# is used.
+unset VAULT_TOKEN
+
+# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env
+# var) to look up the details of the Proxy's token and make sure that the
+# .data.path field contains 'auth/approle/login', thus confirming that the Proxy
+# automatically authenticated itself.
+if !
$binpath token lookup -format=json | jq -Mer --arg expected "auth/approle/login" '.data.path == $expected'; then
+  fail "expected proxy to automatically authenticate using 'auth/approle/login', got: '$($binpath token lookup -format=json | jq -r '.data.path')'"
+fi
+
+# Now that we're done, kill the proxy
+pkill -F "${VAULT_PROXY_PIDFILE}" || true
diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf
index a0da72249e1f..8bfef463755f 100644
--- a/enos/modules/vault_raft_remove_peer/main.tf
+++ b/enos/modules/vault_raft_remove_peer/main.tf
@@ -1,28 +1,31 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 terraform {
   required_providers {
     enos = {
-      source = "app.terraform.io/hashicorp-qti/enos"
+      source = "registry.terraform.io/hashicorp-forge/enos"
     }
   }
 }
 
-variable "vault_cluster_addr_port" {
-  description = "The Raft cluster address port"
-  type        = string
-  default     = "8201"
-}
-
-variable "vault_install_dir" {
-  type        = string
-  description = "The directory where the Vault binary will be installed"
+variable "hosts" {
+  type = map(object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The old vault nodes to be removed"
 }
 
-variable "vault_instance_count" {
+variable "ip_version" {
   type        = number
-  description = "How many vault instances are in the cluster"
+  description = "The IP version used for the Vault TCP listener"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
 }
 
 variable "operator_instance" {
@@ -30,41 +33,37 @@ variable "operator_instance" {
   description = "The ip address of the operator (Voter) node"
 }
 
-variable "remove_vault_instances" {
-  type = map(object({
-    private_ip = string
-    public_ip  = string
-  }))
-  description = "The old vault nodes to be removed"
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
 }
 
-variable "vault_root_token" {
+variable "vault_cluster_addr_port" {
+  description = "The Raft cluster address port"
   type        = string
-  description = "The vault root token"
 }
 
-locals {
-  instances = {
-    for idx in range(var.vault_instance_count) : idx => {
-      public_ip  = values(var.remove_vault_instances)[idx].public_ip
-      private_ip = values(var.remove_vault_instances)[idx].private_ip
-    }
-  }
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
 }
 
 resource "enos_remote_exec" "vault_raft_remove_peer" {
-  for_each = local.instances
+  for_each = var.hosts
 
   environment = {
-    VAULT_TOKEN = var.vault_root_token
-    VAULT_ADDR  = "http://localhost:8200"
+    REMOVE_VAULT_CLUSTER_ADDR = "${var.ip_version == 4 ? "${each.value.private_ip}" : "[${each.value.ipv6}]"}:${var.vault_cluster_addr_port}"
+    VAULT_TOKEN               = var.vault_root_token
+    VAULT_ADDR                = var.vault_addr
+    VAULT_INSTALL_DIR         = var.vault_install_dir
   }
 
-  content = templatefile("${path.module}/templates/raft-remove-peer.sh", {
-    remove_vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
-    vault_install_dir         = var.vault_install_dir
-    vault_local_binary_path   = "${var.vault_install_dir}/vault"
-  })
+  scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")]
 
   transport = {
     ssh = {
diff --git a/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh
new file mode 100644
index 000000000000..b6b3e53c8150
--- /dev/null
+++ b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+binpath=${VAULT_INSTALL_DIR}/vault
+node_addr=${REMOVE_VAULT_CLUSTER_ADDR}
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+retry() {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    exit=$?
+    wait=$((2 ** count))
+    count=$((count + 1))
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+      echo "retry $count"
+    else
+      return "$exit"
+    fi
+  done
+
+  return 0
+}
+
+remove_peer() {
+  if ! node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then
+    fail "failed to get node id of a non-voter node"
+  fi
+
+  $binpath operator raft remove-peer "$node_id"
+}
+
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+# Retry a few times because it can take some time for things to settle after autopilot upgrade
+retry 5 remove_peer
diff --git a/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh
deleted file mode 100644
index ab49f76ba820..000000000000
--- a/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-
-set -e
-
-binpath=${vault_install_dir}/vault
-
-node_addr=${remove_vault_cluster_addr}
-
-fail() {
-  echo "$1" 2>&1
-  return 1
-}
-
-retry() {
-  local retries=$1
-  shift
-  local count=0
-
-  until "$@"; do
-    exit=$?
-    wait=$((2 ** count))
-    count=$((count + 1))
-    if [ "$count" -lt "$retries" ]; then
-      sleep "$wait"
-      echo "retry $count"
-    else
-      return "$exit"
-    fi
-  done
-
-  return 0
-}
-
-remove_peer() {
-  node_id=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id')
-  if [ "$?"
!= "0" ];then - fail "failed to get node id of a non-voter node" - fi - - $binpath operator raft remove-peer "$node_id" -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# Retry a few times because it can take some time for things to settle after autopilot upgrade -retry 5 remove_peer diff --git a/enos/modules/vault_setup_dr_primary/main.tf b/enos/modules/vault_setup_dr_primary/main.tf new file mode 100644 index 000000000000..69e29e6d03a4 --- /dev/null +++ b/enos/modules/vault_setup_dr_primary/main.tf @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +// Enable DR replication on the primary. This will immediately clear all data in the secondary. +resource "enos_remote_exec" "enable_dr_replication" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/enable.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_setup_dr_primary/scripts/enable.sh b/enos/modules/vault_setup_dr_primary/scripts/enable.sh new file mode 100644 index 000000000000..b8c987bc0ded --- /dev/null +++ b/enos/modules/vault_setup_dr_primary/scripts/enable.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Activate the primary +$binpath write -f sys/replication/dr/primary/enable diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf index 75e998eb3f27..155ab20f4a69 100644 --- a/enos/modules/vault_setup_perf_primary/main.tf +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -1,33 +1,41 @@ # Copyright (c) HashiCorp, Inc. 
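As a sanity check after enable.sh runs, one could confirm the node now reports itself as a DR primary. This is only a sketch: it assumes a Vault Enterprise binary, that VAULT_ADDR and VAULT_TOKEN are exported, and that VAULT_INSTALL_DIR follows the same convention as the scripts above; sys/replication/dr/status is the documented status endpoint, and the jq filter is illustrative.

binpath="${VAULT_INSTALL_DIR:-/usr/local/bin}/vault" # assumed install path convention
if "$binpath" read -format=json sys/replication/dr/status | jq -e '.data.mode == "primary"' > /dev/null; then
  echo "DR primary replication is enabled"
else
  echo "DR primary replication is not enabled yet" 1>&2
fi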
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string - default = "8201" +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } } -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" } -variable "primary_leader_public_ip" { +variable "vault_addr" { type = string - description = "Vault primary cluster leader Public IP address" + description = "The local vault API listen address" } -variable "primary_leader_private_ip" { +variable "vault_install_dir" { type = string - description = "Vault primary cluster leader Private IP address" + description = "The directory where the Vault binary will be installed" } variable "vault_root_token" { @@ -37,16 +45,16 @@ variable "vault_root_token" { resource "enos_remote_exec" "configure_pr_primary" { environment = { - VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_ADDR = var.vault_addr VAULT_TOKEN = var.vault_root_token - vault_install_dir = var.vault_install_dir + VAULT_INSTALL_DIR = var.vault_install_dir } - scripts = ["${path.module}/scripts/configure-vault-pr-primary.sh"] + scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] transport = { ssh = { - host = var.primary_leader_public_ip + host = var.primary_leader_host.public_ip } } } diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh index d15699c4b966..10398b805256 100644 --- a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -1,11 +1,10 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 1>&2 @@ -14,17 +13,5 @@ fail() { test -x "$binpath" || fail "unable to locate vault binary at $binpath" -# Create superuser policy -$binpath policy write superuser -<&2 + return 1 +} + +[[ -z "$REPLICATION_TYPE" ]] && fail "REPLICATION_TYPE env variable has not been set" +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json + +replicationStatus() { + $binpath read "sys/replication/${REPLICATION_TYPE}/status" | jq .data +} + +isReady() { + # Find the leader private IP address + local status + if ! 
status=$(replicationStatus); then
+    return 1
+  fi
+
+  if ! jq -eMc '.state == "stream-wals"' &> /dev/null <<< "$status"; then
+    echo "${REPLICATION_TYPE} replication state is not yet stream-wals, got: $(jq '.state' <<< "$status")" 1>&2
+    return 1
+  fi
+
+  if ! jq -eMc '.mode == "secondary"' &> /dev/null <<< "$status"; then
+    echo "${REPLICATION_TYPE} replication mode is not yet secondary, got: $(jq '.mode' <<< "$status")" 1>&2
+    return 1
+  fi
+
+  if ! jq -eMc '.corrupted_merkle_tree == false' &> /dev/null <<< "$status"; then
+    echo "${REPLICATION_TYPE} replication merkle tree is corrupted" 1>&2
+    return 1
+  fi
+
+  echo "${REPLICATION_TYPE} replication secondary is ready for its followers to be unsealed!" 1>&2
+  return 0
+}
+
+begin_time=$(date +%s)
+end_time=$((begin_time + TIMEOUT_SECONDS))
+while [ "$(date +%s)" -lt "$end_time" ]; do
+  if isReady; then
+    exit 0
+  fi
+
+  sleep "$RETRY_INTERVAL"
+done
+
+fail "Timed out waiting for ${REPLICATION_TYPE} replication secondary to be ready: $(replicationStatus)"
diff --git a/enos/modules/vault_step_down/main.tf b/enos/modules/vault_step_down/main.tf
new file mode 100644
index 000000000000..4074969dee6d
--- /dev/null
+++ b/enos/modules/vault_step_down/main.tf
@@ -0,0 +1,50 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    enos = {
+      source = "registry.terraform.io/hashicorp-forge/enos"
+    }
+  }
+}
+
+variable "leader_host" {
+  type = object({
+    private_ip = string
+    public_ip  = string
+  })
+
+  description = "The vault cluster host expected to be the leader"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+resource "enos_remote_exec" "vault_operator_step_down" {
+  environment = {
+    VAULT_TOKEN       = var.vault_root_token
+    VAULT_ADDR        = var.vault_addr
+    VAULT_INSTALL_DIR = var.vault_install_dir
+  }
+
+  scripts = [abspath("${path.module}/scripts/operator-step-down.sh")]
+
+  transport = {
+    ssh = {
+      host = var.leader_host.public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_step_down/scripts/operator-step-down.sh b/enos/modules/vault_step_down/scripts/operator-step-down.sh
new file mode 100644
index 000000000000..07f2c38f8d19
--- /dev/null
+++ b/enos/modules/vault_step_down/scripts/operator-step-down.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -eou pipefail
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+"$binpath" operator step-down
diff --git a/enos/modules/vault_test_ui/main.tf b/enos/modules/vault_test_ui/main.tf
index 0e051bdf48cf..9fc16a7b62bd 100644
--- a/enos/modules/vault_test_ui/main.tf
+++ b/enos/modules/vault_test_ui/main.tf
@@ -1,10 +1,10 @@
 # Copyright (c) HashiCorp, Inc.
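The deadline-based polling loop above (also used by the lease-quota and billing checks later in this change) can be factored into a reusable helper. A minimal sketch; retry_until and its arguments are illustrative, not part of the modules:

retry_until() {
  local timeout_seconds=$1
  local interval_seconds=$2
  shift 2
  local end_time
  end_time=$(($(date +%s) + timeout_seconds))
  while [ "$(date +%s)" -lt "$end_time" ]; do
    if "$@"; then
      return 0
    fi
    sleep "$interval_seconds"
  done
  return 1
}

# Example usage: poll every 2 seconds for up to 60 seconds
retry_until 60 2 test -f /tmp/ready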
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/vault_test_ui/outputs.tf b/enos/modules/vault_test_ui/outputs.tf index 887d030a7899..ae4f926b3f93 100644 --- a/enos/modules/vault_test_ui/outputs.tf +++ b/enos/modules/vault_test_ui/outputs.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 output "ui_test_stderr" { value = var.ui_run_tests ? enos_local_exec.test_ui[0].stderr : "No std out tests where not run" diff --git a/enos/modules/vault_test_ui/scripts/test_ui.sh b/enos/modules/vault_test_ui/scripts/test_ui.sh index e7cf7e9564ed..9a98243a70e8 100755 --- a/enos/modules/vault_test_ui/scripts/test_ui.sh +++ b/enos/modules/vault_test_ui/scripts/test_ui.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 set -eux -o pipefail diff --git a/enos/modules/vault_test_ui/variables.tf b/enos/modules/vault_test_ui/variables.tf index c2db5c57b9bb..99625b29ec37 100644 --- a/enos/modules/vault_test_ui/variables.tf +++ b/enos/modules/vault_test_ui/variables.tf @@ -1,8 +1,8 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 variable "vault_addr" { - description = "The host address for the vault instance to test" + description = "The local vault API listen address" type = string } diff --git a/enos/modules/vault_unseal_nodes/main.tf b/enos/modules/vault_unseal_nodes/main.tf deleted file mode 100644 index b353fa8a58a8..000000000000 --- a/enos/modules/vault_unseal_nodes/main.tf +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - -# This module unseals the replication secondary follower nodes -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "follower_public_ips" { - type = list(string) - description = "Vault cluster follower Public IP addresses" -} - -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "vault_unseal_keys" {} - -locals { - followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) - vault_bin_path = "${var.vault_install_dir}/vault" -} - -# After replication is enabled the secondary follower nodes are expected to be sealed, -# so we wait for the secondary follower nodes to update the seal status -resource "enos_remote_exec" "wait_until_sealed" { - for_each = { - for idx, follower in local.followers : idx => follower - } - environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = ["${path.module}/scripts/wait-until-sealed.sh"] - - transport = { - ssh = { - host = element(var.follower_public_ips, each.key) - } - } -} - -# The follower nodes on secondary replication cluster incorrectly report -# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309), -# so we restart the followers to clear the status and to autounseal incase of awskms seal type -resource "enos_remote_exec" "restart_followers" { - depends_on = [enos_remote_exec.wait_until_sealed] - for_each = { - for idx, follower in local.followers : idx => follower - } - - inline = ["sudo systemctl restart vault"] - - transport = { - ssh = { - host = element(var.follower_public_ips, each.key) - } - } -} - -# We cannot use the vault_unseal resouce due to the known issue -# (https://hashicorp.atlassian.net/browse/VAULT-12311). 
We use a custom -# script to allow retry for unsealing the secondary followers -resource "enos_remote_exec" "unseal_followers" { - depends_on = [enos_remote_exec.restart_followers] - # The unseal keys are required only for seal_type shamir - for_each = { - for idx, follower in local.followers : idx => follower - if var.vault_seal_type == "shamir" - } - - environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_INSTALL_DIR = var.vault_install_dir - UNSEAL_KEYS = join(",", var.vault_unseal_keys) - } - - scripts = ["${path.module}/scripts/unseal-node.sh"] - - transport = { - ssh = { - host = element(var.follower_public_ips, each.key) - } - } -} - -# This is a second attempt needed to unseal the secondary followers -# using a custom script due to get past the known issue -# (https://hashicorp.atlassian.net/browse/VAULT-12311) -resource "enos_remote_exec" "unseal_followers_again" { - depends_on = [enos_remote_exec.unseal_followers] - for_each = { - for idx, follower in local.followers : idx => follower - if var.vault_seal_type == "shamir" - } - - environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_INSTALL_DIR = var.vault_install_dir - UNSEAL_KEYS = join(",", var.vault_unseal_keys) - } - - scripts = ["${path.module}/scripts/unseal-node.sh"] - - transport = { - ssh = { - host = element(var.follower_public_ips, each.key) - } - } -} diff --git a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh deleted file mode 100755 index b3f77de5041c..000000000000 --- a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -binpath=${VAULT_INSTALL_DIR}/vault - -IFS="," read -a keys <<< ${UNSEAL_KEYS} - -function fail() { - echo "$1" 1>&2 - exit 1 -} -count=0 -retries=5 -while :; do - for key in ${keys[@]}; do - - # Check the Vault seal status - seal_status=$($binpath status -format json | jq '.sealed') - - if [[ "$seal_status" == "true" ]]; then - echo "running unseal with $key count $count with retry $retry" >> /tmp/unseal_script.out - $binpath operator unseal $key > /dev/null 2>&1 - else - exit 0 - fi - done - - wait=$((1 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "failed to unseal node" - fi -done diff --git a/enos/modules/vault_unseal_replication_followers/main.tf b/enos/modules/vault_unseal_replication_followers/main.tf new file mode 100644 index 000000000000..59d34a7d4c02 --- /dev/null +++ b/enos/modules/vault_unseal_replication_followers/main.tf @@ -0,0 +1,129 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +# This module unseals the replication secondary follower nodes +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster hosts to unseal" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "vault_unseal_keys" {} + +locals { + vault_bin_path = "${var.vault_install_dir}/vault" +} + +# After replication is enabled the secondary follower nodes are expected to be sealed, +# so we wait for the secondary follower nodes to update the seal status +resource "enos_remote_exec" "wait_until_sealed" { + for_each = var.hosts + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-until-sealed.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# The follower nodes on secondary replication cluster incorrectly report +# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309), +# so we restart the followers to allow them to auto-unseal +resource "enos_remote_exec" "restart_followers" { + depends_on = [enos_remote_exec.wait_until_sealed] + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type != "shamir" + } + + inline = ["sudo systemctl restart vault"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# We cannot use the vault_unseal resouce due to the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311). 
We use a custom +# script to allow retry for unsealing the secondary followers +resource "enos_remote_exec" "unseal_followers" { + depends_on = [enos_remote_exec.restart_followers] + # The unseal keys are required only for seal_type shamir + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = [abspath("${path.module}/scripts/unseal-node.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# This is a second attempt needed to unseal the secondary followers +# using a custom script due to get past the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311) +resource "enos_remote_exec" "unseal_followers_again" { + depends_on = [enos_remote_exec.unseal_followers] + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = [abspath("${path.module}/scripts/unseal-node.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh b/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh new file mode 100755 index 000000000000..c6dafb01289f --- /dev/null +++ b/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +binpath=${VAULT_INSTALL_DIR}/vault + +IFS="," read -r -a keys <<< "${UNSEAL_KEYS}" + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +count=0 +retries=5 +while :; do + for key in "${keys[@]}"; do + + # Check the Vault seal status + seal_status=$($binpath status -format json | jq '.sealed') + + if [[ "$seal_status" == "true" ]]; then + echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out + "$binpath" operator unseal "$key" > /dev/null 2>&1 + else + exit 0 + fi + done + + wait=$((1 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "failed to unseal node" + fi +done diff --git a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh similarity index 88% rename from enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh rename to enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh index af935578781a..a50722865a44 100644 --- a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh +++ b/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh @@ -1,13 +1,12 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - +# SPDX-License-Identifier: BUSL-1.1 binpath=${VAULT_INSTALL_DIR}/vault function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } count=0 diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf index 5502212d5151..2d6d7d2f0426 100644 --- a/enos/modules/vault_upgrade/main.tf +++ b/enos/modules/vault_upgrade/main.tf @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
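For context, the unseal scripts above all hinge on one probe: vault status -format json exposes a boolean .sealed field. A standalone sketch of that check, with a hypothetical install path:

binpath=/opt/vault/bin/vault # hypothetical VAULT_INSTALL_DIR
if [[ "$("$binpath" status -format json | jq -r '.sealed')" == "true" ]]; then
  echo "node is sealed; unseal attempts are needed"
else
  echo "node is unsealed"
fi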
-# SPDX-License-Identifier: MPL-2.0
+# SPDX-License-Identifier: BUSL-1.1
 
 terraform {
   required_providers {
@@ -7,38 +7,35 @@ terraform {
       source = "hashicorp/aws"
     }
     enos = {
-      source = "app.terraform.io/hashicorp-qti/enos"
+      source  = "registry.terraform.io/hashicorp-forge/enos"
+      version = ">= 0.5.4"
     }
   }
 }
 
-variable "vault_api_addr" {
-  type        = string
-  description = "The API address of the Vault cluster"
-}
-
-variable "vault_install_dir" {
-  type        = string
-  description = "The directory where the Vault binary will be installed"
-}
-
-variable "vault_instance_count" {
-  type        = number
-  description = "How many vault instances are in the cluster"
-}
-
-variable "vault_instances" {
+variable "hosts" {
   type = map(object({
+    ipv6       = string
     private_ip = string
     public_ip  = string
   }))
   description = "The vault cluster instances that were created"
 }
 
-variable "vault_local_artifact_path" {
+
+variable "ip_version" {
+  type        = number
+  description = "The IP version used for the Vault TCP listener"
+
+  validation {
+    condition     = contains([4, 6], var.ip_version)
+    error_message = "The ip_version must be either 4 or 6"
+  }
+}
+
+variable "vault_addr" {
   type        = string
-  description = "The path to a locally built vault artifact to install"
-  default     = null
+  description = "The local vault API listen address"
 }
 
 variable "vault_artifactory_release" {
@@ -52,6 +49,22 @@ variable "vault_artifactory_release" {
   default = null
 }
 
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_local_artifact_path" {
+  type        = string
+  description = "The path to a locally built vault artifact to install"
+  default     = null
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
 variable "vault_seal_type" {
   type        = string
   description = "The Vault seal type"
@@ -64,19 +77,15 @@ variable "vault_unseal_keys" {
 }
 
 locals {
-  instances = {
-    for idx in range(var.vault_instance_count) : idx => {
-      public_ip  = values(var.vault_instances)[idx].public_ip
-      private_ip = values(var.vault_instances)[idx].private_ip
-    }
-  }
-  followers      = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)])
-  follower_ips   = compact(split(" ", enos_remote_exec.get_follower_public_ips.stdout))
   vault_bin_path = "${var.vault_install_dir}/vault"
 }
 
+// Upgrade the Vault artifact in-place. With zip bundles we must use the same path as the original
+// installation so that we can re-use the systemd unit that enos_vault_start created at
+// /etc/systemd/system/vault.service. The path does not matter for package types as the systemd
+// unit for the binary is included and will be installed.
 resource "enos_bundle_install" "upgrade_vault_binary" {
-  for_each = local.instances
+  for_each = var.hosts
 
   destination = var.vault_install_dir
   artifactory = var.vault_artifactory_release
@@ -89,75 +98,101 @@
   }
 }
 
-resource "enos_remote_exec" "get_leader_public_ip" {
+// We assume that our original Vault cluster used a zip bundle from releases.hashicorp.com and as
+// such enos_vault_start will have created a systemd unit for it at /etc/systemd/system/vault.service.
+// If we're upgrading to a package that contains its own systemd unit we'll need to remove the
+// old unit file so that when we restart vault we pick up the new unit that points to the updated
+// binary.
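The cleanup script referenced below keys off the artifact's file extension. The parameter expansion it relies on behaves like this (artifact names hypothetical):

ARTIFACT_NAME="vault_1.16.0_linux_amd64.zip" # hypothetical zip bundle
echo "${ARTIFACT_NAME##*.}"                  # prints: zip
ARTIFACT_NAME="vault-1.16.0-1.x86_64.rpm"    # hypothetical package
echo "${ARTIFACT_NAME##*.}"                  # prints: rpm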
+resource "enos_remote_exec" "maybe_remove_old_unit_file" { + for_each = var.hosts depends_on = [enos_bundle_install.upgrade_vault_binary] - content = templatefile("${path.module}/templates/get-leader-public-ip.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) + environment = { + ARTIFACT_NAME = enos_bundle_install.upgrade_vault_binary[each.key].name + } + + scripts = [abspath("${path.module}/scripts/maybe-remove-old-unit-file.sh")] transport = { ssh = { - host = local.instances[0].public_ip + host = each.value.public_ip } } } -resource "enos_remote_exec" "get_follower_public_ips" { - depends_on = [enos_bundle_install.upgrade_vault_binary] +module "get_ip_addresses" { + source = "../vault_get_cluster_ips" - content = templatefile("${path.module}/templates/get-follower-public-ips.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) + depends_on = [enos_remote_exec.maybe_remove_old_unit_file] - transport = { - ssh = { - host = local.instances[0].public_ip - } - } + hosts = var.hosts + ip_version = var.ip_version + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir + vault_root_token = var.vault_root_token } resource "enos_remote_exec" "restart_followers" { - for_each = local.followers - depends_on = [enos_remote_exec.get_follower_public_ips] + for_each = module.get_ip_addresses.follower_hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } - content = file("${path.module}/templates/restart-vault.sh") + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] transport = { ssh = { - host = trimspace(local.follower_ips[tonumber(each.key)]) + host = each.value.public_ip } } } resource "enos_vault_unseal" "followers" { - depends_on = [enos_remote_exec.restart_followers] for_each = { - for idx, follower in local.followers : idx => follower + for idx, host in module.get_ip_addresses.follower_hosts : idx => host if var.vault_seal_type == "shamir" } + depends_on = [enos_remote_exec.restart_followers] + bin_path = local.vault_bin_path - vault_addr = var.vault_api_addr + vault_addr = var.vault_addr seal_type = var.vault_seal_type unseal_keys = var.vault_unseal_keys transport = { ssh = { - host = trimspace(local.follower_ips[each.key]) + host = each.value.public_ip } } } +module "wait_for_followers_unsealed" { + source = "../vault_wait_for_cluster_unsealed" + depends_on = [ + enos_remote_exec.restart_followers, + enos_vault_unseal.followers, + ] + + hosts = module.get_ip_addresses.follower_hosts + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir +} + resource "enos_remote_exec" "restart_leader" { - depends_on = [enos_vault_unseal.followers] + depends_on = [module.wait_for_followers_unsealed] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } - content = file("${path.module}/templates/restart-vault.sh") + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] transport = { ssh = { - host = trimspace(enos_remote_exec.get_leader_public_ip.stdout) + host = module.get_ip_addresses.leader_public_ip } } } @@ -167,13 +202,13 @@ resource "enos_vault_unseal" "leader" { depends_on = [enos_remote_exec.restart_leader] bin_path = local.vault_bin_path - vault_addr = var.vault_api_addr + vault_addr = var.vault_addr seal_type = var.vault_seal_type unseal_keys = var.vault_unseal_keys transport = { ssh = { - host = 
trimspace(enos_remote_exec.get_leader_public_ip.stdout)
+      host = module.get_ip_addresses.leader_public_ip
     }
   }
 }
diff --git a/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh b/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh
new file mode 100644
index 000000000000..1d584d76d7aa
--- /dev/null
+++ b/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$ARTIFACT_NAME" ]] && fail "ARTIFACT_NAME env variable has not been set"
+
+if [ "${ARTIFACT_NAME##*.}" == "zip" ]; then
+  echo "Skipped removing unit file because new artifact is a zip bundle"
+  exit 0
+fi
+
+# Get the unit file for the vault.service that is running. If it's not in /etc/systemd then it
+# should be a package provided unit file so we don't need to delete anything.
+#
+# Note that we use -p instead of -P so that we support ancient amzn 2 systemctl.
+if ! unit_path=$(systemctl show -p FragmentPath vault | cut -d = -f2 2>&1); then
+  echo "Skipped removing unit file because an existing path could not be found: $unit_path"
+  exit 0
+fi
+
+if [[ "$unit_path" == *"/etc/systemd"* ]]; then
+  if [ -f "$unit_path" ]; then
+    echo "Removing old systemd unit file: $unit_path"
+    if ! out=$(sudo rm "$unit_path" 2>&1); then
+      fail "Failed to remove old unit file: $unit_path: $out"
+    fi
+  else
+    echo "Skipped removing old systemd unit file because it no longer exists: $unit_path"
+  fi
+else
+  echo "Skipped removing old systemd unit file because it was not created in /etc/systemd/: $unit_path"
+fi
diff --git a/enos/modules/vault_upgrade/scripts/restart-vault.sh b/enos/modules/vault_upgrade/scripts/restart-vault.sh
new file mode 100644
index 000000000000..ba067fc88ce1
--- /dev/null
+++ b/enos/modules/vault_upgrade/scripts/restart-vault.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+if ! out=$(sudo systemctl stop vault 2>&1); then
+  fail "failed to stop vault: $out: $(sudo systemctl status vault)"
+fi
+
+if ! out=$(sudo systemctl daemon-reload 2>&1); then
+  fail "failed to daemon-reload systemd: $out"
+fi
+
+if ! out=$(sudo systemctl start vault 2>&1); then
+  fail "failed to start vault: $out: $(sudo systemctl status vault)"
+fi
+
+count=0
+retries=5
+while :; do
+  # Check the Vault seal status
+  status=$($binpath status)
+  code=$?
+
+  if [ $code == 0 ] || [ $code == 2 ]; then
+    # 0 is unsealed and 2 is running but sealed
+    echo "$status"
+    exit 0
+  fi
+
+  printf "Waiting for Vault cluster to be ready: status code: %s, status:\n%s\n" "$code" "$status" 1>&2
+
+  wait=$((3 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "Timed out waiting for Vault node to be ready after restart"
+  fi
+done
diff --git a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh b/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh
deleted file mode 100644
index 127be64499cf..000000000000
--- a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-# Copyright (c) HashiCorp, Inc.
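The restart script above leans on the documented vault status exit codes: 0 means running and unsealed, 2 means running but sealed, and anything else means the node is not ready. A minimal sketch with an assumed install path:

binpath=/opt/vault/bin/vault # hypothetical VAULT_INSTALL_DIR
"$binpath" status > /dev/null 2>&1
case $? in
  0) echo "running and unsealed" ;;
  2) echo "running but sealed" ;;
  *) echo "not ready" ;;
esac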
-# SPDX-License-Identifier: MPL-2.0 - - -set -e - -binpath=${vault_install_dir}/vault -export VAULT_ADDR="http://localhost:8200" - -instances='${vault_instances}' - -# Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - -# Get the public ip addresses of the followers -follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") - -echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' ' diff --git a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh b/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh deleted file mode 100644 index d64a6c16ed36..000000000000 --- a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -binpath=${vault_install_dir}/vault -export VAULT_ADDR="http://localhost:8200" - -instances='${vault_instances}' - -# Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - -# Get the public ip address of the leader -leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances") -echo "$leader_public" | sed 's/\"//g' diff --git a/enos/modules/vault_upgrade/templates/restart-vault.sh b/enos/modules/vault_upgrade/templates/restart-vault.sh deleted file mode 100644 index fc6b007a3509..000000000000 --- a/enos/modules/vault_upgrade/templates/restart-vault.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -eux - -sudo systemctl restart vault diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf index 850ea5366c1e..68e0484f2e67 100644 --- a/enos/modules/vault_verify_agent_output/main.tf +++ b/enos/modules/vault_verify_agent_output/main.tf @@ -1,17 +1,21 @@ # Copyright (c) HashiCorp, Inc. 
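The deleted templates above derived a bare IP from the leader's API address with jq string trimming; in isolation the filter behaves like this:

echo '{"leader_address":"http://10.0.1.22:8200"}' |
  jq -r '.leader_address | rtrimstr(":8200") | ltrimstr("http://")'
# prints: 10.0.1.22

Without -r, jq keeps the JSON quotes around the result, which is why the old templates needed a trailing sed to strip them.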
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_agent_template_destination" { - type = string - description = "The destination of the template rendered by Agent" +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" } variable "vault_agent_expected_output" { @@ -19,38 +23,22 @@ variable "vault_agent_expected_output" { description = "The output that's expected in the rendered template at vault_agent_template_destination" } -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" +variable "vault_agent_template_destination" { + type = string + description = "The destination of the template rendered by Agent" } -locals { - vault_instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } +resource "enos_remote_exec" "verify_vault_agent_output" { + environment = { + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination + VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output } -} -resource "enos_remote_exec" "verify_vault_agent_output" { - content = templatefile("${path.module}/templates/verify-vault-agent-output.sh", { - vault_agent_template_destination = var.vault_agent_template_destination - vault_agent_expected_output = var.vault_agent_expected_output - vault_instances = jsonencode(local.vault_instances) - }) + scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")] transport = { ssh = { - host = local.vault_instances[0].public_ip + host = var.hosts[0].public_ip } } } diff --git a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh new file mode 100644 index 000000000000..7924e17c9545 --- /dev/null +++ b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}") +if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then + fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'" +fi diff --git a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh deleted file mode 100644 index cd25a01c8d02..000000000000 --- a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -actual_output=$(cat ${vault_agent_template_destination}) -if [[ "$actual_output" != "${vault_agent_expected_output}" ]]; then - fail "expected '${vault_agent_expected_output}' to be the Agent output, but got: '$actual_output'" -fi diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf index b1d050af2959..236acf7564f4 100644 --- a/enos/modules/vault_verify_autopilot/main.tf +++ b/enos/modules/vault_verify_autopilot/main.tf @@ -1,35 +1,31 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { +variable "hosts" { type = map(object({ + ipv6 = string private_ip = string public_ip = string })) description = "The vault cluster instances that were created" } -variable "vault_root_token" { +variable "vault_addr" { type = string - description = "The vault root token" + description = "The local vault API listen address" +} + +variable "vault_autopilot_upgrade_status" { + type = string + description = "The autopilot upgrade expected status" } variable "vault_autopilot_upgrade_version" { @@ -37,29 +33,28 @@ variable "vault_autopilot_upgrade_version" { description = "The Vault upgraded version" } -variable "vault_autopilot_upgrade_status" { +variable "vault_install_dir" { type = string - description = "The autopilot upgrade expected status" + description = "The directory where the Vault binary will be installed" } -locals { - public_ips = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } +variable "vault_root_token" { + type = string + description = "The vault root token" } resource "enos_remote_exec" "smoke-verify-autopilot" { - for_each = local.public_ips + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status, + VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version, + } - content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status, - vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version, - }) + scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh new file mode 100755 index 000000000000..eb0a1a1baf65 --- /dev/null +++ b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. 
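The environment blocks above pair with a guard idiom that every script in this change opens with; it is easy to exercise standalone (the variable name here is illustrative, not one of the modules' inputs):

fail() {
  echo "$1" 1>&2
  exit 1
}

[[ -z "${SOME_REQUIRED_VAR:-}" ]] && fail "SOME_REQUIRED_VAR env variable has not been set"
echo "SOME_REQUIRED_VAR is set to ${SOME_REQUIRED_VAR}"

# Example invocation: SOME_REQUIRED_VAR=value bash guard-demo.sh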
+# SPDX-License-Identifier: BUSL-1.1 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=8 +while :; do + state=$($binpath read -format=json sys/storage/raft/autopilot/state) + status="$(jq -r '.data.upgrade_info.status' <<< "$state")" + target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" + + if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + sleep "$wait" + else + echo "$state" + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + fail "Autopilot did not get into the correct status" + fi +done diff --git a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh deleted file mode 100755 index 129288a3cd21..000000000000 --- a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -token="${vault_token}" -autopilot_version="${vault_autopilot_upgrade_version}" -autopilot_status="${vault_autopilot_upgrade_status}" - -export VAULT_ADDR="http://localhost:8200" -export VAULT_TOKEN="$token" - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=7 -while :; do - state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state) - status="$(jq -r '.data.upgrade_info.status' <<< "$state")" - target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" - - if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then - exit 0 - fi - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "$state" - sleep "$wait" - else - fail "Autopilot did not get into the correct status" - fi -done diff --git a/enos/modules/vault_verify_billing_start_date/main.tf b/enos/modules/vault_verify_billing_start_date/main.tf new file mode 100644 index 000000000000..0d72fa70005f --- /dev/null +++ b/enos/modules/vault_verify_billing_start_date/main.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "vault_verify_billing_start_date" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify-billing-start.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh b/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh new file mode 100644 index 000000000000..c4334cc6af98 --- /dev/null +++ b/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep 30 + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +enable_debugging() { + echo "Turning debugging on.." + export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' + set -x +} + +get_billing_start_date() { + "$binpath" read -format=json sys/internal/counters/config | jq -r ".data.billing_start_timestamp" +} + +get_target_platform() { + uname -s +} + +# Given the date as ARGV 1, return 1 year as a unix date +verify_date_is_in_current_year() { + local billing_start_unix + local one_year_ago_unix + + # Verify if the billing start date is in the latest billing year + case $(get_target_platform) in + Linux) + billing_start_unix=$(TZ=UTC date -d "$1" +'%s') # For "now", use $(date +'%s') + one_year_ago_unix=$(TZ=UTC date -d "1 year ago" +'%s') + ;; + Darwin) + one_year_ago_unix=$(TZ=UTC date -v -1y +'%s') + billing_start_unix=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "${1}" +'%s') + ;; + *) + fail "Unsupported target host operating system: $(get_target_platform)" 1>&2 + ;; + esac + + if [ "$billing_start_unix" -gt "$one_year_ago_unix" ]; then + echo "Billing start date $1 has successfully rolled over to current year." 
+    return 0
+  else
+    echo "Billing start date $1 did not roll over to current year" 1>&2
+    return 1
+  fi
+}
+
+verify_billing_start_date() {
+  local billing_start
+  billing_start=$(get_billing_start_date)
+
+  if verify_date_is_in_current_year "$billing_start"; then
+    return 0
+  fi
+
+  local version
+  local vault_ps
+  version=$("$binpath" status -format=json | jq .version)
+  vault_ps=$(pgrep vault | xargs)
+  echo "On version $version, pid $vault_ps, addr $VAULT_ADDR, Billing start date $billing_start did not roll over to current year" 1>&2
+  return 1
+}
+
+enable_debugging
+
+retry 10 verify_billing_start_date
diff --git a/enos/modules/vault_verify_default_lcq/main.tf b/enos/modules/vault_verify_default_lcq/main.tf
new file mode 100644
index 000000000000..bb05726c47b0
--- /dev/null
+++ b/enos/modules/vault_verify_default_lcq/main.tf
@@ -0,0 +1,66 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    enos = {
+      source = "registry.terraform.io/hashicorp-forge/enos"
+    }
+  }
+}
+
+variable "hosts" {
+  type = map(object({
+    ipv6       = string
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The vault cluster instances that were created"
+}
+
+variable "retry_interval" {
+  type        = number
+  description = "How many seconds to wait between each retry"
+  default     = 2
+}
+
+variable "timeout" {
+  type        = number
+  description = "The max number of seconds to wait before timing out"
+  default     = 60
+}
+
+variable "vault_addr" {
+  type        = string
+  description = "The local vault API listen address"
+}
+
+variable "vault_autopilot_default_max_leases" {
+  type        = string
+  description = "The autopilot upgrade expected max_leases"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+resource "enos_remote_exec" "smoke_verify_default_lcq" {
+  for_each = var.hosts
+
+  environment = {
+    RETRY_INTERVAL  = var.retry_interval
+    TIMEOUT_SECONDS = var.timeout
+    VAULT_ADDR      = var.vault_addr
+    VAULT_TOKEN     = var.vault_root_token
+    DEFAULT_LCQ     = var.vault_autopilot_default_max_leases
+  }
+
+  scripts = [abspath("${path.module}/scripts/smoke-verify-default-lcq.sh")]
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh
new file mode 100755
index 000000000000..57a943654157
--- /dev/null
+++ b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+function fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+# Exit early if we haven't been given an expected DEFAULT_LCQ
+[[ -z "$DEFAULT_LCQ" ]] && exit 0
+
+[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
+[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+getMaxLeases() {
+  curl --request GET --header "X-Vault-Token: $VAULT_TOKEN" \
+    "$VAULT_ADDR/v1/sys/quotas/lease-count/default" | jq '.data.max_leases // empty'
+}
+
+waitForMaxLeases() {
+  local max_leases
+  if ! 
max_leases=$(getMaxLeases); then + echo "failed getting /v1/sys/quotas/lease-count/default data" 1>&2 + return 1 + fi + + if [[ "$max_leases" == "$DEFAULT_LCQ" ]]; then + echo "$max_leases" + return 0 + else + echo "Expected Default LCQ $DEFAULT_LCQ but got $max_leases" + return 1 + fi +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForMaxLeases; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for Default LCQ verification to complete. Data:\n\t$(getMaxLeases)" diff --git a/enos/modules/vault_verify_dr_replication/main.tf b/enos/modules/vault_verify_dr_replication/main.tf new file mode 100644 index 000000000000..f7f99fdedec0 --- /dev/null +++ b/enos/modules/vault_verify_dr_replication/main.tf @@ -0,0 +1,117 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" + default = null +} + +locals { + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + secondary_leader_addr = var.ip_version == 6 ? 
var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) +} + +resource "enos_remote_exec" "verify_replication_status_on_primary" { + environment = { + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +resource "enos_remote_exec" "verify_replication_status_on_secondary" { + environment = { + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} + +output "primary_replication_status" { + value = local.primary_replication_status +} + +output "known_primary_cluster_addrs" { + value = local.secondary_replication_status.data.known_primary_cluster_addrs +} + +output "secondary_replication_status" { + value = local.secondary_replication_status +} + +output "primary_replication_data_secondaries" { + value = local.primary_replication_status.data.secondaries +} + +output "secondary_replication_data_primaries" { + value = local.secondary_replication_status.data.primaries +} diff --git a/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh new file mode 100644 index 000000000000..f01a9cd28162 --- /dev/null +++ b/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
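Editor's note: the script that follows branches on `.data.mode`: a primary must be in the `running` state with its first secondary connected, while a secondary must be streaming WALs with its first primary connected. A condensed sketch of those checks, assuming an authenticated `vault` CLI and `jq`; the one-liner form here is illustrative, not the module's code:

```
#!/usr/bin/env bash
# Condensed sketch of the DR status checks performed by the script below.
dr_status=$(vault read -format=json sys/replication/dr/status)
mode=$(jq -r '.data.mode' <<< "$dr_status")
if [ "$mode" = "primary" ]; then
  jq -e '.data.state == "running" and .data.secondaries[0].connection_status == "connected"' \
    <<< "$dr_status" > /dev/null || echo "primary not ready" 1>&2
else
  jq -e '.data.state == "stream-wals" and .data.primaries[0].connection_status == "connected"' \
    <<< "$dr_status" > /dev/null || echo "secondary not ready" 1>&2
fi
```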
+# SPDX-License-Identifier: BUSL-1.1
+
+# This script waits for the replication status to be established,
+# then verifies the DR replication between the primary and
+# secondary clusters.
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
+[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set"
+[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+retry() {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    wait=$((2 ** count))
+    count=$((count + 1))
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+    else
+      fail "$($binpath read -format=json sys/replication/dr/status)"
+    fi
+  done
+}
+
+check_dr_status() {
+  dr_status=$($binpath read -format=json sys/replication/dr/status)
+  cluster_state=$(jq -r '.data.state' <<< "$dr_status")
+  connection_mode=$(jq -r '.data.mode' <<< "$dr_status")
+
+  if [[ "$cluster_state" == 'idle' ]]; then
+    echo "replication cluster state is idle" 1>&2
+    return 1
+  fi
+
+  if [[ "$connection_mode" == "primary" ]]; then
+    connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$dr_status")
+    if [[ "$connection_status" == 'disconnected' ]]; then
+      echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2
+      return 1
+    fi
+    # Confirm we are in a "running" state for the primary
+    if [[ "$cluster_state" != "running" ]]; then
+      echo "replication cluster primary state is not running" 1>&2
+      return 1
+    fi
+  else
+    connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$dr_status")
+    if [[ "$connection_status" == 'disconnected' ]]; then
+      echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2
+      return 1
+    fi
+    # Confirm we are in a "stream-wals" state for the secondary
+    if [[ "$cluster_state" != "stream-wals" ]]; then
+      echo "replication cluster secondary state is not stream-wals" 1>&2
+      return 1
+    fi
+    known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$dr_status")
+    if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then
+      echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2
+      return 1
+    fi
+  fi
+
+  echo "$dr_status"
+  return 0
+}
+
+if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then
+  fail "unsupported IP_VERSION: $IP_VERSION"
+fi
+
+# Retry for a while because it can take some time for replication to sync
+retry 10 check_dr_status
diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf
index 9d6b0c0056ef..f7f99fdedec0 100644
--- a/enos/modules/vault_verify_performance_replication/main.tf
+++ b/enos/modules/vault_verify_performance_replication/main.tf
@@ -1,43 +1,50 @@
 # Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string - default = "8201" -} +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } } -variable "primary_leader_public_ip" { - type = string - description = "Vault primary cluster leader Public IP address" +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" } -variable "primary_leader_private_ip" { - type = string - description = "Vault primary cluster leader Private IP address" +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" } -variable "secondary_leader_public_ip" { +variable "vault_addr" { type = string - description = "Vault secondary cluster leader Public IP address" + description = "The local vault API listen address" } -variable "secondary_leader_private_ip" { +variable "vault_install_dir" { type = string - description = "Vault secondary cluster leader Private IP address" + description = "The directory where the Vault binary will be installed" } variable "wrapping_token" { @@ -47,97 +54,64 @@ variable "wrapping_token" { } locals { - primary_replication_status = jsondecode(enos_remote_exec.replication_status_on_primary.stdout) - secondary_replication_status = jsondecode(enos_remote_exec.replication_status_on_secondary.stdout) + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + secondary_leader_addr = var.ip_version == 6 ? var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) } -resource "enos_remote_exec" "replication_status_on_primary" { +resource "enos_remote_exec" "verify_replication_status_on_primary" { environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_INSTALL_DIR = var.vault_install_dir - REPLICATION_MODE = "primary" + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir } - scripts = ["${path.module}/scripts/get-replication-status.sh"] + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] transport = { ssh = { - host = var.primary_leader_public_ip + host = var.primary_leader_host.public_ip } } } -output "primary_replication_status" { - value = local.primary_replication_status - - precondition { - condition = local.primary_replication_status.data.mode == "primary" && local.primary_replication_status.data.state != "idle" - error_message = "Vault primary cluster mode must be \"primary\" and state must not be \"idle\"." 
- } -} - -resource "enos_remote_exec" "replication_status_on_secondary" { +resource "enos_remote_exec" "verify_replication_status_on_secondary" { environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_INSTALL_DIR = var.vault_install_dir - REPLICATION_MODE = "secondary" + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir } - scripts = ["${path.module}/scripts/get-replication-status.sh"] + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] transport = { ssh = { - host = var.secondary_leader_public_ip + host = var.secondary_leader_host.public_ip } } } +output "primary_replication_status" { + value = local.primary_replication_status +} + output "known_primary_cluster_addrs" { value = local.secondary_replication_status.data.known_primary_cluster_addrs - - precondition { - condition = contains(local.secondary_replication_status.data.known_primary_cluster_addrs, "https://${var.primary_leader_private_ip}:8201") - error_message = "Vault secondary cluster known_primary_cluster_addrs must include ${var.primary_leader_private_ip}." - } } output "secondary_replication_status" { value = local.secondary_replication_status - - precondition { - condition = local.secondary_replication_status.data.mode == "secondary" && local.secondary_replication_status.data.state != "idle" - error_message = "Vault secondary cluster mode must be \"secondary\" and state must not be \"idle\"." - } } output "primary_replication_data_secondaries" { value = local.primary_replication_status.data.secondaries - - # The secondaries connection_status should be "connected" - precondition { - condition = local.primary_replication_status.data.secondaries[0].connection_status == "connected" - error_message = "connection status to primaries must be \"connected\"." - } - - # The secondaries cluster address must have the secondary leader address - precondition { - condition = local.primary_replication_status.data.secondaries[0].cluster_address == "https://${var.secondary_leader_private_ip}:8201" - error_message = "Vault secondaries cluster_address must be with ${var.secondary_leader_private_ip}." - } } output "secondary_replication_data_primaries" { value = local.secondary_replication_status.data.primaries - - # The primaries connection_status should be "connected" - precondition { - condition = local.secondary_replication_status.data.primaries[0].connection_status == "connected" - error_message = "connection status to primaries must be \"connected\"." - } - - # The primaries cluster address must have the primary leader address - precondition { - condition = local.secondary_replication_status.data.primaries[0].cluster_address == "https://${var.primary_leader_private_ip}:8201" - error_message = "Vault primaries cluster_address must be ${var.primary_leader_private_ip}." - } } diff --git a/enos/modules/vault_verify_performance_replication/scripts/get-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/get-replication-status.sh deleted file mode 100644 index 6b9930a838e8..000000000000 --- a/enos/modules/vault_verify_performance_replication/scripts/get-replication-status.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 - - -# This script waits for the replication status to be established -# But the replication validations are done by Terraform so this -# script should always exit success - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((10 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - echo $pr_status - return 0 - fi - done - - echo $pr_status - return 0 -} - -test -x "$binpath" || exit 1 - -check_pr_status() { - pr_status=$($binpath read -format=json sys/replication/performance/status) - cluster_state=$(echo $pr_status | jq -r '.data.state') - - if [[ "${REPLICATION_MODE}" == "primary" ]]; then - connection_status=$(echo $pr_status | jq -r '.data.secondaries[0].connection_status') - else - connection_status=$(echo $pr_status | jq -r '.data.primaries[0].connection_status') - fi - - if [[ "$connection_status" == 'disconnected' ]] || [[ "$cluster_state" == 'idle' ]]; then - return 1 - fi -} - -# Retry a few times because it can take some time for replication to sync -retry 5 check_pr_status diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh new file mode 100644 index 000000000000..57b1b436b75d --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This script waits for the replication status to be established +# then verifies the performance replication between primary and +# secondary clusters + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set" +[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "$($binpath read -format=json sys/replication/performance/status)" + fi + done +} + +check_pr_status() { + pr_status=$($binpath read -format=json sys/replication/performance/status) + cluster_state=$(jq -r '.data.state' <<< "$pr_status") + connection_mode=$(jq -r '.data.mode' <<< "$pr_status") + + if [[ "$cluster_state" == 'idle' ]]; then + echo "replication cluster state is idle" 1>&2 + return 1 + fi + + if [[ "$connection_mode" == "primary" ]]; then + connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$pr_status") + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 + return 1 + fi + if [ "$IP_VERSION" == 4 ]; then + secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status") + else + secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status") 
+ fi + if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_ADDR" ]]; then + echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_ADDR, got: $secondary_cluster_addr" 1>&2 + return 1 + fi + else + connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$pr_status") + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 + return 1 + fi + if [ "$IP_VERSION" == 4 ]; then + primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status") + else + primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status") + fi + if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_ADDR" ]]; then + echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_ADDR, got: $primary_cluster_addr" 1>&2 + return 1 + fi + known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$pr_status") + if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then + echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 + return 1 + fi + fi + + echo "$pr_status" + return 0 +} + +if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then + fail "unsupported IP_VERSION: $IP_VERSION" +fi + +# Retry for a while because it can take some time for replication to sync +retry 10 check_pr_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf index 44df4496ab9e..826b00b54c8a 100644 --- a/enos/modules/vault_verify_raft_auto_join_voter/main.tf +++ b/enos/modules/vault_verify_raft_auto_join_voter/main.tf @@ -1,18 +1,41 @@ # Copyright (c) HashiCorp, Inc. 
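Editor's note: Vault reports `cluster_address` as a URL such as `https://10.0.1.2:8201` or, for IPv6, `https://[fd00::2]:8201`, so the script above uses jq's `scan()` to pull out just the host before comparing it to the expected leader address. A standalone sketch; note that the module's IPv4 pattern leaves its dots unescaped (so they match any character), which is tolerated there because the input is always an address, while the version below escapes them:

```
#!/usr/bin/env bash
# Sketch: extract the host from a cluster_address URL with jq's scan().
# With no capture group scan() returns the match itself; with one, an
# array of captures, hence the trailing .[0] in the IPv6 case.
addr4='{"cluster_address":"https://10.0.1.2:8201"}'
jq -r '.cluster_address | scan("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")' <<< "$addr4" # 10.0.1.2

addr6='{"cluster_address":"https://[fd00:1:2::3]:8201"}'
jq -r '.cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$addr6" # fd00:1:2::3
```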
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + variable "vault_cluster_addr_port" { description = "The Raft cluster address port" type = string - default = "8201" } variable "vault_install_dir" { @@ -20,42 +43,30 @@ variable "vault_install_dir" { description = "The directory where the Vault binary will be installed" } -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - variable "vault_root_token" { type = string description = "The vault root token" } locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } + cluster_addrs = { + 4 : { for k, v in var.hosts : k => "${v.private_ip}:${var.vault_cluster_addr_port}" }, + 6 : { for k, v in var.hosts : k => "[${v.ipv6}]:${var.vault_cluster_addr_port}" }, } } resource "enos_remote_exec" "verify_raft_auto_join_voter" { - for_each = local.instances + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_CLUSTER_ADDR = local.cluster_addrs[var.ip_version][each.key] + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } - content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token - }) + scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh new file mode 100644 index 000000000000..c20aade5d353 --- /dev/null +++ b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 2>&1 + return 1 +} + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? 
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + echo "retry $count" + else + return "$exit" + fi + done + + return 0 +} + +check_voter_status() { + voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected') + + if [[ "$voter_status" != 'true' ]]; then + fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')" + fi +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# Retry a few times because it can take some time for things to settle after +# all the nodes are unsealed +retry 10 check_voter_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh deleted file mode 100644 index 3187ac69fbf0..000000000000 --- a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 2>&1 - return 1 -} - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - echo "retry $count" - else - return "$exit" - fi - done - - return 0 -} - -check_voter_status() { - voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected') - - if [[ "$voter_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')" - fi -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# Retry a few times because it can take some time for things to settle after -# all the nodes are unsealed -retry 5 check_voter_status diff --git a/enos/modules/vault_verify_read_data/main.tf b/enos/modules/vault_verify_read_data/main.tf deleted file mode 100644 index 853c9cbc3c2f..000000000000 --- a/enos/modules/vault_verify_read_data/main.tf +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
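Editor's note: the voter check above hinges on one jq filter: select this node's entry in `.data.config.servers` by its cluster address and assert `.voter == true`. The check in isolation, assuming an authenticated `vault` CLI; the address value is illustrative:

```
#!/usr/bin/env bash
# Sketch of the raft voter probe; VAULT_CLUSTER_ADDR is supplied by the
# module in practice, the value here is illustrative.
VAULT_CLUSTER_ADDR="10.0.1.2:8201"
vault operator raft list-peers -format json |
  jq -e --arg addr "$VAULT_CLUSTER_ADDR" \
    '.data.config.servers[] | select(.address == $addr) | .voter == true' > /dev/null \
  || echo "node is not (yet) a raft voter" 1>&2
```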
-# SPDX-License-Identifier: MPL-2.0 - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "node_public_ips" { - type = list(string) - description = "Vault cluster node Public IP address" -} - -locals { - followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) - vault_bin_path = "${var.vault_install_dir}/vault" -} - -resource "enos_remote_exec" "verify_kv_on_node" { - for_each = { - for idx, follower in local.followers : idx => follower - } - environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = ["${path.module}/scripts/verify-data.sh"] - - transport = { - ssh = { - host = element(var.node_public_ips, each.key) - } - } -} diff --git a/enos/modules/vault_verify_read_data/scripts/verify-data.sh b/enos/modules/vault_verify_read_data/scripts/verify-data.sh deleted file mode 100644 index 5c095c58caf8..000000000000 --- a/enos/modules/vault_verify_read_data/scripts/verify-data.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -function retry { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - return "$exit" - fi - done - - return 0 -} - -function fail { - echo "$1" 1>&2 - exit 1 -} - -binpath=${VAULT_INSTALL_DIR}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# To keep the authentication method and module verification consistent between all -# Enos scenarios we authenticate using testuser created by vault_verify_write_data module -retry 5 $binpath login -method=userpass username=testuser password=passuser1 -retry 5 $binpath kv get secret/test diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf index fbb360a38da4..f9377d87c144 100644 --- a/enos/modules/vault_verify_replication/main.tf +++ b/enos/modules/vault_verify_replication/main.tf @@ -1,30 +1,44 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_edition" { + type = string + description = "The vault product edition" + default = null } resource "enos_remote_exec" "smoke-verify-replication" { - for_each = local.instances + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_EDITION = var.vault_edition + } - content = templatefile("${path.module}/templates/smoke-verify-replication.sh", { - vault_edition = var.vault_edition - }) + scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh new file mode 100644 index 000000000000..72ecbd2521d5 --- /dev/null +++ b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set" + +# Replication status endpoint should have data.mode disabled for CE release +status=$(curl "${VAULT_ADDR}/v1/sys/replication/status") +if [ "$VAULT_EDITION" == "ce" ]; then + if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then + fail "replication data mode is not disabled for CE release!" + fi +else + if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then + fail "DR replication should be available for an ENT release!" + fi + if [ "$(jq -r '.data.performance' <<< "$status")" == "" ]; then + fail "Performance replication should be available for an ENT release!" + fi +fi diff --git a/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh deleted file mode 100644 index 1ef6207a37eb..000000000000 --- a/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -# The Vault replication smoke test, documented in -# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 - -set -e - -edition=${vault_edition} - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -# Replication status endpoint should have data.mode disabled for OSS release -status=$(curl -s http://localhost:8200/v1/sys/replication/status) -if [ "$edition" == "oss" ]; then - if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then - fail "replication data mode is not disabled for OSS release!" 
- fi -else - if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then - fail "DR replication should be available for an ENT release!" - fi - if [ "$(jq -r '.data.performance' <<< "$status")" == "" ]; then - fail "Performance replication should be available for an ENT release!" - fi -fi diff --git a/enos/modules/vault_verify_replication/variables.tf b/enos/modules/vault_verify_replication/variables.tf deleted file mode 100644 index 26ac75c91291..000000000000 --- a/enos/modules/vault_verify_replication/variables.tf +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -variable "vault_edition" { - type = string - description = "The vault product edition" - default = null -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf index 32986072cba7..61d7361efa63 100644 --- a/enos/modules/vault_verify_ui/main.tf +++ b/enos/modules/vault_verify_ui/main.tf @@ -1,30 +1,37 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" } resource "enos_remote_exec" "smoke-verify-ui" { - for_each = local.instances + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr, + } - content = templatefile("${path.module}/templates/smoke-verify-ui.sh", { - vault_install_dir = var.vault_install_dir, - }) + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100644 index 000000000000..75007889da13 --- /dev/null +++ b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
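Editor's note: the replication smoke test just shown gates on edition: CE must report `.data.mode == "disabled"` at `sys/replication/status`, while Enterprise must expose both `.data.dr` and `.data.performance` blocks. A condensed sketch, assuming `VAULT_ADDR` and `VAULT_EDITION` are exported:

```
#!/usr/bin/env bash
# Sketch of the edition gate: CE reports replication mode "disabled",
# Enterprise must expose both DR and performance replication blocks.
status=$(curl -s "${VAULT_ADDR}/v1/sys/replication/status")
if [ "$VAULT_EDITION" = "ce" ]; then
  [ "$(jq -r '.data.mode' <<< "$status")" = "disabled" ] \
    || echo "unexpected replication mode on CE" 1>&2
else
  jq -e '.data.dr and .data.performance' <<< "$status" > /dev/null \
    || echo "missing replication data on Enterprise" 1>&2
fi
```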
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null) +expected="${VAULT_ADDR}/ui/" +if [ "${url_effective}" != "${expected}" ]; then + fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}" +fi + +if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh deleted file mode 100644 index 2ec23a107332..000000000000 --- a/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} -if [ "$(curl -s -o /dev/null -w "%%{redirect_url}" http://localhost:8200/)" != "http://localhost:8200/ui/" ]; then - fail "Port 8200 not redirecting to UI" -fi -if curl -s http://localhost:8200/ui/ | grep -q 'Vault UI is not available'; then - fail "Vault UI is not available" -fi diff --git a/enos/modules/vault_verify_ui/variables.tf b/enos/modules/vault_verify_ui/variables.tf deleted file mode 100644 index 344f0d8077d7..000000000000 --- a/enos/modules/vault_verify_ui/variables.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" - default = null -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf index 717d90735fd0..554732112090 100644 --- a/enos/modules/vault_verify_undo_logs/main.tf +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -1,52 +1,70 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { +variable "expected_state" { type = number - description = "How many vault instances are in the cluster" + description = "The expected state to have in vault.core.replication.write_undo_logs telemetry. Must be either 1 for enabled or 0 for disabled." 
+ + validation { + condition = contains([0, 1], var.expected_state) + error_message = "The expected_state must be either 0 or 1" + } } -variable "vault_instances" { +variable "hosts" { type = map(object({ + ipv6 = string private_ip = string public_ip = string })) - description = "The vault cluster instances that were created" + description = "The vault cluster target hosts to check" } -variable "vault_root_token" { +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { type = string - description = "The vault root token" + description = "The local vault API listen address" } -locals { - public_ips = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" } resource "enos_remote_exec" "smoke-verify-undo-logs" { - for_each = local.public_ips + for_each = var.hosts environment = { - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = "http://localhost:8200" + EXPECTED_STATE = var.expected_state + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token } scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh index ec308dd8bbaf..99bc7018c7d7 100644 --- a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -1,30 +1,35 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. 
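Editor's note: unlike the exponential backoff used elsewhere, these refactored modules poll on a wall-clock deadline: loop until `TIMEOUT_SECONDS` elapses, sleeping `RETRY_INTERVAL` between probes, as the undo-logs script below does. A minimal sketch; `some_check` is a placeholder for the module's actual probe:

```
#!/usr/bin/env bash
# Minimal sketch of the deadline loop; some_check stands in for the probe.
TIMEOUT_SECONDS=60
RETRY_INTERVAL=2
some_check() { false; } # placeholder: replace with the real probe
end_time=$(( $(date +%s) + TIMEOUT_SECONDS ))
while [ "$(date +%s)" -lt "$end_time" ]; do
  if some_check; then
    echo "check passed"
    exit 0
  fi
  sleep "$RETRY_INTERVAL"
done
echo "timed out after ${TIMEOUT_SECONDS}s" 1>&2
exit 1
```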
-# SPDX-License-Identifier: MPL-2.0
-
+# SPDX-License-Identifier: BUSL-1.1
 function fail() {
-  echo "$1" 1>&2
-  exit 1
+  echo "$1" 1>&2
+  exit 1
 }
 
-count=0
-retries=20
-while :; do
-  leader_address=$(curl -H "X-Vault-Request: true" -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/sys/leader" | jq '.leader_address' | sed 's/\"//g')
-  state=$(curl --header "X-Vault-Token: $VAULT_TOKEN" "$leader_address/v1/sys/metrics" | jq -r '.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")')
-  target_undo_logs_status="$(jq -r '.Value' <<< "$state")"
+[[ -z "$EXPECTED_STATE" ]] && fail "EXPECTED_STATE env variable has not been set"
+[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
+[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
 
-  if [ "$target_undo_logs_status" == "1" ]; then
-    exit 0
-  fi
+begin_time=$(date +%s)
+end_time=$((begin_time + TIMEOUT_SECONDS))
+while [ "$(date +%s)" -lt "$end_time" ]; do
+  state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")')
+  target_undo_logs_status="$(jq -r '.Value' <<< "$state")"
 
-  wait=$((2 ** count))
-  count=$((count + 1))
-  if [ "$count" -lt "$retries" ]; then
-    echo "$state"
-    sleep "$wait"
-  else
-    fail "Undo_logs did not get into the correct status"
-  fi
+  if [ "$target_undo_logs_status" == "$EXPECTED_STATE" ]; then
+    echo "vault.core.replication.write_undo_logs has expected Value: \"${EXPECTED_STATE}\""
+    exit 0
+  fi
+
+  echo "Waiting for vault.core.replication.write_undo_logs to have Value: \"${EXPECTED_STATE}\""
+  sleep "$RETRY_INTERVAL"
 done
+
+fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value: \"${EXPECTED_STATE}\""
diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf
deleted file mode 100644
index 45d15418a632..000000000000
--- a/enos/modules/vault_verify_unsealed/main.tf
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
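Editor's note: the rewritten undo-logs script above reads telemetry through the CLI rather than curling the leader directly; the gauge it wants lives in the `Gauges` array of the metrics snapshot. The lookup in isolation, assuming an authenticated `vault` CLI:

```
#!/usr/bin/env bash
# The gauge lookup in isolation: read the telemetry snapshot via the CLI
# and select one gauge's value from the Gauges array.
vault read sys/metrics -format=json |
  jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs") | .Value'
```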
-# SPDX-License-Identifier: MPL-2.0 - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string - default = "8201" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "verify_node_unsealed" { - for_each = local.instances - - content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh deleted file mode 100644 index c69c253ba4c1..000000000000 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' - -health_status=$(curl http://127.0.0.1:8200/v1/sys/health |jq '.') -unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') -if [[ "$unseal_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" -fi - -echo $health_status diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf index 88b4e7a00d42..9c992bb0a03b 100644 --- a/enos/modules/vault_verify_version/main.tf +++ b/enos/modules/vault_verify_version/main.tf @@ -1,41 +1,31 @@ # Copyright (c) HashiCorp, Inc. 
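Editor's note: for reference, the deleted template's unseal check reduces to one jq probe: `vault status -format=json` reports a boolean `sealed` field, and the command itself exits 2 on a sealed node and 0 on an unsealed one. A sketch:

```
#!/usr/bin/env bash
# Sketch of the unseal probe: "sealed" is a boolean in the status JSON.
if [ "$(vault status -format=json | jq -r '.sealed')" != "false" ]; then
  echo "node is still sealed" 1>&2
  exit 1
fi
echo "node is unsealed"
```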
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "vault_build_date" { - type = string - description = "The Vault artifact build date" - default = null -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many Vault instances are in the cluster" -} - -variable "vault_instances" { +variable "hosts" { type = map(object({ + ipv6 = string private_ip = string public_ip = string })) description = "The Vault cluster instances that were created" } -variable "vault_product_version" { +variable "vault_addr" { type = string - description = "The Vault product version" + description = "The local vault API listen address" +} + +variable "vault_build_date" { + type = string + description = "The Vault artifact build date" default = null } @@ -45,6 +35,17 @@ variable "vault_edition" { default = null } +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_product_version" { + type = string + description = "The Vault product version" + default = null +} + variable "vault_revision" { type = string description = "The Vault product revision" @@ -57,26 +58,39 @@ variable "vault_root_token" { default = null } -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip +resource "enos_remote_exec" "verify_cli_version" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_EDITION = var.vault_edition, + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_REVISION = var.vault_revision, + VAULT_TOKEN = var.vault_root_token, + VAULT_VERSION = var.vault_product_version, + } + + scripts = [abspath("${path.module}/scripts/verify-cli-version.sh")] + + transport = { + ssh = { + host = each.value.public_ip } } } -resource "enos_remote_exec" "verify_all_nodes_have_updated_version" { - for_each = local.instances - - content = templatefile("${path.module}/templates/verify-cluster-version.sh", { - vault_install_dir = var.vault_install_dir, - vault_build_date = var.vault_build_date, - vault_version = var.vault_product_version, - vault_edition = var.vault_edition, - vault_revision = var.vault_revision, - vault_token = var.vault_root_token, - }) +resource "enos_remote_exec" "verify_cluster_version" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_TOKEN = var.vault_root_token, + VAULT_VERSION = var.vault_product_version, + } + + scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_version/scripts/verify-cli-version.sh b/enos/modules/vault_verify_version/scripts/verify-cli-version.sh new file mode 100644 index 000000000000..d90abc781cf4 --- /dev/null +++ b/enos/modules/vault_verify_version/scripts/verify-cli-version.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Verify the Vault "version" includes the correct base version, build date, +# revision SHA, and edition metadata. 
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_BUILD_DATE env variable has not been set"
+[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_REVISION" ]] && fail "VAULT_REVISION env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+[[ -z "$VAULT_VERSION" ]] && fail "VAULT_VERSION env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+edition=${VAULT_EDITION}
+version=${VAULT_VERSION}
+sha=${VAULT_REVISION}
+build_date=${VAULT_BUILD_DATE}
+
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+version_expected="Vault v$version ($sha), built $build_date"
+
+case "$edition" in
+  *ce) ;;
+  *ent) ;;
+  *ent.hsm) version_expected="$version_expected (cgo)" ;;
+  *ent.fips1402) version_expected="$version_expected (cgo)" ;;
+  *ent.hsm.fips1402) version_expected="$version_expected (cgo)" ;;
+  *) fail "Unknown Vault edition: ($edition)" ;;
+esac
+
+version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/  / /' | sed -e 's/[[:space:]]*$//')
+version_output=$("$binpath" version)
+
+if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" ]]; then
+  echo "Version verification succeeded!"
+else
+  msg="$(printf "\nThe Vault cluster did not match the expected version, expected:\n%s\nor\n%s\ngot:\n%s" "$version_expected" "$version_expected_nosha" "$version_output")"
+  if type diff &> /dev/null; then
+    # Diff exits non-zero if we have a diff, which we want, so we'll guard against failing early.
+    if ! version_diff=$(diff <(echo "$version_expected") <(echo "$version_output") -u -L expected -L got); then
+      msg="$(printf "\nThe Vault cluster did not match the expected version:\n%s" "$version_diff")"
+    fi
+  fi
+
+  fail "$msg"
+fi
diff --git a/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh
new file mode 100644
index 000000000000..f0afee64510b
--- /dev/null
+++ b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+# Verify that the cluster's version history includes the expected base
+# version and build date.
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_BUILD_DATE env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+[[ -z "$VAULT_VERSION" ]] && fail "VAULT_VERSION env variable has not been set"
+
+# The sys/version-history endpoint only includes major.minor.patch, any other semver fields need to
+# be stripped out.
+if ! version=$(cut -d + -f1 <<< "$VAULT_VERSION" | cut -d - -f1); then
+  fail "failed to parse the expected version: $version"
+fi
+
+if ! vh=$(curl -s -X LIST -H "X-Vault-Token: $VAULT_TOKEN" http://127.0.0.1:8200/v1/sys/version-history | jq -eMc '.data'); then
+  fail "failed to get Vault cluster version history: $vh"
+fi
+
+if ! 
out=$(jq -eMc --arg version "$version" '.keys | contains([$version])' <<< "$vh"); then + fail "cluster version history does not include our expected version: expected: $version, versions: $(jq -eMc '.keys' <<< "$vh"): output: $out" +fi + +if ! out=$(jq -eMc --arg version "$version" --arg bd "$VAULT_BUILD_DATE" '.key_info[$version].build_date == $bd' <<< "$vh"); then + fail "cluster version history build date is not the expected date: expected: true, expected date: $VAULT_BUILD_DATE, key_info: $(jq -eMc '.key_info' <<< "$vh"), output: $out" +fi + +printf "Cluster version information is valid!: %s\n" "$vh" diff --git a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh b/enos/modules/vault_verify_version/templates/verify-cluster-version.sh deleted file mode 100644 index ba5df7488580..000000000000 --- a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -# Verify the Vault "version" includes the correct base version, build date, -# revision SHA, and edition metadata. -set -e - -binpath=${vault_install_dir}/vault -edition=${vault_edition} -version=${vault_version} -sha=${vault_revision} -build_date=${vault_build_date} - -fail() { - echo "$1" 1>&2 - exit 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# Build date was added in 1.11 -if [[ "$(echo "$version" |awk -F'.' '{print $2}')" -ge 11 ]]; then - version_expected="Vault v$version ($sha), built $build_date" -else - version_expected="Vault v$version ($sha)" -fi - -case "$edition" in - *oss) ;; - *ent) ;; - *ent.hsm) version_expected="$version_expected (cgo)";; - *ent.fips1402) version_expected="$version_expected (cgo)" ;; - *ent.hsm.fips1402) version_expected="$version_expected (cgo)" ;; - *) fail "Unknown Vault edition: ($edition)" ;; -esac - -version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') -version_output=$("$binpath" version) - -if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" ]]; then - echo "Version verification succeeded!" -else - fail "expected Version=$version_expected or $version_expected_nosha, got: $version_output" -fi diff --git a/enos/modules/vault_verify_write_data/main.tf b/enos/modules/vault_verify_write_data/main.tf deleted file mode 100644 index c17a094b5552..000000000000 --- a/enos/modules/vault_verify_write_data/main.tf +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
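Editor's note: the version-history check above issues a `LIST` over raw HTTP because the endpoint enumerates keys. The probe in isolation; the version number is illustrative and `VAULT_ADDR`/`VAULT_TOKEN` are assumed to be exported:

```
#!/usr/bin/env bash
# The version-history probe in isolation; "1.15.0" is illustrative.
vh=$(curl -s -X LIST -H "X-Vault-Token: $VAULT_TOKEN" \
  "$VAULT_ADDR/v1/sys/version-history" | jq -eMc '.data')
jq -e --arg v "1.15.0" '.keys | contains([$v])' <<< "$vh" > /dev/null \
  || { echo "1.15.0 not in version history: $(jq -c '.keys' <<< "$vh")" 1>&2; exit 1; }
```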
-# SPDX-License-Identifier: MPL-2.0 - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many Vault instances are in the cluster" -} - -variable "leader_public_ip" { - type = string - description = "Vault cluster leader Public IP address" -} - -variable "leader_private_ip" { - type = string - description = "Vault cluster leader Private IP address" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" - default = null -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -# We use this module to verify write data in all Enos scenarios. Since we cannot use -# Vault token to authenticate to secondary clusters in replication scenario we add a regular user -# here to keep the authentication method and module verification consistent between all scenarios -resource "enos_remote_exec" "smoke-enable-secrets-kv" { - # Only enable the secrets engine on the leader node - environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = ["${path.module}/scripts/smoke-enable-secrets-kv.sh"] - - transport = { - ssh = { - host = var.leader_public_ip - } - } -} - -# Verify that we can enable the k/v secrets engine and write data to it. -resource "enos_remote_exec" "smoke-write-test-data" { - depends_on = [enos_remote_exec.smoke-enable-secrets-kv] - for_each = local.instances - - environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - TEST_KEY = "smoke${each.key}" - TEST_VALUE = "fire" - } - - scripts = ["${path.module}/scripts/smoke-write-test-data.sh"] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh deleted file mode 100644 index 2d3e81c2161a..000000000000 --- a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -function retry { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? 
- wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - return "$exit" - fi - done - - return 0 -} - -function fail { - echo "$1" 1>&2 - exit 1 -} - -binpath=${VAULT_INSTALL_DIR}/vault - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -retry 5 "$binpath" status > /dev/null 2>&1 - -# Create user policy -retry 5 $binpath policy write reguser -< /dev/null 2>&1 - -# Create new user and attach reguser policy -retry 5 $binpath write auth/userpass/users/testuser password="passuser1" policies="reguser" - -retry 5 $binpath secrets enable -path="secret" kv diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh deleted file mode 100644 index 98b6392c580c..000000000000 --- a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -function retry { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - return "$exit" - fi - done - - return 0 -} - -function fail { - echo "$1" 1>&2 - exit 1 -} - -binpath=${VAULT_INSTALL_DIR}/vault -testkey=${TEST_KEY} -testvalue=${TEST_VALUE} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -retry 5 $binpath kv put secret/test $testkey=$testvalue diff --git a/enos/modules/vault_wait_for_cluster_unsealed/main.tf b/enos/modules/vault_wait_for_cluster_unsealed/main.tf new file mode 100644 index 000000000000..ce9ee25e1939 --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_unsealed/main.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +resource "enos_remote_exec" "verify_node_unsealed" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")] + + environment = { + HOST_IPV4 = each.value.public_ip + HOST_IPV6 = each.value.ipv6 + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh b/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh new file mode 100644 index 000000000000..1bce52094cfd --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
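The deleted smoke scripts above rely on a retry helper with exponential backoff. For reference, here is that helper in isolation with a hypothetical command; with 5 retries the sleeps between attempts are 1s, 2s, 4s, and 8s before it gives up with the last exit code:

    retry() {
      local retries=$1
      shift
      local count=0
      until "$@"; do
        exit=$?
        wait=$((2 ** count))   # 1s, 2s, 4s, ... between attempts
        count=$((count + 1))
        if [ "$count" -lt "$retries" ]; then
          sleep "$wait"
        else
          return "$exit"       # give up, surfacing the last failing exit code
        fi
      done
      return 0
    }

    # Hypothetical usage: up to 5 attempts, so at most ~15s spent sleeping.
    retry 5 vault status > /dev/null 2>&1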
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getStatus() { + $binpath status -format json +} + +isUnsealed() { + local status + if ! status=$(getStatus); then + echo "failed to get vault status" 1>&2 + return 1 + fi + + if status=$(jq -Mre --argjson expected "false" '.sealed == $expected' <<< "$status"); then + echo "vault is unsealed: $status" + return 0 + fi + + echo "vault is sealed" 1>&2 + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + echo "waiting for vault to be unsealed..." + + if isUnsealed; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +if [ -n "$HOST_IPV6" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV6} to be unsealed" +fi +if [ -n "$HOST_IPV4" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV4} to be unsealed" +fi +fail "timed out waiting for Vault cluster to be unsealed" diff --git a/enos/modules/vault_wait_for_leader/main.tf b/enos/modules/vault_wait_for_leader/main.tf new file mode 100644 index 000000000000..7c29280c91ee --- /dev/null +++ b/enos/modules/vault_wait_for_leader/main.tf @@ -0,0 +1,82 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])] + private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "wait_for_leader_in_hosts" { + environment = { + IP_VERSION = var.ip_version + TIMEOUT_SECONDS = var.timeout + RETRY_INTERVAL = var.retry_interval + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTANCE_IPV6S = jsonencode(local.ipv6s) + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} diff --git 
a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh new file mode 100644 index 000000000000..dc97cb6ad163 --- /dev/null +++ b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +findLeaderInPrivateIPs() { + # Find the leader private IP address + local leader_private_ip + if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if ! leader_private_ip=$($binpath status -format json | jq -er '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + return 1 + fi + fi + + if isIn=$(jq -er --arg ip "$leader_private_ip" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_private_ip" + return 0 + fi + fi + + return 1 +} + +findLeaderInIPV6s() { + # Find the leader IPv6 address + local leader_ipv6 + if ! leader_ipv6=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if ! leader_ipv6=$($binpath status -format json | jq -er '.leader_address | scan("\\[(.+)\\]") | .[0]'); then + return 1 + fi + fi + + if isIn=$(jq -er --arg ip "$leader_ipv6" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_IPV6S"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_ipv6" + return 0 + fi + fi + + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + # Look for the leader using the address family that matches IP_VERSION + case $IP_VERSION in + 4) + [[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" + if findLeaderInPrivateIPs; then + exit 0 + fi + ;; + 6) + [[ -z "$VAULT_INSTANCE_IPV6S" ]] && fail "VAULT_INSTANCE_IPV6S env variable has not been set" + if findLeaderInIPV6s; then + exit 0 + fi + ;; + *) + fail "Unsupported IP_VERSION: $IP_VERSION" + ;; + esac + + sleep "$RETRY_INTERVAL" +done + +case $IP_VERSION in + 4) + fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader." + ;; + 6) + fail "Timed out waiting for one of $VAULT_INSTANCE_IPV6S to be leader." + ;; + *) + fail "Timed out waiting for leader" + ;; +esac
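Both finder functions in wait-for-leader.sh use jq's scan to pull a host address out of leader_address, then test it against the candidate list passed in via the environment. A sketch against illustrative responses:

    # IPv4: the regex grabs the dotted quad out of the URL.
    resp='{"data":{"leader_address":"http://10.13.10.8:8200"}}'
    jq -er '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$resp"   # => 10.13.10.8

    # IPv6 listeners bracket the host, hence the capture group and .[0].
    resp6='{"data":{"leader_address":"https://[2001:db8::1]:8200"}}'
    jq -er '.data.leader_address | scan("\\[(.+)\\]") | .[0]' <<< "$resp6"            # => 2001:db8::1

    # Membership test mirroring the isIn check in the script.
    ips='["10.13.10.7","10.13.10.8"]'
    jq -er --arg ip "10.13.10.8" 'map(select(. == $ip)) | length == 1' <<< "$ips"     # => true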
diff --git a/enos/modules/vault_wait_for_seal_rewrap/main.tf b/enos/modules/vault_wait_for_seal_rewrap/main.tf new file mode 100644 index 000000000000..920672a71d75 --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/main.tf @@ -0,0 +1,78 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] + first_key = element(keys(enos_remote_exec.wait_for_seal_rewrap_to_be_completed), 0) +} + +resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" { + for_each = var.hosts + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-seal-rewrap.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "stdout" { + value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout +} + +output "stderr" { + value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stderr +} diff --git a/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh new file mode 100644 index 000000000000..67bc1444b42c --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getRewrapData() { + $binpath read sys/sealwrap/rewrap -format=json | jq -eMc '.data' +} + +waitForRewrap() { + local data + if ! data=$(getRewrapData); then + echo "failed getting /v1/sys/sealwrap/rewrap data" 1>&2 + return 1 + fi + + if ! jq -e '.is_running == false' <<< "$data" &> /dev/null; then + echo "rewrap is running" 1>&2 + return 1 + fi + + if ! jq -e '.entries.failed == 0' <<< "$data" &> /dev/null; then + local entries + entries=$(jq -Mc '.entries.failed' <<< "$data") + echo "rewrap has $entries failed entries" 1>&2 + return 1 + fi + + if ! 
jq -e '.entries.processed == .entries.succeeded' <<< "$data" &> /dev/null; then + local processed + local succeeded + processed=$(jq -Mc '.entries.processed' <<< "$data") + succeeded=$(jq -Mc '.entries.succeeded' <<< "$data") + echo "the number of processed entries ($processed) does not equal the number of succeeded ($succeeded)" 1>&2 + return 1 + fi + + if jq -e '.entries.processed == 0' <<< "$data" &> /dev/null; then + echo "A seal rewrap has not been started yet. Number of processed entries is zero and a rewrap is not yet running." + return 1 + fi + + echo "$data" + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForRewrap; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for seal rewrap to be completed. Data:\n\t$(getRewrapData)" diff --git a/enos/modules/verify_seal_type/main.tf b/enos/modules/verify_seal_type/main.tf new file mode 100644 index 000000000000..e8d81890dd6e --- /dev/null +++ b/enos/modules/verify_seal_type/main.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "seal_type" { + type = string + description = "The expected seal type" + default = "shamir" +} + + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +resource "enos_remote_exec" "verify_seal_type" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/verify-seal-type.sh")] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + EXPECTED_SEAL_TYPE = var.seal_type + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh new file mode 100644 index 000000000000..73ce06fd9e03 --- /dev/null +++ b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
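waitForRewrap above only succeeds once four conditions hold at the same time: the rewrap is not running, no entries failed, processed equals succeeded, and at least one entry was processed. The same predicate can be collapsed into a single jq program; the payload here is illustrative:

    data='{"is_running":false,"entries":{"processed":8,"succeeded":8,"failed":0}}'
    jq -e '(.is_running == false)
      and (.entries.failed == 0)
      and (.entries.processed == .entries.succeeded)
      and (.entries.processed > 0)' <<< "$data"   # => true, exit 0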
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$EXPECTED_SEAL_TYPE" ]] && fail "EXPECTED_SEAL_TYPE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=2 +while :; do + if seal_status=$($binpath read sys/seal-status -format=json); then + if jq -Mer --arg expected "$EXPECTED_SEAL_TYPE" '.data.type == $expected' <<< "$seal_status" &> /dev/null; then + exit 0 + fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + printf "Seal Status: %s\n" "$seal_status" + got=$(jq -Mer '.data.type' <<< "$seal_status") + fail "Expected seal type to be $EXPECTED_SEAL_TYPE, got: $got" + fi +done diff --git a/enos/modules/verify_secrets_engines/modules/create/auth.tf b/enos/modules/verify_secrets_engines/modules/create/auth.tf new file mode 100644 index 000000000000..cfbec2f84e18 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/auth.tf @@ -0,0 +1,145 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + auth_userpass_path = "userpass" # auth/userpass + user_name = "testuser" # auth/userpass/users/testuser + user_password = "passtestuser1" # auth/userpass/login/testuser + user_policy_name = "reguser" # sys/policy/reguser + + // Response data + user_login_data = jsondecode(enos_remote_exec.auth_login_testuser.stdout) + sys_auth_data = jsondecode(enos_remote_exec.read_sys_auth.stdout).data + + // Output + auth_output = { + sys = local.sys_auth_data + userpass = { + path = local.auth_userpass_path + user = { + name = local.user_name + password = local.user_password + policy_name = local.user_policy_name + login = local.user_login_data + } + } + } +} + +output "auth" { + value = local.auth_output +} + +# Enable userpass auth +resource "enos_remote_exec" "auth_enable_userpass" { + environment = { + AUTH_METHOD = "userpass" + AUTH_PATH = local.auth_userpass_path + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-enable.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Get the sys/auth data after enabling our auth method +resource "enos_remote_exec" "read_sys_auth" { + depends_on = [ + enos_remote_exec.auth_enable_userpass, + ] + environment = { + REQPATH = "sys/auth" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/read.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} +
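The auth_enable_userpass and read_sys_auth resources above reduce to two CLI calls, and identity.tf later needs the userpass mount accessor from sys/auth to build the entity alias. A hypothetical shell equivalent (the accessor value is illustrative):

    export VAULT_FORMAT=json
    vault auth enable -path=userpass userpass
    accessor=$(vault read sys/auth | jq -er '.data["userpass/"].accessor')
    echo "$accessor"   # e.g. auth_userpass_b5e2c1 (made-up value)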
+resource "enos_remote_exec" "policy_read_reguser" { + environment = { + POLICY_NAME = local.user_policy_name + POLICY_CONFIG = <<-EOF + path "*" { + capabilities = ["read", "list"] + } + EOF + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create our user +resource "enos_remote_exec" "auth_create_testuser" { + depends_on = [ + enos_remote_exec.auth_enable_userpass, + enos_remote_exec.policy_read_reguser, + ] + + environment = { + AUTH_PATH = local.auth_userpass_path + PASSWORD = local.user_password + POLICIES = local.user_policy_name + USERNAME = local.user_name + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-userpass-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +resource "enos_remote_exec" "auth_login_testuser" { + depends_on = [ + // Don't try to login until created our user and added it to the kv_writers group + enos_remote_exec.auth_create_testuser, + enos_remote_exec.identity_group_kv_writers, + ] + + environment = { + AUTH_PATH = local.auth_userpass_path + PASSWORD = local.user_password + USERNAME = local.user_name + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-userpass-login.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/identity.tf b/enos/modules/verify_secrets_engines/modules/create/identity.tf new file mode 100644 index 000000000000..6ee8810f0281 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/identity.tf @@ -0,0 +1,380 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + identity_entity_metadata = { + "organization" = "vault", + "team" = "qt", + } + group_name_oidc_readers = "oidc_token_readers" // identity/group/name/oidc_token_readers + oidc_config_issuer_url = "https://enos.example.com:1234" // identity/oidc/config + oidc_key_algorithms = ["RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "EdDSA"] + oidc_key_algorithm = local.oidc_key_algorithms[random_integer.oidc_key_algorithm_idx.result] + oidc_key_name = "reguser" // identity/oidc/key/reguser + oidc_key_rotation_period = 86400 // 24h + oidc_key_verification_ttl = 21600 // 6h + oidc_role_name = "reguser" // identity/oidc/role/reguser + oidc_role_ttl = 3600 // 1h + oidc_client_id = "reguser" // optional client ID but required if we want to scope a key and role together without a * + oidc_token_read_policy_name = "oidc_token_reader" + + // Response data + oidc_token_data = jsondecode(enos_remote_exec.oidc_token.stdout).data + group_oidc_token_readers_data = jsondecode(enos_remote_exec.identity_group_oidc_token_readers.stdout).data + initial_oidc_token_data = jsondecode(enos_remote_exec.initial_oidc_token.stdout).data + user_entity_data = jsondecode(enos_remote_exec.identity_entity_testuser.stdout).data + user_entity_alias_data = jsondecode(enos_remote_exec.identity_entity_alias_testuser.stdout).data + + // Output + identity_output = { + oidc = { + reader_group_name = local.group_name_oidc_readers + reader_policy_name = local.oidc_token_read_policy_name + issuer_url = local.oidc_config_issuer_url + key_algorithm = local.oidc_key_algorithm + key_name = local.oidc_key_name + key_rotation_period = local.oidc_key_rotation_period + key_verification_ttl = local.oidc_key_verification_ttl + role_name = local.oidc_role_name + role_ttl = local.oidc_role_ttl + client_id = local.oidc_client_id + } + identity_entity_metadata = local.identity_entity_metadata + data = { + entity = local.user_entity_data + entity_alias = local.user_entity_alias_data + oidc_token = local.oidc_token_data + group_oidc_token_readers = local.group_oidc_token_readers_data + } + } +} + +output "identity" { + value = local.identity_output +} + +// Get a random index for our algorithms so that we can randomly rotate through the various algorithms +resource "random_integer" "oidc_key_algorithm_idx" { + min = 0 + max = length(local.oidc_key_algorithms) - 1 +} + +// Create identity entity for our user +resource "enos_remote_exec" "identity_entity_testuser" { + depends_on = [ + enos_remote_exec.auth_create_testuser, + ] + + environment = { + REQPATH = "identity/entity" + PAYLOAD = jsonencode({ + name = local.user_name, + metadata = local.identity_entity_metadata, + policies = [local.user_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create identity entity alias for our user +resource "enos_remote_exec" "identity_entity_alias_testuser" { + environment = { + REQPATH = "identity/entity-alias" + PAYLOAD = jsonencode({ + name = local.user_name, + canonical_id = local.user_entity_data.id + mount_accessor = local.sys_auth_data["${local.auth_userpass_path}/"].accessor + policies = [local.user_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = 
[abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Configure the OIDC token backend +resource "enos_remote_exec" "oidc_config" { + environment = { + REQPATH = "identity/oidc/config" + PAYLOAD = jsonencode({ + issuer = local.oidc_config_issuer_url, + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create a named key that can sign OIDC identity tokens +resource "enos_remote_exec" "oidc_key" { + environment = { + REQPATH = "identity/oidc/key/${local.oidc_key_name}" + PAYLOAD = jsonencode({ + allowed_client_ids = [local.oidc_client_id], + algorithm = local.oidc_key_algorithm, + rotation_period = local.oidc_key_rotation_period, + verification_ttl = local.oidc_key_verification_ttl, + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create a role with a custom template that uses the named key +resource "enos_remote_exec" "oidc_role" { + depends_on = [ + enos_remote_exec.oidc_key, + ] + + environment = { + REQPATH = "identity/oidc/role/${local.oidc_role_name}" + PAYLOAD = jsonencode({ + client_id = local.oidc_client_id, + key = local.oidc_key_name, + ttl = local.oidc_role_ttl + template = base64encode(<<-EOF + { + "team": {{identity.entity.metadata.team}}, + "organization": {{identity.entity.metadata.organization}}, + "groups": {{identity.entity.groups.names}} + } + EOF + ), + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create a group policy that allows "reading" a new signed OIDC token +resource "enos_remote_exec" "policy_write_oidc_token" { + depends_on = [ + enos_remote_exec.secrets_enable_kv_secret, + ] + environment = { + POLICY_NAME = local.oidc_token_read_policy_name + POLICY_CONFIG = <<-EOF + path "identity/oidc/token/*" { + capabilities = ["read"] + } + EOF + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create oidc_token_readers group and add our testuser to it +resource "enos_remote_exec" "identity_group_oidc_token_readers" { + environment = { + REQPATH = "identity/group" + PAYLOAD = jsonencode({ + member_entity_ids = [local.user_entity_data.id], + name = local.group_name_oidc_readers, + policies = [local.oidc_token_read_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Generate a signed ID token with our test user +resource "enos_remote_exec" "initial_oidc_token" { + depends_on = [ + enos_remote_exec.oidc_role, + ] + + environment = { + REQPATH = 
"identity/oidc/token/${local.oidc_role_name}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/read.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Introspect the signed ID and verify it +resource "enos_remote_exec" "oidc_introspect_initial_token" { + environment = { + ASSERT_ACTIVE = true // Our token should be "active" + PAYLOAD = jsonencode({ + token = local.initial_oidc_token_data.token, + client_id = local.initial_oidc_token_data.client_id + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Rotate the key with a zero TTL to force expiration +resource "enos_remote_exec" "oidc_key_rotate" { + depends_on = [ + enos_remote_exec.oidc_introspect_initial_token, + ] + + environment = { + REQPATH = "identity/oidc/key/${local.oidc_key_name}/rotate" + PAYLOAD = jsonencode({ + verification_ttl = 0, + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Introspect it again to make sure it's no longer active +resource "enos_remote_exec" "oidc_introspect_initial_token_post_rotate" { + depends_on = [ + enos_remote_exec.oidc_key_rotate, + ] + + environment = { + ASSERT_ACTIVE = false // Our token should not be "active" + PAYLOAD = jsonencode({ + token = local.initial_oidc_token_data.token, + client_id = local.initial_oidc_token_data.client_id + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Generate a new token that we can use later +resource "enos_remote_exec" "oidc_token" { + depends_on = [ + enos_remote_exec.oidc_introspect_initial_token_post_rotate, + ] + + environment = { + REQPATH = "identity/oidc/token/${local.oidc_role_name}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/read.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Introspect the new token to ensure it's active before we export it for user later via outputs +resource "enos_remote_exec" "oidc_introspect_token" { + environment = { + ASSERT_ACTIVE = true // Our token should be "active" + PAYLOAD = jsonencode({ + token = local.oidc_token_data.token, + client_id = local.oidc_token_data.client_id + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/kv.tf b/enos/modules/verify_secrets_engines/modules/create/kv.tf new file mode 100644 index 000000000000..269f64b73eec --- /dev/null +++ 
b/enos/modules/verify_secrets_engines/modules/create/kv.tf @@ -0,0 +1,126 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + group_name_kv_writers = "kv_writers" # identity/group/name/kv_writers + kv_mount = "secret" # secret + kv_write_policy_name = "kv_writer" # sys/policy/kv_writer + kv_test_data_path_prefix = "smoke" + kv_test_data_value_prefix = "fire" + + // Response data + identity_group_kv_writers_data = jsondecode(enos_remote_exec.identity_group_kv_writers.stdout).data + + // Output + kv_output = { + reader_group_name = local.group_name_kv_writers + writer_policy_name = local.kv_write_policy_name + mount = local.kv_mount + test = { + path_prefix = local.kv_test_data_path_prefix + value_prefix = local.kv_test_data_value_prefix + } + data = { + identity_group_kv_writers = local.identity_group_kv_writers_data + } + } +} + +output "kv" { + value = local.kv_output +} + +# Enable kv secrets engine +resource "enos_remote_exec" "secrets_enable_kv_secret" { + environment = { + ENGINE = "kv" + MOUNT = local.kv_mount + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/secrets-enable.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create a group policy that allows writing to our kv store +resource "enos_remote_exec" "policy_write_kv_writer" { + depends_on = [ + enos_remote_exec.secrets_enable_kv_secret, + ] + environment = { + POLICY_NAME = local.kv_write_policy_name + POLICY_CONFIG = <<-EOF + path "${local.kv_mount}/*" { + capabilities = ["create", "update", "read", "delete", "list"] + } + EOF + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create kv_writers group and add our testuser to it +resource "enos_remote_exec" "identity_group_kv_writers" { + environment = { + REQPATH = "identity/group" + PAYLOAD = jsonencode({ + member_entity_ids = [local.user_entity_data.id], // Created in identity.tf + name = local.group_name_kv_writers, + policies = [local.kv_write_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Write test data as our user. 
+resource "enos_remote_exec" "kv_put_secret_test" { + depends_on = [ + enos_remote_exec.secrets_enable_kv_secret, + ] + for_each = var.hosts + + environment = { + MOUNT = local.kv_mount + SECRET_PATH = "${local.kv_test_data_path_prefix}-${each.key}" + KEY = "${local.kv_test_data_path_prefix}-${each.key}" + VALUE = "${local.kv_test_data_value_prefix}-${each.key}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/kv-put.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/main.tf b/enos/modules/verify_secrets_engines/modules/create/main.tf new file mode 100644 index 000000000000..89ca1c80b406 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + + description = "Vault cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +output "state" { + value = { + auth = local.auth_output + identity = local.identity_output + kv = local.kv_output + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/auth.tf b/enos/modules/verify_secrets_engines/modules/read/auth.tf new file mode 100644 index 000000000000..2ea06de22c37 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/auth.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + user_login_data = jsondecode(enos_remote_exec.auth_login_testuser.stdout) +} + +resource "enos_remote_exec" "auth_login_testuser" { + environment = { + AUTH_PATH = var.create_state.auth.userpass.path + PASSWORD = var.create_state.auth.userpass.user.password + USERNAME = var.create_state.auth.userpass.user.name + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-userpass-login.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/identity.tf b/enos/modules/verify_secrets_engines/modules/read/identity.tf new file mode 100644 index 000000000000..0f347969a1fa --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/identity.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// Read our testuser identity entity and verify that it matches our expected alias, groups, policy, +// and metadata. 
+resource "enos_remote_exec" "identity_verify_entity" { + for_each = var.hosts + + environment = { + ENTITY_ALIAS_ID = var.create_state.identity.data.entity_alias.id + ENTITY_GROUP_IDS = jsonencode([ + var.create_state.kv.data.identity_group_kv_writers.id, + var.create_state.identity.data.group_oidc_token_readers.id, + ]) + ENTITY_METADATA = jsonencode(var.create_state.identity.identity_entity_metadata) + ENTITY_NAME = var.create_state.identity.data.entity.name + ENTITY_POLICIES = jsonencode([var.create_state.auth.userpass.user.policy_name]) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-verify-entity.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Read our OIDC key and role and verify that they have the correct configuration, TTLs, and algorithms. +resource "enos_remote_exec" "identity_verify_oidc" { + for_each = var.hosts + + environment = { + OIDC_ISSUER_URL = var.create_state.identity.oidc.issuer_url + OIDC_KEY_NAME = var.create_state.identity.oidc.key_name + OIDC_KEY_ROTATION_PERIOD = var.create_state.identity.oidc.key_rotation_period + OIDC_KEY_VERIFICATION_TTL = var.create_state.identity.oidc.key_verification_ttl + OIDC_KEY_ALGORITHM = var.create_state.identity.oidc.key_algorithm + OIDC_ROLE_NAME = var.create_state.identity.oidc.role_name + OIDC_ROLE_TTL = var.create_state.identity.oidc.role_ttl + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-verify-oidc.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/kv.tf b/enos/modules/verify_secrets_engines/modules/read/kv.tf new file mode 100644 index 000000000000..cfa4b7829e13 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/kv.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +resource "enos_remote_exec" "kv_get_verify_test_data" { + for_each = var.hosts + + environment = { + MOUNT = var.create_state.kv.mount + SECRET_PATH = "${var.create_state.kv.test.path_prefix}-${each.key}" + KEY = "${var.create_state.kv.test.path_prefix}-${each.key}" + VALUE = "${var.create_state.kv.test.value_prefix}-${each.key}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/kv-verify-value.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/main.tf b/enos/modules/verify_secrets_engines/modules/read/main.tf new file mode 100644 index 000000000000..f2ad27a60f3f --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/main.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "create_state" { + description = "The state of the secrets engines from the 'create' module" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +locals { + vault_bin_path = "${var.vault_install_dir}/vault" +} diff --git a/enos/modules/verify_secrets_engines/scripts/auth-enable.sh b/enos/modules/verify_secrets_engines/scripts/auth-enable.sh new file mode 100644 index 000000000000..5601715a81a6 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-enable.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_METHOD" ]] && fail "AUTH_METHOD env variable has not been set" +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" auth enable -path="$AUTH_PATH" "$AUTH_METHOD" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh b/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh new file mode 100644 index 000000000000..31b756f1f5a5 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" +[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" write "auth/$AUTH_PATH/login/$USERNAME" password="$PASSWORD" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh b/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh new file mode 100644 index 000000000000..b8cca8bb1b63 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
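auth-userpass-login.sh above prints the whole login response as JSON; create/auth.tf and read/auth.tf both jsondecode that stdout and pass .auth.client_token along as VAULT_TOKEN for later steps. A hypothetical end-to-end sketch using the credentials defined in create/auth.tf:

    export VAULT_FORMAT=json
    login=$(vault write auth/userpass/login/testuser password="passtestuser1")
    token=$(jq -er '.auth.client_token' <<< "$login")
    VAULT_TOKEN="$token" vault token lookup > /dev/null && echo "login token works"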
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" +[[ -z "$POLICIES" ]] && fail "POLICIES env variable has not been set" +[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" write "auth/$AUTH_PATH/users/$USERNAME" password="$PASSWORD" policies="$POLICIES" diff --git a/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh b/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh new file mode 100644 index 000000000000..0e6e1eaabab7 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PAYLOAD" ]] && fail "PAYLOAD env variable has not been set" +[[ -z "$ASSERT_ACTIVE" ]] && fail "ASSERT_ACTIVE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if ! output=$("$binpath" write identity/oidc/introspect - <<< "$PAYLOAD" 2>&1); then + # Attempt to write our error on stdout as JSON as our consumers of the script expect it to be JSON + printf '{"data":{"error":"%s"}}' "$output" + # Fail on stderr with a human readable message + fail "failed to write payload to identity/oidc/introspect: payload=$PAYLOAD output=$output" +fi + +printf "%s\n" "$output" # Write our response output JSON to stdout +if ! jq -Me --argjson ACTIVE "$ASSERT_ACTIVE" '.data.active == $ACTIVE' <<< "$output" &> /dev/null; then + # Write a failure message on STDERR + fail "token active state is invalid, expected .data.active='$ASSERT_ACTIVE'" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh b/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh new file mode 100644 index 000000000000..2ee950368196 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
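identity-oidc-introspect-token.sh above asserts that .data.active matches ASSERT_ACTIVE. A sketch of the same round trip driven by hand, assuming the reguser role and client ID created earlier in this diff:

    export VAULT_FORMAT=json
    signed=$(vault read identity/oidc/token/reguser | jq -er '.data.token')
    payload=$(jq -n --arg t "$signed" '{token: $t, client_id: "reguser"}')
    out=$(vault write identity/oidc/introspect - <<< "$payload")
    jq -Me --argjson ACTIVE true '.data.active == $ACTIVE' <<< "$out"   # => true while the key is unrotated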
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$ENTITY_ALIAS_ID" ]] && fail "ENTITY_ALIAS_ID env variable has not been set" +[[ -z "$ENTITY_GROUP_IDS" ]] && fail "ENTITY_GROUP_IDS env variable has not been set" +[[ -z "$ENTITY_METADATA" ]] && fail "ENTITY_METADATA env variable has not been set" +[[ -z "$ENTITY_NAME" ]] && fail "ENTITY_NAME env variable has not been set" +[[ -z "$ENTITY_POLICIES" ]] && fail "ENTITY_POLICIES env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if ! output=$("$binpath" read "identity/entity/name/$ENTITY_NAME" 2>&1); then + fail "failed to read identity/entity/name/$ENTITY_NAME: $output" +fi + +if ! jq -Mec --arg ALIAS "$ENTITY_ALIAS_ID" '.data.aliases[0].id == $ALIAS' <<< "$output"; then + fail "entity alias ID does not match, expected: $ENTITY_ALIAS_ID, got: $(jq -Mrc '.data.aliases' <<< "$output")" +fi + +if ! jq -Mec --argjson GROUPS "$ENTITY_GROUP_IDS" '.data.group_ids | sort as $have | $GROUPS | sort as $want | $have == $want' <<< "$output"; then + fail "entity group IDs do not match, expected: $ENTITY_GROUP_IDS, got: $(jq -Mrc '.data.group_ids' <<< "$output")" +fi + +if ! jq -Mec --argjson METADATA "$ENTITY_METADATA" '.data.metadata == $METADATA' <<< "$output"; then + fail "entity metadata does not match, expected: $ENTITY_METADATA, got: $(jq -Mrc '.data.metadata' <<< "$output")" +fi + +if ! jq -Mec --argjson POLICIES "$ENTITY_POLICIES" '.data.policies == $POLICIES' <<< "$output"; then + fail "entity policies do not match, expected: $ENTITY_POLICIES, got: $(jq -Mrc '.data.policies' <<< "$output")" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh b/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh new file mode 100644 index 000000000000..3b095570aebb --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$OIDC_ISSUER_URL" ]] && fail "OIDC_ISSUER_URL env variable has not been set" +[[ -z "$OIDC_KEY_NAME" ]] && fail "OIDC_KEY_NAME env variable has not been set" +[[ -z "$OIDC_KEY_ROTATION_PERIOD" ]] && fail "OIDC_KEY_ROTATION_PERIOD env variable has not been set" +[[ -z "$OIDC_KEY_VERIFICATION_TTL" ]] && fail "OIDC_KEY_VERIFICATION_TTL env variable has not been set" +[[ -z "$OIDC_KEY_ALGORITHM" ]] && fail "OIDC_KEY_ALGORITHM env variable has not been set" +[[ -z "$OIDC_ROLE_NAME" ]] && fail "OIDC_ROLE_NAME env variable has not been set" +[[ -z "$OIDC_ROLE_TTL" ]] && fail "OIDC_ROLE_TTL env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json + +# Verify that we have the correct issuer URL +if ! 
cfg=$("$binpath" read identity/oidc/config); then + fail "failed to read identity/oidc/config: $cfg" +elif ! jq -Merc --arg URL "$OIDC_ISSUER_URL" '.data.issuer == $URL' <<< "$cfg"; then + fail "oidc issuer URL is incorrect, expected: $OIDC_ISSUER_URL, got $(jq -Mrc '.data.issuer' <<< "$cfg")" +fi + +# Verify that our token algorithm, rotation period and verification TTL are correct +if ! key_res=$("$binpath" read "identity/oidc/key/$OIDC_KEY_NAME"); then + fail "failed to read identity/oidc/key/$OIDC_KEY_NAME: $key_res" +fi + +if ! jq -Merc --arg ALG "$OIDC_KEY_ALGORITHM" '.data.algorithm == $ALG' <<< "$key_res"; then + fail "oidc token algorithm is incorrect, expected: $OIDC_KEY_ALGORITHM, got $(jq -Mrc '.data.algorithm' <<< "$key_res")" +fi + +if ! jq -Merc --argjson RP "$OIDC_KEY_ROTATION_PERIOD" '.data.rotation_period == $RP' <<< "$key_res"; then + fail "oidc token rotation_period is incorrect, expected: $OIDC_KEY_ROTATION_PERIOD, got $(jq -Mrc '.data.rotation_period' <<< "$key_res")" +fi + +if ! jq -Merc --argjson TTL "$OIDC_KEY_VERIFICATION_TTL" '.data.verification_ttl == $TTL' <<< "$key_res"; then + fail "oidc token verification_ttl is incorrect, expected: $OIDC_KEY_VERIFICATION_TTL, got $(jq -Mrc '.data.verification_ttl' <<< "$key_res")" +fi + +# Verify that our role key and TTL are correct. +if ! role_res=$("$binpath" read "identity/oidc/role/$OIDC_ROLE_NAME"); then + fail "failed to read identity/oidc/role/$OIDC_ROLE_NAME: $role_res" +fi + +if ! jq -Merc --arg KEY "$OIDC_KEY_NAME" '.data.key == $KEY' <<< "$role_res"; then + fail "oidc role key is incorrect, expected: $OIDC_KEY_NAME, got $(jq -Mrc '.data.key' <<< "$role_res")" +fi + +if ! jq -Merc --argjson TTL "$OIDC_ROLE_TTL" '.data.ttl == $TTL' <<< "$role_res"; then + fail "oidc role ttl is incorrect, expected: $OIDC_ROLE_TTL, got $(jq -Mrc '.data.ttl' <<< "$role_res")" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/kv-put.sh b/enos/modules/verify_secrets_engines/scripts/kv-put.sh new file mode 100644 index 000000000000..46e858f6c62d --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/kv-put.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$KEY" ]] && fail "KEY env variable has not been set" +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$SECRET_PATH" ]] && fail "SECRET_PATH env variable has not been set" +[[ -z "$VALUE" ]] && fail "VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json + +"$binpath" kv put -mount="$MOUNT" "$SECRET_PATH" "$KEY=$VALUE" diff --git a/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh b/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh new file mode 100644 index 000000000000..72427d869642 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
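kv-put.sh above and kv-verify-value.sh below form a write/read pair; kv.tf derives the secret path, key, and value from the smoke-/fire- prefixes plus the host key. A hypothetical roundtrip for host key 0 on the kv-v1 mount enabled earlier:

    export VAULT_FORMAT=json
    vault kv put -mount=secret smoke-0 smoke-0=fire-0
    res=$(vault kv get secret/smoke-0)
    jq -Merc --arg KEY "smoke-0" --arg VALUE "fire-0" '.data[$KEY] == $VALUE' <<< "$res"   # => true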
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$SECRET_PATH" ]] && fail "SECRET_PATH env variable has not been set" +[[ -z "$KEY" ]] && fail "KEY env variable has not been set" +[[ -z "$VALUE" ]] && fail "VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if res=$("$binpath" kv get "$MOUNT/$SECRET_PATH"); then + if jq -Merc --arg VALUE "$VALUE" --arg KEY "$KEY" '.data[$KEY] == $VALUE' <<< "$res"; then + printf "kv %s/%s %s=%s is valid\n" "$MOUNT" "$SECRET_PATH" "$KEY" "$VALUE" + exit 0 + fi + fail "kv $MOUNT/$SECRET_PATH $KEY=$VALUE invalid! Got: $(jq -Mrc --arg KEY "$KEY" '.data[$KEY]' <<< "$res")" +else + fail "failed to read kv data for $MOUNT/$SECRET_PATH: $res" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/policy-write.sh b/enos/modules/verify_secrets_engines/scripts/policy-write.sh new file mode 100644 index 000000000000..18e011cc9686 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/policy-write.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$POLICY_NAME" ]] && fail "POLICY_NAME env variable has not been set" +[[ -z "$POLICY_CONFIG" ]] && fail "POLICY_CONFIG env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" policy write "$POLICY_NAME" - <<< "$POLICY_CONFIG" diff --git a/enos/modules/verify_secrets_engines/scripts/read.sh b/enos/modules/verify_secrets_engines/scripts/read.sh new file mode 100644 index 000000000000..b522c6f55f5e --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/read.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$REQPATH" ]] && fail "REQPATH env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" read "$REQPATH" diff --git a/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh b/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh new file mode 100644 index 000000000000..7cc957a290bf --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$ENGINE" ]] && fail "ENGINE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" secrets enable -path="$MOUNT" "$ENGINE" diff --git a/enos/modules/verify_secrets_engines/scripts/write-payload.sh b/enos/modules/verify_secrets_engines/scripts/write-payload.sh new file mode 100644 index 000000000000..922fb2e5f76f --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/write-payload.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$REQPATH" ]] && fail "REQPATH env variable has not been set" +[[ -z "$PAYLOAD" ]] && fail "PAYLOAD env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if output=$("$binpath" write "$REQPATH" - <<< "$PAYLOAD" 2>&1); then + printf "%s\n" "$output" +else + fail "failed to write payload: path=$REQPATH payload=$PAYLOAD out=$output" +fi diff --git a/go.mod b/go.mod index 48ba42a94872..7fdf614a7e33 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,16 @@ module github.com/hashicorp/vault -go 1.19 +// The go version directive value isn't consulted when building our production binaries, +// and the vault module isn't intended to be imported into other projects. As such the +// impact of this setting is usually rather limited. Note however that in some cases the +// Go project introduces new semantics for handling of go.mod depending on the value. +// +// The general policy for updating it is: when the Go major version used on the branch is +// updated. If we choose not to do so at some point (e.g. because we don't want some new +// semantic related to Go module handling), this comment should be updated to explain that. +// +// Whenever this value gets updated, sdk/go.mod should be updated to the same value.
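(Aside: the go.mod comment above requires go.mod and sdk/go.mod to carry the same go directive. A minimal sketch of a consistency check, written in the same bash style as the Enos helper scripts earlier in this diff, could look like the following. It is illustrative only and not part of the change; it assumes each file declares the directive on its own `go <version>` line.)

```
#!/usr/bin/env bash
# Illustrative only, not part of this diff: fail when go.mod and sdk/go.mod
# disagree on their go directive, per the policy comment above.
set -euo pipefail

# Extract the version from the first line of each file that starts with "go ".
root_go=$(awk '/^go /{print $2; exit}' go.mod)
sdk_go=$(awk '/^go /{print $2; exit}' sdk/go.mod)

if [[ "$root_go" != "$sdk_go" ]]; then
  echo "go directive mismatch: go.mod=$root_go sdk/go.mod=$sdk_go" 1>&2
  exit 1
fi
echo "go directives match: $root_go"
```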
+go 1.22.5 replace github.com/hashicorp/vault/api => ./api @@ -12,362 +22,403 @@ replace github.com/hashicorp/vault/api/auth/userpass => ./api/auth/userpass replace github.com/hashicorp/vault/sdk => ./sdk -replace go.etcd.io/etcd/client/pkg/v3 v3.5.0 => go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 - require ( - cloud.google.com/go/monitoring v1.8.0 - cloud.google.com/go/spanner v1.41.0 - cloud.google.com/go/storage v1.27.0 - github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/Azure/go-autorest/autorest v0.11.28 - github.com/Azure/go-autorest/autorest/adal v0.9.20 - github.com/NYTimes/gziphandler v1.1.1 - github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 - github.com/SAP/go-hdb v0.14.1 + cloud.google.com/go/cloudsqlconn v1.4.3 + cloud.google.com/go/monitoring v1.21.0 + cloud.google.com/go/spanner v1.67.0 + cloud.google.com/go/storage v1.43.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-storage-blob-go v0.15.0 + github.com/Azure/go-autorest/autorest v0.11.29 + github.com/Azure/go-autorest/autorest/adal v0.9.23 + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 + github.com/SAP/go-hdb v1.10.1 github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a github.com/aerospike/aerospike-client-go/v5 v5.6.0 - github.com/aliyun/alibaba-cloud-sdk-go v1.62.146 + github.com/aliyun/alibaba-cloud-sdk-go v1.63.12 github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 github.com/armon/go-metrics v0.4.1 github.com/armon/go-radix v1.0.0 - github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef - github.com/aws/aws-sdk-go v1.44.191 - github.com/aws/aws-sdk-go-v2/config v1.6.0 + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 + github.com/aws/aws-sdk-go v1.55.5 + github.com/aws/aws-sdk-go-v2/config v1.27.11 github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a github.com/cenkalti/backoff/v3 v3.2.2 github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 - github.com/client9/misspell v0.3.4 - github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c + github.com/cockroachdb/cockroach-go/v2 v2.3.8 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/denisenkom/go-mssqldb v0.12.2 - github.com/docker/docker v20.10.18+incompatible - github.com/docker/go-connections v0.4.0 + github.com/denisenkom/go-mssqldb v0.12.3 + github.com/docker/docker v26.1.5+incompatible github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 - github.com/dustin/go-humanize v1.0.0 - github.com/fatih/color v1.13.0 + github.com/dustin/go-humanize v1.0.1 + github.com/fatih/color v1.17.0 github.com/fatih/structs v1.1.0 - github.com/favadi/protoc-go-inject-tag v1.3.0 + github.com/gammazero/workerpool v1.1.3 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-errors/errors v1.4.1 - github.com/go-ldap/ldap/v3 v3.4.1 - github.com/go-sql-driver/mysql v1.6.0 - github.com/go-test/deep v1.1.0 + github.com/go-errors/errors v1.5.1 + github.com/go-git/go-git/v5 v5.11.0 + github.com/go-jose/go-jose/v3 v3.0.3 + github.com/go-ldap/ldap/v3 v3.4.8 + github.com/go-sql-driver/mysql v1.7.1 + github.com/go-test/deep v1.1.1 github.com/go-zookeeper/zk v1.0.3 github.com/gocql/gocql v1.0.0 - github.com/golang-jwt/jwt/v4 v4.4.2 - github.com/golang/protobuf v1.5.2 - 
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 - github.com/google/go-cmp v0.5.9 + github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang/protobuf v1.5.4 + github.com/google/go-cmp v0.6.0 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.2.0 - github.com/google/tink/go v1.6.1 - github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 - github.com/hashicorp/consul-template v0.29.5 - github.com/hashicorp/consul/api v1.17.0 + github.com/hashicorp-forge/bbolt v1.3.8-hc3 + github.com/hashicorp/cap v0.7.0 + github.com/hashicorp/cap/ldap v0.0.0-20240403125925-c0418810d10e + github.com/hashicorp/cli v1.1.6 + github.com/hashicorp/consul-template v0.39.1 + github.com/hashicorp/consul/api v1.29.1 github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/eventlogger v0.1.1 + github.com/hashicorp/eventlogger v0.2.10 + github.com/hashicorp/go-bexpr v0.1.12 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 - github.com/hashicorp/go-gcp-common v0.8.0 - github.com/hashicorp/go-hclog v1.4.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 - github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 - github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 + github.com/hashicorp/go-gcp-common v0.9.0 + github.com/hashicorp/go-hclog v1.6.3 + github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 + github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.9 + github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3 + github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9 + github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 - github.com/hashicorp/go-memdb v1.3.3 - github.com/hashicorp/go-msgpack v1.1.5 + github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12 + github.com/hashicorp/go-memdb v1.3.4 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.8 + github.com/hashicorp/go-plugin v1.6.1 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a - github.com/hashicorp/go-retryablehttp v0.7.1 + github.com/hashicorp/go-retryablehttp v0.7.7 github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 + github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0 github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 - github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 + github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 github.com/hashicorp/go-secure-stdlib/password v0.1.1 github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 - github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 - github.com/hashicorp/go-sockaddr v1.0.2 + github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 + 
github.com/hashicorp/go-sockaddr v1.0.6 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.3 - github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/golang-lru v0.5.4 + github.com/hashicorp/go-version v1.7.0 + github.com/hashicorp/golang-lru v1.0.2 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/hcl v1.0.1-vault-5 - github.com/hashicorp/hcp-link v0.1.0 - github.com/hashicorp/hcp-scada-provider v0.2.1 - github.com/hashicorp/hcp-sdk-go v0.23.0 - github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 - github.com/hashicorp/raft v1.3.10 - github.com/hashicorp/raft-autopilot v0.1.6 - github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c + github.com/hashicorp/hcl/v2 v2.16.2 + github.com/hashicorp/hcp-link v0.2.1 + github.com/hashicorp/hcp-scada-provider v0.2.2 + github.com/hashicorp/hcp-sdk-go v0.101.0 + github.com/hashicorp/nomad/api v0.0.0-20240213164230-c364cb57298d + github.com/hashicorp/raft v1.7.1 + github.com/hashicorp/raft-autopilot v0.2.0 + github.com/hashicorp/raft-boltdb/v2 v2.3.0 github.com/hashicorp/raft-snapshot v1.0.4 - github.com/hashicorp/vault-plugin-auth-alicloud v0.14.0 - github.com/hashicorp/vault-plugin-auth-azure v0.13.0 - github.com/hashicorp/vault-plugin-auth-centrify v0.14.0 - github.com/hashicorp/vault-plugin-auth-cf v0.14.0 - github.com/hashicorp/vault-plugin-auth-gcp v0.15.0 - github.com/hashicorp/vault-plugin-auth-jwt v0.15.0 - github.com/hashicorp/vault-plugin-auth-kerberos v0.9.0 - github.com/hashicorp/vault-plugin-auth-kubernetes v0.15.0 - github.com/hashicorp/vault-plugin-auth-oci v0.13.1 - github.com/hashicorp/vault-plugin-database-couchbase v0.9.0 - github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.1 - github.com/hashicorp/vault-plugin-database-mongodbatlas v0.9.0 - github.com/hashicorp/vault-plugin-database-redis v0.2.0 - github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.0 - github.com/hashicorp/vault-plugin-database-snowflake v0.7.0 + github.com/hashicorp/raft-wal v0.4.0 + github.com/hashicorp/vault-hcp-lib v0.0.0-20240704151836-a5c058ac604c + github.com/hashicorp/vault-plugin-auth-alicloud v0.19.0 + github.com/hashicorp/vault-plugin-auth-azure v0.19.0 + github.com/hashicorp/vault-plugin-auth-cf v0.19.0 + github.com/hashicorp/vault-plugin-auth-gcp v0.19.0 + github.com/hashicorp/vault-plugin-auth-jwt v0.22.0 + github.com/hashicorp/vault-plugin-auth-kerberos v0.13.0 + github.com/hashicorp/vault-plugin-auth-kubernetes v0.20.0 + github.com/hashicorp/vault-plugin-auth-oci v0.17.0 + github.com/hashicorp/vault-plugin-database-couchbase v0.12.0 + github.com/hashicorp/vault-plugin-database-elasticsearch v0.16.0 + github.com/hashicorp/vault-plugin-database-mongodbatlas v0.13.0 + github.com/hashicorp/vault-plugin-database-redis v0.4.0 + github.com/hashicorp/vault-plugin-database-redis-elasticache v0.5.0 + github.com/hashicorp/vault-plugin-database-snowflake v0.12.0 github.com/hashicorp/vault-plugin-mock v0.16.1 - github.com/hashicorp/vault-plugin-secrets-ad v0.15.0 - github.com/hashicorp/vault-plugin-secrets-alicloud v0.14.1 - github.com/hashicorp/vault-plugin-secrets-azure v0.15.0 - github.com/hashicorp/vault-plugin-secrets-gcp v0.15.0 - github.com/hashicorp/vault-plugin-secrets-gcpkms v0.14.0 - github.com/hashicorp/vault-plugin-secrets-kubernetes v0.3.0 - github.com/hashicorp/vault-plugin-secrets-kv v0.14.2 - github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.9.1 - github.com/hashicorp/vault-plugin-secrets-openldap v0.10.1 - 
github.com/hashicorp/vault-plugin-secrets-terraform v0.7.0 - github.com/hashicorp/vault-testing-stepwise v0.1.3-0.20230203193428-3a789cb2c68f - github.com/hashicorp/vault/api v1.9.0 + github.com/hashicorp/vault-plugin-secrets-ad v0.19.0 + github.com/hashicorp/vault-plugin-secrets-alicloud v0.18.0 + github.com/hashicorp/vault-plugin-secrets-azure v0.20.0 + github.com/hashicorp/vault-plugin-secrets-gcp v0.20.0 + github.com/hashicorp/vault-plugin-secrets-gcpkms v0.19.0 + github.com/hashicorp/vault-plugin-secrets-kubernetes v0.9.0 + github.com/hashicorp/vault-plugin-secrets-kv v0.20.0 + github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.13.0 + github.com/hashicorp/vault-plugin-secrets-openldap v0.14.0 + github.com/hashicorp/vault-plugin-secrets-terraform v0.10.0 + github.com/hashicorp/vault-testing-stepwise v0.3.1 + github.com/hashicorp/vault/api v1.15.0 github.com/hashicorp/vault/api/auth/approle v0.1.0 github.com/hashicorp/vault/api/auth/userpass v0.1.0 - github.com/hashicorp/vault/sdk v0.8.1 + github.com/hashicorp/vault/sdk v0.14.0 github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab - github.com/jackc/pgx/v4 v4.15.0 - github.com/jcmturner/gokrb5/v8 v8.4.3 + github.com/jackc/pgx/v4 v4.18.3 + github.com/jcmturner/gokrb5/v8 v8.4.4 github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f - github.com/jefferai/jsonx v1.0.0 + github.com/jefferai/jsonx v1.0.1 github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f - github.com/kr/pretty v0.3.0 + github.com/klauspost/compress v1.17.8 + github.com/kr/pretty v0.3.1 github.com/kr/text v0.2.0 github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.17 - github.com/mholt/archiver/v3 v3.5.1 + github.com/mattn/go-isatty v0.0.20 github.com/michaelklishin/rabbit-hole/v2 v2.12.0 github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a - github.com/mitchellh/cli v1.1.2 github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/go-homedir v1.1.0 - github.com/mitchellh/go-testing-interface v1.14.1 - github.com/mitchellh/go-wordwrap v1.0.0 + github.com/mitchellh/go-wordwrap v1.0.1 github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/reflectwalk v1.0.2 - github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc github.com/ncw/swift v1.0.47 github.com/oklog/run v1.1.0 - github.com/okta/okta-sdk-golang/v2 v2.12.1 + github.com/okta/okta-sdk-golang/v2 v2.20.0 github.com/oracle/oci-go-sdk v24.3.0+incompatible github.com/ory/dockertest v3.3.5+incompatible - github.com/ory/dockertest/v3 v3.9.1 + github.com/ory/dockertest/v3 v3.10.0 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/pires/go-proxyproto v0.6.1 + github.com/pires/go-proxyproto v1.0.0 github.com/pkg/errors v0.9.1 github.com/posener/complete v1.2.3 github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d - github.com/prometheus/client_golang v1.11.1 - github.com/prometheus/common v0.26.0 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/common v0.55.0 github.com/rboyer/safeio v0.2.1 - github.com/ryanuber/columnize v2.1.0+incompatible + github.com/robfig/cron/v3 v3.0.1 + github.com/ryanuber/columnize v2.1.2+incompatible github.com/ryanuber/go-glob v1.0.0 github.com/sasha-s/go-deadlock v0.2.0 github.com/sethvargo/go-limiter v0.7.1 github.com/shirou/gopsutil/v3 v3.22.6 - github.com/stretchr/testify v1.8.1 - go.etcd.io/bbolt v1.3.6 - go.etcd.io/etcd/client/pkg/v3 v3.5.0 - go.etcd.io/etcd/client/v2 v2.305.0 - 
go.etcd.io/etcd/client/v3 v3.5.0 - go.mongodb.org/atlas v0.15.0 - go.mongodb.org/mongo-driver v1.7.3 - go.opentelemetry.io/otel v1.11.2 - go.opentelemetry.io/otel/sdk v1.11.2 - go.opentelemetry.io/otel/trace v1.11.2 - go.uber.org/atomic v1.9.0 - go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.6.0 - golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb - golang.org/x/net v0.7.0 - golang.org/x/oauth2 v0.4.0 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.5.0 - golang.org/x/term v0.5.0 - golang.org/x/tools v0.2.0 - google.golang.org/api v0.109.0 - google.golang.org/grpc v1.51.0 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 - google.golang.org/protobuf v1.28.1 + github.com/stretchr/testify v1.9.0 + github.com/tink-crypto/tink-go/v2 v2.2.0 + go.etcd.io/bbolt v1.3.10 + go.etcd.io/etcd/client/pkg/v3 v3.5.13 + go.etcd.io/etcd/client/v2 v2.305.5 + go.etcd.io/etcd/client/v3 v3.5.13 + go.mongodb.org/atlas v0.37.0 + go.mongodb.org/mongo-driver v1.16.1 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 + go.uber.org/atomic v1.11.0 + go.uber.org/goleak v1.3.0 + golang.org/x/crypto v0.27.0 + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 + golang.org/x/net v0.29.0 + golang.org/x/oauth2 v0.23.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.25.0 + golang.org/x/term v0.24.0 + golang.org/x/text v0.18.0 + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d + google.golang.org/api v0.197.0 + google.golang.org/grpc v1.66.1 + google.golang.org/protobuf v1.34.2 gopkg.in/ory-am/dockertest.v3 v3.3.4 - gopkg.in/square/go-jose.v2 v2.6.0 - gotest.tools/gotestsum v1.9.0 - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed - layeh.com/radius v0.0.0-20190322222518-890bc1058917 - mvdan.cc/gofumpt v0.3.1 - nhooyr.io/websocket v1.8.7 + k8s.io/apimachinery v0.31.0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + layeh.com/radius v0.0.0-20231213012653-1006025d24f8 + nhooyr.io/websocket v1.8.11 +) + +require ( + cel.dev/expr v0.15.0 // indirect + cloud.google.com/go/longrunning v0.6.0 // indirect + github.com/containerd/containerd v1.7.20 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/hashicorp/go-secure-stdlib/httputil v0.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect ) require ( - cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.14.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.8.0 // indirect - cloud.google.com/go/kms v1.8.0 // indirect - code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/kms v1.19.0 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v67.2.0+incompatible // indirect - 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.0.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect - github.com/BurntSushi/toml v1.2.0 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/datadog-go v3.2.0+incompatible // indirect - github.com/Jeffail/gabs v1.1.1 // indirect + github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 // indirect + github.com/Jeffail/gabs/v2 v2.1.0 // indirect + github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Microsoft/hcsshim v0.9.0 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 // indirect - github.com/aws/aws-sdk-go-v2 v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.3.2 // indirect - 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 // indirect - github.com/aws/smithy-go v1.7.0 // indirect + github.com/agext/levenshtein v1.2.1 // indirect + github.com/apache/arrow/go/v15 v15.0.0 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect + github.com/aws/smithy-go v1.20.2 // indirect + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/benbjohnson/immutable v0.4.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/boombuler/barcode v1.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect - github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect - github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect - github.com/cloudflare/circl v1.1.0 // indirect - github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 // indirect - github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect - github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect - github.com/containerd/cgroups v1.0.3 // indirect - github.com/containerd/containerd v1.5.17 // indirect - 
github.com/containerd/continuity v0.3.0 // indirect - github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/coreos/go-oidc/v3 v3.1.0 // indirect + github.com/cjlapao/common-go v0.0.39 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudfoundry-community/go-cfclient v0.0.0-20220930021109-9c4e6c59ccf1 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/containerd/continuity v0.4.3 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/coreos/etcd v3.3.27+incompatible // indirect + github.com/coreos/go-oidc/v3 v3.11.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/couchbase/gocb/v2 v2.3.3 // indirect - github.com/couchbase/gocbcore/v10 v10.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect + github.com/couchbase/gocb/v2 v2.9.1 // indirect + github.com/couchbase/gocbcore/v10 v10.5.1 // indirect + github.com/couchbase/gocbcoreps v0.1.3 // indirect + github.com/couchbase/goprotostellar v1.0.2 // indirect + github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20240607131231-fb385523de28 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/digitalocean/godo v1.7.5 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/dnephin/pflag v1.0.7 // indirect - github.com/docker/cli v20.10.18+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v26.1.5+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/gabriel-vasile/mimetype v1.3.1 // indirect - github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 // indirect - github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 // indirect - github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gammazero/deque v0.2.1 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.4 // indirect github.com/go-ldap/ldif 
v0.0.0-20200320164324-fd88d9b715b3 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/analysis v0.20.0 // indirect - github.com/go-openapi/errors v0.20.1 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/loads v0.20.2 // indirect - github.com/go-openapi/runtime v0.19.24 // indirect - github.com/go-openapi/spec v0.20.3 // indirect - github.com/go-openapi/strfmt v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect - github.com/go-openapi/validate v0.20.2 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect - github.com/go-stack/stack v1.8.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gofrs/uuid v4.3.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/flatbuffers v2.0.0+incompatible // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/flatbuffers v23.5.26+incompatible // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/hashicorp/cronexpr v1.1.1 // indirect + github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 // indirect - github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect + 
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect - github.com/hashicorp/go-slug v0.7.0 // indirect - github.com/hashicorp/go-tfe v0.20.0 // indirect - github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d // indirect + github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.0 // indirect + github.com/hashicorp/go-slug v0.15.2 // indirect + github.com/hashicorp/go-tfe v1.64.2 // indirect + github.com/hashicorp/jsonapi v1.3.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 // indirect + github.com/hashicorp/vault/api/auth/kubernetes v0.6.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect - github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.11.0 // indirect + github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.2.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.10.0 // indirect - github.com/jackc/pgx v3.3.0+incompatible // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgtype v1.14.3 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect @@ -376,95 +427,123 @@ require ( github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lib/pq v1.10.6 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/linode/linodego v0.7.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mediocregopher/radix/v4 v4.1.1 // indirect - github.com/miekg/dns v1.1.41 // indirect + github.com/mediocregopher/radix/v4 v4.1.4 // indirect + github.com/microsoft/kiota-abstractions-go v1.6.1 // indirect + github.com/microsoft/kiota-authentication-azure-go v1.1.0 // indirect + github.com/microsoft/kiota-http-go v1.4.4 // indirect + 
github.com/microsoft/kiota-serialization-form-go v1.0.0 // indirect + github.com/microsoft/kiota-serialization-json-go v1.0.8 // indirect + github.com/microsoft/kiota-serialization-multipart-go v1.0.0 // indirect + github.com/microsoft/kiota-serialization-text-go v1.0.0 // indirect + github.com/microsoftgraph/msgraph-sdk-go v1.47.0 // indirect + github.com/microsoftgraph/msgraph-sdk-go-core v1.2.1 // indirect + github.com/miekg/dns v1.1.50 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/pointerstructure v1.2.0 // indirect - github.com/moby/sys/mount v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.5.0 // indirect - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect + github.com/mitchellh/pointerstructure v1.2.1 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.2.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mongodb-forks/digest v1.0.3 // indirect + github.com/mongodb-forks/digest v1.1.0 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect + github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/natefinch/atomic v1.0.1 // indirect github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect - github.com/nwaples/rardecode v1.1.2 // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/runc v1.1.4 // indirect - github.com/openlyinc/pointy v1.1.2 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runc v1.2.0-rc.1 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect + github.com/oracle/oci-go-sdk/v59 v59.0.0 // indirect github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.8 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/segmentio/fasthash v1.0.3 // indirect + github.com/sergi/go-diff v1.1.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus 
v1.9.3 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/snowflakedb/gosnowflake v1.6.3 // indirect + github.com/snowflakedb/gosnowflake v1.11.0 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect - github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect + github.com/sony/gobreaker v0.5.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/std-uritemplate/std-uritemplate/go v0.0.57 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect - github.com/tilinna/clock v1.0.2 // indirect + github.com/tilinna/clock v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect - github.com/ulikunitz/xz v0.5.10 // indirect github.com/vmware/govmomi v0.18.0 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.0.2 // indirect - github.com/xdg-go/stringprep v1.0.2 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.etcd.io/etcd/api/v3 v3.5.0 // indirect + github.com/zclconf/go-cty v1.12.1 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + go.etcd.io/etcd/api/v3 v3.5.13 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.6.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/time v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/resty.v1 v1.12.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.25.3 // indirect - 
k8s.io/apimachinery v0.25.3 // indirect - k8s.io/client-go v0.25.3 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + k8s.io/api v0.31.0 // indirect + k8s.io/client-go v0.31.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) + +replace github.com/ma314smith/signedxml v1.1.1 => github.com/moov-io/signedxml v1.1.1 + +// Support using the forked repository until https://github.com/pires/go-proxyproto/pull/110 merges +// and is released. +replace github.com/pires/go-proxyproto v1.0.0 => github.com/peteski22/go-proxyproto v1.0.0 diff --git a/go.sum b/go.sum index 20f71e0d2845..61d64936cf07 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,12 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -16,94 +17,678 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod 
h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= 
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
+cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
+cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/cloudsqlconn v1.4.3 h1:/WYFbB1NtMtoMxCbqpzzTFPDkxxlLTPme390KEGaEPc=
+cloud.google.com/go/cloudsqlconn v1.4.3/go.mod h1:QL3tuStVOO70txb3rs4G8j5uMfo5ztZii8K3oGD3VYA=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
 cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
 cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
-cloud.google.com/go/kms v1.8.0 h1:VrJLOsMRzW7IqTTYn+OYupqF3iKSE060Nrn+PECrYjg=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8=
+cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
 cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
-cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
-cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU=
+cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI=
+cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
 cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro=
+cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/spanner v1.41.0 h1:NvdTpRwf7DTegbfFdPjAWyD7bOVu0VeMqcvR9aCQCAc=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
 cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.67.0 h1:h8xfobxh5lQu4qJVMPH+wSiyU+ZM6ZTxRNqGeu9iIVA=
+cloud.google.com/go/spanner v1.67.0/go.mod h1:Um+TNmxfcCHqNCKid4rmAMvoe/Iu1vdz6UfxJ9GPxRQ=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
 cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
-code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk=
-code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
+cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs=
+github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4=
+github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0=
+github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
 github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
 github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k=
-github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
 github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v3 v3.0.1 h1:H3g2mkmu105ON0c/Gqx3Bm+bzoIijLom8LmV9Gjn7X0=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.1.0 h1:Vjq3Uy3JAU1DTxbA+uX6BegIhgO2pyFltbfbmDa9KdI=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.1.0/go.mod h1:Q3u+T/qw3Kb1Wf3DFKiFwEZlyaAyPb4yBgWm9wq7yh8=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0 h1:lMW1lD/17LUA5z1XTURo7LcVG2ICBPlyMHjIUrcFZNQ=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.0.0 h1:ZOt3s8LxEoRGgdD/k7Co4wGAWKmO4+jdPRCRBa8Rzc0=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.0.0/go.mod h1:ZJWUTTEMZLTJI4PPI6vuv/OCEs9YjEX9EqjCnLJ8afA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=
+github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 h1:Hp+EScFOu9HeCbeW8WU2yQPJd4gGwhMgKxWe+G6jNzw=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0/go.mod h1:/pz8dyNQe+Ey3yBp/XuYz7oqX8YDNWVpPB0hH3XWfbc=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 h1:z4YeiSXxnUI+PqB46Yj6MZA3nwb1CcJIkEMDrzUd8Cs=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0/go.mod h1:rko9SzMxcMk0NJsNAxALEGaTYyy79bNRwxgJfrH0Spw=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s=
-github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=
-github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
+github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
+github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
-github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
-github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
+github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
-github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
@@ -125,96 +710,83 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
-github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
-github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
-github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 h1:oVLqHXhnYtUwM89y9T1fXGaK9wTkXHgNp8/ZNMQzUxE=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
+github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Jeffail/gabs/v2 v2.1.0 h1:6dV9GGOjoQgzWTQEltZPXlJdFloxvIq7DwqgxMCbq30= +github.com/Jeffail/gabs/v2 v2.1.0/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= 
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.9.0 h1:BBgYMxl5YZDZVIijz02AlDINpYZOzQqRNCl9CZM13vk= -github.com/Microsoft/hcsshim v0.9.0/go.mod h1:VBJWdC71NSWPlEo7lwde1aL21748J8B6Sdgno7NqEGE= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ= +github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 h1:NsReiLpErIPzRrnogAXYwSoU7txA977LjDGrbkewJbg= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= -github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= +github.com/SAP/go-hdb v1.10.1 h1:c9dGT5xHZNDwPL3NQcRpnNISn3MchwYaGoMZpCAllUs= +github.com/SAP/go-hdb v1.10.1/go.mod h1:vxYDca44L2eRudZv5JAI6T+IygOfxb7vOCFh/Kj0pug= github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM= github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod 
h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/aerospike/aerospike-client-go/v5 v5.6.0 h1:tRxcUq0HY8fFPQEzF3EgrknF+w1xFO0YDfUb9Nm8yRI= github.com/aerospike/aerospike-client-go/v5 v5.6.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= -github.com/aliyun/alibaba-cloud-sdk-go v1.62.146 h1:zAH0YjWzonbKHvNkfbxqTmX51uHbkQYu+jJah2IAiCA= -github.com/aliyun/alibaba-cloud-sdk-go v1.62.146/go.mod h1:Api2AkmMgGaSUAhmk76oaFObkoeCPc/bKAqcyplPODs= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/aliyun/alibaba-cloud-sdk-go v1.63.12 h1:O/lpYuNJlb5ed/QIJUDE1yJBh6zPF5ZFToiuGpq91Ds= +github.com/aliyun/alibaba-cloud-sdk-go v1.63.12/go.mod h1:SOSDHfe1kX91v3W5QiBsWSLqeLxImobbMX1mxrFHsVQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod 
h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 h1:ZsPrlYPY/v1PR7pGrmYD/rq5BFiSPalH8i9eEkSfnnI= -github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/arrow/go/v15 v15.0.0 h1:1zZACWf85oEZY5/kd9dsQS7i+2G5zVQcbKTHgslqHNA= +github.com/apache/arrow/go/v15 v15.0.0/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -222,284 +794,193 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= 
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.191 h1:GnbkalCx/AgobaorDMFCa248acmk+91+aHBQOk7ljzU= -github.com/aws/aws-sdk-go v1.44.191/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= -github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= -github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0 h1:Iqp2aHeRF3kaaNuDS82bHBzER285NM6lLPAgsxHCR2A= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2 h1:YcGVEqLQGHDa81776C3daai6ZkkRGf/8RAQ07hV0QcU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2 h1:ewIpdVz12MDinJJB/nu1uUiFIWFnvtd3iV7cEW7lR+M= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= -github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0 h1:cxZbzTYXgiQrZ6u2/RJZAkkgZssqYOdydvJPBgIHlsM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= -github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= 
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= +github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= +github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= +github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= +github.com/aws/aws-sdk-go-v2/service/sso 
v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY= github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/benbjohnson/immutable v0.4.0 h1:CTqXbEerYso8YzVPxmWxh2gnoRQbbB9X1quUC8+vGZA= +github.com/benbjohnson/immutable v0.4.0/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= 
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 h1:jQ93fKqb/wRmK/KiHpa7Tk9rmHeKXhp4j+5Sg/tENiY= -github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166/go.mod h1:c/gmvyN8lq6lYtHvrqqoXrg2xyN65N0mBmbikxFWXNE= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod 
h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/cjlapao/common-go v0.0.39 h1:bAAUrj2B9v0kMzbAOhzjSmiyDy+rd56r2sy7oEiQLlA= +github.com/cjlapao/common-go v0.0.39/go.mod h1:M3dzazLjTjEtZJbbxoA5ZDiGCiHmpwqW9l4UWaddwOA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 h1:k8q2Nsz7kNaUlysVCnWIFLMUSqiKXaGLdIf9P0GsX2Y= -github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20220930021109-9c4e6c59ccf1 h1:ef0OsiQjSQggHrLFAMDRiu6DfkVSElA5jfG1/Nkyu6c= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20220930021109-9c4e6c59ccf1/go.mod h1:sgaEj3tRn0hwe7GPdEUwxrdOqjBzyjyvyOCGf1OQyZY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/cockroach-go/v2 v2.3.8 h1:53yoUo4+EtrC1NrAEgnnad4AS3ntNvGup1PAXZ7UmpE= +github.com/cockroachdb/cockroach-go/v2 v2.3.8/go.mod h1:9uH5jK4yQ3ZQUT9IXe4I2fHzMIF5+JC/oOdzTRgJYJk= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod 
h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.17 h1:NLDEI//zhMZpR3DS/AP0qiN+dzYKNAwJaNXCnCmYcgY= -github.com/containerd/containerd v1.5.17/go.mod h1:7IN9MtIzTZH4WPEmD1gNH8bbTQXVX68yd3ZXxSHYCis= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity 
v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= -github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc 
v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containerd/containerd v1.7.20 h1:Sl6jQYk3TRavaU83h66QMbI2Nqg9Jm6qzwX57Vsn1SQ= +github.com/containerd/containerd v1.7.20/go.mod h1:52GsS5CwquuqPuLncsXwG0t2CiUce+KsNHJZQJvAgR0= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA= +github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= -github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= 
-github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw= -github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= +github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= +github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/couchbase/gocb/v2 v2.3.3 h1:OItaIrFqXR1ba9J77E2YOU+CSF9G9FHYivV26Xgoi98= -github.com/couchbase/gocb/v2 v2.3.3/go.mod h1:h4b3UYDnGI89hMW9VypVjAr+EE0Ki4jjlXJrVdmSZhQ= -github.com/couchbase/gocbcore/v10 v10.0.4 h1:RJ+dSXxMUbrpfgYEEUhMYwPH1S5KvcQYve3D2aKHP28= -github.com/couchbase/gocbcore/v10 v10.0.4/go.mod h1:s6dwBFs4c3+cAzZbo1q0VW+QasudhHJuehE8b8U2YNg= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf h1:GOPo6vn/vTN+3IwZBvXX0y5doJfSC7My0cdzelyOCsQ= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/couchbase/gocb/v2 v2.9.1 h1:yB2ZhRLk782Y9sZlATaUwglZe9+2QpvFmItJXTX4stQ= +github.com/couchbase/gocb/v2 v2.9.1/go.mod h1:TMAeK34yUdcASdV4mGcYuwtkAWckRBYN5uvMCEgPfXo= +github.com/couchbase/gocbcore/v10 v10.5.1 h1:bwlV/zv/fSQLuO14M9k49K7yWgcWfjSgMyfRGhW1AyU= +github.com/couchbase/gocbcore/v10 v10.5.1/go.mod h1:rulbgUK70EuyRUiLQ0LhQAfSI/Rl+jWws8tTbHzvB6M= +github.com/couchbase/gocbcoreps v0.1.3 h1:fILaKGCjxFIeCgAUG8FGmRDSpdrRggohOMKEgO9CUpg= +github.com/couchbase/gocbcoreps v0.1.3/go.mod h1:hBFpDNPnRno6HH5cRXExhqXYRmTsFJlFHQx7vztcXPk= +github.com/couchbase/goprotostellar v1.0.2 h1:yoPbAL9sCtcyZ5e/DcU5PRMOEFaJrF9awXYu3VPfGls= +github.com/couchbase/goprotostellar v1.0.2/go.mod h1:5/yqVnZlW2/NSbAWu1hPJCFBEwjxgpe0PFFOlRixnp4= +github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 
h1:2TXy68EGEzIMHOx9UvczR5ApVecwCfQZ0LjkmwMI6g4= +github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY= +github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20240607131231-fb385523de28 h1:lhGOw8rNG6RAadmmaJAF3PJ7MNt7rFuWG7BHCYMgnGE= +github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20240607131231-fb385523de28/go.mod h1:o7T431UOfFVHDNvMBUmUxpHnhivwv7BziUao/nMl81E= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw= -github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk= +github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= +github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go 
v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= @@ -509,320 +990,215 @@ github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nb github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= -github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9KktZyjev3SIVDU= -github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= -github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events 
v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/cli v26.1.5+incompatible h1:NxXGSdz2N+Ibdaw330TDO3d/6/f7MvHuiMbuFaIQDTk= +github.com/docker/cli v26.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= -github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= -github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 h1:IgJPqnrlY2Mr4pYB6oaMKvFvwJ9H+X6CCY5x1vCTcpc= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 
h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/favadi/protoc-go-inject-tag v1.3.0 h1:JPrmsmc/uBShG85uY5xGZIa5WJ0IaNZn6LZhQR9tIQE= -github.com/favadi/protoc-go-inject-tag v1.3.0/go.mod h1:SSkUBgfqw2IJ2p7NPNKWk0Idwxt/qIt2LQgFPUgRGtc= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/gabriel-vasile/mimetype v1.3.1 h1:qevA6c2MtE1RorlScnixeG0VA1H4xrXyhyX3oWBynNQ= -github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8= -github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= -github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc= -github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 
v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= -github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg= -github.com/go-errors/errors v1.4.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= 
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-ldap/ldap/v3 v3.1.7/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= -github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ= +github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk= github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 h1:sfz1YppV05y4sYaW7kXZtrocU/+vimnIWt4cxAYh7+o= github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3/go.mod h1:ZXFhGda43Z2TVbfGZefXyMJzsDHhCh0go3bZUcwTx7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8= -github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod 
h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod 
h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod 
h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod 
h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gocql/gocql v1.0.0 h1:UnbTERpP72VZ/viKE1Q1gPtmLvyTZTvuAstvSRydw/c= github.com/gocql/gocql v1.0.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod 
h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/freetype 
v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -837,6 +1213,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -855,22 +1232,23 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic v0.5.7-v3refs 
h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -885,9 +1263,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= @@ -896,12 +1274,17 @@ github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:od github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= 
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -909,71 +1292,107 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/tink/go v1.6.1 h1:t7JHqO8Ath2w2ig5vjwQYJzhGEZymedQc90lQXUBa4I= -github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod 
h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod 
h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 h1:Vgv3jG0kicczshK+lOHWJ9OososZjnjSu1YslqofFYY= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= -github.com/hashicorp/consul-template v0.29.5 h1:tzEo93RqODAX2cgOe/ke8xcpdPdxg5rxl6d22wE3f6c= -github.com/hashicorp/consul-template v0.29.5/go.mod 
h1:SZGBPz/t0JaBwMOqM6q/mG66cBRA8IeDUjOwjO0Pa5M= -github.com/hashicorp/consul/api v1.17.0 h1:aqytbw31uCPNn37ST+717IyGod+P1eTgSGu3yjRo4bs= -github.com/hashicorp/consul/api v1.17.0/go.mod h1:ZNwemOPAdgtV4cCx9fqxNmw+PI3vliW6gYin2WD+F2g= +github.com/hashicorp-forge/bbolt v1.3.8-hc3 h1:iTWR3RDPj0TGChAvJ8QjHFcNFWAUVgNQV73IE6gAX4E= +github.com/hashicorp-forge/bbolt v1.3.8-hc3/go.mod h1:sQBu5UIJ+rcUFU4Fo9rpTHNV935jwmGWS3dQ/MV8810= +github.com/hashicorp/cap v0.7.0 h1:atLIEU5lJslYXo1qsv7RtUL1HrJVVxnfkErIT3uxLp0= +github.com/hashicorp/cap v0.7.0/go.mod h1:UynhCoGX3pxL0OfVrfMzPWAyjMYp96bk11BNTf2zt8o= +github.com/hashicorp/cap/ldap v0.0.0-20240403125925-c0418810d10e h1:IakB/NhT0YtMEGqAf2tViMdBABC2cMAZn3O/mVeg2j4= +github.com/hashicorp/cap/ldap v0.0.0-20240403125925-c0418810d10e/go.mod h1:Ofp5fMLl1ImcwjNGu9FtEwNOdxA0LYoWpcWQE2vltuI= +github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= +github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= +github.com/hashicorp/consul-template v0.39.1 h1:MfhPoNENzCVSEXtE7CnIm3JkCzM9K0I7rcJYofm1BYY= +github.com/hashicorp/consul-template v0.39.1/go.mod h1:AKqYCDerwwX2k9w7mTWIEC0bEUemSjyYl6Cy+NlqNYw= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= +github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= -github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= -github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= -github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= +github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/eventlogger v0.1.1 h1:zyCjxsy7KunFsMPZKU5PnwWEakSrp1zjj2vPFmrDaeo= -github.com/hashicorp/eventlogger v0.1.1/go.mod h1://CHt6/j+Q2lc0NlUB5af4aS2M0c0aVBg9/JfcpAyhM= +github.com/hashicorp/eventlogger v0.2.10 h1:Dddth3KVSribGE1rInGToM30tRNblvL0G1OG6N+i2pk= +github.com/hashicorp/eventlogger v0.2.10/go.mod h1:imHMTfJH4qfb8Knh9nZw4iLfL9J1bX6TJKEurSB4t+U= +github.com/hashicorp/go-bexpr v0.1.12 h1:XrdVhmwu+9iYxIUWxsGVG7NQwrhzJZ0vR6nbN5bLgrA= +github.com/hashicorp/go-bexpr v0.1.12/go.mod h1:ACktpcSySkFNpcxWSClFrut7wicd9WzisnvHuw+g9K8= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -981,125 +1400,122 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 h1:eje2KOX8Sf7aYPiAsLnpWdAIrGRMcpFjN/Go/Exb7Zo= github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= -github.com/hashicorp/go-gcp-common v0.8.0 h1:/2vGAbCU1v+BZ3YHXTCzTvxqma9WOJHYtADTfhZixLo= -github.com/hashicorp/go-gcp-common v0.8.0/go.mod h1:Q7zYRy9ue9SuaEN2s9YLIQs4SoKHdoRmKRcImY3SLgs= +github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= +github.com/hashicorp/go-gcp-common v0.9.0 h1:dabqPrA+vlNWcyQV/3yOI6WCmQGFJgwyztDEsqDp+Q0= +github.com/hashicorp/go-gcp-common v0.9.0/go.mod h1:aZnN6BVMqryPo4vIy97ZAYSoREnJWilLMmaOmi5P7vY= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= -github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0QqYSUOIj4HOHCVPEFjLqjxyXV/AbA= -github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1/go.mod h1:b99cDSA+OzcyRoBZroSf174/ss/e6gUuS45wue9ZQfc= -github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI= -github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1/go.mod h1:Sl/ffzV57UAyjtSg1h5Km0rN5+dtzZJm1CUztkoCW2c= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrYyYznfS1+drq4/jFcqHQVNcL7WhUCo= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 
v2.0.7/go.mod h1:j5vefRoguQUG7iM4reS/hKIZssU1lZRqNPM5Wow6UnM= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 h1:X27JWuPW6Gmi2l7NMm0pvnp7z7hhtns2TeIOQU93mqI= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7/go.mod h1:i7Dt9mDsVUQG/I639jtdQerliaO2SvvPnpYPhZ8CGZ4= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 h1:16I8OqBEuxZIowwn3jiLvhlx+z+ia4dJc9stvz0yUBU= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8/go.mod h1:6QUMo5BrXAtbzSuZilqmx0A4px2u6PeFK7vfp2WIzeM= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.1 h1:KIge4FHZEDb2/xjaWgmBheCTgRL6HV4sgTfDsH876L8= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.1/go.mod h1:aHO1EoFD0kBYLBedqxXgalfFT8lrWfP7kpuSoaqGjH0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= +github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.9 h1:HpGOHc0Vd3aacMAEtAUVe38zMcq7BfYQSjrGCmtRNx0= +github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.9/go.mod h1:ygxw8l40DbAQQ587OzoB3bsBWVpB0e/BOWKlXgYkfG8= +github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3 h1:36Pxy8BQd7DAJ2Mk6vuJlIjqQ80e20vlO7a4Ep3RTOg= +github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3/go.mod h1:heY2PS1SGU0cMamgv+zId/sKT+XFHaf61bLOSnP1Gb8= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9 h1:qdxeZvDMRGZ3YSE4Oz0Pp7WUSUn5S6cWZguEOkEVL50= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9/go.mod h1:DcXbvVpgNWbxGmxgmu3QN64bEydMu14Cpe34RRR30HY= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 h1:/7SKkYIhA8cr3l8m1EKT6Q90bPoSVqqVBuQ6HgoMIkw= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11/go.mod h1:LepS5s6ESGE0qQMpYaui5lX+mQYeiYiy06VzwWRioO8= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 h1:PCqWzT/Hii0KL07JsBZ3lJbv/wx02IAHYlhWQq8rxRY= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12/go.mod h1:HSaOaX/lv3ShCdilUYbOTPnSvmoZ9xtQhgw+8hYcZkg= github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM= github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7/go.mod h1:hxNA5oTfAvwPacWVg1axtF/lvTafwlAa6a6K4uzWHhw= -github.com/hashicorp/go-memdb v1.3.3 h1:oGfEWrFuxtIUF3W2q/Jzt6G85TrMk9ey6XfYLvVe1Wo= -github.com/hashicorp/go-memdb v1.3.3/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= +github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12 h1:E8pzzF7i44OZCYDol+U7VbTBmHe65/6dx1nYxS0P1k0= +github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12/go.mod h1:YRqguGarF7kbHeojTPkanH3qvjbEP2pelq5b0ifaQ1M= +github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= +github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod 
h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-msgpack/v2 v2.0.0 h1:c1fiLq1LNghmLOry1ipGhvLDi+/zEoaEP2JrE1oFJ9s= -github.com/hashicorp/go-msgpack/v2 v2.0.0/go.mod h1:JIxYkkFJRDDRSoWQBSh7s9QAVThq+82iWmUpmE4jKak= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= +github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= -github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0 h1:I8bynUKMh9I7JdwtW9voJ0xmHvBpxQtLjrMFDYmhOxY= +github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0/go.mod h1:oKHSQs4ivIfZ3fbXGQOop1XuDfdSb8RIsWTGaAanSfg= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 h1:f2mwVgMJjXuX/+eWD6ZW30+oIRgCofL+XMWknFkB1WM= github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0/go.mod h1:uwcr2oga9pN5+OkHZyTN5MDk3+1YHOuMukhpnPaQAoI= github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 
h1:9um9R8i0+HbRHS9d64kdvWR0/LJvo12sIonvR9zr1+U= github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1/go.mod h1:6RoRTSMDK2H/rKh3P/JIsk1tK8aatKTt3JyvIopi3GQ= +github.com/hashicorp/go-secure-stdlib/httputil v0.1.0 h1:0cT/LmCfurGE6/MOq8ig3meKYS32YDh0sTE9g86ANgg= +github.com/hashicorp/go-secure-stdlib/httputil v0.1.0/go.mod h1:Md+jfeLf7CjGjTmgBWzFyc4vznsIb8yEiX7/CGAJvkI= github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 h1:NS6BHieb/pDfx3M9jDdaPpGyyVp+aD4A3DjX3dgRmzs= github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2/go.mod h1:rf5JPE13wi+NwjgsmGkbg4b2CgHq8v7Htn/F0nDe/hg= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 h1:kH3Rhiht36xhAfhuHyWJDgdXXEx9IIZhDGRk24CDhzg= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.3/go.mod h1:ov1Q0oEDjC3+A4BwsG2YdKltrmEw8sf9Pau4V9JQ4Vo= +github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 h1:iJG9Q3iUme12yH+wzBMGYrw/Am4CfX3sDcA8m5OGfhQ= +github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0/go.mod h1:s28ohJ0kU6tersf0it/WsBCyZSdziPlP+G1FRA3ar28= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.0 h1:7Yran48kl6X7jfUg3sfYDrFot1gD3LvzdC3oPu5l/qo= +github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.0/go.mod h1:9WJFu7L3d+Z4ViZmwUf+6/73/Uy7YMY1NXrB9wdElYE= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1/go.mod h1:Ch/bf00Qnx77MZd49JRgHYqHQjtEmTgGU2faufpVZb0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= -github.com/hashicorp/go-slug v0.7.0 h1:8HIi6oreWPtnhpYd8lIGQBgp4rXzDWQTOhfILZm+nok= -github.com/hashicorp/go-slug v0.7.0/go.mod h1:Ib+IWBYfEfJGI1ZyXMGNbu2BU+aa3Dzu41RKLH301v4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 h1:xbrxd0U9XQW8qL1BAz2XrAjAF/P2vcqUTAues9c24B8= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3/go.mod h1:LWq2Sy8UoKKuK4lFuCNWSjJj57MhNNf2zzBWMtkAIX4= 
+github.com/hashicorp/go-slug v0.15.2 h1:/ioIpE4bWVN/d7pG2qMrax0a7xe9vOA66S+fz7fZmGY= +github.com/hashicorp/go-slug v0.15.2/go.mod h1:THWVTAXwJEinbsp4/bBRcmbaO5EYNLTqxbG4tZ3gCYQ= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= +github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-tfe v0.20.0 h1:XUAhKoCX8ZUQfwBebC8hz7nkSSnqgNkaablIfxnZ0PQ= -github.com/hashicorp/go-tfe v0.20.0/go.mod h1:gyXLXbpBVxA2F/6opah8XBsOkZJxHYQmghl0OWi8keI= +github.com/hashicorp/go-tfe v1.64.2 h1:nbK9p5gA7k8/jbqgNpOzs7lG5cGfOhLVCki4bn7PmdQ= +github.com/hashicorp/go-tfe v1.64.2/go.mod h1:JIgzD8EKkwAqFJdtmo0X2k1NUTrozyniKijL1nVkJgE= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/hcp-link v0.1.0 h1:F6F1cpADc+o5EBI5CbJn5RX4qdFSLpuA4fN69eeE5lQ= -github.com/hashicorp/hcp-link v0.1.0/go.mod h1:BWVDuJDHrKJtWc5qI07bX5xlLjSgWq6kYLQUeG1g5dM= -github.com/hashicorp/hcp-scada-provider v0.2.1 
h1:yr+Uxini7SWTZ2t49d3Xi+6+X/rbsSFx8gq6WVcC91c= -github.com/hashicorp/hcp-scada-provider v0.2.1/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0= -github.com/hashicorp/hcp-sdk-go v0.23.0 h1:3WarkQSK0VzxJaH6psHIGQagag3ujL+NjWagZZHpiZM= -github.com/hashicorp/hcp-sdk-go v0.23.0/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw= -github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0= -github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik= +github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= +github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= +github.com/hashicorp/hcp-link v0.2.1 h1:8w4YVJxRb2C7oXN+hCPSyDbBeo7RQsIYTR6nQXJt6f8= +github.com/hashicorp/hcp-link v0.2.1/go.mod h1:6otT7bD+nBW1cyzgz8Z4BPziZfwxTtAEkYUrF/MOT8o= +github.com/hashicorp/hcp-scada-provider v0.2.2 h1:S4Kz+Vc02XOz/5Sm9Gug6ivfyfgchM6qv48cgz0uRls= +github.com/hashicorp/hcp-scada-provider v0.2.2/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0= +github.com/hashicorp/hcp-sdk-go v0.101.0 h1:jRphqVzYCw3d/M0CyVe5FIMbS/FFv5Dq36mepIkqI7g= +github.com/hashicorp/hcp-sdk-go v0.101.0/go.mod h1:vQ4fzdL1AmhIAbCw+4zmFe5Hbpajj3NvRWkJoVuxmAk= +github.com/hashicorp/jsonapi v1.3.1 h1:GtPvnmcWgYwCuDGvYT5VZBHcUyFdq9lSyCzDjn1DdPo= +github.com/hashicorp/jsonapi v1.3.1/go.mod h1:kWfdn49yCjQvbpnvY1dxxAuAFzISwrrMDQOcu6NsFoM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= @@ -1109,108 +1525,106 @@ github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 h1:kBpVVl1sl3MaSrs97e0+pDQhSrqJv9gVbSUrPpVfl1w= github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo= -github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 h1:fo8EbQ6tc9hYqxik9CAdFMqy48TW8hh2I3znysPqf+0= -github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28/go.mod h1:FslB+3eLbZgkuPWffqO1GeNzBFw1SuVqN2PXsMNe0Fg= +github.com/hashicorp/nomad/api v0.0.0-20240213164230-c364cb57298d h1:nvfutImOr3GgkMSMjfNdTil9e54vtyQxxyHZ+NHII3Y= +github.com/hashicorp/nomad/api v0.0.0-20240213164230-c364cb57298d/go.mod h1:ijDwa6o1uG1jFSq6kERiX2PamKGpZzTmo0XOFNeFZgw= github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= -github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw= -github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= -github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= -github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY= +github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM= 
+github.com/hashicorp/raft-autopilot v0.2.0 h1:2/R2RPgamgRKgNWGQioULZvjeKXQZmDuw5Ty+6c+H7Y= +github.com/hashicorp/raft-autopilot v0.2.0/go.mod h1:q6tZ8UAZ5xio2gv2JvjgmtOlh80M6ic8xQYBe2Egkg8= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4= -github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo= +github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= +github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= +github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= +github.com/hashicorp/raft-boltdb/v2 v2.3.0/go.mod h1:YHukhB04ChJsLHLJEUD6vjFyLX2L3dsX3wPBZcX4tmc= github.com/hashicorp/raft-snapshot v1.0.4 h1:EuDuayAJPdiDmVk1ygTDnG2zDzrs0/6/yBuma1IYSow= github.com/hashicorp/raft-snapshot v1.0.4/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= +github.com/hashicorp/raft-wal v0.4.0 h1:oHCQLPa3gBTrfuBVHaDg2b/TVXpU0RIyeH/mU9ovk3Y= +github.com/hashicorp/raft-wal v0.4.0/go.mod h1:A6vP5o8hGOs1LHfC1Okh9xPwWDcmb6Vvuz/QyqUXlOE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hashicorp/vault-plugin-auth-alicloud v0.14.0 h1:O6tNk0s/arubLUbLeCyaRs5xGo9VwmbQazISY/BfPK4= -github.com/hashicorp/vault-plugin-auth-alicloud v0.14.0/go.mod h1:We3fJplmALwK1VpjwrLuXr/4QCQHYMdnXLHmLUU6Ntg= -github.com/hashicorp/vault-plugin-auth-azure v0.13.0 h1:1j8fPQPumYg9oZG+MCFftglu/Edd6YGOuBvEWEkK0qQ= -github.com/hashicorp/vault-plugin-auth-azure v0.13.0/go.mod h1:Kg7oDhyyROtBEe8NNLTvpfDSnaxqgEyUvKqlNor/4I4= -github.com/hashicorp/vault-plugin-auth-centrify v0.14.0 h1:qZKnqU1tX1WS6+11+PskGMhlXl5LnfkVrDvJO4BRY7s= -github.com/hashicorp/vault-plugin-auth-centrify v0.14.0/go.mod h1:3fDbIVdwA/hkOVhwktKHDX5lo4DqIUUVbBdwQNNvxHw= -github.com/hashicorp/vault-plugin-auth-cf v0.14.0 h1:n/ojZukcH8YAOy/7JXITJn21byr1yxhujlR3DKlR3FY= -github.com/hashicorp/vault-plugin-auth-cf v0.14.0/go.mod h1:BdvPbWtUuBhTW1HrYXj2OGoeAIzWENYsKF378RoKmw4= -github.com/hashicorp/vault-plugin-auth-gcp v0.15.0 h1:EmfbQkYufMSFcbnOyn0f7bv2QYyyQyMx/D+qO04jfr0= -github.com/hashicorp/vault-plugin-auth-gcp v0.15.0/go.mod h1:GvtgteMxgza9I/QXNKFOAW6/FX0FmsAOzE0nz5126H4= -github.com/hashicorp/vault-plugin-auth-jwt v0.15.0 h1:GGS/64MmoobWZFA07nYEPan9NLw2NhqRrmVLra7JHNM= -github.com/hashicorp/vault-plugin-auth-jwt v0.15.0/go.mod h1:c6UQCaBpR11jB52xzcIjiV/9RY+v+bZw1TY78ylf5ds= -github.com/hashicorp/vault-plugin-auth-kerberos v0.9.0 h1:gdbrEwpPICDt8xQ7C595M+DXaojHvkA9/AhCKbvE+jY= -github.com/hashicorp/vault-plugin-auth-kerberos v0.9.0/go.mod h1:dyGS9eHADGMJC42tTr+XliO2Ntssv4bUOK1Je9IEMMo= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.15.0 h1:uHsn1fJqxGxbWiiD2resMYZzPJWPwPMCGNCEziGHfwE= -github.com/hashicorp/vault-plugin-auth-kubernetes v0.15.0/go.mod h1:f9r9pDAyVLgVTzJmvCz2m0OSYjcdJivnLv+5YWVv3F8= -github.com/hashicorp/vault-plugin-auth-oci v0.13.1 h1:xThaZC9jzZoqqccfxTk11hfwgqwN3yEZ3kYOnY2v2Fs= -github.com/hashicorp/vault-plugin-auth-oci v0.13.1/go.mod h1:O426Kf4nUXfwq+o0HqQuqpZygm6SiOY6eEXyjrZweYA= -github.com/hashicorp/vault-plugin-database-couchbase v0.9.0 h1:hJOHJ9yZ9kt1/DuRaU5Sa339j3/QcPL4esT9JLQonYA= 
-github.com/hashicorp/vault-plugin-database-couchbase v0.9.0/go.mod h1:skmG6MgIG6fjIOlOEgVKOcNlr1PcgHPUb9q1YQ5+Q9k= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.1 h1:nVO6F8V69E2fAQklh/Ds+EypVMutN4iIlt3sat9qW9M= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.1/go.mod h1:wO8EPQs5bsBERD6MSQ+7Az+YJ4TFclCNxBo3r3VKeao= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.9.0 h1:wlWrg1z5Pyx+FTUCOzA9yh0FTI+pfA9tMrsFPFBcjjA= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.9.0/go.mod h1:4Ew6RNnA1NXtpLV0ijkwpE6pJE46G+suDKnTVMm+kXA= -github.com/hashicorp/vault-plugin-database-redis v0.2.0 h1:Fg1inevnDhj58+/y5SY1CihLftytG1D+3QqbUJbHYUM= -github.com/hashicorp/vault-plugin-database-redis v0.2.0/go.mod h1:hPj1vvjzsJ+g9PChP7iKqEJX7ttr03oz/RDEYsq8zZY= -github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.0 h1:dgTT7E8xj56hjktMxHNAgFpy7pchpoQ20cIhDsBcgz8= -github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.0/go.mod h1:h7H9VAI3xdoJ3VQ+wCyFZ5AOyMIQDS7ZhdjN8LGX3OU= -github.com/hashicorp/vault-plugin-database-snowflake v0.7.0 h1:Od5M2ddxRiHjDkHFto+aInru44/6Dy4jjrxyoKh3AW4= -github.com/hashicorp/vault-plugin-database-snowflake v0.7.0/go.mod h1:QJ8IL/Qlu4Me1KkL0OpaWO7aMFL0TNoSEKVB5F+lCiM= +github.com/hashicorp/vault-hcp-lib v0.0.0-20240704151836-a5c058ac604c h1:LCwgi0iiq6pPIRWG80MWwZfPxO2xoHPYwShWfnhAhNI= +github.com/hashicorp/vault-hcp-lib v0.0.0-20240704151836-a5c058ac604c/go.mod h1:Nb41BTPvmFbKB73D/+XpxIw6Nf2Rt+AOUvLzlDxwAGQ= +github.com/hashicorp/vault-plugin-auth-alicloud v0.19.0 h1:LgNFlAgUsOjt8THbhcnWDyfdiSwPIajfay6ltdg3d6I= +github.com/hashicorp/vault-plugin-auth-alicloud v0.19.0/go.mod h1:hkcOv6HSKRMWwZA/YZ6OgStW6iQXCv90KfSTJYbt5vc= +github.com/hashicorp/vault-plugin-auth-azure v0.19.0 h1:TdInvToRpn0tCl/+R4qqAAovMobq4YSuCezVvamWlPQ= +github.com/hashicorp/vault-plugin-auth-azure v0.19.0/go.mod h1:elSxwfldjnRJQsJIAfD305g7gvUnFDykGvuY5phNNgw= +github.com/hashicorp/vault-plugin-auth-cf v0.19.0 h1:/I084ZCypbhTO5ZiYjxhjzokuDqOWWLLxHatyViU9ss= +github.com/hashicorp/vault-plugin-auth-cf v0.19.0/go.mod h1:LiH/IttNxAgto2ooR9l2g6+CiXc5c/1uPE0pT0hILRg= +github.com/hashicorp/vault-plugin-auth-gcp v0.19.0 h1:mMTnAGDi6GigGmP9DlLjDzp5VRF8/sZzw8hlfOLFbbU= +github.com/hashicorp/vault-plugin-auth-gcp v0.19.0/go.mod h1:+0+ufeudu8nKVS448iHnzKp5SgLtMcs2U9E0nOUoL/Q= +github.com/hashicorp/vault-plugin-auth-jwt v0.22.0 h1:ihjx6HszRSt8Vfknc5t0AKXBQqFhqTQ4Wdd/PK+EboU= +github.com/hashicorp/vault-plugin-auth-jwt v0.22.0/go.mod h1:+Ne5sCgAza7aDIzxE4aruv6PeQI9ORWIvg/dFe2jlJU= +github.com/hashicorp/vault-plugin-auth-kerberos v0.13.0 h1:KN+nY7XJANb7IRILf0EnaCT04JI9ctiUhq/W9sgyJnk= +github.com/hashicorp/vault-plugin-auth-kerberos v0.13.0/go.mod h1:tJ4upLp8+7xxNHBFwpXQaQQQjkwSwI4P/Hic9EKkWvQ= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.20.0 h1:C+3gSOE+M1JT/6K/CNbQXCAUAA2wFQ7IFpD9Y9IPXxU= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.20.0/go.mod h1:5//ywSXEdrni78mPcOAE+BnF8XUnbvSMCIwMvmHSbzI= +github.com/hashicorp/vault-plugin-auth-oci v0.17.0 h1:t2PNAWSZNNm7sf+l2vK4wod0fQ4DckVZuCb3N9Dgark= +github.com/hashicorp/vault-plugin-auth-oci v0.17.0/go.mod h1:vCuIzHclwIyL+Vk6OfIoqjOomuvPmwRUbnnxlxHCyqM= +github.com/hashicorp/vault-plugin-database-couchbase v0.12.0 h1:WBPE3nsbM03HxmBBEa2dca2XR/6wZqm1eKZDo3NFxOg= +github.com/hashicorp/vault-plugin-database-couchbase v0.12.0/go.mod h1:Pp6qHuSRxSivKMNVGzxRhIjkVsdJ4LSj8B4n8uoD4XU= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.16.0 h1:gArlFh8W0SVQA3OmYZ/IwD6H4RxLphAtKB71jOEKawE= 
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.16.0/go.mod h1:IqiX9rT/8wCsriRogV4kOCIgAV21yEgjK1D+Oucda3Y= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.13.0 h1:QfqA3GYVtuVOBe02snoEoU1BS6u3hUkzObd27lVb6FQ= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.13.0/go.mod h1:4JKUfOniWUxWjnrsBKyPpy3u7dCxkncYAH6VM0BCPhg= +github.com/hashicorp/vault-plugin-database-redis v0.4.0 h1:caNySLrAoKnwYun2kLlwntMKDp+T4yFl/ToCI+ebu1A= +github.com/hashicorp/vault-plugin-database-redis v0.4.0/go.mod h1:OAKaJH4fj4t0MXPBU8FTb7Ca+DzyQD8wjvq4Dq7I8pU= +github.com/hashicorp/vault-plugin-database-redis-elasticache v0.5.0 h1:XCbJLn02bRf9+eyjoMVaEx5TPoIJD9YV7u+shjr4paU= +github.com/hashicorp/vault-plugin-database-redis-elasticache v0.5.0/go.mod h1:o1ac/VruWlSmjIaTx4GVCOiXw6+aUuXxHs3btG0XYjU= +github.com/hashicorp/vault-plugin-database-snowflake v0.12.0 h1:rykZv8cV7W6iSeR9vEAFB3FivLz/tTuO8s8mNd9Xrbw= +github.com/hashicorp/vault-plugin-database-snowflake v0.12.0/go.mod h1:grT3WmPmEiRY6zjEkJJ781jWq7h9Yg68jlU8G/BXuBU= github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0= github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM= -github.com/hashicorp/vault-plugin-secrets-ad v0.15.0 h1:4y/CtX4977uJXPWh5d70Raw5Mo+kCGDo9de2A6cOFso= -github.com/hashicorp/vault-plugin-secrets-ad v0.15.0/go.mod h1:+HVm4DDDc66fzFvL9FrgM/6ByVWR8eK3OA1050EjmOw= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.14.1 h1:kFcdTltTe5HP0ILuB+YNw++Iy/PZMLv/i2FsmdXfGfM= -github.com/hashicorp/vault-plugin-secrets-alicloud v0.14.1/go.mod h1:sSjBgGh3o9cvMvpNC5K0DL+CndPL4fbsseR/pLiMlb8= -github.com/hashicorp/vault-plugin-secrets-azure v0.15.0 h1:R/3KLTOwvPIZenMrmeSIBWymKq5nYgA/bucXzBPyb3Q= -github.com/hashicorp/vault-plugin-secrets-azure v0.15.0/go.mod h1:frXRdkP8NFYLRIPLQsfIBKMaDrCmHJjv65N9QqAkN1w= -github.com/hashicorp/vault-plugin-secrets-gcp v0.15.0 h1:SfYIFmgFg/8p4fgLCV8YxxkI+iQN0c4gSjMJhg9vFJw= -github.com/hashicorp/vault-plugin-secrets-gcp v0.15.0/go.mod h1:/eOk7gJ5zvmOKgP5Ih7/5rZm5jOKDvGFpANIRqbr/Mc= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.14.0 h1:eUC5ltK+1bkc+SVMzAUq4tBeNrsDXyCuITH8jeajXcM= -github.com/hashicorp/vault-plugin-secrets-gcpkms v0.14.0/go.mod h1:86YCY86XuiQesV1jfjnV4icgoaxQdoUHONzDru+XQHA= -github.com/hashicorp/vault-plugin-secrets-kubernetes v0.3.0 h1:Joz9SBwjpEOGu+Ynv60JC3fAA4UuLJzu7NcrKm6wMMs= -github.com/hashicorp/vault-plugin-secrets-kubernetes v0.3.0/go.mod h1:NJeYBRgLVqjvkrVyZEe42oaqP3+xvVNMYdJoMWVoByU= -github.com/hashicorp/vault-plugin-secrets-kv v0.14.2 h1:13p50RIltQM/JH32uWZe9sAp16Uaj0zCLmVGPvS09qo= -github.com/hashicorp/vault-plugin-secrets-kv v0.14.2/go.mod h1:cAxt2o3BjRT5CbNLtgXuxTReaejvrgN/qk+no+DnwJ8= -github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.9.1 h1:WkW8fyHxEdz1wGSTxCnSCrzXvgLXqXr8Iqp7upa/s4E= -github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.9.1/go.mod h1:p96IECNtVwpvTq8RAw3dLlAYRWpG1n06XOoo0TkJnuk= -github.com/hashicorp/vault-plugin-secrets-openldap v0.10.1 h1:EN3/iEjPPmcpX9yihybQNHvewc+YoJw7aoKsio1WK5s= -github.com/hashicorp/vault-plugin-secrets-openldap v0.10.1/go.mod h1:sYuxnuNY2O59fy+LACtvgrqUO/r0cnhAYTMqLajD9FE= -github.com/hashicorp/vault-plugin-secrets-terraform v0.7.0 h1:jgJpVKhV0Eh6EjpUEIf7VYH2D6D0xW2Lry9/3PI8hy0= -github.com/hashicorp/vault-plugin-secrets-terraform v0.7.0/go.mod h1:GzYAJYytgbNNyT3S7rspz1cLE53E1oajFbEtaDUlVGU= -github.com/hashicorp/vault-testing-stepwise v0.1.1/go.mod h1:3vUYn6D0ZadvstNO3YQQlIcp7u1a19MdoOC0NQ0yaOE= 
-github.com/hashicorp/vault-testing-stepwise v0.1.3-0.20230203193428-3a789cb2c68f h1:7ASzAq/gPihP/HAd8RmS9c7LiJtDMnjkPbfWKhm/XiI= -github.com/hashicorp/vault-testing-stepwise v0.1.3-0.20230203193428-3a789cb2c68f/go.mod h1:8zCPiYcaasC/X/OR+NjbvaO48k1enp+WfhiJLJ/rkJ8= +github.com/hashicorp/vault-plugin-secrets-ad v0.19.0 h1:jt5flxYYEaqXasCNzE8MUsA1qWe2FjOWhS1viRpqsbE= +github.com/hashicorp/vault-plugin-secrets-ad v0.19.0/go.mod h1:FlrqHh3gDEOx81OEMFRPGgle+IlnKJUs+3HPYL8bawc= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.18.0 h1:LCaOtwItk9x4lYVKjNyj1+AG+8423O9jSYcw7NBeick= +github.com/hashicorp/vault-plugin-secrets-alicloud v0.18.0/go.mod h1:We2m27w9q7uQgF1UULA3TtcUH6LnA4ItuiujhvvAGOU= +github.com/hashicorp/vault-plugin-secrets-azure v0.20.0 h1:rWsyvZQzF2G1Wkvp624yNIoZHeB7gQ4/Nxk9WuA9HtA= +github.com/hashicorp/vault-plugin-secrets-azure v0.20.0/go.mod h1:PW7g5lgIcwudoZAthoc3xNqiumHHI1gvNw9en/iI3TQ= +github.com/hashicorp/vault-plugin-secrets-gcp v0.20.0 h1:yTRId8Y8rpf6LBUcnAEMQZfMBApiKFxPh7669RcE2zg= +github.com/hashicorp/vault-plugin-secrets-gcp v0.20.0/go.mod h1:FiAMuQ67Wyy2qvXZyezcMFo0ZCh/Prk5FtBABdc1cPc= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.19.0 h1:XMVCbZtI5UwJ19KoYZpg4Q6byVccRvUzm/I4SGaFJ4o= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.19.0/go.mod h1:3OEx2UIpLZ0f4biNj60hRZTULuTzJV43Tn6+jKj9xdY= +github.com/hashicorp/vault-plugin-secrets-kubernetes v0.9.0 h1:HEgEjYzG/DYBbCOrm3Pr43XPNwZWMool1EzcRFw3lgg= +github.com/hashicorp/vault-plugin-secrets-kubernetes v0.9.0/go.mod h1:I/CF2GdsKiZ3ZgPrNVF+bs3XD7pUxp24iSKTVV4pHeE= +github.com/hashicorp/vault-plugin-secrets-kv v0.20.0 h1:p1RVmd4x1rgGK0tN8DDu21J21bR3O93qBFXLGEdJSEo= +github.com/hashicorp/vault-plugin-secrets-kv v0.20.0/go.mod h1:bCpMggD3Z0+H+3dOmTCoQjBHC53jA08lPqOLmFrHBi8= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.13.0 h1:BeDS7luTeOW0braIbtuyairFF8SEz7k3nvi9e+mJ2Ok= +github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.13.0/go.mod h1:sprde+S70PBIbgOLUAKDxR+xNF714ksBBVh77O3hnWc= +github.com/hashicorp/vault-plugin-secrets-openldap v0.14.0 h1:hhuh8FwP2jJ6dlOKOO/wDmwt1eEmhy0Hw0OjdkioP5c= +github.com/hashicorp/vault-plugin-secrets-openldap v0.14.0/go.mod h1:wqOf/QJqrrNXjnm0eLUnm5Ju9s/LIZUl6wEKmnFL9Uo= +github.com/hashicorp/vault-plugin-secrets-terraform v0.10.0 h1:YzOJrpuDRNrw5SQ4i7IEjedF40I/7ejupQy+gAyQ6Zg= +github.com/hashicorp/vault-plugin-secrets-terraform v0.10.0/go.mod h1:j2nbB//xAQMD+5JivVDalwDEyzJY3AWzKIkw6k65xJQ= +github.com/hashicorp/vault-testing-stepwise v0.3.1 h1:SqItnMWOOknQfJJR49Fps34ZfBMWSqBFFTx6NoTHzNw= +github.com/hashicorp/vault-testing-stepwise v0.3.1/go.mod h1:BK7TOCyZ7idR7txAlPGEu+9ETJzlQsYQNdabSv3lyYY= github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 h1:Y/+BtwxmRak3Us9jrByARvYW6uNeqZlEpMylIdXVIjY= github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77/go.mod h1:a2crHoMWwY6aiL8GWT8hYj7vKD64uX0EdRPbnsHF5wU= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= -github.com/hashicorp/yamux 
v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= @@ -1218,8 +1632,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod 
h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.11.0 h1:HiHArx4yFbwl91X3qqIHtUFoiIfLNJXCQRsnzkiwwaQ= -github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -1235,30 +1649,36 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= -github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.10.0 h1:ILnBWrRMSXGczYvmkYD6PsYyVFUNLTnIUJHHDLmqk38= -github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.3 h1:h6W9cPuHsRWQFTWUZMAKMgG5jSwQI0Zurzdvlx3Plus= +github.com/jackc/pgtype v1.14.3/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 
v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w= -github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= -github.com/jarcoal/httpmock v1.0.7 h1:d1a2VFpSdm5gtjhCPWsQHSnx8+5V3ms5431YwvmkuNk= +github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc= +github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -1267,42 +1687,40 @@ github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVET github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= -github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4= github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00= -github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI= -github.com/jefferai/jsonx v1.0.0/go.mod 
h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ= +github.com/jefferai/jsonx v1.0.1 h1:GvWkLWihoLqDG0BSP45TUQJH9qsINX50PVrFULgpc/I= +github.com/jefferai/jsonx v1.0.1/go.mod h1:yFo3l2fcm7cZVHGq3HKLXE+Pd4RWuRjNBDHksM7XekQ= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jimlambrt/gldap v0.1.13 h1:jxmVQn0lfmFbM9jglueoau5LLF/IGRti0SKf0vB753M= +github.com/jimlambrt/gldap v0.1.13/go.mod h1:nlC30c7xVphjImg6etk7vg7ZewHCCvl1dfAhO3ZJzPg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4= +github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg= +github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY= +github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d/go.mod h1:b+Q3v8Yrg5o15d71PSUraUzYb+jWl6wQMSBXSGS/hv0= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -1311,75 +1729,58 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= 
+github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod 
h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1393,7 +1794,6 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -1402,35 +1802,45 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 
h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/mediocregopher/radix/v4 v4.1.1 h1:JkZBEp0y8pWGNZkmO3RR5oEO5huwd4zKKt4rh1C+P8s= -github.com/mediocregopher/radix/v4 v4.1.1/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= -github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo= -github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= +github.com/mediocregopher/radix/v4 v4.1.4 h1:Uze6DEbEAvL+VHXUEu/EDBTkUk5CLct5h3nVSGpc6Ts= +github.com/mediocregopher/radix/v4 v4.1.4/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk= github.com/michaelklishin/rabbit-hole/v2 v2.12.0/go.mod h1:AN/3zyz7d++OHf+4WUo/LR0+Q5nlPHMaXasIsG/mPY0= +github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk= +github.com/microsoft/go-mssqldb v1.5.0/go.mod h1:lmWsjHD8XX/Txr0f8ZqgbEZSC+BZjmEQy/Ms+rLrvho= +github.com/microsoft/kiota-abstractions-go v1.6.1 h1:NXK50S3BwJn9Wj6bO0YFuAig7y2WVgdQ/ie1ktMl2J4= +github.com/microsoft/kiota-abstractions-go v1.6.1/go.mod h1:FI1I2OHg0E7bK5t8DPnw+9C/CHVyLP6XeqDBT+95pTE= +github.com/microsoft/kiota-authentication-azure-go v1.1.0 h1:HudH57Enel9zFQ4TEaJw6lMiyZ5RbBdrRHwdU0NP2RY= +github.com/microsoft/kiota-authentication-azure-go v1.1.0/go.mod h1:zfPFOiLdEqM77Hua5B/2vpcXrVaGqSWjHSRzlvAWEgc= +github.com/microsoft/kiota-http-go v1.4.4 h1:HM0KT/Q7o+JsGatFkkbTIqJL24Jzo5eMI5NNe9N4TQ4= +github.com/microsoft/kiota-http-go v1.4.4/go.mod h1:Kup5nMDD3a9sjdgRKHCqZWqtrv3FbprjcPaGjLR6FzM= +github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI= +github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA= +github.com/microsoft/kiota-serialization-json-go v1.0.8 h1:+aViv9k6wqaw1Fx6P49fl5GIB1hN3b6CG0McNTcUYBc= +github.com/microsoft/kiota-serialization-json-go v1.0.8/go.mod h1:O8+v11U0EUwHlCz7hrW38KxDmdhKAHfv4Q89uvsBalY= +github.com/microsoft/kiota-serialization-multipart-go v1.0.0 h1:3O5sb5Zj+moLBiJympbXNaeV07K0d46IfuEd5v9+pBs= +github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod 
h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so= +github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA= +github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M= +github.com/microsoftgraph/msgraph-sdk-go v1.47.0 h1:qXfmDij9md6mPsSAJjiDNmS4hxqKo0R489GiVMZVmmY= +github.com/microsoftgraph/msgraph-sdk-go v1.47.0/go.mod h1:Gnws5D7d/930uS9J4qlCm4BAR/zenqECMk9tgMDXeZQ= +github.com/microsoftgraph/msgraph-sdk-go-core v1.2.1 h1:P1wpmn3xxfPMFJHg+PJPcusErfRkl63h6OdAnpDbkS8= +github.com/microsoftgraph/msgraph-sdk-go-core v1.2.1/go.mod h1:vFmWQGWyLlhxCESNLv61vlE4qesBU+eWmEVH7DJSESA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a h1:eU8j/ClY2Ty3qdHnn0TyW3ivFoPC/0F1gQZz8yTxbbE= github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a/go.mod h1:v8eSC2SMp9/7FTKUncp7fH9IwPfw+ysMObcEz5FWheQ= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -1438,41 +1848,33 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= 
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= -github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= -github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod 
h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.2.0 h1:OnpapJsRp25vkhw8TFG6OLJODNh/3rEwRWtJ3kakwRM= +github.com/moby/sys/user v0.2.0/go.mod h1:RYstrcWOJpVh+6qzUqp2bU3eaRpdiQeKGlKitaH0PM8= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1481,107 +1883,72 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/mongodb-forks/digest v1.0.3 h1:ZUK1vyZnBiRMvET0O1SzmnBmv935CkcOTjhfR4zIQ2s= -github.com/mongodb-forks/digest v1.0.3/go.mod h1:eHRfgovT+dvSFfltrOa27hy1oR/rcwyDdp5H1ZQxEMA= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/mongodb-forks/digest v1.1.0 h1:7eUdsR1BtqLv0mdNm4OXs6ddWvR4X2/OsLwdKksrOoc= +github.com/mongodb-forks/digest v1.1.0/go.mod h1:rb+EX8zotClD5Dj4NdgxnJXG9nwrlx3NWKJ8xttz1Dg= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc h1:7xGrl4tTpBQu5Zjll08WupHyq+Sp0Z/adtyf1cfk3Q8= -github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= 
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/nwaples/rardecode v1.1.2 h1:Cj0yZY6T1Zx1R7AhTbyGSALm44/Mmq+BAPc4B/p/d3M= -github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/okta/okta-sdk-golang/v2 v2.12.1 h1:U+smE7trkHSZO8Mval3Ow85dbxawO+pMAr692VZq9gM= -github.com/okta/okta-sdk-golang/v2 v2.12.1/go.mod h1:KRoAArk1H216oiRnQT77UN6JAhBOnOWkK27yA1SM7FQ= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/okta/okta-sdk-golang/v2 v2.20.0 h1:EDKM+uOPfihOMNwgHMdno+NAsIfyXkVnoFAYVPay0YU= +github.com/okta/okta-sdk-golang/v2 v2.20.0/go.mod h1:FMy5hN5G8Rd/VoS0XrfyPPhIfOVo78ZK7lvwiQRS2+U= github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 
v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= -github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY= -github.com/openlyinc/pointy v1.1.2/go.mod h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.2.0-rc.1 h1:SMjop2pxxYRTfKdsigna/8xRoaoCfIQfD2cVuOb64/o= +github.com/opencontainers/runc v1.2.0-rc.1/go.mod h1:m9JwxfHzXz5YTTXBQr7EY9KTuazFAGPyMQx2nRR3vTw= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/oracle/oci-go-sdk v24.3.0+incompatible h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU= github.com/oracle/oci-go-sdk v24.3.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/oracle/oci-go-sdk/v59 v59.0.0 h1:+zTvWfj9ZK0OwLRyXjUkZ8dPN3WvkQSRd3iooaOxNVs= +github.com/oracle/oci-go-sdk/v59 v59.0.0/go.mod h1:PWyWRn+xkQxwwmLq/oO03X3tN1tk2vEIE2tFaJmldHM= github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM= github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/ory/dockertest/v3 v3.8.0/go.mod h1:9zPATATlWQru+ynXP+DytBQrsXV7Tmlx7K86H6fQaDo= -github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= -github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= @@ -1591,33 +1958,34 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= 
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/peteski22/go-proxyproto v1.0.0 h1:838NKdKEeViAMkaz08Pe+lvvAnGLYhZ0M0z246iCYv0= +github.com/peteski22/go-proxyproto v1.0.0/go.mod h1:iknsfgnH8EkjrMeMyvfKByp9TiBZCKZM0jx2xmKqnVY= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= -github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw= -github.com/pires/go-proxyproto v0.6.1/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= -github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1626,152 +1994,138 @@ github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXq github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= -github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rboyer/safeio v0.2.1 
h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.2+incompatible h1:C89EOx/XBWwIXl8wm8OPJBd7kPF25UfsK2X7Ph/zCAk= +github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= 
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-limiter v0.7.1 h1:wWNhTj0pxjyJ7wuJHpRJpYwJn+bUnjYfw2a85eu5w9U= github.com/sethvargo/go-limiter v0.7.1/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil/v3 v3.22.6 h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ= github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= +github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= +github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.6.3 h1:EJDdDi74YbYt1ty164ge3fMZ0eVZ6KA7b1zmAa/wnRo= -github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= +github.com/snowflakedb/gosnowflake v1.11.0 h1:qyqunGCVyq/Qyx40KQT+6sJ1CAGuuG2qv3WiCTLTctI= +github.com/snowflakedb/gosnowflake v1.11.0/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod 
h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/std-uritemplate/std-uritemplate/go v0.0.57 h1:GHGjptrsmazP4IVDlUprssiEf9ESVkbjx15xQXXzvq4= +github.com/std-uritemplate/std-uritemplate/go v0.0.57/go.mod h1:rG/bqh/ThY4xE5de7Rap3vaDkYUT76B0GPJ0loYeTTc= github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1782,23 +2136,22 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod 
h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tilinna/clock v1.0.2 h1:6BO2tyAC9JbPExKH/z9zl44FLu1lImh3nDNKA0kgrkI= github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= +github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tink-crypto/tink-go/v2 v2.2.0 h1:L2Da0F2Udh2agtKztdr69mV/KpnY3/lGTkMgLTVIXlA= +github.com/tink-crypto/tink-go/v2 v2.2.0/go.mod h1:JJ6PomeNPF3cJpfWC0lgyTES6zpJILkAX0cJNwlS3xU= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= @@ -1808,51 +2161,28 @@ github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= 
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod 
h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
-github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1867,128 +2197,150 @@ github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLC
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY=
+github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
+github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 h1:19vOZe7geDEympjWIVidGi6/psR5Y+aaKnF17PSpdXA=
-go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672/go.mod h1:wSVAyLiSU4JOBlqGr29lZeKbllk31oCAXAdTa6MioWQ=
-go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.mongodb.org/atlas v0.13.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M=
-go.mongodb.org/atlas v0.15.0 h1:YyOBdBIuI//krRITf4r7PSirJ3YDNNUfNmapxwSyDow=
-go.mongodb.org/atlas v0.15.0/go.mod h1:lQhRHIxc6jQHEK3/q9WLu/SdBkPj2fQYhjLGUF6Z3U8=
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
-go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
-go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
-go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs=
-go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
+go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
+go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4=
+go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c=
+go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
+go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg=
+go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8=
+go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI=
+go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
+go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js=
+go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI=
+go.mongodb.org/atlas v0.37.0 h1:zQnO1o5+bVP9IotpAYpres4UjMD2F4nwNEFTZhNL4ck=
+go.mongodb.org/atlas v0.37.0/go.mod h1:DJYtM+vsEpPEMSkQzJnFHrT0sP7ev6cseZc/GGjJYG8=
+go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
+go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
-go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
-go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU=
-go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU=
-go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
-go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
+go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc=
+go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
+go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
+go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
+go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
+go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
+go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
-go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w=
-golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1999,7 +2351,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -2010,17 +2362,20 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -2028,19 +2383,16 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -2052,42 +2404,55 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2095,14 +2460,35 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2112,8 +2498,11 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2128,29 +2517,17 @@ golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2161,20 +2538,14 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2183,73 +2554,91 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
-golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2262,48 +2651,47 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w=
-golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -2328,38 +2716,58 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
+golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk=
+golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o=
+gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -2377,26 +2785,62 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8=
-google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
+google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ=
+google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -2405,7 +2849,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2414,33 +2857,130 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto 
v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto 
v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api 
v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -2452,14 +2992,35 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= +google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2474,42 +3035,39 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod 
h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2521,20 +3079,14 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v1.9.0 h1:Jbo/0k/sIOXIJu51IZxEAt27n77xspFEfL6SqKUR72A= -gotest.tools/gotestsum v1.9.0/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= -gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2542,78 +3094,77 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= -k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= -k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= -k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/component-base v0.20.1/go.mod 
h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= -layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod 
h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +layeh.com/radius v0.0.0-20231213012653-1006025d24f8 h1:orYXpi6BJZdvgytfHH4ybOe4wHnLbbS71Cmd8mWdZjs= +layeh.com/radius v0.0.0-20231213012653-1006025d24f8/go.mod h1:QRf+8aRqXc019kHkpcs/CTgyWXFzf+bxlsyuo2nAl1o= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +nhooyr.io/websocket v1.8.11 
h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/helper/benchhelpers/benchhelpers.go b/helper/benchhelpers/benchhelpers.go deleted file mode 100644 index 06dcde604e61..000000000000 --- a/helper/benchhelpers/benchhelpers.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package benchhelpers - -import ( - "testing" - - testinginterface "github.com/mitchellh/go-testing-interface" -) - -type tbWrapper struct { - testing.TB -} - -func (b tbWrapper) Parallel() { - // no-op -} - -func TBtoT(tb testing.TB) testinginterface.T { - return tbWrapper{tb} -} diff --git a/helper/builtinplugins/builtinplugins_test.go b/helper/builtinplugins/builtinplugins_test.go new file mode 100644 index 000000000000..9587960436ed --- /dev/null +++ b/helper/builtinplugins/builtinplugins_test.go @@ -0,0 +1,150 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package builtinplugins + +import ( + "testing" + + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +// TestBuiltinPluginsWork exists to confirm that all the credential and secrets plugins in Registry can successfully be +// initialized. Database plugins are excluded as there is no general way to initialize them - they require +// plugin-specific configuration at the time of initialization. +// +// This detects coding errors which would cause the plugins to panic on initialization - various aspects of the +// configuration of a framework.Backend are checked during Backend.init(), which runs as a sync.Once function triggered +// upon first request. +// +// In this test, a help request is used to trigger that initialization, since it is valid for all plugins. +func TestBuiltinPluginsWork(t *testing.T) { + cluster := vault.NewTestCluster( + t, + &vault.CoreConfig{ + BuiltinRegistry: Registry, + LogicalBackends: map[string]logical.Factory{ + // This needs to be here for madly overcomplicated reasons; otherwise we end up mounting a KV v1 even + // when we try to explicitly mount a KV v2... + // + // vault.NewCore hardcodes "kv" to vault.PassthroughBackendFactory if no explicit entry is configured, + // and this hardcoding is re-overridden in command.logicalBackends to point back to the real KV plugin. + // As far as I can tell, nothing at all relies upon the definition of "kv" in builtinplugins.Registry, + // as it always gets resolved via the logicalBackends map and the pluginCatalog is never queried. + "kv": logicalKv.Factory, + // Semi-similarly, "database" is added in command.logicalBackends and not at all in + // builtinplugins.Registry, so we need to add it here to be able to test it!
+ "database": logicalDb.Factory, + }, + PendingRemovalMountsAllowed: true, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }, + ) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + for _, authType := range append( + Registry.Keys(consts.PluginTypeCredential), + "token", + ) { + deprecationStatus, _ := Registry.DeprecationStatus(authType, consts.PluginTypeCredential) + if deprecationStatus == consts.Removed { + continue + } + + t.Run("Auth Method "+authType, func(t *testing.T) { + // This builtin backend is automatically mounted and should not be mounted again + if authType != "token" { + if err := client.Sys().EnableAuthWithOptions(authType, &api.EnableAuthOptions{ + Type: authType, + }); err != nil { + t.Fatal(err) + } + } + + if _, err := client.Logical().ReadWithData( + "auth/"+authType, + map[string][]string{"help": {"1"}}, + ); err != nil { + t.Fatal(err) + } + }) + } + + for _, secretsType := range append( + Registry.Keys(consts.PluginTypeSecrets), + "database", + "cubbyhole", + "identity", + "sys", + ) { + deprecationStatus, _ := Registry.DeprecationStatus(secretsType, consts.PluginTypeSecrets) + if deprecationStatus == consts.Removed { + continue + } + + t.Run("Secrets Engine "+secretsType, func(t *testing.T) { + switch secretsType { + // These three builtin backends are automatically mounted and should not be mounted again + case "cubbyhole": + case "identity": + case "sys": + + default: + if err := client.Sys().Mount(secretsType, &api.MountInput{ + Type: secretsType, + }); err != nil { + t.Fatal(err) + } + } + + if _, err := client.Logical().ReadWithData( + secretsType, + map[string][]string{"help": {"1"}}, + ); err != nil { + t.Fatal(err) + } + }) + } + + t.Run("Secrets Engine kv v2", func(t *testing.T) { + if err := client.Sys().Mount("kv-v2", &api.MountInput{ + Type: "kv", + Options: map[string]string{ + "version": "2", + }, + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().ReadWithData( + "kv-v2", + map[string][]string{"help": {"1"}}, + ); err != nil { + t.Fatal(err) + } + }) + + // This last part is not strictly necessary for the original purpose of this test (checking the plugins initialize + // without errors), but whilst we have a test Vault with one of everything mounted, let's also test that the full + // OpenAPI document generation succeeds. + t.Run("Whole OpenAPI document", func(t *testing.T) { + if _, err := client.Logical().Read("sys/internal/specs/openapi"); err != nil { + t.Fatal(err) + } + }) +} diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index b4d3da5937f0..e5a6044e5f3c 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -1,60 +1,19 @@ // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package builtinplugins import ( "context" - credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" - credAzure "github.com/hashicorp/vault-plugin-auth-azure" - credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" - credCF "github.com/hashicorp/vault-plugin-auth-cf" - credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" credJWT "github.com/hashicorp/vault-plugin-auth-jwt" - credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" - credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" - credOCI "github.com/hashicorp/vault-plugin-auth-oci" - dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase" - dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" - dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" - dbRedis "github.com/hashicorp/vault-plugin-database-redis" - dbRedisElastiCache "github.com/hashicorp/vault-plugin-database-redis-elasticache" - dbSnowflake "github.com/hashicorp/vault-plugin-database-snowflake" - logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin" - logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud" - logicalAzure "github.com/hashicorp/vault-plugin-secrets-azure" - logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin" - logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms" - logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes" logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" - logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" - logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" - logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - credAws "github.com/hashicorp/vault/builtin/credential/aws" credCert "github.com/hashicorp/vault/builtin/credential/cert" - credGitHub "github.com/hashicorp/vault/builtin/credential/github" - credLdap "github.com/hashicorp/vault/builtin/credential/ldap" - credOkta "github.com/hashicorp/vault/builtin/credential/okta" - credRadius "github.com/hashicorp/vault/builtin/credential/radius" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - logicalAws "github.com/hashicorp/vault/builtin/logical/aws" - logicalConsul "github.com/hashicorp/vault/builtin/logical/consul" - logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad" logicalPki "github.com/hashicorp/vault/builtin/logical/pki" - logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq" logicalSsh "github.com/hashicorp/vault/builtin/logical/ssh" - logicalTotp "github.com/hashicorp/vault/builtin/logical/totp" logicalTransit "github.com/hashicorp/vault/builtin/logical/transit" - dbCass "github.com/hashicorp/vault/plugins/database/cassandra" - dbHana "github.com/hashicorp/vault/plugins/database/hana" - dbInflux "github.com/hashicorp/vault/plugins/database/influxdb" - dbMongo "github.com/hashicorp/vault/plugins/database/mongodb" - dbMssql "github.com/hashicorp/vault/plugins/database/mssql" - dbMysql "github.com/hashicorp/vault/plugins/database/mysql" - dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" - dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -64,8 +23,6 @@ import ( // Thus, rather than creating multiple instances of it, we only need one. 
var Registry = newRegistry() -var addExternalPlugins = addExtPluginsImpl - // BuiltinFactory is the func signature that should be returned by // the plugin's New() func. type BuiltinFactory func() (interface{}, error) @@ -96,107 +53,31 @@ func removedFactory(ctx context.Context, config *logical.BackendConfig) (logical return removedBackend, nil } -func newRegistry() *registry { - reg := ®istry{ +func newMinimalRegistry() *registry { + return ®istry{ credentialBackends: map[string]credentialBackend{ - "alicloud": {Factory: credAliCloud.Factory}, - "app-id": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, - }, - "approle": {Factory: credAppRole.Factory}, - "aws": {Factory: credAws.Factory}, - "azure": {Factory: credAzure.Factory}, - "centrify": {Factory: credCentrify.Factory}, - "cert": {Factory: credCert.Factory}, - "cf": {Factory: credCF.Factory}, - "gcp": {Factory: credGcp.Factory}, - "github": {Factory: credGitHub.Factory}, - "jwt": {Factory: credJWT.Factory}, - "kerberos": {Factory: credKerb.Factory}, - "kubernetes": {Factory: credKube.Factory}, - "ldap": {Factory: credLdap.Factory}, - "oci": {Factory: credOCI.Factory}, - "oidc": {Factory: credJWT.Factory}, - "okta": {Factory: credOkta.Factory}, - "pcf": { - Factory: credCF.Factory, - DeprecationStatus: consts.Deprecated, - }, - "radius": {Factory: credRadius.Factory}, + "approle": {Factory: credAppRole.Factory}, + "cert": {Factory: credCert.Factory}, + "jwt": {Factory: credJWT.Factory}, + "oidc": {Factory: credJWT.Factory}, "userpass": {Factory: credUserpass.Factory}, }, - databasePlugins: map[string]databasePlugin{ - // These four plugins all use the same mysql implementation but with - // different username settings passed by the constructor. - "mysql-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultUserNameTemplate)}, - "mysql-aurora-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, - "mysql-rds-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, - "mysql-legacy-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, - - "cassandra-database-plugin": {Factory: dbCass.New}, - "couchbase-database-plugin": {Factory: dbCouchbase.New}, - "elasticsearch-database-plugin": {Factory: dbElastic.New}, - "hana-database-plugin": {Factory: dbHana.New}, - "influxdb-database-plugin": {Factory: dbInflux.New}, - "mongodb-database-plugin": {Factory: dbMongo.New}, - "mongodbatlas-database-plugin": {Factory: dbMongoAtlas.New}, - "mssql-database-plugin": {Factory: dbMssql.New}, - "postgresql-database-plugin": {Factory: dbPostgres.New}, - "redshift-database-plugin": {Factory: dbRedshift.New}, - "redis-database-plugin": {Factory: dbRedis.New}, - "redis-elasticache-database-plugin": {Factory: dbRedisElastiCache.New}, - "snowflake-database-plugin": {Factory: dbSnowflake.New}, - }, + databasePlugins: map[string]databasePlugin{}, logicalBackends: map[string]logicalBackend{ - "ad": { - Factory: logicalAd.Factory, - DeprecationStatus: consts.Deprecated, - }, - "alicloud": {Factory: logicalAlicloud.Factory}, - "aws": {Factory: logicalAws.Factory}, - "azure": {Factory: logicalAzure.Factory}, - "cassandra": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, - }, - "consul": {Factory: logicalConsul.Factory}, - "gcp": {Factory: logicalGcp.Factory}, - "gcpkms": {Factory: logicalGcpKms.Factory}, - "kubernetes": {Factory: logicalKube.Factory}, - "kv": {Factory: logicalKv.Factory}, - "mongodb": { - Factory: removedFactory, - DeprecationStatus: 
consts.Removed, - }, - // The mongodbatlas secrets engine is not the same as the database plugin equivalent - // (`mongodbatlas-database-plugin`), and thus will not be deprecated at this time. - "mongodbatlas": {Factory: logicalMongoAtlas.Factory}, - "mssql": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, - }, - "mysql": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, - }, - "nomad": {Factory: logicalNomad.Factory}, - "openldap": {Factory: logicalLDAP.Factory}, - "ldap": {Factory: logicalLDAP.Factory}, - "pki": {Factory: logicalPki.Factory}, - "postgresql": { - Factory: removedFactory, - DeprecationStatus: consts.Removed, - }, - "rabbitmq": {Factory: logicalRabbit.Factory}, - "ssh": {Factory: logicalSsh.Factory}, - "terraform": {Factory: logicalTerraform.Factory}, - "totp": {Factory: logicalTotp.Factory}, - "transit": {Factory: logicalTransit.Factory}, + "kv": {Factory: logicalKv.Factory}, + "pki": {Factory: logicalPki.Factory}, + "ssh": {Factory: logicalSsh.Factory}, + "transit": {Factory: logicalTransit.Factory}, }, } +} + +func newRegistry() *registry { + reg := newMinimalRegistry() + + extendAddonPlugins(reg) - addExternalPlugins(reg) + entAddExtPlugins(reg) return reg } diff --git a/helper/builtinplugins/registry_full.go b/helper/builtinplugins/registry_full.go new file mode 100644 index 000000000000..32bba4048796 --- /dev/null +++ b/helper/builtinplugins/registry_full.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !minimal + +package builtinplugins + +import ( + "maps" + + credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" + credAzure "github.com/hashicorp/vault-plugin-auth-azure" + credCF "github.com/hashicorp/vault-plugin-auth-cf" + credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" + credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" + credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" + credOCI "github.com/hashicorp/vault-plugin-auth-oci" + dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase" + dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" + dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" + dbRedis "github.com/hashicorp/vault-plugin-database-redis" + dbRedisElastiCache "github.com/hashicorp/vault-plugin-database-redis-elasticache" + dbSnowflake "github.com/hashicorp/vault-plugin-database-snowflake" + logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin" + logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud" + logicalAzure "github.com/hashicorp/vault-plugin-secrets-azure" + logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin" + logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms" + logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes" + logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" + logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" + logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform" + credAws "github.com/hashicorp/vault/builtin/credential/aws" + credGitHub "github.com/hashicorp/vault/builtin/credential/github" + credLdap "github.com/hashicorp/vault/builtin/credential/ldap" + credOkta "github.com/hashicorp/vault/builtin/credential/okta" + credRadius "github.com/hashicorp/vault/builtin/credential/radius" + logicalAws "github.com/hashicorp/vault/builtin/logical/aws" + logicalConsul "github.com/hashicorp/vault/builtin/logical/consul" + logicalNomad 
"github.com/hashicorp/vault/builtin/logical/nomad" + logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq" + logicalTotp "github.com/hashicorp/vault/builtin/logical/totp" + dbCass "github.com/hashicorp/vault/plugins/database/cassandra" + dbHana "github.com/hashicorp/vault/plugins/database/hana" + dbInflux "github.com/hashicorp/vault/plugins/database/influxdb" + dbMongo "github.com/hashicorp/vault/plugins/database/mongodb" + dbMssql "github.com/hashicorp/vault/plugins/database/mssql" + dbMysql "github.com/hashicorp/vault/plugins/database/mysql" + dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" + dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +func newFullAddonRegistry() *registry { + return ®istry{ + credentialBackends: map[string]credentialBackend{ + "alicloud": {Factory: credAliCloud.Factory}, + "app-id": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "aws": {Factory: credAws.Factory}, + "azure": {Factory: credAzure.Factory}, + "cf": {Factory: credCF.Factory}, + "gcp": {Factory: credGcp.Factory}, + "github": {Factory: credGitHub.Factory}, + "kerberos": {Factory: credKerb.Factory}, + "kubernetes": {Factory: credKube.Factory}, + "ldap": {Factory: credLdap.Factory}, + "oci": {Factory: credOCI.Factory}, + "okta": {Factory: credOkta.Factory}, + "pcf": { + Factory: credCF.Factory, + DeprecationStatus: consts.Deprecated, + }, + "radius": {Factory: credRadius.Factory}, + }, + databasePlugins: map[string]databasePlugin{ + // These four plugins all use the same mysql implementation but with + // different username settings passed by the constructor. + "mysql-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultUserNameTemplate)}, + "mysql-aurora-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, + "mysql-rds-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, + "mysql-legacy-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, + + "cassandra-database-plugin": {Factory: dbCass.New}, + "couchbase-database-plugin": {Factory: dbCouchbase.New}, + "elasticsearch-database-plugin": {Factory: dbElastic.New}, + "hana-database-plugin": {Factory: dbHana.New}, + "influxdb-database-plugin": {Factory: dbInflux.New}, + "mongodb-database-plugin": {Factory: dbMongo.New}, + "mongodbatlas-database-plugin": {Factory: dbMongoAtlas.New}, + "mssql-database-plugin": {Factory: dbMssql.New}, + "postgresql-database-plugin": {Factory: dbPostgres.New}, + "redshift-database-plugin": {Factory: dbRedshift.New}, + "redis-database-plugin": {Factory: dbRedis.New}, + "redis-elasticache-database-plugin": {Factory: dbRedisElastiCache.New}, + "snowflake-database-plugin": {Factory: dbSnowflake.New}, + }, + logicalBackends: map[string]logicalBackend{ + "ad": { + Factory: logicalAd.Factory, + DeprecationStatus: consts.Deprecated, + }, + "alicloud": {Factory: logicalAlicloud.Factory}, + "aws": {Factory: logicalAws.Factory}, + "azure": {Factory: logicalAzure.Factory}, + "cassandra": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "consul": {Factory: logicalConsul.Factory}, + "gcp": {Factory: logicalGcp.Factory}, + "gcpkms": {Factory: logicalGcpKms.Factory}, + "kubernetes": {Factory: logicalKube.Factory}, + "mongodb": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "mongodbatlas": {Factory: logicalMongoAtlas.Factory}, + "mssql": { + Factory: removedFactory, + 
DeprecationStatus: consts.Removed, + }, + "mysql": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "nomad": {Factory: logicalNomad.Factory}, + "openldap": {Factory: logicalLDAP.Factory}, + "ldap": {Factory: logicalLDAP.Factory}, + "postgresql": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "rabbitmq": {Factory: logicalRabbit.Factory}, + "terraform": {Factory: logicalTerraform.Factory}, + "totp": {Factory: logicalTotp.Factory}, + }, + } +} + +func extendAddonPlugins(reg *registry) { + addonReg := newFullAddonRegistry() + + maps.Copy(reg.credentialBackends, addonReg.credentialBackends) + maps.Copy(reg.databasePlugins, addonReg.databasePlugins) + maps.Copy(reg.logicalBackends, addonReg.logicalBackends) +} diff --git a/helper/builtinplugins/registry_full_test.go b/helper/builtinplugins/registry_full_test.go new file mode 100644 index 000000000000..23626daf525b --- /dev/null +++ b/helper/builtinplugins/registry_full_test.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise && !minimal + +package builtinplugins + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// Test_newRegistry tests that newRegistry() returns a registry with +// the expected minimal registry extended with the full addon registry +func Test_newRegistry(t *testing.T) { + actual := newRegistry() + expMinimal := newMinimalRegistry() + expFullAddon := newFullAddonRegistry() + + require.Equal(t, len(expMinimal.credentialBackends)+len(expFullAddon.credentialBackends), len(actual.credentialBackends), + "newRegistry() total auth backends does not match the total of the minimal and full addon registries") + require.Equal(t, len(expMinimal.databasePlugins)+len(expFullAddon.databasePlugins), len(actual.databasePlugins), + "newRegistry() total database plugins does not match the total of the minimal and full addon registries") + require.Equal(t, len(expMinimal.logicalBackends)+len(expFullAddon.logicalBackends), len(actual.logicalBackends), + "newRegistry() total logical backends does not match the total of the minimal and full addon registries") + + assertRegistrySubset(t, actual, expMinimal, "common") + assertRegistrySubset(t, actual, expFullAddon, "full addon") +} diff --git a/helper/builtinplugins/registry_min.go b/helper/builtinplugins/registry_min.go new file mode 100644 index 000000000000..75b281f631b7 --- /dev/null +++ b/helper/builtinplugins/registry_min.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build minimal + +package builtinplugins + +func extendAddonPlugins(_ *registry) { + // No-op +} diff --git a/helper/builtinplugins/registry_stubs_oss.go b/helper/builtinplugins/registry_stubs_oss.go new file mode 100644 index 000000000000..fa0dab8b6656 --- /dev/null +++ b/helper/builtinplugins/registry_stubs_oss.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package builtinplugins + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entAddExtPlugins(r *registry) { +} diff --git a/helper/builtinplugins/registry_test.go b/helper/builtinplugins/registry_test.go index 3b6514e9ad2f..dda57921abf6 100644 --- a/helper/builtinplugins/registry_test.go +++ b/helper/builtinplugins/registry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
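The layering introduced above (a minimal base registry, a `!minimal` addon set merged in by `extendAddonPlugins`, and an enterprise hook via `entAddExtPlugins`) is plain map composition, with build tags selecting which implementation of the extend function gets compiled. A self-contained sketch of the same pattern, with hypothetical names standing in for the real registry type; note that `maps.Copy` overwrites on key collision, so addon entries would win if a name appeared in both layers:

```go
// A compressed sketch of the registry layering. In the real code the two
// extendAddons bodies live in separate files selected by //go:build tags
// (registry_full.go for !minimal, registry_min.go for minimal).
package main

import (
	"fmt"
	"maps"
)

type factory func() string

func newMinimal() map[string]factory {
	return map[string]factory{
		"kv":  func() string { return "kv backend" },
		"pki": func() string { return "pki backend" },
	}
}

// The !minimal flavor: merge the addon set into the base registry.
func extendAddons(reg map[string]factory) {
	addons := map[string]factory{
		"aws":   func() string { return "aws backend" },
		"nomad": func() string { return "nomad backend" },
	}
	maps.Copy(reg, addons) // addon entries overwrite on collision
}

func main() {
	reg := newMinimal()
	extendAddons(reg)
	for name := range reg {
		fmt.Println(name)
	}
}
```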
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package builtinplugins @@ -12,9 +12,9 @@ import ( "testing" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/constants" dbMysql "github.com/hashicorp/vault/plugins/database/mysql" "github.com/hashicorp/vault/sdk/helper/consts" - "golang.org/x/exp/slices" ) @@ -87,6 +87,7 @@ func Test_RegistryKeyCounts(t *testing.T) { name string pluginType consts.PluginType want int // use slice length as test condition + entWant int wantOk bool }{ { @@ -97,7 +98,8 @@ func Test_RegistryKeyCounts(t *testing.T) { { name: "number of auth plugins", pluginType: consts.PluginTypeCredential, - want: 19, + want: 18, + entWant: 1, }, { name: "number of database plugins", @@ -108,13 +110,18 @@ func Test_RegistryKeyCounts(t *testing.T) { name: "number of secrets plugins", pluginType: consts.PluginTypeSecrets, want: 19, + entWant: 3, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { keys := Registry.Keys(tt.pluginType) - if len(keys) != tt.want { - t.Fatalf("got size: %d, want size: %d", len(keys), tt.want) + want := tt.want + if constants.IsEnterprise { + want += tt.entWant + } + if len(keys) != want { + t.Fatalf("got size: %d, want size: %d", len(keys), want) } }) } @@ -240,12 +247,20 @@ func Test_RegistryMatchesGenOpenapi(t *testing.T) { } defer f.Close() + // This is a hack: the gen_openapi script contains a conditional block to + // enable the enterprise plugins, whose lines are indented. Tweak the + // regexp to only include the indented lines on enterprise. + leading := "^" + if constants.IsEnterprise { + leading = "^ *" + } + var ( credentialBackends []string - credentialBackendsRe = regexp.MustCompile(`^vault auth enable (?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) + credentialBackendsRe = regexp.MustCompile(leading + `vault auth enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) secretsBackends []string - secretsBackendsRe = regexp.MustCompile(`^vault secrets enable (?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) + secretsBackendsRe = regexp.MustCompile(leading + `vault secrets enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) ) scanner := bufio.NewScanner(f) @@ -280,25 +295,29 @@ func Test_RegistryMatchesGenOpenapi(t *testing.T) { deprecationStatus, ok := Registry.DeprecationStatus(name, pluginType) if !ok { - t.Fatalf("%q %s backend is missing from registry.go; please remove it from gen_openapi.sh", name, pluginType) + t.Errorf("%q %s backend is missing from registry.go; please remove it from gen_openapi.sh", name, pluginType) } if deprecationStatus == consts.Removed { - t.Fatalf("%q %s backend is marked 'removed' in registry.go; please remove it from gen_openapi.sh", name, pluginType) + t.Errorf("%q %s backend is marked 'removed' in registry.go; please remove it from gen_openapi.sh", name, pluginType) } } - // ensureInScript ensures that the given plugin name in in gen_openapi.sh script + // ensureInScript ensures that the given plugin name is in gen_openapi.sh script ensureInScript := func(t *testing.T, scriptBackends []string, name string) { t.Helper() - // "openldap" is an alias for "ldap" secrets engine - if name == "openldap" { - return + for _, excluded := range []string{ + "oidc", // alias for "jwt" + "openldap", // alias for "ldap" + } { + if name == excluded { + return + } } if !slices.Contains(scriptBackends, name) { - t.Fatalf("%q backend could not be found in gen_openapi.sh, please add it there", name) + t.Errorf("%q backend could not be found in gen_openapi.sh, please add 
it there", name) } } @@ -308,19 +327,23 @@ func Test_RegistryMatchesGenOpenapi(t *testing.T) { t.Fatal(err) } - for _, b := range scriptCredentialBackends { - ensureInRegistry(t, b, consts.PluginTypeCredential) + for _, name := range scriptCredentialBackends { + ensureInRegistry(t, name, consts.PluginTypeCredential) } - for _, b := range scriptSecretsBackends { - ensureInRegistry(t, b, consts.PluginTypeSecrets) + for _, name := range scriptSecretsBackends { + ensureInRegistry(t, name, consts.PluginTypeSecrets) } - for _, b := range Registry.Keys(consts.PluginTypeCredential) { - ensureInScript(t, scriptCredentialBackends, b) + for name, backend := range Registry.credentialBackends { + if backend.DeprecationStatus == consts.Supported { + ensureInScript(t, scriptCredentialBackends, name) + } } - for _, b := range Registry.Keys(consts.PluginTypeSecrets) { - ensureInScript(t, scriptSecretsBackends, b) + for name, backend := range Registry.logicalBackends { + if backend.DeprecationStatus == consts.Supported { + ensureInScript(t, scriptSecretsBackends, name) + } } } diff --git a/helper/builtinplugins/registry_testing_util.go b/helper/builtinplugins/registry_testing_util.go new file mode 100644 index 000000000000..78d0f938670b --- /dev/null +++ b/helper/builtinplugins/registry_testing_util.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package builtinplugins + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func assertRegistrySubset(t *testing.T, r, subset *registry, subsetName string) { + t.Helper() + + for k := range subset.credentialBackends { + require.Contains(t, r.credentialBackends, k, fmt.Sprintf("expected to contain %s auth backend", subsetName)) + } + + for k := range subset.databasePlugins { + require.Contains(t, r.databasePlugins, k, fmt.Sprintf("expected to contain %s database plugin", subsetName)) + } + + for k := range subset.logicalBackends { + require.Contains(t, r.logicalBackends, k, fmt.Sprintf("expected to contain %s logical backend", subsetName)) + } +} diff --git a/helper/builtinplugins/registry_util.go b/helper/builtinplugins/registry_util.go new file mode 100644 index 000000000000..257bc855628d --- /dev/null +++ b/helper/builtinplugins/registry_util.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package builtinplugins + +import "github.com/hashicorp/vault/sdk/helper/consts" + +// IsBuiltinEntPlugin checks whether the plugin is an enterprise only builtin plugin +func (r *registry) IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool { + return false +} diff --git a/helper/constants/constants_oss.go b/helper/constants/constants_oss.go index 8675f7030658..820fd2914bb4 100644 --- a/helper/constants/constants_oss.go +++ b/helper/constants/constants_oss.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise package constants -var IsEnterprise = false +const IsEnterprise = false diff --git a/helper/constants/fips.go b/helper/constants/fips.go index 9632d014a549..f5ecc66ce47e 100644 --- a/helper/constants/fips.go +++ b/helper/constants/fips.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
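A note on the `constants_oss.go` hunk above: turning `IsEnterprise` from a `var` into a `const` means the flag can no longer be mutated at runtime (for example, by a test), and lets the compiler eliminate the dead branch in whichever build flavor is compiled. A small sketch of the gating pattern `Test_RegistryKeyCounts` uses, with a local stand-in constant:

```go
// Sketch of compile-time flavor gating, mirroring the registry count test.
// isEnterprise is a stand-in for constants.IsEnterprise; because it is a
// const, the unused branch below is dead-code-eliminated.
package main

import "fmt"

const isEnterprise = false

func expectedAuthPlugins(ceWant, entExtra int) int {
	want := ceWant
	if isEnterprise {
		want += entExtra
	}
	return want
}

func main() {
	fmt.Println(expectedAuthPlugins(18, 1)) // 18 on CE builds, 19 on enterprise
}
```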
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !fips diff --git a/helper/constants/fips_build_check.go b/helper/constants/fips_build_check.go index 10e07e583f98..cb2bf5edc836 100644 --- a/helper/constants/fips_build_check.go +++ b/helper/constants/fips_build_check.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build (!fips && (fips_140_2 || fips_140_3)) || (fips && !fips_140_2 && !fips_140_3) || (fips_140_2 && fips_140_3) diff --git a/helper/constants/fips_cgo_check.go b/helper/constants/fips_cgo_check.go index 6de7d9f0d031..409b713afdc9 100644 --- a/helper/constants/fips_cgo_check.go +++ b/helper/constants/fips_cgo_check.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build (fips || fips_140_2 || fips_140_3) && !cgo diff --git a/helper/dhutil/dhutil.go b/helper/dhutil/dhutil.go index 97552d4cc4c6..7c14cb29fbca 100644 --- a/helper/dhutil/dhutil.go +++ b/helper/dhutil/dhutil.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package dhutil @@ -13,9 +13,8 @@ import ( "fmt" "io" - "golang.org/x/crypto/hkdf" - "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/hkdf" ) type PublicKeyInfo struct { diff --git a/helper/dhutil/dhutil_test.go b/helper/dhutil/dhutil_test.go index 4b94f601d92d..18cd2c064f8e 100644 --- a/helper/dhutil/dhutil_test.go +++ b/helper/dhutil/dhutil_test.go @@ -1,4 +1,4 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package dhutil diff --git a/helper/experiments/experiments.go b/helper/experiments/experiments.go index 538430e64ccc..7bbe8b6af1eb 100644 --- a/helper/experiments/experiments.go +++ b/helper/experiments/experiments.go @@ -1,19 +1,37 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package experiments -const VaultExperimentEventsAlpha1 = "events.alpha1" +import "slices" + +const ( + VaultExperimentCoreAuditEventsAlpha1 = "core.audit.events.alpha1" + VaultExperimentSecretsImport = "secrets.import.alpha1" + + // Unused experiments. We keep them so that we don't break users who include them in their + // flags or configs, but they no longer have any effect. + VaultExperimentEventsAlpha1 = "events.alpha1" +) var validExperiments = []string{ VaultExperimentEventsAlpha1, + VaultExperimentCoreAuditEventsAlpha1, + VaultExperimentSecretsImport, } -// ValidExperiments exposes the list without exposing a mutable global variable. -// Experiments can only be enabled when starting a server, and will typically -// enable pre-GA API functionality. +var unusedExperiments = []string{ + VaultExperimentEventsAlpha1, +} + +// ValidExperiments exposes the list of valid experiments without exposing a mutable +// global variable. Experiments can only be enabled when starting a server, and will +// typically enable pre-GA API functionality. func ValidExperiments() []string { - result := make([]string, len(validExperiments)) - copy(result, validExperiments) - return result + return slices.Clone(validExperiments) +} + +// IsUnused returns true if the given experiment is in the unused list. 
+func IsUnused(experiment string) bool { + return slices.Contains(unusedExperiments, experiment) } diff --git a/helper/fairshare/fairshare_testing_util.go b/helper/fairshare/fairshare_testing_util.go index 8061795947d8..5aae025f5e92 100644 --- a/helper/fairshare/fairshare_testing_util.go +++ b/helper/fairshare/fairshare_testing_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package fairshare diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go index dc9a6198af25..73a115644be1 100644 --- a/helper/fairshare/jobmanager.go +++ b/helper/fairshare/jobmanager.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package fairshare @@ -125,7 +125,7 @@ func (j *JobManager) AddJob(job Job, queueID string) { } } -// GetCurrentJobCount returns the total number of pending jobs in the job manager +// GetPendingJobCount returns the total number of pending jobs in the job manager func (j *JobManager) GetPendingJobCount() int { j.l.RLock() defer j.l.RUnlock() @@ -142,7 +142,12 @@ func (j *JobManager) GetPendingJobCount() int { func (j *JobManager) GetWorkerCounts() map[string]int { j.l.RLock() defer j.l.RUnlock() - return j.workerCount + workerCounts := make(map[string]int, len(j.workerCount)) + for k, v := range j.workerCount { + workerCounts[k] = v + } + + return workerCounts } // GetWorkQueueLengths() returns a map of queue ID to number of jobs in the queue diff --git a/helper/fairshare/jobmanager_test.go b/helper/fairshare/jobmanager_test.go index 3d6638a4a766..288f0d2f949a 100644 --- a/helper/fairshare/jobmanager_test.go +++ b/helper/fairshare/jobmanager_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package fairshare @@ -747,3 +747,23 @@ func TestFairshare_queueWorkersSaturated(t *testing.T) { j.l.RUnlock() } } + +func TestJobManager_GetWorkerCounts_RaceCondition(t *testing.T) { + j := NewJobManager("test-job-mgr", 20, nil, nil) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 10; i++ { + j.incrementWorkerCount("a") + } + }() + wcs := j.GetWorkerCounts() + wcs["foo"] = 10 + for worker, count := range wcs { + _ = worker + _ = count + } + + wg.Wait() +} diff --git a/helper/fairshare/workerpool.go b/helper/fairshare/workerpool.go index e655a9084dd6..ef6ee3ad7b5f 100644 --- a/helper/fairshare/workerpool.go +++ b/helper/fairshare/workerpool.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package fairshare diff --git a/helper/fairshare/workerpool_test.go b/helper/fairshare/workerpool_test.go index eb563140374b..d347c6734662 100644 --- a/helper/fairshare/workerpool_test.go +++ b/helper/fairshare/workerpool_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package fairshare diff --git a/helper/flag-kv/flag.go b/helper/flag-kv/flag.go index a3b04cec11ae..f09332c3ff2c 100644 --- a/helper/flag-kv/flag.go +++ b/helper/flag-kv/flag.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
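The `GetWorkerCounts` change above is the classic defensive-copy fix, the same idea as `slices.Clone` in `ValidExperiments` earlier: returning the internal map hands callers a live reference that races with `incrementWorkerCount` and lets callers mutate shared state. A self-contained sketch (hypothetical type names) mirroring the new race test; running it under `go run -race` with `snapshot` returning `c.counts` directly exposes the data race the fix removes:

```go
// Demonstrates why a mutex-guarded map must be copied before being returned.
package main

import (
	"fmt"
	"sync"
)

type counters struct {
	mu     sync.RWMutex
	counts map[string]int
}

func (c *counters) inc(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[key]++
}

// snapshot copies under the read lock so each caller gets an
// independent map, as the patched GetWorkerCounts does.
func (c *counters) snapshot() map[string]int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]int, len(c.counts))
	for k, v := range c.counts {
		out[k] = v
	}
	return out
}

func main() {
	c := &counters{counts: map[string]int{}}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			c.inc("a")
		}
	}()
	snap := c.snapshot()
	snap["foo"] = 10 // safe: mutates the copy, not shared state
	wg.Wait()
	fmt.Println(c.snapshot())
}
```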
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kvFlag diff --git a/helper/flag-kv/flag_test.go b/helper/flag-kv/flag_test.go index b083d52e74e3..91a344403975 100644 --- a/helper/flag-kv/flag_test.go +++ b/helper/flag-kv/flag_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kvFlag diff --git a/helper/flag-slice/flag.go b/helper/flag-slice/flag.go index b8234385ef03..1824fc8477f2 100644 --- a/helper/flag-slice/flag.go +++ b/helper/flag-slice/flag.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package sliceflag diff --git a/helper/flag-slice/flag_test.go b/helper/flag-slice/flag_test.go index 7973d57926a2..6662446df7d7 100644 --- a/helper/flag-slice/flag_test.go +++ b/helper/flag-slice/flag_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package sliceflag diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index bf579d0382c2..66b6decb991a 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: helper/forwarding/types.proto package forwarding @@ -422,7 +422,7 @@ func file_helper_forwarding_types_proto_rawDescGZIP() []byte { } var file_helper_forwarding_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_helper_forwarding_types_proto_goTypes = []interface{}{ +var file_helper_forwarding_types_proto_goTypes = []any{ (*Request)(nil), // 0: forwarding.Request (*URL)(nil), // 1: forwarding.URL (*HeaderEntry)(nil), // 2: forwarding.HeaderEntry @@ -449,7 +449,7 @@ func file_helper_forwarding_types_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_helper_forwarding_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_helper_forwarding_types_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Request); i { case 0: return &v.state @@ -461,7 +461,7 @@ func file_helper_forwarding_types_proto_init() { return nil } } - file_helper_forwarding_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_helper_forwarding_types_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*URL); i { case 0: return &v.state @@ -473,7 +473,7 @@ func file_helper_forwarding_types_proto_init() { return nil } } - file_helper_forwarding_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_helper_forwarding_types_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*HeaderEntry); i { case 0: return &v.state @@ -485,7 +485,7 @@ func file_helper_forwarding_types_proto_init() { return nil } } - file_helper_forwarding_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_helper_forwarding_types_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Response); i { case 0: return &v.state diff --git a/helper/forwarding/types.proto b/helper/forwarding/types.proto index 7624257919de..9dfce583c21c 100644 --- a/helper/forwarding/types.proto +++ b/helper/forwarding/types.proto @@ -1,52 +1,52 @@ // Copyright (c) HashiCorp, 
Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/helper/forwarding"; - package forwarding; +option go_package = "github.com/hashicorp/vault/helper/forwarding"; + message Request { - // Not used right now but reserving in case it turns out that streaming - // makes things more economical on the gRPC side - //uint64 id = 1; - string method = 2; - URL url = 3; - map<string, HeaderEntry> header_entries = 4; - bytes body = 5; - string host = 6; - string remote_addr = 7; - repeated bytes peer_certificates = 8; + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + string method = 2; + URL url = 3; + map<string, HeaderEntry> header_entries = 4; + bytes body = 5; + string host = 6; + string remote_addr = 7; + repeated bytes peer_certificates = 8; } message URL { - string scheme = 1; - string opaque = 2; - // This isn't needed now but might be in the future, so we'll skip the - // number to keep the ordering in net/url - //UserInfo user = 3; - string host = 4; - string path = 5; - string raw_path = 6; - // This also isn't needed right now, but we'll reserve the number - //bool force_query = 7; - string raw_query = 8; - string fragment = 9; + string scheme = 1; + string opaque = 2; + // This isn't needed now but might be in the future, so we'll skip the + // number to keep the ordering in net/url + //UserInfo user = 3; + string host = 4; + string path = 5; + string raw_path = 6; + // This also isn't needed right now, but we'll reserve the number + //bool force_query = 7; + string raw_query = 8; + string fragment = 9; } message HeaderEntry { - repeated string values = 1; + repeated string values = 1; } message Response { - // Not used right now but reserving in case it turns out that streaming - // makes things more economical on the gRPC side - //uint64 id = 1; - uint32 status_code = 2; - bytes body = 3; - // Added in 0.6.2 to ensure that the content-type is set appropriately, as - // well as any other information - map<string, HeaderEntry> header_entries = 4; - uint64 last_remote_wal = 5; + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + uint32 status_code = 2; + bytes body = 3; + // Added in 0.6.2 to ensure that the content-type is set appropriately, as + // well as any other information + map<string, HeaderEntry> header_entries = 4; + uint64 last_remote_wal = 5; } diff --git a/helper/forwarding/util.go b/helper/forwarding/util.go index 836123543222..b07bbada328b 100644 --- a/helper/forwarding/util.go +++ b/helper/forwarding/util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package forwarding @@ -7,9 +7,7 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "errors" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -28,8 +26,8 @@ func (b bufCloser) Close() error { return nil } -// GenerateForwardedRequest generates a new http.Request that contains the -// original requests's information in the new request's body. +// GenerateForwardedHTTPRequest generates a new http.Request that contains the +// original request's information in the new request's body.
func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request, error) { fq, err := GenerateForwardedRequest(req) if err != nil { @@ -63,19 +61,7 @@ func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request func GenerateForwardedRequest(req *http.Request) (*Request, error) { var reader io.Reader = req.Body - ctx := req.Context() - maxRequestSize := ctx.Value("max_request_size") - if maxRequestSize != nil { - max, ok := maxRequestSize.(int64) - if !ok { - return nil, errors.New("could not parse max_request_size from request context") - } - if max > 0 { - reader = io.LimitReader(req.Body, max) - } - } - - body, err := ioutil.ReadAll(reader) + body, err := io.ReadAll(reader) if err != nil { return nil, err } @@ -115,7 +101,7 @@ func GenerateForwardedRequest(req *http.Request) (*Request, error) { return &fq, nil } -// ParseForwardedRequest generates a new http.Request that is comprised of the +// ParseForwardedHTTPRequest generates a new http.Request that is comprised of the // values in the given request's body, assuming it correctly parses into a // ForwardedRequest. func ParseForwardedHTTPRequest(req *http.Request) (*http.Request, error) { diff --git a/helper/forwarding/util_test.go b/helper/forwarding/util_test.go index 192646a15f4d..0bf4be76945e 100644 --- a/helper/forwarding/util_test.go +++ b/helper/forwarding/util_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package forwarding diff --git a/helper/hostutil/hostinfo.go b/helper/hostutil/hostinfo.go index 25c11e0b0753..e892ae8a6292 100644 --- a/helper/hostutil/hostinfo.go +++ b/helper/hostutil/hostinfo.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !openbsd diff --git a/helper/hostutil/hostinfo_error.go b/helper/hostutil/hostinfo_error.go index afbec28fa262..2cce22cb7d52 100644 --- a/helper/hostutil/hostinfo_error.go +++ b/helper/hostutil/hostinfo_error.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package hostutil diff --git a/helper/hostutil/hostinfo_openbsd.go b/helper/hostutil/hostinfo_openbsd.go index dbe1655e90dc..a73bb2df16f1 100644 --- a/helper/hostutil/hostinfo_openbsd.go +++ b/helper/hostutil/hostinfo_openbsd.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build openbsd diff --git a/helper/hostutil/hostinfo_test.go b/helper/hostutil/hostinfo_test.go index 0f53744adc1e..6862cacf790d 100644 --- a/helper/hostutil/hostinfo_test.go +++ b/helper/hostutil/hostinfo_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package hostutil diff --git a/helper/identity/identity.go b/helper/identity/identity.go index a7769f08e018..2d625c4bcc6c 100644 --- a/helper/identity/identity.go +++ b/helper/identity/identity.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package identity diff --git a/helper/identity/mfa/mfa.go b/helper/identity/mfa/mfa.go index 1f8af4f4c20b..a040563b502b 100644 --- a/helper/identity/mfa/mfa.go +++ b/helper/identity/mfa/mfa.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
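On the `GenerateForwardedRequest` hunk above: the deleted code pulled a `max_request_size` value out of the request context and wrapped the body in `io.LimitReader`; the replacement reads the body outright with `io.ReadAll`, on the premise that any size limit is enforced earlier in the handler stack. For reference, a self-contained sketch of the capping pattern the removed code implemented:

```go
// Sketch of capping how much of a body gets read, the technique the removed
// code used. readCapped is a hypothetical helper, not part of Vault.
package main

import (
	"fmt"
	"io"
	"log"
	"strings"
)

func readCapped(body io.Reader, max int64) ([]byte, error) {
	if max > 0 {
		body = io.LimitReader(body, max)
	}
	return io.ReadAll(body)
}

func main() {
	// ReadAll treats the io.EOF produced at the cap as a normal end of
	// stream, so truncation is silent: b is "0123" and err is nil.
	b, err := readCapped(strings.NewReader("0123456789"), 4)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", b)
}
```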
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mfa diff --git a/helper/identity/mfa/sentinel.go b/helper/identity/mfa/sentinel.go index a587aa70b699..02bc857ceb03 100644 --- a/helper/identity/mfa/sentinel.go +++ b/helper/identity/mfa/sentinel.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mfa diff --git a/helper/identity/mfa/types.pb.go b/helper/identity/mfa/types.pb.go index 57dbab0da60c..55c992fc8a09 100644 --- a/helper/identity/mfa/types.pb.go +++ b/helper/identity/mfa/types.pb.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: helper/identity/mfa/types.proto package mfa @@ -1024,7 +1024,7 @@ func file_helper_identity_mfa_types_proto_rawDescGZIP() []byte { } var file_helper_identity_mfa_types_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_helper_identity_mfa_types_proto_goTypes = []interface{}{ +var file_helper_identity_mfa_types_proto_goTypes = []any{ (*Config)(nil), // 0: mfa.Config (*TOTPConfig)(nil), // 1: mfa.TOTPConfig (*DuoConfig)(nil), // 2: mfa.DuoConfig @@ -1053,7 +1053,7 @@ func file_helper_identity_mfa_types_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_helper_identity_mfa_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Config); i { case 0: return &v.state @@ -1065,7 +1065,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*TOTPConfig); i { case 0: return &v.state @@ -1077,7 +1077,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DuoConfig); i { case 0: return &v.state @@ -1089,7 +1089,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*OktaConfig); i { case 0: return &v.state @@ -1101,7 +1101,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*PingIDConfig); i { case 0: return &v.state @@ -1113,7 +1113,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Secret); i { case 0: return &v.state @@ -1125,7 +1125,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[6].Exporter = func(v 
interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*TOTPSecret); i { case 0: return &v.state @@ -1137,7 +1137,7 @@ func file_helper_identity_mfa_types_proto_init() { return nil } } - file_helper_identity_mfa_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_mfa_types_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*MFAEnforcementConfig); i { case 0: return &v.state @@ -1150,13 +1150,13 @@ func file_helper_identity_mfa_types_proto_init() { } } } - file_helper_identity_mfa_types_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_helper_identity_mfa_types_proto_msgTypes[0].OneofWrappers = []any{ (*Config_TOTPConfig)(nil), (*Config_OktaConfig)(nil), (*Config_DuoConfig)(nil), (*Config_PingIDConfig)(nil), } - file_helper_identity_mfa_types_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_helper_identity_mfa_types_proto_msgTypes[5].OneofWrappers = []any{ (*Secret_TOTPSecret)(nil), } type x struct{} diff --git a/helper/identity/mfa/types.proto b/helper/identity/mfa/types.proto index 65eb853be246..f125a3d2f5f7 100644 --- a/helper/identity/mfa/types.proto +++ b/helper/identity/mfa/types.proto @@ -1,36 +1,36 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/helper/identity/mfa"; - package mfa; +option go_package = "github.com/hashicorp/vault/helper/identity/mfa"; + // Config represents the configuration information used *along with* the MFA // secret tied to caller's identity, to verify the MFA credentials supplied. // Configuration information differs by type. Handler of each type should know // what to expect from the Config field. message Config { - // @inject_tag: sentinel:"-" - string type = 1; - // @inject_tag: sentinel:"-" - string name = 2; - // @inject_tag: sentinel:"-" - string id = 3; - // @inject_tag: sentinel:"-" - string mount_accessor = 4; - // @inject_tag: sentinel:"-" - string username_format = 5; - // @inject_tag: sentinel:"-" - oneof config { - TOTPConfig totp_config = 6; - OktaConfig okta_config = 7; - DuoConfig duo_config = 8; - PingIDConfig pingid_config = 9; - } - // @inject_tag: sentinel:"-" - string namespace_id = 10; + // @inject_tag: sentinel:"-" + string type = 1; + // @inject_tag: sentinel:"-" + string name = 2; + // @inject_tag: sentinel:"-" + string id = 3; + // @inject_tag: sentinel:"-" + string mount_accessor = 4; + // @inject_tag: sentinel:"-" + string username_format = 5; + // @inject_tag: sentinel:"-" + oneof config { + TOTPConfig totp_config = 6; + OktaConfig okta_config = 7; + DuoConfig duo_config = 8; + PingIDConfig pingid_config = 9; + } + // @inject_tag: sentinel:"-" + string namespace_id = 10; } // TOTPConfig represents the configuration information required to generate @@ -39,115 +39,115 @@ message Config { // by the information stored in the entity and not from the values in the // configuration. 
message TOTPConfig { - // @inject_tag: sentinel:"-" - string issuer = 1; - // @inject_tag: sentinel:"-" - uint32 period = 2; - // @inject_tag: sentinel:"-" - int32 algorithm = 3; - // @inject_tag: sentinel:"-" - int32 digits = 4; - // @inject_tag: sentinel:"-" - uint32 skew = 5; - // @inject_tag: sentinel:"-" - uint32 key_size = 6; - // @inject_tag: sentinel:"-" - int32 qr_size = 7; - // @inject_tag: sentinel:"-" - uint32 max_validation_attempts = 8; + // @inject_tag: sentinel:"-" + string issuer = 1; + // @inject_tag: sentinel:"-" + uint32 period = 2; + // @inject_tag: sentinel:"-" + int32 algorithm = 3; + // @inject_tag: sentinel:"-" + int32 digits = 4; + // @inject_tag: sentinel:"-" + uint32 skew = 5; + // @inject_tag: sentinel:"-" + uint32 key_size = 6; + // @inject_tag: sentinel:"-" + int32 qr_size = 7; + // @inject_tag: sentinel:"-" + uint32 max_validation_attempts = 8; } // DuoConfig represents the configuration information required to perform // Duo authentication. message DuoConfig { - // @inject_tag: sentinel:"-" - string integration_key = 1; - // @inject_tag: sentinel:"-" - string secret_key = 2; - // @inject_tag: sentinel:"-" - string api_hostname = 3; - // @inject_tag: sentinel:"-" - string push_info = 4; - // @inject_tag: sentinel:"-" - bool use_passcode = 5; + // @inject_tag: sentinel:"-" + string integration_key = 1; + // @inject_tag: sentinel:"-" + string secret_key = 2; + // @inject_tag: sentinel:"-" + string api_hostname = 3; + // @inject_tag: sentinel:"-" + string push_info = 4; + // @inject_tag: sentinel:"-" + bool use_passcode = 5; } // OktaConfig contains Okta configuration parameters required to perform Okta // authentication. message OktaConfig { - // @inject_tag: sentinel:"-" - string org_name = 1; - // @inject_tag: sentinel:"-" - string api_token = 2; - // @inject_tag: sentinel:"-" - bool production = 3; - // @inject_tag: sentinel:"-" - string base_url = 4; - // @inject_tag: sentinel:"-" - bool primary_email = 5; + // @inject_tag: sentinel:"-" + string org_name = 1; + // @inject_tag: sentinel:"-" + string api_token = 2; + // @inject_tag: sentinel:"-" + bool production = 3; + // @inject_tag: sentinel:"-" + string base_url = 4; + // @inject_tag: sentinel:"-" + bool primary_email = 5; } // PingIDConfig contains PingID configuration information message PingIDConfig { - // @inject_tag: sentinel:"-" - string use_base64_key = 1; - // @inject_tag: sentinel:"-" - bool use_signature = 2; - // @inject_tag: sentinel:"-" - string token = 3; - // @inject_tag: sentinel:"-" - string idp_url = 4; - // @inject_tag: sentinel:"-" - string org_alias = 5; - // @inject_tag: sentinel:"-" - string admin_url = 6; - // @inject_tag: sentinel:"-" - string authenticator_url = 7; + // @inject_tag: sentinel:"-" + string use_base64_key = 1; + // @inject_tag: sentinel:"-" + bool use_signature = 2; + // @inject_tag: sentinel:"-" + string token = 3; + // @inject_tag: sentinel:"-" + string idp_url = 4; + // @inject_tag: sentinel:"-" + string org_alias = 5; + // @inject_tag: sentinel:"-" + string admin_url = 6; + // @inject_tag: sentinel:"-" + string authenticator_url = 7; } // Secret represents all the types of secrets which the entity can hold. // Each MFA type should add a secret type to the oneof block in this message. 
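Before the `Secret` message continues below, a short aside on how these `oneof` blocks surface in the generated Go: each case becomes its own wrapper struct (`Config_TOTPConfig`, `Config_OktaConfig`, and so on, as seen in the `OneofWrappers` slices earlier), all implementing an unexported marker interface, and consumers type-switch on the field. A sketch with local stand-in types rather than the real generated code:

```go
// Stand-in shapes for protoc-gen-go oneof output; the real generated types
// carry more machinery, but handlers consume them exactly like this.
package main

import "fmt"

type isConfig_Config interface{ isConfigConfig() }

type TOTPConfig struct{ Issuer string }
type OktaConfig struct{ OrgName string }

type Config_TOTPConfig struct{ TotpConfig *TOTPConfig }
type Config_OktaConfig struct{ OktaConfig *OktaConfig }

func (*Config_TOTPConfig) isConfigConfig() {}
func (*Config_OktaConfig) isConfigConfig() {}

type Config struct {
	Type   string
	Config isConfig_Config // the oneof: at most one wrapper is set
}

func describe(c *Config) string {
	switch v := c.Config.(type) {
	case *Config_TOTPConfig:
		return "TOTP issuer: " + v.TotpConfig.Issuer
	case *Config_OktaConfig:
		return "Okta org: " + v.OktaConfig.OrgName
	default:
		return "unset"
	}
}

func main() {
	c := &Config{Type: "totp", Config: &Config_TOTPConfig{&TOTPConfig{Issuer: "vault"}}}
	fmt.Println(describe(c))
}
```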
message Secret { - // @inject_tag: sentinel:"-" - string method_name = 1; - oneof value { - // @inject_tag: sentinel:"-" - TOTPSecret totp_secret = 2; - } + // @inject_tag: sentinel:"-" + string method_name = 1; + oneof value { + // @inject_tag: sentinel:"-" + TOTPSecret totp_secret = 2; + } } // TOTPSecret represents the secret that gets stored in the entity about a // particular MFA method. This information is used to validate the MFA // credential supplied over the API during request time. message TOTPSecret { - // @inject_tag: sentinel:"-" - string issuer = 1; - // @inject_tag: sentinel:"-" - uint32 period = 2; - // @inject_tag: sentinel:"-" - int32 algorithm = 3; - // @inject_tag: sentinel:"-" - int32 digits = 4; - // @inject_tag: sentinel:"-" - uint32 skew = 5; - // @inject_tag: sentinel:"-" - uint32 key_size = 6; - // reserving 7 here just to keep parity with the config message above - // @inject_tag: sentinel:"-" - string account_name = 8; - // @inject_tag: sentinel:"-" - string key = 9; + // @inject_tag: sentinel:"-" + string issuer = 1; + // @inject_tag: sentinel:"-" + uint32 period = 2; + // @inject_tag: sentinel:"-" + int32 algorithm = 3; + // @inject_tag: sentinel:"-" + int32 digits = 4; + // @inject_tag: sentinel:"-" + uint32 skew = 5; + // @inject_tag: sentinel:"-" + uint32 key_size = 6; + // reserving 7 here just to keep parity with the config message above + // @inject_tag: sentinel:"-" + string account_name = 8; + // @inject_tag: sentinel:"-" + string key = 9; } // MFAEnforcementConfig is what the user provides to the // mfa/login_enforcement endpoint. message MFAEnforcementConfig { - string name = 1; - string namespace_id = 2; - repeated string mfa_method_ids = 3; - repeated string auth_method_accessors = 4; - repeated string auth_method_types = 5; - repeated string identity_group_ids = 6; - repeated string identity_entity_ids = 7; - string id = 8; + string name = 1; + string namespace_id = 2; + repeated string mfa_method_ids = 3; + repeated string auth_method_accessors = 4; + repeated string auth_method_types = 5; + repeated string identity_group_ids = 6; + repeated string identity_entity_ids = 7; + string id = 8; } diff --git a/helper/identity/sentinel.go b/helper/identity/sentinel.go index 4f65e62c848d..a7ff44c91d79 100644 --- a/helper/identity/sentinel.go +++ b/helper/identity/sentinel.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package identity diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go index 91b4c0ff20d7..fbdcb636735d 100644 --- a/helper/identity/types.pb.go +++ b/helper/identity/types.pb.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: helper/identity/types.proto package identity @@ -1106,7 +1106,7 @@ func file_helper_identity_types_proto_rawDescGZIP() []byte { } var file_helper_identity_types_proto_msgTypes = make([]protoimpl.MessageInfo, 14) -var file_helper_identity_types_proto_goTypes = []interface{}{ +var file_helper_identity_types_proto_goTypes = []any{ (*Group)(nil), // 0: identity.Group (*LocalAliases)(nil), // 1: identity.LocalAliases (*Entity)(nil), // 2: identity.Entity @@ -1162,7 +1162,7 @@ func file_helper_identity_types_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_helper_identity_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_types_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Group); i { case 0: return &v.state @@ -1174,7 +1174,7 @@ func file_helper_identity_types_proto_init() { return nil } } - file_helper_identity_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_types_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*LocalAliases); i { case 0: return &v.state @@ -1186,7 +1186,7 @@ func file_helper_identity_types_proto_init() { return nil } } - file_helper_identity_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_types_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Entity); i { case 0: return &v.state @@ -1198,7 +1198,7 @@ func file_helper_identity_types_proto_init() { return nil } } - file_helper_identity_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_types_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Alias); i { case 0: return &v.state @@ -1210,7 +1210,7 @@ func file_helper_identity_types_proto_init() { return nil } } - file_helper_identity_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_types_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*EntityStorageEntry); i { case 0: return &v.state @@ -1222,7 +1222,7 @@ func file_helper_identity_types_proto_init() { return nil } } - file_helper_identity_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_helper_identity_types_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*PersonaIndexEntry); i { case 0: return &v.state diff --git a/helper/identity/types.proto b/helper/identity/types.proto index a34d715acf34..7c6c49bd6050 100644 --- a/helper/identity/types.proto +++ b/helper/identity/types.proto @@ -1,264 +1,264 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/helper/identity"; - package identity; import "google/protobuf/timestamp.proto"; import "helper/identity/mfa/types.proto"; +option go_package = "github.com/hashicorp/vault/helper/identity"; + // Group represents an identity group. 
message Group { - // ID is the unique identifier for this group - // @inject_tag: sentinel:"-" - string id = 1; - - // Name is the unique name for this group - // @inject_tag: sentinel:"-" - string name = 2; - - // Policies are the vault policies to be granted to members of this group - // @inject_tag: sentinel:"-" - repeated string policies = 3; - - // ParentGroupIDs are the identifiers of those groups to which this group is a - // member of. These will serve as references to the parent group in the - // hierarchy. - // @inject_tag: sentinel:"-" - repeated string parent_group_ids = 4; - - // MemberEntityIDs are the identifiers of entities which are members of this - // group - // @inject_tag: sentinel:"-" - repeated string member_entity_ids = 5; - - // Metadata represents the custom data tied with this group - // @inject_tag: sentinel:"-" - map<string, string> metadata = 6; - - // CreationTime is the time at which this group was created - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp creation_time = 7; - - // LastUpdateTime is the time at which this group was last modified - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp last_update_time= 8; - - // ModifyIndex tracks the number of updates to the group. It is useful to detect - // updates to the groups. - // @inject_tag: sentinel:"-" - uint64 modify_index = 9; - - // BucketKey is the path of the storage packer key into which this group is - // stored. - // @inject_tag: sentinel:"-" - string bucket_key = 10; - - // Alias is used to mark this group as an internal mapping of a group that - // is external to the identity store. Alias can only be set if the 'type' - // is set to 'external'. - // @inject_tag: sentinel:"-" - Alias alias = 11; - - // Type indicates if this group is an internal group or an external group. - // Memberships of the internal groups can be managed over the API whereas - // the memberships on the external group --for which a corresponding alias - // will be set-- will be managed automatically. - // @inject_tag: sentinel:"-" - string type = 12; - - // NamespaceID is the identifier of the namespace to which this group - // belongs to. Do not return this value over the API when reading the - // group. - // @inject_tag: sentinel:"-" - string namespace_id = 13; + // ID is the unique identifier for this group + // @inject_tag: sentinel:"-" + string id = 1; + + // Name is the unique name for this group + // @inject_tag: sentinel:"-" + string name = 2; + + // Policies are the vault policies to be granted to members of this group + // @inject_tag: sentinel:"-" + repeated string policies = 3; + + // ParentGroupIDs are the identifiers of those groups of which this group is a + // member. These will serve as references to the parent group in the + // hierarchy. + // @inject_tag: sentinel:"-" + repeated string parent_group_ids = 4; + + // MemberEntityIDs are the identifiers of entities which are members of this + // group + // @inject_tag: sentinel:"-" + repeated string member_entity_ids = 5; + + // Metadata represents the custom data tied with this group + // @inject_tag: sentinel:"-" + map<string, string> metadata = 6; + + // CreationTime is the time at which this group was created + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp creation_time = 7; + + // LastUpdateTime is the time at which this group was last modified + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp last_update_time = 8; + + // ModifyIndex tracks the number of updates to the group. It is useful to detect + // updates to the groups.
+ // @inject_tag: sentinel:"-" + uint64 modify_index = 9; + + // BucketKey is the path of the storage packer key into which this group is + // stored. + // @inject_tag: sentinel:"-" + string bucket_key = 10; + + // Alias is used to mark this group as an internal mapping of a group that + // is external to the identity store. Alias can only be set if the 'type' + // is set to 'external'. + // @inject_tag: sentinel:"-" + Alias alias = 11; + + // Type indicates if this group is an internal group or an external group. + // Memberships of the internal groups can be managed over the API whereas + // the memberships on the external group --for which a corresponding alias + // will be set-- will be managed automatically. + // @inject_tag: sentinel:"-" + string type = 12; + + // NamespaceID is the identifier of the namespace to which this group + // belongs. Do not return this value over the API when reading the + // group. + // @inject_tag: sentinel:"-" + string namespace_id = 13; } // LocalAliases holds the aliases belonging to an entity that are local to the // cluster. message LocalAliases { - repeated Alias aliases = 1; + repeated Alias aliases = 1; } // Entity represents an entity that gets persisted and indexed. // Entity is fundamentally composed of zero or many aliases. message Entity { - // Aliases are the identities that this entity is made of. This can be - // empty as well to favor being able to create the entity first and then - // incrementally adding aliases. - // @inject_tag: sentinel:"-" - repeated Alias aliases = 1; - - // ID is the unique identifier of the entity which always be a UUID. This - // should never be allowed to be updated. - // @inject_tag: sentinel:"-" - string id = 2; - - // Name is a unique identifier of the entity which is intended to be - // human-friendly. The default name might not be human friendly since it - // gets suffixed by a UUID, but it can optionally be updated, unlike the ID - // field. - // @inject_tag: sentinel:"-" - string name = 3; - - // Metadata represents the explicit metadata which is set by the - // clients. This is useful to tie any information pertaining to the - // aliases. This is a non-unique field of entity, meaning multiple - // entities can have the same metadata set. Entities will be indexed based - // on this explicit metadata. This enables virtual groupings of entities - // based on its metadata. - // @inject_tag: sentinel:"-" - map<string, string> metadata = 4; - - // CreationTime is the time at which this entity is first created. - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp creation_time = 5; - - // LastUpdateTime is the most recent time at which the properties of this - // entity got modified. This is helpful in filtering out entities based on - // its age and to take action on them, if desired. - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp last_update_time= 6; - - // MergedEntityIDs are the entities which got merged to this one. Entities - // will be indexed based on all the entities that got merged into it. This - // helps to apply the actions on this entity on the tokens that are merged - // to the merged entities. Merged entities will be deleted entirely and - // this is the only trackable trail of its earlier presence. - // @inject_tag: sentinel:"-" - repeated string merged_entity_ids = 7; - - // Policies the entity is entitled to - // @inject_tag: sentinel:"-" - repeated string policies = 8; - - // BucketKey is the path of the storage packer key into which this entity is - // stored.
- // @inject_tag: sentinel:"-" - string bucket_key = 9; - - // MFASecrets holds the MFA secrets indexed by the identifier of the MFA - // method configuration. - // @inject_tag: sentinel:"-" - map<string, mfa.Secret> mfa_secrets = 10; - - // Disabled indicates whether tokens associated with the account should not - // be able to be used - // @inject_tag: sentinel:"-" - bool disabled = 11; - - // NamespaceID is the identifier of the namespace to which this entity - // belongs to. Do not return this value over the API when reading the - // entity. - // @inject_tag: sentinel:"-" - string namespace_id = 12; + // Aliases are the identities that this entity is made of. This can be + // empty as well to favor being able to create the entity first and then + // incrementally adding aliases. + // @inject_tag: sentinel:"-" + repeated Alias aliases = 1; + + // ID is the unique identifier of the entity which will always be a UUID. This + // should never be allowed to be updated. + // @inject_tag: sentinel:"-" + string id = 2; + + // Name is a unique identifier of the entity which is intended to be + // human-friendly. The default name might not be human friendly since it + // gets suffixed by a UUID, but it can optionally be updated, unlike the ID + // field. + // @inject_tag: sentinel:"-" + string name = 3; + + // Metadata represents the explicit metadata which is set by the + // clients. This is useful to tie any information pertaining to the + // aliases. This is a non-unique field of entity, meaning multiple + // entities can have the same metadata set. Entities will be indexed based + // on this explicit metadata. This enables virtual groupings of entities + // based on its metadata. + // @inject_tag: sentinel:"-" + map<string, string> metadata = 4; + + // CreationTime is the time at which this entity is first created. + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp creation_time = 5; + + // LastUpdateTime is the most recent time at which the properties of this + // entity got modified. This is helpful in filtering out entities based on + // its age and to take action on them, if desired. + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp last_update_time = 6; + + // MergedEntityIDs are the entities which got merged to this one. Entities + // will be indexed based on all the entities that got merged into it. This + // helps to apply the actions on this entity on the tokens that are merged + // to the merged entities. Merged entities will be deleted entirely and + // this is the only trackable trail of its earlier presence. + // @inject_tag: sentinel:"-" + repeated string merged_entity_ids = 7; + + // Policies the entity is entitled to + // @inject_tag: sentinel:"-" + repeated string policies = 8; + + // BucketKey is the path of the storage packer key into which this entity is + // stored. + // @inject_tag: sentinel:"-" + string bucket_key = 9; + + // MFASecrets holds the MFA secrets indexed by the identifier of the MFA + // method configuration. + // @inject_tag: sentinel:"-" + map<string, mfa.Secret> mfa_secrets = 10; + + // Disabled indicates whether tokens associated with the account should not + // be able to be used + // @inject_tag: sentinel:"-" + bool disabled = 11; + + // NamespaceID is the identifier of the namespace to which this entity + // belongs. Do not return this value over the API when reading the + // entity.
+ // @inject_tag: sentinel:"-" + string namespace_id = 12; } // Alias represents the alias that gets stored inside of the // entity object in storage and also represents in an in-memory index of an // alias object. message Alias { - // ID is the unique identifier that represents this alias - // @inject_tag: sentinel:"-" - string id = 1; - - // CanonicalID is the entity identifier to which this alias belongs to - // @inject_tag: sentinel:"-" - string canonical_id = 2; - - // MountType is the backend mount's type to which this alias belongs to. - // This enables categorically querying aliases of specific backend types. - // @inject_tag: sentinel:"-" - string mount_type = 3; - - // MountAccessor is the backend mount's accessor to which this alias - // belongs to. - // @inject_tag: sentinel:"-" - string mount_accessor = 4; - - // MountPath is the backend mount's path to which the Maccessor belongs to. This - // field is not used for any operational purposes. This is only returned when - // alias is read, only as a nicety. - // @inject_tag: sentinel:"-" - string mount_path = 5; - - // Metadata is the explicit metadata that clients set against an entity - // which enables virtual grouping of aliases. Aliases will be indexed - // against their metadata. - // @inject_tag: sentinel:"-" - map metadata = 6; - - // Name is the identifier of this alias in its authentication source. - // This does not uniquely identify an alias in Vault. This in conjunction - // with MountAccessor form to be the factors that represent an alias in a - // unique way. Aliases will be indexed based on this combined uniqueness - // factor. - // @inject_tag: sentinel:"-" - string name = 7; - - // CreationTime is the time at which this alias was first created - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp creation_time = 8; - - // LastUpdateTime is the most recent time at which the properties of this - // alias got modified. This is helpful in filtering out aliases based - // on its age and to take action on them, if desired. - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp last_update_time = 9; - - // MergedFromCanonicalIDs is the FIFO history of merging activity - // @inject_tag: sentinel:"-" - repeated string merged_from_canonical_ids = 10; - - // NamespaceID is the identifier of the namespace to which this alias - // belongs. - // @inject_tag: sentinel:"-" - string namespace_id = 11; - - // Custom Metadata represents the custom data tied to this alias - // @inject_tag: sentinel:"-" - map custom_metadata = 12; - - // Local indicates if the alias only belongs to the cluster where it was - // created. If true, the alias will be stored in a location that is ignored by - // the performance replication subsystem. - // @inject_tag: sentinel:"-" - bool local = 13; - - // LocalBucketKey is the identifying element of the location where this alias - // is stored in the storage packer. This helps in querying local aliases - // during invalidation of local aliases in performance standbys. - // @inject_tag: sentinel:"-" - string local_bucket_key = 14; + // ID is the unique identifier that represents this alias + // @inject_tag: sentinel:"-" + string id = 1; + + // CanonicalID is the entity identifier to which this alias belongs to + // @inject_tag: sentinel:"-" + string canonical_id = 2; + + // MountType is the backend mount's type to which this alias belongs to. + // This enables categorically querying aliases of specific backend types. 
+ // @inject_tag: sentinel:"-" + string mount_type = 3; + + // MountAccessor is the backend mount's accessor to which this alias + // belongs to. + // @inject_tag: sentinel:"-" + string mount_accessor = 4; + + // MountPath is the backend mount's path to which the Maccessor belongs to. This + // field is not used for any operational purposes. This is only returned when + // alias is read, only as a nicety. + // @inject_tag: sentinel:"-" + string mount_path = 5; + + // Metadata is the explicit metadata that clients set against an entity + // which enables virtual grouping of aliases. Aliases will be indexed + // against their metadata. + // @inject_tag: sentinel:"-" + map metadata = 6; + + // Name is the identifier of this alias in its authentication source. + // This does not uniquely identify an alias in Vault. This in conjunction + // with MountAccessor form to be the factors that represent an alias in a + // unique way. Aliases will be indexed based on this combined uniqueness + // factor. + // @inject_tag: sentinel:"-" + string name = 7; + + // CreationTime is the time at which this alias was first created + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp creation_time = 8; + + // LastUpdateTime is the most recent time at which the properties of this + // alias got modified. This is helpful in filtering out aliases based + // on its age and to take action on them, if desired. + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp last_update_time = 9; + + // MergedFromCanonicalIDs is the FIFO history of merging activity + // @inject_tag: sentinel:"-" + repeated string merged_from_canonical_ids = 10; + + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + // @inject_tag: sentinel:"-" + string namespace_id = 11; + + // Custom Metadata represents the custom data tied to this alias + // @inject_tag: sentinel:"-" + map custom_metadata = 12; + + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that is ignored by + // the performance replication subsystem. + // @inject_tag: sentinel:"-" + bool local = 13; + + // LocalBucketKey is the identifying element of the location where this alias + // is stored in the storage packer. This helps in querying local aliases + // during invalidation of local aliases in performance standbys. + // @inject_tag: sentinel:"-" + string local_bucket_key = 14; } // Deprecated. Retained for backwards compatibility. message EntityStorageEntry { - repeated PersonaIndexEntry personas = 1; - string id = 2; - string name = 3; - map metadata = 4; - google.protobuf.Timestamp creation_time = 5; - google.protobuf.Timestamp last_update_time= 6; - repeated string merged_entity_ids = 7; - repeated string policies = 8; - string bucket_key_hash = 9; - map mfa_secrets = 10; + repeated PersonaIndexEntry personas = 1; + string id = 2; + string name = 3; + map metadata = 4; + google.protobuf.Timestamp creation_time = 5; + google.protobuf.Timestamp last_update_time = 6; + repeated string merged_entity_ids = 7; + repeated string policies = 8; + string bucket_key_hash = 9; + map mfa_secrets = 10; } // Deprecated. Retained for backwards compatibility. 
message PersonaIndexEntry { - string id = 1; - string entity_id = 2; - string mount_type = 3; - string mount_accessor = 4; - string mount_path = 5; - map metadata = 6; - string name = 7; - google.protobuf.Timestamp creation_time = 8; - google.protobuf.Timestamp last_update_time = 9; - repeated string merged_from_entity_ids = 10; + string id = 1; + string entity_id = 2; + string mount_type = 3; + string mount_accessor = 4; + string mount_path = 5; + map metadata = 6; + string name = 7; + google.protobuf.Timestamp creation_time = 8; + google.protobuf.Timestamp last_update_time = 9; + repeated string merged_from_entity_ids = 10; } diff --git a/helper/locking/core_locking.go b/helper/locking/core_locking.go new file mode 100644 index 000000000000..7370fd907c91 --- /dev/null +++ b/helper/locking/core_locking.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package locking + +import ( + "slices" + "strings" +) + +// ParseDetectDeadlockConfigParameter takes the detectDeadlockConfigParameter string +// and transforms it to a lowercase version of the string, then splits it into +// a slice of strings by interpreting commas as the element delimiters. +func ParseDetectDeadlockConfigParameter(detectDeadlockConfigParameter string) []string { + if detectDeadlockConfigParameter == "" { + // This doesn't seem necessary, since the companion functions that use + // this slice can handle an empty slice just the same as a nil slice, + // but for the sake of compatibility, this will be introduced for now + // until all occurrences that rely on Core.detectDeadlocks have been + // switched to using functions from this file to create their locks. + return nil + } + + result := strings.Split(strings.ToLower(detectDeadlockConfigParameter), ",") + for i := range result { + result[i] = strings.TrimSpace(result[i]) + } + + return result +} + +// CreateConfigurableRWMutex determines if the specified lock (identifier) should +// use a deadlock detecting implementation (DeadlockRWMutex) or simply a basic +// sync.RWMutex instance. This is done by splitting the deadlockDetectionLocks +// string into a slice of strings. If the slice contains the specified lock +// (identifier), then the deadlock detecting implementation is used, otherwise a +// sync.Mutex is returned. +func CreateConfigurableRWMutex(deadlockDetectionLocks []string, identifier string) RWMutex { + if slices.Contains(deadlockDetectionLocks, strings.ToLower(identifier)) { + return &DeadlockRWMutex{} + } + + return &SyncRWMutex{} +} diff --git a/helper/locking/core_locking_test.go b/helper/locking/core_locking_test.go new file mode 100644 index 000000000000..55017812d3d6 --- /dev/null +++ b/helper/locking/core_locking_test.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package locking + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestParseDetectDeadlockConfigParameter verifies that all types of strings +// that could be obtained from the configuration file, are correctly parsed +// into a slice of string elements. 
+func TestParseDetectDeadlockConfigParameter(t *testing.T) { + for _, tc := range []struct { + name string + detectDeadlockConfigParameter string + expectedResult []string + }{ + { + name: "empty-string", + }, + { + name: "single-value", + detectDeadlockConfigParameter: "bar", + expectedResult: []string{"bar"}, + }, + { + name: "single-value-mixed-case", + detectDeadlockConfigParameter: "BaR", + expectedResult: []string{"bar"}, + }, + { + name: "multiple-values", + detectDeadlockConfigParameter: "bar,BAZ,fIZ", + expectedResult: []string{"bar", "baz", "fiz"}, + }, + { + name: "non-canonical-string-list", + detectDeadlockConfigParameter: "bar , baz, ", + expectedResult: []string{"bar", "baz", ""}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + result := ParseDetectDeadlockConfigParameter(tc.detectDeadlockConfigParameter) + assert.ElementsMatch(t, tc.expectedResult, result) + }) + } +} + +// TestCreateConfigurableRWMutex verifies the correct behaviour in determining +// whether a deadlock detecting RWMutex should be returned or not based on the +// input arguments for the CreateConfigurableRWMutex function. +func TestCreateConfigurableRWMutex(t *testing.T) { + mutexTypes := map[bool]string{ + false: "locking.SyncRWMutex", + true: "locking.DeadlockRWMutex", + } + + for _, tc := range []struct { + name string + detectDeadlocks []string + lock string + expectDeadlockLock bool + }{ + { + name: "no-lock-types-specified", + lock: "foo", + }, + { + name: "single-lock-specified-no-match", + detectDeadlocks: []string{"bar"}, + lock: "foo", + }, + { + name: "single-lock-specified-match", + detectDeadlocks: []string{"foo"}, + lock: "foo", + expectDeadlockLock: true, + }, + { + name: "multiple-locks-specified-no-match", + detectDeadlocks: []string{"bar", "baz", "fiz"}, + lock: "foo", + }, + { + name: "multiple-locks-specified-match", + detectDeadlocks: []string{"bar", "foo", "baz"}, + lock: "foo", + expectDeadlockLock: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + m := CreateConfigurableRWMutex(tc.detectDeadlocks, tc.lock) + + _, ok := m.(*DeadlockRWMutex) + if tc.expectDeadlockLock != ok { + t.Fatalf("unexpected RWMutex type returned, expected: %s got %s", mutexTypes[tc.expectDeadlockLock], mutexTypes[ok]) + } + }) + } +} diff --git a/helper/locking/lock.go b/helper/locking/lock.go index a9bff4c0f06c..cc2e50345229 100644 --- a/helper/locking/lock.go +++ b/helper/locking/lock.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package locking diff --git a/helper/logging/logfile.go b/helper/logging/logfile.go index 9417e9ca8233..2f2eb8fbc46d 100644 --- a/helper/logging/logfile.go +++ b/helper/logging/logfile.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
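For orientation, a minimal sketch of how the two new helpers in helper/locking/core_locking.go compose: the config string is parsed once, and the resulting slice decides which RWMutex implementation each named lock gets. The "StateLock" and "barrier" identifiers below are illustrative stand-ins, not taken from this diff.

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/locking"
)

func main() {
	// Typically sourced from the detect_deadlocks config parameter;
	// parsing lowercases, splits on commas, and trims whitespace.
	detect := locking.ParseDetectDeadlockConfigParameter("expiration,StateLock")

	// "StateLock" matches after lowercasing, so it gets the deadlock
	// detecting implementation; "barrier" does not, so it gets a plain
	// SyncRWMutex.
	stateLock := locking.CreateConfigurableRWMutex(detect, "StateLock")
	barrierLock := locking.CreateConfigurableRWMutex(detect, "barrier")

	fmt.Printf("%T %T\n", stateLock, barrierLock) // *locking.DeadlockRWMutex *locking.SyncRWMutex
}
```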
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package logging @@ -57,7 +57,8 @@ func (l *LogFile) Write(b []byte) (n int, err error) { if err := l.openNew(); err != nil { return 0, err } - } else if err := l.rotate(); err != nil { // Check for the last contact and rotate if necessary + } + if err := l.rotate(); err != nil { // Check for the last contact and rotate if necessary return 0, err } @@ -82,21 +83,20 @@ func (l *LogFile) fileNamePattern() string { } func (l *LogFile) openNew() error { - fileNamePattern := l.fileNamePattern() - - createTime := now() - newFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + newFileName := l.fileName newFilePath := filepath.Join(l.logPath, newFileName) - // Try creating a file. We truncate the file because we are the only authority to write the logs - filePointer, err := os.OpenFile(newFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) + // Try creating or opening the active log file. Since the active log file + // always has the same name, append log entries to prevent overwriting + // previous log data. + filePointer, err := os.OpenFile(newFilePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o640) if err != nil { return err } - // New file, new 'bytes' tracker, new creation time :) :) + // New file, new bytes tracker, new creation time :) l.fileInfo = filePointer - l.lastCreated = createTime + l.lastCreated = now() l.bytesWritten = 0 return nil } @@ -109,6 +109,9 @@ func (l *LogFile) rotate() error { if err := l.fileInfo.Close(); err != nil { return err } + if err := l.renameCurrentFile(); err != nil { + return err + } if err := l.pruneFiles(); err != nil { return err } @@ -148,3 +151,13 @@ func removeFiles(files []string) (err error) { } return err } + +func (l *LogFile) renameCurrentFile() error { + fileNamePattern := l.fileNamePattern() + createTime := now() + currentFilePath := filepath.Join(l.logPath, l.fileName) + oldFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + oldFilePath := filepath.Join(l.logPath, oldFileName) + + return os.Rename(currentFilePath, oldFilePath) +} diff --git a/helper/logging/logfile_test.go b/helper/logging/logfile_test.go index a0cae986aadc..8cb66693d8ee 100644 --- a/helper/logging/logfile_test.go +++ b/helper/logging/logfile_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package logging diff --git a/helper/logging/logger.go b/helper/logging/logger.go index 1efac27bedfd..b37134b93cb4 100644 --- a/helper/logging/logger.go +++ b/helper/logging/logger.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package logging @@ -11,7 +11,7 @@ import ( "strings" "time" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" ) @@ -32,7 +32,7 @@ type LogConfig struct { Name string // LogLevel is the minimum level to be logged. - LogLevel log.Level + LogLevel hclog.Level // LogFormat is the log format to use, supported formats are 'standard' and 'json'. LogFormat LogFormat @@ -48,10 +48,27 @@ type LogConfig struct { // LogRotateMaxFiles is the maximum number of past archived log files to keep LogRotateMaxFiles int + + // DefaultFileName should be set to the value to be used if the LogFilePath + // ends in a path separator such as '/var/log/' + // Examples of the default name are as follows: 'vault', 'agent' or 'proxy. 
+ // The creator of this struct *must* ensure that it is assigned before doing + // anything with LogConfig! + DefaultFileName string +} + +// NewLogConfig should be used to initialize the LogConfig struct. +func NewLogConfig(defaultFileName string) (*LogConfig, error) { + defaultFileName = strings.TrimSpace(defaultFileName) + if defaultFileName == "" { + return nil, errors.New("default file name is required") + } + + return &LogConfig{DefaultFileName: defaultFileName}, nil } func (c *LogConfig) isLevelInvalid() bool { - return c.LogLevel == log.NoLevel || c.LogLevel == log.Off || c.LogLevel.String() == "unknown" + return c.LogLevel == hclog.NoLevel || c.LogLevel == hclog.Off || c.LogLevel.String() == "unknown" } func (c *LogConfig) isFormatJson() bool { @@ -104,7 +121,7 @@ func parseFullPath(fullPath string) (directory, fileName string, err error) { } // Setup creates a new logger with the specified configuration and writer -func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) { +func Setup(config *LogConfig, w io.Writer) (hclog.InterceptLogger, error) { // Validate the log level if config.isLevelInvalid() { return nil, fmt.Errorf("invalid log level: %v", config.LogLevel) @@ -121,7 +138,9 @@ func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) { if err != nil { return nil, err } - + if fileName == "" { + fileName = fmt.Sprintf("%s.log", config.DefaultFileName) + } if config.LogRotateDuration == 0 { config.LogRotateDuration = defaultRotateDuration } @@ -142,7 +161,7 @@ func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) { writers = append(writers, logFile) } - logger := log.NewInterceptLogger(&log.LoggerOptions{ + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ Name: config.Name, Level: config.LogLevel, IndependentLevels: true, @@ -169,21 +188,21 @@ func ParseLogFormat(format string) (LogFormat, error) { // ParseLogLevel returns the hclog.Level that corresponds with the provided level string. // This differs hclog.LevelFromString in that it supports additional level strings. -func ParseLogLevel(logLevel string) (log.Level, error) { - var result log.Level +func ParseLogLevel(logLevel string) (hclog.Level, error) { + var result hclog.Level logLevel = strings.ToLower(strings.TrimSpace(logLevel)) switch logLevel { case "trace": - result = log.Trace + result = hclog.Trace case "debug": - result = log.Debug + result = hclog.Debug case "notice", "info", "": - result = log.Info + result = hclog.Info case "warn", "warning": - result = log.Warn + result = hclog.Warn case "err", "error": - result = log.Error + result = hclog.Error default: return -1, errors.New(fmt.Sprintf("unknown log level: %s", logLevel)) } @@ -192,11 +211,11 @@ func ParseLogLevel(logLevel string) (log.Level, error) { } // TranslateLoggerLevel returns the string that corresponds with logging level of the hclog.Logger. -func TranslateLoggerLevel(logger log.Logger) (string, error) { +func TranslateLoggerLevel(logger hclog.Logger) (string, error) { logLevel := logger.GetLevel() switch logLevel { - case log.Trace, log.Debug, log.Info, log.Warn, log.Error: + case hclog.Trace, hclog.Debug, hclog.Info, hclog.Warn, hclog.Error: return logLevel.String(), nil default: return "", fmt.Errorf("unknown log level") diff --git a/helper/logging/logger_test.go b/helper/logging/logger_test.go index 30ff1783a776..c5f7ec50d9bc 100644 --- a/helper/logging/logger_test.go +++ b/helper/logging/logger_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
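A short usage sketch of the reworked logging setup: NewLogConfig supplies the DefaultFileName that Setup falls back to when LogFilePath ends in a path separator. The "vault" name and "/var/log/vault/" path are illustrative.

```
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/helper/logging"
)

func main() {
	// "vault" is the fallback file name, so the trailing-slash path below
	// resolves to /var/log/vault/vault.log inside Setup.
	cfg, err := logging.NewLogConfig("vault")
	if err != nil {
		panic(err)
	}
	cfg.Name = "core"
	cfg.LogLevel = hclog.Info
	cfg.LogFilePath = "/var/log/vault/" // no explicit file name

	logger, err := logging.Setup(cfg, os.Stderr)
	if err != nil {
		panic(err)
	}
	logger.Info("logging configured")
}
```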
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package logging @@ -8,15 +8,17 @@ import ( "encoding/json" "errors" "os" + "path/filepath" "testing" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestLogger_SetupBasic(t *testing.T) { - cfg := &LogConfig{Name: "test-system", LogLevel: log.Info} + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info logger, err := Setup(cfg, nil) require.NoError(t, err) @@ -26,16 +28,15 @@ func TestLogger_SetupBasic(t *testing.T) { } func TestLogger_SetupInvalidLogLevel(t *testing.T) { - cfg := &LogConfig{} + cfg := newTestLogConfig(t) _, err := Setup(cfg, nil) assert.Containsf(t, err.Error(), "invalid log level", "expected error %s", err) } func TestLogger_SetupLoggerErrorLevel(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Error, - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Error var buf bytes.Buffer @@ -48,15 +49,16 @@ func TestLogger_SetupLoggerErrorLevel(t *testing.T) { output := buf.String() - require.Contains(t, output, "[ERROR] test error msg") - require.NotContains(t, output, "[INFO] test info msg") + require.Contains(t, output, "[ERROR] test-system: test error msg") + require.NotContains(t, output, "[INFO] test-system: test info msg") } func TestLogger_SetupLoggerDebugLevel(t *testing.T) { - cfg := LogConfig{LogLevel: log.Debug} + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug var buf bytes.Buffer - logger, err := Setup(&cfg, &buf) + logger, err := Setup(cfg, &buf) require.NoError(t, err) require.NotNil(t, logger) @@ -65,15 +67,14 @@ func TestLogger_SetupLoggerDebugLevel(t *testing.T) { output := buf.String() - require.Contains(t, output, "[INFO] test info msg") - require.Contains(t, output, "[DEBUG] test debug msg") + require.Contains(t, output, "[INFO] test-system: test info msg") + require.Contains(t, output, "[DEBUG] test-system: test debug msg") } -func TestLogger_SetupLoggerWithName(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - Name: "test-system", - } +func TestLogger_SetupLoggerWithoutName(t *testing.T) { + cfg := newTestLogConfig(t) + cfg.Name = "" + cfg.LogLevel = hclog.Info var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -82,15 +83,13 @@ func TestLogger_SetupLoggerWithName(t *testing.T) { logger.Warn("test warn msg") - require.Contains(t, buf.String(), "[WARN] test-system: test warn msg") + require.Contains(t, buf.String(), "[WARN] test warn msg") } func TestLogger_SetupLoggerWithJSON(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - LogFormat: JSONFormat, - Name: "test-system", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug + cfg.LogFormat = JSONFormat var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -108,13 +107,68 @@ func TestLogger_SetupLoggerWithJSON(t *testing.T) { require.Equal(t, jsonOutput["@message"], "test warn msg") } -func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { +func TestLogger_SetupLoggerWithValidLogPathMissingFileName(t *testing.T) { tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tmpDir + "/" // add the trailing slash to the temp dir + var buf bytes.Buffer - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tmpDir, //+ "/", - } + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Info("juan?") + + m, err := filepath.Glob(cfg.LogFilePath + "*") + require.NoError(t, err) + require.Truef(t, 
len(m) == 1, "no files were found") +} + +func TestLogger_SetupLoggerWithValidLogPathFileName(t *testing.T) { + tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = filepath.Join(tmpDir, "juan.log") + var buf bytes.Buffer + + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Info("juan?") + f, err := os.Stat(cfg.LogFilePath) + require.NoError(t, err) + require.NotNil(t, f) +} + +func TestLogger_SetupLoggerWithValidLogPathFileNameRotate(t *testing.T) { + tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = filepath.Join(tmpDir, "juan.log") + cfg.LogRotateBytes = 1 // set a tiny number of bytes to force rotation + var buf bytes.Buffer + + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Info("juan?") + logger.Info("john?") + f, err := os.Stat(cfg.LogFilePath) + require.NoError(t, err) + require.NotNil(t, f) + m, err := filepath.Glob(tmpDir + "/juan-*") // look for juan-{timestamp}.log + require.NoError(t, err) + require.Truef(t, len(m) == 1, "no files were found") +} + +func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { + tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tmpDir + "/" // add the trailing slash to the temp dir var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -123,10 +177,10 @@ func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { } func TestLogger_SetupLoggerWithInValidLogPath(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: "nonexistentdir/", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogLevel = hclog.Info + cfg.LogFilePath = "nonexistentdir/" var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -142,10 +196,9 @@ func TestLogger_SetupLoggerWithInValidLogPathPermission(t *testing.T) { assert.NoError(t, err, "unexpected error testing with invalid log path permission") defer os.RemoveAll(tmpDir) - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tmpDir + "/", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tmpDir + "/" var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -188,10 +241,10 @@ func TestLogger_SetupLoggerWithInvalidLogFilePath(t *testing.T) { for name, tc := range cases { name := name tc := tc - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tc.path, - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tc.path + _, err := Setup(cfg, &bytes.Buffer{}) assert.Error(t, err, "%s: expected error due to *", name) assert.Contains(t, err.Error(), tc.message, "%s: error message does not match: %s", name, err.Error()) @@ -199,26 +252,34 @@ func TestLogger_SetupLoggerWithInvalidLogFilePath(t *testing.T) { } func TestLogger_ChangeLogLevels(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - Name: "test-system", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug var buf bytes.Buffer logger, err := Setup(cfg, &buf) require.NoError(t, err) require.NotNil(t, logger) - assert.Equal(t, log.Debug, logger.GetLevel()) + assert.Equal(t, hclog.Debug, logger.GetLevel()) // Create new named loggers from the base logger and change the levels logger2 := logger.Named("test2") logger3 := logger.Named("test3") - logger2.SetLevel(log.Info) - logger3.SetLevel(log.Error) + logger2.SetLevel(hclog.Info) + logger3.SetLevel(hclog.Error) + + assert.Equal(t, hclog.Debug, logger.GetLevel()) + 
assert.Equal(t, hclog.Info, logger2.GetLevel()) + assert.Equal(t, hclog.Error, logger3.GetLevel()) +} + +func newTestLogConfig(t *testing.T) *LogConfig { + t.Helper() + + cfg, err := NewLogConfig("test") + require.NoError(t, err) + cfg.Name = "test-system" - assert.Equal(t, log.Debug, logger.GetLevel()) - assert.Equal(t, log.Info, logger2.GetLevel()) - assert.Equal(t, log.Error, logger3.GetLevel()) + return cfg } diff --git a/helper/metricsutil/bucket.go b/helper/metricsutil/bucket.go index 0f602e22ad15..f25df4d28178 100644 --- a/helper/metricsutil/bucket.go +++ b/helper/metricsutil/bucket.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil diff --git a/helper/metricsutil/bucket_test.go b/helper/metricsutil/bucket_test.go index 1179e4dbdcef..19b6636ed404 100644 --- a/helper/metricsutil/bucket_test.go +++ b/helper/metricsutil/bucket_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil diff --git a/helper/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go index f471249d756a..bb61f24ddc09 100644 --- a/helper/metricsutil/gauge_process.go +++ b/helper/metricsutil/gauge_process.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil @@ -11,24 +11,9 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/timeutil" ) -// This interface allows unit tests to substitute in a simulated clock. -type clock interface { - Now() time.Time - NewTicker(time.Duration) *time.Ticker -} - -type defaultClock struct{} - -func (_ defaultClock) Now() time.Time { - return time.Now() -} - -func (_ defaultClock) NewTicker(d time.Duration) *time.Ticker { - return time.NewTicker(d) -} - // GaugeLabelValues is one gauge in a set sharing a single key, that // are measured in a batch. type GaugeLabelValues struct { @@ -76,7 +61,7 @@ type GaugeCollectionProcess struct { maxGaugeCardinality int // time source - clock clock + clock timeutil.Clock } // NewGaugeCollectionProcess creates a new collection process for the callback @@ -101,7 +86,7 @@ func NewGaugeCollectionProcess( gaugeInterval, maxGaugeCardinality, logger, - defaultClock{}, + timeutil.DefaultClock{}, ) } @@ -124,7 +109,7 @@ func (m *ClusterMetricSink) NewGaugeCollectionProcess( m.GaugeInterval, m.MaxGaugeCardinality, logger, - defaultClock{}, + timeutil.DefaultClock{}, ) } @@ -137,7 +122,7 @@ func newGaugeCollectionProcessWithClock( gaugeInterval time.Duration, maxGaugeCardinality int, logger log.Logger, - clock clock, + clock timeutil.Clock, ) (*GaugeCollectionProcess, error) { process := &GaugeCollectionProcess{ stop: make(chan struct{}, 1), diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go index 83165a997bb6..e5e1c6145b5c 100644 --- a/helper/metricsutil/gauge_process_test.go +++ b/helper/metricsutil/gauge_process_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
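The unexported clock interface moves to helper/timeutil so other packages can share it. A test double only needs to embed DefaultClock and override the methods it cares about, much like the SimulatedTime type updated below. A minimal sketch, assuming timeutil.Clock keeps the Now/NewTicker shape of the interface deleted above:

```
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/helper/timeutil"
)

// fixedClock pins Now() to a known instant for deterministic tests while
// inheriting NewTicker from the embedded DefaultClock.
type fixedClock struct {
	timeutil.DefaultClock
	now time.Time
}

func (c fixedClock) Now() time.Time { return c.now }

// Compile-time check that the double satisfies the shared interface.
var _ timeutil.Clock = fixedClock{}

func main() {
	c := fixedClock{now: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)}
	fmt.Println(c.Now())
}
```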
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil @@ -15,6 +15,7 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/timeutil" ) // SimulatedTime maintains a virtual clock so the test isn't @@ -24,9 +25,10 @@ import ( type SimulatedTime struct { now time.Time tickerBarrier chan *SimulatedTicker + timeutil.DefaultClock } -var _ clock = &SimulatedTime{} +var _ timeutil.Clock = &SimulatedTime{} type SimulatedTicker struct { ticker *time.Ticker @@ -121,7 +123,7 @@ func TestGauge_Creation(t *testing.T) { t.Fatalf("Error creating collection process: %v", err) } - if _, ok := p.clock.(defaultClock); !ok { + if _, ok := p.clock.(timeutil.DefaultClock); !ok { t.Error("Default clock not installed.") } diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go index cfc2e1109655..ccd365868d60 100644 --- a/helper/metricsutil/metricsutil.go +++ b/helper/metricsutil/metricsutil.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil @@ -137,7 +137,8 @@ func (m *MetricsHelper) PrometheusResponse() *logical.Response { buf := &bytes.Buffer{} defer buf.Reset() - e := expfmt.NewEncoder(buf, expfmt.FmtText) + format := expfmt.NewFormat(expfmt.TypeTextPlain) + e := expfmt.NewEncoder(buf, format) for _, mf := range metricsFamilies { err := e.Encode(mf) if err != nil { @@ -145,7 +146,7 @@ func (m *MetricsHelper) PrometheusResponse() *logical.Response { return resp } } - resp.Data[logical.HTTPContentType] = string(expfmt.FmtText) + resp.Data[logical.HTTPContentType] = string(format) resp.Data[logical.HTTPRawBody] = buf.Bytes() resp.Data[logical.HTTPStatusCode] = http.StatusOK return resp diff --git a/helper/metricsutil/metricsutil_test.go b/helper/metricsutil/metricsutil_test.go index f8f17fedb361..ffe77b56cb0a 100644 --- a/helper/metricsutil/metricsutil_test.go +++ b/helper/metricsutil/metricsutil_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go index e3df058e1165..8b33c8802003 100644 --- a/helper/metricsutil/wrapped_metrics.go +++ b/helper/metricsutil/wrapped_metrics.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil @@ -37,12 +37,14 @@ type ClusterMetricSink struct { } type TelemetryConstConfig struct { - LeaseMetricsEpsilon time.Duration - NumLeaseMetricsTimeBuckets int - LeaseMetricsNameSpaceLabels bool + LeaseMetricsEpsilon time.Duration + NumLeaseMetricsTimeBuckets int + LeaseMetricsNameSpaceLabels bool + RollbackMetricsIncludeMountPoint bool } type Metrics interface { + SetGauge(key []string, val float32) SetGaugeWithLabels(key []string, val float32, labels []Label) IncrCounterWithLabels(key []string, val float32, labels []Label) AddSampleWithLabels(key []string, val float32, labels []Label) diff --git a/helper/metricsutil/wrapped_metrics_test.go b/helper/metricsutil/wrapped_metrics_test.go index b65809b46109..34c5cdda8c87 100644 --- a/helper/metricsutil/wrapped_metrics_test.go +++ b/helper/metricsutil/wrapped_metrics_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
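The expfmt change tracks a breaking rename in prometheus/common: the FmtText constant is gone in favor of a Format value built with expfmt.NewFormat, whose string form doubles as the Content-Type header. A sketch of the new encoder call, assuming a recent prometheus/common and its client_model types:

```
package main

import (
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func encodeFamilies(families []*dto.MetricFamily) (contentType string, body []byte, err error) {
	buf := &bytes.Buffer{}
	format := expfmt.NewFormat(expfmt.TypeTextPlain) // replaces the removed expfmt.FmtText
	enc := expfmt.NewEncoder(buf, format)
	for _, mf := range families {
		if err := enc.Encode(mf); err != nil {
			return "", nil, fmt.Errorf("encoding %q: %w", mf.GetName(), err)
		}
	}
	// The Format's string form is what goes into the Content-Type header.
	return string(format), buf.Bytes(), nil
}

func main() {
	ct, body, err := encodeFamilies(nil) // empty input is valid and yields no body
	if err != nil {
		panic(err)
	}
	fmt.Println(ct, len(body))
}
```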
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package metricsutil diff --git a/helper/monitor/monitor.go b/helper/monitor/monitor.go index 28ecf0eee571..ea4799ff09f5 100644 --- a/helper/monitor/monitor.go +++ b/helper/monitor/monitor.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package monitor diff --git a/helper/monitor/monitor_test.go b/helper/monitor/monitor_test.go index 06e1e0177782..e281952fe686 100644 --- a/helper/monitor/monitor_test.go +++ b/helper/monitor/monitor_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package monitor diff --git a/helper/namespace/namespace.go b/helper/namespace/namespace.go index 04a5b79dbec8..1a2346511dd5 100644 --- a/helper/namespace/namespace.go +++ b/helper/namespace/namespace.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package namespace diff --git a/helper/namespace/namespace_test.go b/helper/namespace/namespace_test.go index fd4c4c2f9988..10ee981b91d7 100644 --- a/helper/namespace/namespace_test.go +++ b/helper/namespace/namespace_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package namespace diff --git a/helper/osutil/fileinfo.go b/helper/osutil/fileinfo.go index f14db6b9c8bf..dffcc0f05917 100644 --- a/helper/osutil/fileinfo.go +++ b/helper/osutil/fileinfo.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package osutil diff --git a/helper/osutil/fileinfo_test.go b/helper/osutil/fileinfo_test.go index 8c3316bc91ab..edf7c50c9d85 100644 --- a/helper/osutil/fileinfo_test.go +++ b/helper/osutil/fileinfo_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package osutil diff --git a/helper/osutil/fileinfo_unix.go b/helper/osutil/fileinfo_unix.go index bb60c498797d..da7b58d61c0d 100644 --- a/helper/osutil/fileinfo_unix.go +++ b/helper/osutil/fileinfo_unix.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !windows diff --git a/helper/osutil/fileinfo_unix_test.go b/helper/osutil/fileinfo_unix_test.go index 302bd9e16847..65ed863febb2 100644 --- a/helper/osutil/fileinfo_unix_test.go +++ b/helper/osutil/fileinfo_unix_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !windows diff --git a/helper/osutil/fileinfo_windows.go b/helper/osutil/fileinfo_windows.go index 193fe3ff8420..9292b4613af1 100644 --- a/helper/osutil/fileinfo_windows.go +++ b/helper/osutil/fileinfo_windows.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build windows diff --git a/helper/parseip/parseip.go b/helper/parseip/parseip.go index 95579633b509..f4e6e0fc3e4d 100644 --- a/helper/parseip/parseip.go +++ b/helper/parseip/parseip.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package parseip diff --git a/helper/parseip/parseip_test.go b/helper/parseip/parseip_test.go index e26c810be677..fd8169a55311 100644 --- a/helper/parseip/parseip_test.go +++ b/helper/parseip/parseip_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package parseip diff --git a/helper/pgpkeys/encrypt_decrypt.go b/helper/pgpkeys/encrypt_decrypt.go index c7a8027cd2ce..44738caa0eaa 100644 --- a/helper/pgpkeys/encrypt_decrypt.go +++ b/helper/pgpkeys/encrypt_decrypt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pgpkeys @@ -56,7 +56,6 @@ func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, er if entities == nil { var err error entities, err = GetEntities(pgpKeys) - if err != nil { return nil, err } diff --git a/helper/pgpkeys/flag.go b/helper/pgpkeys/flag.go index 79d114b4d9cc..7749e5c8bffb 100644 --- a/helper/pgpkeys/flag.go +++ b/helper/pgpkeys/flag.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pgpkeys diff --git a/helper/pgpkeys/flag_test.go b/helper/pgpkeys/flag_test.go index 9ea25d44d589..f8447b61c588 100644 --- a/helper/pgpkeys/flag_test.go +++ b/helper/pgpkeys/flag_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pgpkeys diff --git a/helper/pgpkeys/keybase.go b/helper/pgpkeys/keybase.go index b24e4bf231c8..541841720f5d 100644 --- a/helper/pgpkeys/keybase.go +++ b/helper/pgpkeys/keybase.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pgpkeys diff --git a/helper/pgpkeys/keybase_test.go b/helper/pgpkeys/keybase_test.go index 7d59899fd9e5..2c8c229cc8cb 100644 --- a/helper/pgpkeys/keybase_test.go +++ b/helper/pgpkeys/keybase_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pgpkeys diff --git a/helper/pgpkeys/test_keys.go b/helper/pgpkeys/test_keys.go index be97698d1216..cccda6e2536b 100644 --- a/helper/pgpkeys/test_keys.go +++ b/helper/pgpkeys/test_keys.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package pgpkeys diff --git a/builtin/credential/aws/pkcs7/LICENSE b/helper/pkcs7/LICENSE similarity index 100% rename from builtin/credential/aws/pkcs7/LICENSE rename to helper/pkcs7/LICENSE diff --git a/builtin/credential/aws/pkcs7/README.md b/helper/pkcs7/README.md similarity index 100% rename from builtin/credential/aws/pkcs7/README.md rename to helper/pkcs7/README.md diff --git a/builtin/credential/aws/pkcs7/ber.go b/helper/pkcs7/ber.go similarity index 95% rename from builtin/credential/aws/pkcs7/ber.go rename to helper/pkcs7/ber.go index 0b18a6c8d361..898cf262ee06 100644 --- a/builtin/credential/aws/pkcs7/ber.go +++ b/helper/pkcs7/ber.go @@ -140,25 +140,22 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { tagStart := offset b := ber[offset] offset++ - if offset >= berLen { - return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") - } tag := b & 0x1F // last 5 bits if tag == 0x1F { tag = 0 + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } for ber[offset] >= 0x80 { - tag = tag*128 + ber[offset] - 0x80 - offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } + tag = tag*128 + ber[offset] - 0x80 + offset++ } // jvehent 20170227: this doesn't appear to be used anywhere... // tag = tag*128 + ber[offset] - 0x80 offset++ - if offset > berLen { - return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") - } } tagEnd := offset @@ -170,14 +167,17 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { } // read length var length int - l := ber[offset] - offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } + l := ber[offset] + offset++ indefinite := false if l > 0x80 { numberOfBytes := (int)(l & 0x7F) + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } if numberOfBytes > 4 { // int is only guaranteed to be 32bit return nil, 0, errors.New("ber2der: BER tag length too long") } @@ -192,7 +192,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { for i := 0; i < numberOfBytes; i++ { length = length*256 + (int)(ber[offset]) offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } } @@ -217,6 +217,9 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding") } if kind == 0 { + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } obj = asn1Primitive{ tagBytes: ber[tagStart:tagEnd], length: length, diff --git a/builtin/credential/aws/pkcs7/ber_test.go b/helper/pkcs7/ber_test.go similarity index 86% rename from builtin/credential/aws/pkcs7/ber_test.go rename to helper/pkcs7/ber_test.go index 169c78ab701e..d93bcf57b225 100644 --- a/builtin/credential/aws/pkcs7/ber_test.go +++ b/helper/pkcs7/ber_test.go @@ -9,6 +9,38 @@ import ( "testing" ) +// FuzzReadObject is a fuzz test that will generate random input data in an +// attempt to find crash-causing inputs +// https://go.dev/doc/security/fuzz +func FuzzReadObject(f *testing.F) { + // seed corpus 
used to guide the fuzzing engine + seedCorpus := []struct { + input []byte + offset int + }{ + {[]byte{0x30, 0x85}, 0}, + {[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, 0}, + {[]byte{0x30, 0x82, 0x0, 0x1}, 0}, + {[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, 0}, + {[]byte{0x30, 0x80, 0x1, 0x2}, 0}, + {[]byte{0x30, 0x03, 0x01, 0x02}, 0}, + {[]byte{0x30}, 0}, + {[]byte("?0"), 0}, + } + for _, tc := range seedCorpus { + f.Add(tc.input, tc.offset) // Use f.Add to provide a seed corpus + } + f.Fuzz(func(t *testing.T, ber []byte, offset int) { + if offset < 0 { + return + } + _, _, err := readObject(ber, offset) + if err != nil { + t.Log(ber, offset) + } + }) +} + func TestBer2Der(t *testing.T) { // indefinite length fixture ber := []byte{0x30, 0x80, 0x02, 0x01, 0x01, 0x00, 0x00} @@ -44,13 +76,14 @@ func TestBer2Der_Negatives(t *testing.T) { Input []byte ErrorContains string }{ - {[]byte{0x30, 0x85}, "tag length too long"}, + {[]byte{0x30, 0x85}, "end of ber data reached"}, {[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, "length is negative"}, {[]byte{0x30, 0x82, 0x0, 0x1}, "length has leading zero"}, {[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, "Invalid BER format"}, {[]byte{0x30, 0x80, 0x1, 0x2}, "BER tag length is more than available data"}, {[]byte{0x30, 0x03, 0x01, 0x02}, "length is more than available data"}, {[]byte{0x30}, "end of ber data reached"}, + {[]byte("?0"), "end of ber data reached"}, } for _, fixture := range fixtures { diff --git a/builtin/credential/aws/pkcs7/decrypt.go b/helper/pkcs7/decrypt.go similarity index 100% rename from builtin/credential/aws/pkcs7/decrypt.go rename to helper/pkcs7/decrypt.go diff --git a/builtin/credential/aws/pkcs7/decrypt_test.go b/helper/pkcs7/decrypt_test.go similarity index 100% rename from builtin/credential/aws/pkcs7/decrypt_test.go rename to helper/pkcs7/decrypt_test.go diff --git a/builtin/credential/aws/pkcs7/encrypt.go b/helper/pkcs7/encrypt.go similarity index 100% rename from builtin/credential/aws/pkcs7/encrypt.go rename to helper/pkcs7/encrypt.go diff --git a/builtin/credential/aws/pkcs7/encrypt_test.go b/helper/pkcs7/encrypt_test.go similarity index 100% rename from builtin/credential/aws/pkcs7/encrypt_test.go rename to helper/pkcs7/encrypt_test.go diff --git a/builtin/credential/aws/pkcs7/pkcs7.go b/helper/pkcs7/pkcs7.go similarity index 99% rename from builtin/credential/aws/pkcs7/pkcs7.go rename to helper/pkcs7/pkcs7.go index eecff9bc8ed7..69155692e3ab 100644 --- a/builtin/credential/aws/pkcs7/pkcs7.go +++ b/helper/pkcs7/pkcs7.go @@ -7,14 +7,13 @@ import ( "crypto/dsa" "crypto/ecdsa" "crypto/rsa" + _ "crypto/sha1" // for crypto.SHA1 "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "errors" "fmt" "sort" - - _ "crypto/sha1" // for crypto.SHA1 ) // PKCS7 Represents a PKCS7 structure diff --git a/builtin/credential/aws/pkcs7/pkcs7_test.go b/helper/pkcs7/pkcs7_test.go similarity index 100% rename from builtin/credential/aws/pkcs7/pkcs7_test.go rename to helper/pkcs7/pkcs7_test.go diff --git a/builtin/credential/aws/pkcs7/sign.go b/helper/pkcs7/sign.go similarity index 99% rename from builtin/credential/aws/pkcs7/sign.go rename to helper/pkcs7/sign.go index 72b99388548e..b64fcb11da47 100644 --- a/builtin/credential/aws/pkcs7/sign.go +++ b/helper/pkcs7/sign.go @@ -12,14 +12,8 @@ import ( "fmt" "math/big" "time" - - "github.com/hashicorp/vault/internal" ) -func init() { - internal.PatchSha1() -} - // SignedData is an opaque data structure for creating signed data payloads type SignedData struct { sd signedData diff --git 
a/builtin/credential/aws/pkcs7/sign_test.go b/helper/pkcs7/sign_test.go similarity index 86% rename from builtin/credential/aws/pkcs7/sign_test.go rename to helper/pkcs7/sign_test.go index 641cb0465fd0..2a4829a7e873 100644 --- a/builtin/credential/aws/pkcs7/sign_test.go +++ b/helper/pkcs7/sign_test.go @@ -89,6 +89,27 @@ func TestSign(t *testing.T) { } func TestDSASignAndVerifyWithOpenSSL(t *testing.T) { + dsaPublicCert := []byte(`-----BEGIN CERTIFICATE----- +MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du +MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r +bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE +ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC +AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD +Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE +exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii +Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 +V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI +puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl +nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp +rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt +1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT +ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G +CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 +qzy/7yePTlhlpj+ahMM= +-----END CERTIFICATE-----`) + content := []byte("Hello World") // write the content to a temp file tmpContentFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_content") @@ -235,6 +256,12 @@ func TestDegenerateCertificate(t *testing.T) { } testOpenSSLParse(t, deg) pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: deg}) + + // Make sure the library can parse the PKCS7 we generated along with OpenSSL + _, err = Parse(deg) + if err != nil { + t.Fatalf("failed parsing degenerated certificate: %v", err) + } } // writes the cert to a temporary file and tests that openssl can read it. diff --git a/builtin/credential/aws/pkcs7/verify.go b/helper/pkcs7/verify.go similarity index 99% rename from builtin/credential/aws/pkcs7/verify.go rename to helper/pkcs7/verify.go index 002e77f6e16e..8dff1eaecf15 100644 --- a/builtin/credential/aws/pkcs7/verify.go +++ b/helper/pkcs7/verify.go @@ -118,7 +118,7 @@ func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPo } } -// dsaSignature verifies the DSA signature on a PKCS7 document. DSA support was +// dsaCheckSignature verifies the DSA signature on a PKCS7 document. DSA support was // removed from Go's crypto/x509 support prior to Go 1.16. This allows // verifying legacy signatures until affected applications can be migrated off // of DSA. diff --git a/helper/policies/policies.go b/helper/policies/policies.go index 2a3460205798..ea180b4e90e4 100644 --- a/helper/policies/policies.go +++ b/helper/policies/policies.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package policies diff --git a/helper/policies/policies_test.go b/helper/policies/policies_test.go index 6356dee18a04..09c104d21ea4 100644 --- a/helper/policies/policies_test.go +++ b/helper/policies/policies_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
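With the pkcs7 package promoted out of the AWS auth backend into helper/pkcs7, the degenerate-certificate round trip added to the test above looks roughly like this. A sketch under the assumption that the helper keeps the DegenerateCertificate and Parse entry points of the upstream mozilla-services/pkcs7 API:

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/pkcs7"
)

// wrapCert wraps a DER-encoded certificate in a certs-only ("degenerate")
// PKCS7 structure and parses it back, mirroring the round trip the test adds.
func wrapCert(certDER []byte) error {
	deg, err := pkcs7.DegenerateCertificate(certDER)
	if err != nil {
		return fmt.Errorf("building degenerate PKCS7: %w", err)
	}
	p7, err := pkcs7.Parse(deg)
	if err != nil {
		return fmt.Errorf("parsing degenerate PKCS7: %w", err)
	}
	fmt.Printf("parsed %d certificate(s)\n", len(p7.Certificates))
	return nil
}

func main() {
	// A nil certificate only demonstrates the call shape; pass real DER
	// bytes in practice.
	if err := wrapCert(nil); err != nil {
		fmt.Println(err)
	}
}
```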
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package policies diff --git a/helper/proxyutil/proxyutil.go b/helper/proxyutil/proxyutil.go index b0f06d6b9f26..5b0e523b51b3 100644 --- a/helper/proxyutil/proxyutil.go +++ b/helper/proxyutil/proxyutil.go @@ -1,18 +1,17 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package proxyutil import ( - "errors" "fmt" "net" "sync" "time" "github.com/hashicorp/go-secure-stdlib/parseutil" - sockaddr "github.com/hashicorp/go-sockaddr" - proxyproto "github.com/pires/go-proxyproto" + "github.com/hashicorp/go-sockaddr" + "github.com/pires/go-proxyproto" ) // ProxyProtoConfig contains configuration for the PROXY protocol @@ -72,7 +71,7 @@ func WrapInProxyProto(listener net.Listener, config *ProxyProtoConfig) (net.List return proxyproto.IGNORE, nil } - return proxyproto.REJECT, errors.New(`upstream connection not trusted proxy_protocol_behavior is "deny_unauthorized"`) + return proxyproto.REJECT, proxyproto.ErrInvalidUpstream }, } default: diff --git a/helper/random/parser.go b/helper/random/parser.go index c5e82c8c1e5a..f3523226eafd 100644 --- a/helper/random/parser.go +++ b/helper/random/parser.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/parser_test.go b/helper/random/parser_test.go index f8af5a5eb920..7ca05fd00b14 100644 --- a/helper/random/parser_test.go +++ b/helper/random/parser_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/random_api.go b/helper/random/random_api.go index 5bb9316b15be..0dea4a0a6566 100644 --- a/helper/random/random_api.go +++ b/helper/random/random_api.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/registry.go b/helper/random/registry.go index 334df734e24e..59393561a0fb 100644 --- a/helper/random/registry.go +++ b/helper/random/registry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/registry_test.go b/helper/random/registry_test.go index 21297aaf21f5..10e6af0de2c8 100644 --- a/helper/random/registry_test.go +++ b/helper/random/registry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/rules.go b/helper/random/rules.go index 05cc800c91e8..240437a2f865 100644 --- a/helper/random/rules.go +++ b/helper/random/rules.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/rules_test.go b/helper/random/rules_test.go index e85df503b0a1..535bac80670e 100644 --- a/helper/random/rules_test.go +++ b/helper/random/rules_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/serializing.go b/helper/random/serializing.go index 5b68d3275874..d3a51ca2d079 100644 --- a/helper/random/serializing.go +++ b/helper/random/serializing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
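The proxyutil change above swaps a hand-rolled error for proxyproto.ErrInvalidUpstream, the sentinel that go-proxyproto recognizes when rejecting an untrusted upstream. A sketch of wrapping a listener in deny_unauthorized mode; the address is illustrative, and authorized proxies would be populated on the config from real settings:

```
package main

import (
	"net"

	"github.com/hashicorp/vault/helper/proxyutil"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8200")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// In deny_unauthorized mode, connections whose source is not an
	// authorized proxy are rejected with proxyproto.ErrInvalidUpstream.
	wrapped, err := proxyutil.WrapInProxyProto(ln, &proxyutil.ProxyProtoConfig{
		Behavior: "deny_unauthorized",
	})
	if err != nil {
		panic(err)
	}
	_ = wrapped // hand off to the server's Accept loop in practice
}
```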
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/serializing_test.go b/helper/random/serializing_test.go index bfa17ae266e7..b05afd66029e 100644 --- a/helper/random/serializing_test.go +++ b/helper/random/serializing_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/random/string_generator.go b/helper/random/string_generator.go index c51d29a55de9..62a5ab9061d5 100644 --- a/helper/random/string_generator.go +++ b/helper/random/string_generator.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random @@ -10,6 +10,7 @@ import ( "io" "math" "sort" + "sync" "time" "unicode" @@ -69,7 +70,7 @@ func sortCharset(chars string) string { return string(r) } -// StringGenerator generats random strings from the provided charset & adhering to a set of rules. The set of rules +// StringGenerator generates random strings from the provided charset & adhering to a set of rules. The set of rules // are things like CharsetRule which requires a certain number of characters from a sub-charset. type StringGenerator struct { // Length of the string to generate. @@ -79,7 +80,8 @@ type StringGenerator struct { Rules serializableRules `mapstructure:"-" json:"rule"` // This is "rule" in JSON so it matches the HCL property type // CharsetRule to choose runes from. This is computed from the rules, not directly configurable - charset runes + charset runes + charsetLock sync.RWMutex } // Generate a random string from the charset and adhering to the provided rules. @@ -119,7 +121,10 @@ func (g *StringGenerator) generate(rng io.Reader) (str string, err error) { // If performance improvements need to be made, this can be changed to read a batch of // potential strings at once rather than one at a time. This will significantly // improve performance, but at the cost of added complexity. - candidate, err := randomRunes(rng, g.charset, g.Length) + g.charsetLock.RLock() + charset := g.charset + g.charsetLock.RUnlock() + candidate, err := randomRunes(rng, charset, g.Length) if err != nil { return "", fmt.Errorf("unable to generate random characters: %w", err) } @@ -232,6 +237,8 @@ func (g *StringGenerator) validateConfig() (err error) { merr = multierror.Append(merr, fmt.Errorf("specified rules require at least %d characters but %d is specified", minLen, g.Length)) } + g.charsetLock.Lock() + defer g.charsetLock.Unlock() // Ensure we have a charset & all characters are printable if len(g.charset) == 0 { // Yes this is mutating the generator but this is done so we don't have to compute this on every generation diff --git a/helper/random/string_generator_test.go b/helper/random/string_generator_test.go index 8307ff73a485..c8ab3b6ace35 100644 --- a/helper/random/string_generator_test.go +++ b/helper/random/string_generator_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package random diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go index 2e69f3a27d7a..219049b1bb0a 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
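The charsetLock additions to StringGenerator above follow a standard pattern: validateConfig mutates the cached charset under the write lock, while generate copies the slice header under the read lock and releases it before the comparatively slow random generation. A generic sketch of that pattern, with hypothetical names:

```
package main

import (
	"fmt"
	"sync"
)

// cache is a hypothetical stand-in for StringGenerator: one field is
// computed under a write lock and read under a read lock.
type cache struct {
	mu    sync.RWMutex
	runes []rune
}

func (c *cache) set(rs []rune) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.runes = rs
}

func (c *cache) use() int {
	// Copy the slice header under the read lock, then release it before
	// doing any real work, exactly as generate() does with g.charset.
	c.mu.RLock()
	rs := c.runes
	c.mu.RUnlock()
	return len(rs) // stand-in for the expensive work
}

func main() {
	c := &cache{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.set([]rune("abcdef"))
			fmt.Println(c.use())
		}()
	}
	wg.Wait()
}
```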
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package storagepacker diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go index ad76107afbf4..d1f4f66e7415 100644 --- a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package storagepacker @@ -14,6 +14,7 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/protobuf/types/known/anypb" ) func BenchmarkStoragePacker(b *testing.B) { @@ -154,7 +155,7 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { Policies: []string{"policy1", "policy2"}, } - marshaledEntity, err := ptypes.MarshalAny(entity) + marshaledEntity, err := anypb.New(entity) if err != nil { t.Fatal(err) } diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 6dd58b96d37d..fd73a0ebb2e9 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: helper/storagepacker/types.proto package storagepacker @@ -203,7 +203,7 @@ func file_helper_storagepacker_types_proto_rawDescGZIP() []byte { } var file_helper_storagepacker_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_helper_storagepacker_types_proto_goTypes = []interface{}{ +var file_helper_storagepacker_types_proto_goTypes = []any{ (*Item)(nil), // 0: storagepacker.Item (*Bucket)(nil), // 1: storagepacker.Bucket nil, // 2: storagepacker.Bucket.ItemMapEntry @@ -227,7 +227,7 @@ func file_helper_storagepacker_types_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_helper_storagepacker_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_helper_storagepacker_types_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Item); i { case 0: return &v.state @@ -239,7 +239,7 @@ func file_helper_storagepacker_types_proto_init() { return nil } } - file_helper_storagepacker_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_helper_storagepacker_types_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Bucket); i { case 0: return &v.state diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 7efb0a11ef98..062f602bf1ee 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -1,24 +1,24 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/helper/storagepacker"; - package storagepacker; import "google/protobuf/any.proto"; +option go_package = "github.com/hashicorp/vault/helper/storagepacker"; + // Item represents an entry that gets inserted into the storage packer message Item { - // ID must be provided by the caller; the same value, if used with GetItem, - // can be used to fetch the item. However, when iterating through a bucket, - // this ID will be an internal ID. 
In other words, outside of the use-case - described above, the caller *must not* rely on this value to be - consistent with what they passed in. - string id = 1; - // message is the contents of the item - google.protobuf.Any message = 2; + // ID must be provided by the caller; the same value, if used with GetItem, + // can be used to fetch the item. However, when iterating through a bucket, + // this ID will be an internal ID. In other words, outside of the use-case + // described above, the caller *must not* rely on this value to be + // consistent with what they passed in. + string id = 1; + // message is the contents of the item + google.protobuf.Any message = 2; } // Bucket is a construct to hold multiple items within itself. This @@ -28,10 +28,10 @@ message Item { // to become independent buckets. Hence, this can grow infinitely in terms of // storage space for items that get inserted. message Bucket { - // Key is the storage path where the bucket gets stored - string key = 1; - // Items holds the items contained within this bucket. Used by v1. - repeated Item items = 2; - // ItemMap stores a mapping of item ID to message. Used by v2. - map<string, google.protobuf.Any> item_map = 3; + // Key is the storage path where the bucket gets stored + string key = 1; + // Items holds the items contained within this bucket. Used by v1. + repeated Item items = 2; + // ItemMap stores a mapping of item ID to message. Used by v2. + map<string, google.protobuf.Any> item_map = 3; } diff --git a/helper/syncmap/syncmap.go b/helper/syncmap/syncmap.go new file mode 100644 index 000000000000..0734d953858a --- /dev/null +++ b/helper/syncmap/syncmap.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package syncmap + +import "sync" + +// SyncMap implements a map similar to sync.Map, but with generics and with an equality +// in the values specified by an "ID()" method. +type SyncMap[K comparable, V IDer] struct { + // lock is used to synchronize access to the map + lock sync.RWMutex + // data holds the actual data + data map[K]V +} + +// NewSyncMap returns a new, empty SyncMap. +func NewSyncMap[K comparable, V IDer]() *SyncMap[K, V] { + return &SyncMap[K, V]{ + data: make(map[K]V), + } +} + +// Get returns the value for the given key. +func (m *SyncMap[K, V]) Get(k K) V { + m.lock.RLock() + defer m.lock.RUnlock() + return m.data[k] +} + +// Pop deletes and returns the value for the given key, if it exists. +func (m *SyncMap[K, V]) Pop(k K) V { + m.lock.Lock() + defer m.lock.Unlock() + v, ok := m.data[k] + if ok { + delete(m.data, k) + } + return v +} + +// PopIfEqual deletes and returns the value for the given key, if it exists +// and only if the ID is equal to the provided string. +func (m *SyncMap[K, V]) PopIfEqual(k K, id string) V { + m.lock.Lock() + defer m.lock.Unlock() + v, ok := m.data[k] + if ok && v.ID() == id { + delete(m.data, k) + return v + } + var zero V + return zero +} + +// Put adds the given key-value pair to the map and returns the previous value, if any. +func (m *SyncMap[K, V]) Put(k K, v V) V { + m.lock.Lock() + defer m.lock.Unlock() + oldV := m.data[k] + m.data[k] = v + return oldV +} + +// PutIfEmpty adds the given key-value pair to the map only if there is no value already in it, +// and returns the new value and true if so. +// If there is already a value, it returns the existing value and false.
+func (m *SyncMap[K, V]) PutIfEmpty(k K, v V) (V, bool) { + m.lock.Lock() + defer m.lock.Unlock() + oldV, ok := m.data[k] + if ok { + return oldV, false + } + m.data[k] = v + return v, true +} + +// Clear deletes all entries from the map, and returns the previous map. +func (m *SyncMap[K, V]) Clear() map[K]V { + m.lock.Lock() + defer m.lock.Unlock() + old := m.data + m.data = make(map[K]V) + return old +} + +// Values returns a copy of all values in the map. +func (m *SyncMap[K, V]) Values() []V { + m.lock.RLock() + defer m.lock.RUnlock() + + values := make([]V, 0, len(m.data)) + for _, v := range m.data { + values = append(values, v) + } + return values +} + +// IDer is used to extract an ID that SyncMap uses for equality checking. +type IDer interface { + ID() string +} diff --git a/helper/syncmap/syncmap_test.go b/helper/syncmap/syncmap_test.go new file mode 100644 index 000000000000..c66a131df4c1 --- /dev/null +++ b/helper/syncmap/syncmap_test.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package syncmap + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +type stringID struct { + val string + id string +} + +func (s stringID) ID() string { + return s.id +} + +var _ IDer = stringID{"", ""} + +// TestSyncMap_Get tests that basic getting and putting works. +func TestSyncMap_Get(t *testing.T) { + m := NewSyncMap[string, stringID]() + m.Put("a", stringID{"b", "b"}) + assert.Equal(t, stringID{"b", "b"}, m.Get("a")) + assert.Equal(t, stringID{"", ""}, m.Get("c")) +} + +// TestSyncMap_Pop tests that basic Pop operations work. +func TestSyncMap_Pop(t *testing.T) { + m := NewSyncMap[string, stringID]() + m.Put("a", stringID{"b", "b"}) + assert.Equal(t, stringID{"b", "b"}, m.Pop("a")) + assert.Equal(t, stringID{"", ""}, m.Pop("a")) + assert.Equal(t, stringID{"", ""}, m.Pop("c")) +} + +// TestSyncMap_PopIfEqual tests that basic PopIfEqual operations pop only if the IDs are equal. +func TestSyncMap_PopIfEqual(t *testing.T) { + m := NewSyncMap[string, stringID]() + m.Put("a", stringID{"b", "c"}) + assert.Equal(t, stringID{"", ""}, m.PopIfEqual("a", "b")) + assert.Equal(t, stringID{"b", "c"}, m.PopIfEqual("a", "c")) + assert.Equal(t, stringID{"", ""}, m.PopIfEqual("a", "c")) +} + +// TestSyncMap_Clear checks that clearing works as expected and returns a copy of the original map. +func TestSyncMap_Clear(t *testing.T) { + m := NewSyncMap[string, stringID]() + assert.Equal(t, map[string]stringID{}, m.data) + oldMap := m.Clear() + assert.Equal(t, map[string]stringID{}, m.data) + assert.Equal(t, map[string]stringID{}, oldMap) + + m.Put("a", stringID{"b", "b"}) + m.Put("c", stringID{"d", "d"}) + oldMap = m.Clear() + + assert.Equal(t, map[string]stringID{"a": {"b", "b"}, "c": {"d", "d"}}, oldMap) + assert.Equal(t, map[string]stringID{}, m.data) +} + +// TestSyncMap_Values checks that the Values method returns an array of the values. 
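syncmap.go and its tests in this hunk define a small typed alternative to sync.Map whose conditional operations compare the value's ID() instead of the whole value. A brief usage sketch under that same IDer contract; the lease type is hypothetical:

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/syncmap"
)

// lease is a hypothetical value type satisfying the IDer interface.
type lease struct {
	name string
	id   string
}

func (l lease) ID() string { return l.id }

func main() {
	m := syncmap.NewSyncMap[string, lease]()

	m.Put("web", lease{name: "web-lease", id: "v1"})

	// PutIfEmpty leaves an existing entry in place and reports false.
	if _, ok := m.PutIfEmpty("web", lease{name: "other", id: "v2"}); !ok {
		fmt.Println("existing entry kept")
	}

	// PopIfEqual only removes the entry when the stored ID still matches,
	// which guards against deleting a value another goroutine replaced.
	stale := m.PopIfEqual("web", "v2") // no-op: stored ID is "v1"
	fmt.Println(stale.ID() == "")      // true, zero value returned

	current := m.PopIfEqual("web", "v1")
	fmt.Println(current.name) // "web-lease"
}
```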
+func TestSyncMap_Values(t *testing.T) { + m := NewSyncMap[string, stringID]() + assert.Equal(t, []stringID{}, m.Values()) + m.Put("a", stringID{"b", "b"}) + assert.Equal(t, []stringID{{"b", "b"}}, m.Values()) + m.Put("c", stringID{"d", "d"}) + values := m.Values() + sort.Slice(values, func(i, j int) bool { + return values[i].val < values[j].val + }) + assert.Equal(t, []stringID{{"b", "b"}, {"d", "d"}}, values) +} diff --git a/helper/testhelpers/azurite/azurite.go b/helper/testhelpers/azurite/azurite.go index b03b5180d022..a9b291744a7d 100644 --- a/helper/testhelpers/azurite/azurite.go +++ b/helper/testhelpers/azurite/azurite.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package azurite @@ -10,7 +10,7 @@ import ( "testing" "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type Config struct { diff --git a/helper/testhelpers/cassandra/cassandrahelper.go b/helper/testhelpers/cassandra/cassandrahelper.go index 71953e48e57d..b774a1690e70 100644 --- a/helper/testhelpers/cassandra/cassandrahelper.go +++ b/helper/testhelpers/cassandra/cassandrahelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra @@ -9,11 +9,13 @@ import ( "net" "os" "path/filepath" + "runtime" + "strings" "testing" "time" "github.com/gocql/gocql" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type containerConfig struct { @@ -80,6 +82,12 @@ func (h Host) ConnectionURL() string { func PrepareTestContainer(t *testing.T, opts ...ContainerOpt) (Host, func()) { t.Helper() + + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if os.Getenv("CASSANDRA_HOSTS") != "" { host, port, err := net.SplitHostPort(os.Getenv("CASSANDRA_HOSTS")) if err != nil { diff --git a/helper/testhelpers/certhelpers/cert_helpers.go b/helper/testhelpers/certhelpers/cert_helpers.go index 42692d01f6a6..d9c89735c618 100644 --- a/helper/testhelpers/certhelpers/cert_helpers.go +++ b/helper/testhelpers/certhelpers/cert_helpers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package certhelpers diff --git a/helper/testhelpers/consul/cluster_storage.go b/helper/testhelpers/consul/cluster_storage.go new file mode 100644 index 000000000000..9ca1080c6f9c --- /dev/null +++ b/helper/testhelpers/consul/cluster_storage.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package consul + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/testcluster" +) + +type ClusterStorage struct { + // Set these after calling `NewConsulClusterStorage` but before `Start` (or + // passing in to NewDockerCluster) to control Consul version specifically in + // your test. Leave empty for latest OSS (defined in consulhelper.go). 
+ ConsulVersion string + ConsulEnterprise bool + + cleanup func() + config *Config +} + +var _ testcluster.ClusterStorage = &ClusterStorage{} + +func NewClusterStorage() *ClusterStorage { + return &ClusterStorage{} +} + +func (s *ClusterStorage) Start(ctx context.Context, opts *testcluster.ClusterOptions) error { + prefix := "" + if opts != nil && opts.ClusterName != "" { + prefix = fmt.Sprintf("%s-", opts.ClusterName) + } + cleanup, config, err := RunContainer(ctx, prefix, s.ConsulVersion, s.ConsulEnterprise, true) + if err != nil { + return err + } + s.cleanup = cleanup + s.config = config + + return nil +} + +func (s *ClusterStorage) Cleanup() error { + if s.cleanup != nil { + s.cleanup() + s.cleanup = nil + } + return nil +} + +func (s *ClusterStorage) Opts() map[string]interface{} { + if s.config == nil { + return nil + } + return map[string]interface{}{ + "address": s.config.ContainerHTTPAddr, + "token": s.config.Token, + "max_parallel": "32", + } +} + +func (s *ClusterStorage) Type() string { + return "consul" +} + +func (s *ClusterStorage) Config() *Config { + return s.config +} diff --git a/helper/testhelpers/consul/consulhelper.go b/helper/testhelpers/consul/consulhelper.go index 42f41e860313..d6ab5d72b5c1 100644 --- a/helper/testhelpers/consul/consulhelper.go +++ b/helper/testhelpers/consul/consulhelper.go @@ -1,22 +1,30 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul import ( "context" + "fmt" "os" "strings" "testing" consulapi "github.com/hashicorp/consul/api" goversion "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) +// LatestConsulVersion is the most recent version of Consul which is used unless +// another version is specified in the test config or environment. This will +// probably go stale as we don't always update it on every release but we rarely +// rely on specific new Consul functionality so that's probably not a problem. +const LatestConsulVersion = "1.15.3" + type Config struct { docker.ServiceHostPort - Token string + Token string + ContainerHTTPAddr string } func (c *Config) APIConfig() *consulapi.Config { @@ -26,19 +34,39 @@ func (c *Config) APIConfig() *consulapi.Config { return apiConfig } -// PrepareTestContainer creates a Consul docker container. If version is empty, -// the Consul version used will be given by the environment variable -// CONSUL_DOCKER_VERSION, or if that's empty, whatever we've hardcoded as the -// the latest Consul version. +// PrepareTestContainer is a test helper that creates a Consul docker container +// or fails the test if unsuccessful. See RunContainer for more details on the +// configuration. func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config) { t.Helper() + cleanup, config, err := RunContainer(context.Background(), "", version, isEnterprise, doBootstrapSetup) + if err != nil { + t.Fatalf("failed starting consul: %s", err) + } + return cleanup, config +} + +// RunContainer runs Consul in a Docker container unless CONSUL_HTTP_ADDR is +// already found in the environment. Consul version is determined by the version +// argument. If version is empty string, the CONSUL_DOCKER_VERSION environment +// variable is used and if that is empty too, LatestConsulVersion is used +// (defined above). 
If namePrefix is provided we assume you have chosen a unique +enough prefix to avoid collision with other tests that may be running in +parallel and so _do not_ add an additional unique ID suffix. We will also +ensure previous instances are deleted and leave the container running for +debugging. This is useful for using Consul as part of a testcluster (i.e. +when Vault is in Docker too). If namePrefix is empty then a unique suffix is +added since many older tests rely on a unique instance of the container. This +is used by `PrepareTestContainer` which is typically used in tests that rely +on Consul but run tested code within the test process. +func RunContainer(ctx context.Context, namePrefix, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config, error) { if retAddress := os.Getenv("CONSUL_HTTP_ADDR"); retAddress != "" { shp, err := docker.NewServiceHostPortParse(retAddress) if err != nil { - t.Fatal(err) + return nil, nil, err } - return func() {}, &Config{ServiceHostPort: *shp, Token: os.Getenv("CONSUL_HTTP_TOKEN")} + return func() {}, &Config{ServiceHostPort: *shp, Token: os.Getenv("CONSUL_HTTP_TOKEN")}, nil } config := `acl { enabled = true default_policy = "deny" }` @@ -47,7 +75,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo if consulVersion != "" { version = consulVersion } else { - version = "1.11.3" // Latest Consul version, update as new releases come out + version = LatestConsulVersion } } if strings.HasPrefix(version, "1.3") { @@ -55,7 +83,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo } name := "consul" - repo := "consul" + repo := "docker.mirror.hashicorp.services/library/consul" var envVars []string // If running the enterprise container, set the appropriate values below. if isEnterprise { @@ -66,15 +94,18 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo envVars = append(envVars, "CONSUL_LICENSE="+license) if !hasLicense { - t.Fatalf("Failed to find enterprise license") + return nil, nil, fmt.Errorf("Failed to find enterprise license") } } + if namePrefix != "" { + name = namePrefix + name + } if dockerRepo, hasEnvRepo := os.LookupEnv("CONSUL_DOCKER_REPO"); hasEnvRepo { repo = dockerRepo } - runner, err := docker.NewServiceRunner(docker.RunOptions{ + dockerOpts := docker.RunOptions{ ContainerName: name, ImageRepo: repo, ImageTag: version, @@ -83,12 +114,25 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo Ports: []string{"8500/tcp"}, AuthUsername: os.Getenv("CONSUL_DOCKER_USERNAME"), AuthPassword: os.Getenv("CONSUL_DOCKER_PASSWORD"), - }) + } + + // Add a unique suffix if there is no per-test prefix provided + addSuffix := true + if namePrefix != "" { + // Don't add a suffix if the caller already provided a prefix + addSuffix = false + // Also enable predelete and non-removal to make debugging easier for test + // cases with named containers.
+ dockerOpts.PreDelete = true + dockerOpts.DoNotAutoRemove = true + } + + runner, err := docker.NewServiceRunner(dockerOpts) if err != nil { - t.Fatalf("Could not start docker Consul: %s", err) + return nil, nil, fmt.Errorf("Could not start docker Consul: %s", err) } - svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + svc, _, err := runner.StartNewService(ctx, addSuffix, false, func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { shp := docker.NewServiceHostPort(host, port) apiConfig := consulapi.DefaultNonPooledConfig() apiConfig.Address = shp.Address() @@ -165,7 +209,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo } } - // Configure a namespace and parition if testing enterprise Consul + // Configure a namespace and partition if testing enterprise Consul if isEnterprise { // Namespaces require Consul 1.7 or newer namespaceVersion, _ := goversion.NewVersion("1.7") @@ -229,8 +273,20 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo }, nil }) if err != nil { - t.Fatalf("Could not start docker Consul: %s", err) + return nil, nil, err } - return svc.Cleanup, svc.Config.(*Config) + // Find the container network info. + if len(svc.Container.NetworkSettings.Networks) < 1 { + svc.Cleanup() + return nil, nil, fmt.Errorf("failed to find any network settings for container") + } + cfg := svc.Config.(*Config) + for _, eps := range svc.Container.NetworkSettings.Networks { + // Just pick the first network, we assume only one for now. + // Pull out the real container IP and set that up + cfg.ContainerHTTPAddr = fmt.Sprintf("http://%s:8500", eps.IPAddress) + break + } + return svc.Cleanup, cfg, nil } diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go index 846db21da17a..1c7da787d7b6 100644 --- a/helper/testhelpers/corehelpers/corehelpers.go +++ b/helper/testhelpers/corehelpers/corehelpers.go @@ -1,34 +1,32 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Package corehelpers contains testhelpers that don't depend on package vault, // and thus can be used within vault (as well as elsewhere.) package corehelpers import ( - "bytes" "context" - "crypto/sha256" - "io/ioutil" + "io" "os" "path/filepath" - "sync" + "strings" + "testing" "time" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/builtin/credential/approle" "github.com/hashicorp/vault/plugins/database/mysql" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/go-testing-interface" ) +var externalPlugins = []string{"transform", "kmip", "keymgmt"} + // RetryUntil runs f until it returns a nil result or the timeout is reached. // If a nil result hasn't been obtained by timeout, calls t.Fatal. -func RetryUntil(t testing.T, timeout time.Duration, f func() error) { +func RetryUntil(t testing.TB, timeout time.Duration, f func() error) { t.Helper() deadline := time.Now().Add(timeout) var err error @@ -43,36 +41,27 @@ func RetryUntil(t testing.T, timeout time.Duration, f func() error) { // MakeTestPluginDir creates a temporary directory suitable for holding plugins. // This helper also resolves symlinks to make tests happy on OS X. 
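The consul helper rework above splits container startup (RunContainer, usable from non-test code and honoring CONSUL_HTTP_ADDR) from the test-facing wrapper (PrepareTestContainer). A sketch of driving the new ClusterStorage lifecycle end to end, assuming only the types introduced in this diff; the test body is illustrative and requires Docker:

```
package consul_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/consul"
	"github.com/hashicorp/vault/sdk/helper/testcluster"
)

// TestConsulBackedCluster sketches the intended lifecycle: Start brings up
// (or reuses, via CONSUL_HTTP_ADDR) a Consul container, Opts feeds the
// storage stanza of a Vault test cluster, and Cleanup tears it down.
func TestConsulBackedCluster(t *testing.T) {
	storage := consul.NewClusterStorage()
	storage.ConsulVersion = consul.LatestConsulVersion // or pin a version

	ctx := context.Background()
	if err := storage.Start(ctx, &testcluster.ClusterOptions{ClusterName: "example"}); err != nil {
		t.Fatal(err)
	}
	defer storage.Cleanup()

	// Opts carries the container-reachable address and bootstrap token,
	// ready to drop into a "consul" storage backend configuration.
	fmt.Printf("storage backend %q opts: %v\n", storage.Type(), storage.Opts())
}
```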
-func MakeTestPluginDir(t testing.T) (string, func(t testing.T)) { - if t != nil { - t.Helper() - } +func MakeTestPluginDir(t testing.TB) string { + t.Helper() dir, err := os.MkdirTemp("", "") if err != nil { - if t == nil { - panic(err) - } t.Fatal(err) } // OSX tempdir are /var, but actually symlinked to /private/var dir, err = filepath.EvalSymlinks(dir) if err != nil { - if t == nil { - panic(err) - } t.Fatal(err) } - return dir, func(t testing.T) { + t.Cleanup(func() { if err := os.RemoveAll(dir); err != nil { - if t == nil { - panic(err) - } t.Fatal(err) } - } + }) + + return dir } func NewMockBuiltinRegistry() *mockBuiltinRegistry { @@ -180,10 +169,23 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string { "pending-removal-test-plugin", "approle", } + + case consts.PluginTypeSecrets: + return append(externalPlugins, "kv") } + return []string{} } +func (r *mockBuiltinRegistry) IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool { + for _, i := range externalPlugins { + if i == name { + return true + } + } + return false +} + func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool { for _, key := range m.Keys(pluginType) { if key == name { @@ -201,190 +203,28 @@ func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.P return consts.Unknown, false } -func TestNoopAudit(t testing.T, config map[string]string) *NoopAudit { - n, err := NewNoopAudit(config) - if err != nil { - t.Fatal(err) - } - return n -} - -func NewNoopAudit(config map[string]string) (*NoopAudit, error) { - view := &logical.InmemStorage{} - err := view.Put(context.Background(), &logical.StorageEntry{ - Key: "salt", - Value: []byte("foo"), - }) - if err != nil { - return nil, err - } - - n := &NoopAudit{ - Config: &audit.BackendConfig{ - SaltView: view, - SaltConfig: &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }, - Config: config, - }, - } - n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - SaltFunc: n.Salt, - } - return n, nil -} - -func NoopAuditFactory(records **[][]byte) audit.Factory { - return func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) { - n, err := NewNoopAudit(config.Config) - if err != nil { - return nil, err - } - if records != nil { - *records = &n.records - } - return n, nil - } -} - -type NoopAudit struct { - Config *audit.BackendConfig - ReqErr error - ReqAuth []*logical.Auth - Req []*logical.Request - ReqHeaders []map[string][]string - ReqNonHMACKeys []string - ReqErrs []error - - RespErr error - RespAuth []*logical.Auth - RespReq []*logical.Request - Resp []*logical.Response - RespNonHMACKeys [][]string - RespReqNonHMACKeys [][]string - RespErrs []error - - formatter audit.AuditFormatter - records [][]byte - l sync.RWMutex - salt *salt.Salt - saltMutex sync.RWMutex -} - -func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { - n.l.Lock() - defer n.l.Unlock() - if n.formatter.AuditFormatWriter != nil { - var w bytes.Buffer - err := n.formatter.FormatRequest(ctx, &w, audit.FormatterConfig{}, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) - } - - n.ReqAuth = append(n.ReqAuth, in.Auth) - n.Req = append(n.Req, in.Request) - n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers) - n.ReqNonHMACKeys = in.NonHMACReqDataKeys - n.ReqErrs = append(n.ReqErrs, in.OuterErr) - - return n.ReqErr -} - -func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error { - n.l.Lock() - defer 
n.l.Unlock() - - if n.formatter.AuditFormatWriter != nil { - var w bytes.Buffer - err := n.formatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) - } - - n.RespAuth = append(n.RespAuth, in.Auth) - n.RespReq = append(n.RespReq, in.Request) - n.Resp = append(n.Resp, in.Response) - n.RespErrs = append(n.RespErrs, in.OuterErr) - - if in.Response != nil { - n.RespNonHMACKeys = append(n.RespNonHMACKeys, in.NonHMACRespDataKeys) - n.RespReqNonHMACKeys = append(n.RespReqNonHMACKeys, in.NonHMACReqDataKeys) - } - - return n.RespErr -} - -func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - n.l.Lock() - defer n.l.Unlock() - var w bytes.Buffer - tempFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - err := tempFormatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) - return nil -} - -func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) { - n.saltMutex.RLock() - if n.salt != nil { - defer n.saltMutex.RUnlock() - return n.salt, nil - } - n.saltMutex.RUnlock() - n.saltMutex.Lock() - defer n.saltMutex.Unlock() - if n.salt != nil { - return n.salt, nil - } - salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) - if err != nil { - return nil, err - } - n.salt = salt - return salt, nil -} - -func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) { - salt, err := n.Salt(ctx) - if err != nil { - return "", err - } - return salt.GetIdentifiedHMAC(data), nil -} - -func (n *NoopAudit) Reload(ctx context.Context) error { - return nil -} - -func (n *NoopAudit) Invalidate(ctx context.Context) { - n.saltMutex.Lock() - defer n.saltMutex.Unlock() - n.salt = nil -} - type TestLogger struct { - hclog.Logger + hclog.InterceptLogger Path string File *os.File sink hclog.SinkAdapter } -func NewTestLogger(t testing.T) *TestLogger { +func NewTestLogger(t testing.TB) *TestLogger { + return NewTestLoggerWithSuffix(t, "") +} + +func NewTestLoggerWithSuffix(t testing.TB, logFileSuffix string) *TestLogger { var logFile *os.File var logPath string output := os.Stderr logDir := os.Getenv("VAULT_TEST_LOG_DIR") if logDir != "" { - logPath = filepath.Join(logDir, t.Name()+".log") + if logFileSuffix != "" && !strings.HasPrefix(logFileSuffix, "_") { + logFileSuffix = "_" + logFileSuffix + } + logPath = filepath.Join(logDir, t.Name()+logFileSuffix+".log") // t.Name may include slashes. dir, _ := filepath.Split(logPath) err := os.MkdirAll(dir, 0o755) @@ -401,8 +241,9 @@ func NewTestLogger(t testing.T) *TestLogger { // We send nothing on the regular logger, that way we can later deregister // the sink to stop logging during cluster cleanup. 
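The corehelpers rewrites above (RetryUntil, MakeTestPluginDir) replace the custom testing interface with the standard testing.TB and move teardown into t.Cleanup, so callers no longer have to carry a returned cleanup function. A minimal sketch of that conversion pattern; tempDir is a hypothetical helper, not part of this diff:

```
package corehelpers_test

import (
	"os"
	"path/filepath"
	"testing"
)

// tempDir is a hypothetical helper written in the new style: it accepts
// testing.TB so tests and benchmarks can both use it, and registers its
// own teardown instead of returning a cleanup function.
func tempDir(t testing.TB) string {
	t.Helper()
	dir, err := os.MkdirTemp("", "example")
	if err != nil {
		t.Fatal(err)
	}
	// Resolve symlinks for the same reason MakeTestPluginDir does: on
	// macOS, /var temp paths are symlinks into /private/var.
	dir, err = filepath.EvalSymlinks(dir)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Fatal(err)
		}
	})
	return dir
}

func TestTempDir(t *testing.T) {
	dir := tempDir(t) // no cleanup function to remember to call
	if _, err := os.Stat(dir); err != nil {
		t.Fatal(err)
	}
}
```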
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ - Output: ioutil.Discard, + Output: io.Discard, IndependentLevels: true, + Name: t.Name(), }) sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{ Output: output, @@ -410,14 +251,25 @@ func NewTestLogger(t testing.T) *TestLogger { IndependentLevels: true, }) logger.RegisterSink(sink) - return &TestLogger{ - Path: logPath, - File: logFile, - Logger: logger, - sink: sink, + + testLogger := &TestLogger{ + Path: logPath, + File: logFile, + InterceptLogger: logger, + sink: sink, } + + t.Cleanup(func() { + testLogger.StopLogging() + if t.Failed() { + _ = testLogger.File.Close() + } else { + _ = os.Remove(testLogger.Path) + } + }) + return testLogger } func (tl *TestLogger) StopLogging() { - tl.Logger.(hclog.InterceptLogger).DeregisterSink(tl.sink) + tl.InterceptLogger.DeregisterSink(tl.sink) } diff --git a/helper/testhelpers/docker/testhelpers.go b/helper/testhelpers/docker/testhelpers.go deleted file mode 100644 index 37e14980d94c..000000000000 --- a/helper/testhelpers/docker/testhelpers.go +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package docker - -import ( - "archive/tar" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/cenkalti/backoff/v3" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/go-connections/nat" - "github.com/hashicorp/go-uuid" -) - -type Runner struct { - DockerAPI *client.Client - RunOptions RunOptions -} - -type RunOptions struct { - ImageRepo string - ImageTag string - ContainerName string - Cmd []string - Entrypoint []string - Env []string - NetworkID string - CopyFromTo map[string]string - Ports []string - DoNotAutoRemove bool - AuthUsername string - AuthPassword string - LogConsumer func(string) -} - -func NewServiceRunner(opts RunOptions) (*Runner, error) { - dapi, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.39")) - if err != nil { - return nil, err - } - - if opts.NetworkID == "" { - opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") - } - if opts.ContainerName == "" { - if strings.Contains(opts.ImageRepo, "/") { - return nil, fmt.Errorf("ContainerName is required for non-library images") - } - // If there's no slash in the repo it's almost certainly going to be - // a good container name. 
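TestLogger's split between a discarded primary output and a registered sink is what makes StopLogging work: deregistering the sink silences output without invalidating the logger already handed to the cluster. A condensed sketch of that hclog pattern, independent of the test harness:

```
package main

import (
	"io"
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// The primary output is discarded; all visible output flows through
	// the sink, which can be deregistered later.
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:              "example",
		Output:            io.Discard,
		IndependentLevels: true,
	})
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Output:            os.Stderr,
		Level:             hclog.Trace,
		IndependentLevels: true,
	})
	logger.RegisterSink(sink)

	logger.Info("visible: sink is registered")

	// Equivalent of TestLogger.StopLogging: the logger keeps working for
	// callers that hold it, but output stops.
	logger.DeregisterSink(sink)
	logger.Info("silent: sink deregistered")
}
```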
- opts.ContainerName = opts.ImageRepo - } - return &Runner{ - DockerAPI: dapi, - RunOptions: opts, - }, nil -} - -type ServiceConfig interface { - Address() string - URL() *url.URL -} - -func NewServiceHostPort(host string, port int) *ServiceHostPort { - return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)} -} - -func NewServiceHostPortParse(s string) (*ServiceHostPort, error) { - pieces := strings.Split(s, ":") - if len(pieces) != 2 { - return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) - } - - port, err := strconv.Atoi(pieces[1]) - if err != nil || port < 1 { - return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) - } - - return &ServiceHostPort{s}, nil -} - -type ServiceHostPort struct { - address string -} - -func (s ServiceHostPort) Address() string { - return s.address -} - -func (s ServiceHostPort) URL() *url.URL { - return &url.URL{Host: s.address} -} - -func NewServiceURLParse(s string) (*ServiceURL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - return &ServiceURL{u: *u}, nil -} - -func NewServiceURL(u url.URL) *ServiceURL { - return &ServiceURL{u: u} -} - -type ServiceURL struct { - u url.URL -} - -func (s ServiceURL) Address() string { - return s.u.Host -} - -func (s ServiceURL) URL() *url.URL { - return &s.u -} - -// ServiceAdapter verifies connectivity to the service, then returns either the -// connection string (typically a URL) and nil, or empty string and an error. -type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error) - -// StartService will start the runner's configured docker container with a -// random UUID suffix appended to the name to make it unique and will return -// either a hostname or local address depending on if a Docker network was given. -// -// Most tests can default to using this. -func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) { - serv, _, err := d.StartNewService(ctx, true, false, connect) - - return serv, err -} - -// StartNewService will start the runner's configured docker container but with the -// ability to control adding a name suffix or forcing a local address to be returned. -// 'addSuffix' will add a random UUID to the end of the container name. -// 'forceLocalAddr' will force the container address returned to be in the -// form of '127.0.0.1:1234' where 1234 is the mapped container port. 
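The StartNewService flow being removed here (it now lives under sdk/helper/docker) hinges on the ServiceAdapter callback: the runner retries the adapter with exponential backoff until the containerized service answers, and only then returns its config. A reduced sketch of that readiness loop, assuming only the cenkalti/backoff/v3 API already imported above; waitForService is a hypothetical stand-in:

```
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/cenkalti/backoff/v3"
)

// waitForService retries a connectivity probe the way StartNewService
// retries its ServiceAdapter, returning the address once it is reachable.
func waitForService(ctx context.Context, host string, port int) (string, error) {
	addr := fmt.Sprintf("%s:%d", host, port)

	bo := backoff.NewExponentialBackOff()
	bo.MaxInterval = 5 * time.Second
	bo.MaxElapsedTime = 2 * time.Minute

	err := backoff.Retry(func() error {
		conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", addr)
		if err != nil {
			return err // not ready yet; backoff retries for us
		}
		return conn.Close()
	}, bo)
	if err != nil {
		return "", fmt.Errorf("service at %s never became ready: %w", addr, err)
	}
	return addr, nil
}

func main() {
	addr, err := waitForService(context.Background(), "127.0.0.1", 8500)
	fmt.Println(addr, err)
}
```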
-func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) { - container, hostIPs, containerID, err := d.Start(context.Background(), addSuffix, forceLocalAddr) - if err != nil { - return nil, "", err - } - - cleanup := func() { - if d.RunOptions.LogConsumer != nil { - rc, err := d.DockerAPI.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Timestamps: true, - Details: true, - }) - if err == nil { - b, err := ioutil.ReadAll(rc) - if err != nil { - d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs, err=%v, read: %s", err, string(b))) - } else { - d.RunOptions.LogConsumer(string(b)) - } - } - } - - for i := 0; i < 10; i++ { - err := d.DockerAPI.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) - if err == nil { - return - } - time.Sleep(1 * time.Second) - } - } - - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = time.Second * 5 - bo.MaxElapsedTime = 2 * time.Minute - - pieces := strings.Split(hostIPs[0], ":") - portInt, err := strconv.Atoi(pieces[1]) - if err != nil { - return nil, "", err - } - - var config ServiceConfig - err = backoff.Retry(func() error { - c, err := connect(ctx, pieces[0], portInt) - if err != nil { - return err - } - if c == nil { - return fmt.Errorf("service adapter returned nil error and config") - } - config = c - return nil - }, bo) - - if err != nil { - if !d.RunOptions.DoNotAutoRemove { - cleanup() - } - return nil, "", err - } - - return &Service{ - Config: config, - Cleanup: cleanup, - Container: container, - }, containerID, nil -} - -type Service struct { - Config ServiceConfig - Cleanup func() - Container *types.ContainerJSON -} - -func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*types.ContainerJSON, []string, string, error) { - name := d.RunOptions.ContainerName - if addSuffix { - suffix, err := uuid.GenerateUUID() - if err != nil { - return nil, nil, "", err - } - name += "-" + suffix - } - - cfg := &container.Config{ - Hostname: name, - Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), - Env: d.RunOptions.Env, - Cmd: d.RunOptions.Cmd, - } - if len(d.RunOptions.Ports) > 0 { - cfg.ExposedPorts = make(map[nat.Port]struct{}) - for _, p := range d.RunOptions.Ports { - cfg.ExposedPorts[nat.Port(p)] = struct{}{} - } - } - if len(d.RunOptions.Entrypoint) > 0 { - cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) - } - - hostConfig := &container.HostConfig{ - AutoRemove: !d.RunOptions.DoNotAutoRemove, - PublishAllPorts: true, - } - - netConfig := &network.NetworkingConfig{} - if d.RunOptions.NetworkID != "" { - netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - d.RunOptions.NetworkID: {}, - } - } - - // best-effort pull - var opts types.ImageCreateOptions - if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { - var buf bytes.Buffer - auth := map[string]string{ - "username": d.RunOptions.AuthUsername, - "password": d.RunOptions.AuthPassword, - } - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, nil, "", err - } - opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts) - if resp != nil { - _, _ = ioutil.ReadAll(resp) - } - - c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname) - if err != nil { - return nil, nil, "", fmt.Errorf("container create failed: 
%v", err) - } - - for from, to := range d.RunOptions.CopyFromTo { - if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, nil, "", err - } - } - - err = d.DockerAPI.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) - if err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, nil, "", fmt.Errorf("container start failed: %v", err) - } - - inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID) - if err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, nil, "", err - } - - var addrs []string - for _, port := range d.RunOptions.Ports { - pieces := strings.Split(port, "/") - if len(pieces) < 2 { - return nil, nil, "", fmt.Errorf("expected port of the form 1234/tcp, got: %s", port) - } - if d.RunOptions.NetworkID != "" && !forceLocalAddr { - addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0])) - } else { - mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)] - if !ok || len(mapped) == 0 { - return nil, nil, "", fmt.Errorf("no port mapping found for %s", port) - } - addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort)) - } - } - - return &inspect, addrs, c.ID, nil -} - -func (d *Runner) Stop(ctx context.Context, containerID string) error { - if d.RunOptions.NetworkID != "" { - if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil { - return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err) - } - } - - timeout := 5 * time.Second - if err := d.DockerAPI.ContainerStop(ctx, containerID, &timeout); err != nil { - return fmt.Errorf("error stopping container: %v", err) - } - - return nil -} - -func (d *Runner) Restart(ctx context.Context, containerID string) error { - if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil { - return err - } - - ends := &network.EndpointSettings{ - NetworkID: d.RunOptions.NetworkID, - } - - return d.DockerAPI.NetworkConnect(ctx, d.RunOptions.NetworkID, containerID, ends) -} - -func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { - srcInfo, err := archive.CopyInfoSourcePath(from, false) - if err != nil { - return fmt.Errorf("error copying from source %q: %v", from, err) - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return fmt.Errorf("error creating tar from source %q: %v", from, err) - } - defer srcArchive.Close() - - dstInfo := archive.CopyInfo{Path: to} - - dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) - } - defer content.Close() - err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) - if err != nil { - return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) - } - - return nil -} - -type RunCmdOpt interface { - Apply(cfg *types.ExecConfig) error -} - -type RunCmdUser string - -var _ RunCmdOpt = (*RunCmdUser)(nil) - -func (u RunCmdUser) Apply(cfg *types.ExecConfig) error { - cfg.User = string(u) - return nil -} - -func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { - runCfg := types.ExecConfig{ - AttachStdout: true, - AttachStderr: true, - Cmd: cmd, - } - - for 
index, opt := range opts { - if err := opt.Apply(&runCfg); err != nil { - return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) - } - } - - ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg) - if err != nil { - return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg) - } - - resp, err := d.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{}) - if err != nil { - return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret) - } - defer resp.Close() - - var stdoutB bytes.Buffer - var stderrB bytes.Buffer - if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil { - return nil, nil, -1, fmt.Errorf("error reading command output: %v", err) - } - - stdout := stdoutB.Bytes() - stderr := stderrB.Bytes() - - // Fetch return code. - info, err := d.DockerAPI.ContainerExecInspect(ctx, ret.ID) - if err != nil { - return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err) - } - - return stdout, stderr, info.ExitCode, nil -} - -func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { - runCfg := types.ExecConfig{ - AttachStdout: true, - AttachStderr: true, - Cmd: cmd, - } - - for index, opt := range opts { - if err := opt.Apply(&runCfg); err != nil { - return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) - } - } - - ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg) - if err != nil { - return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg) - } - - err = d.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{}) - if err != nil { - return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret) - } - - return ret.ID, nil -} - -// Mapping of path->contents -type PathContents interface { - UpdateHeader(header *tar.Header) error - Get() ([]byte, error) -} - -type FileContents struct { - Data []byte - Mode int64 - UID int - GID int -} - -func (b FileContents) UpdateHeader(header *tar.Header) error { - header.Mode = b.Mode - header.Uid = b.UID - header.Gid = b.GID - return nil -} - -func (b FileContents) Get() ([]byte, error) { - return b.Data, nil -} - -func PathContentsFromBytes(data []byte) PathContents { - return FileContents{ - Data: data, - Mode: 0o644, - } -} - -func PathContentsFromString(data string) PathContents { - return PathContentsFromBytes([]byte(data)) -} - -type BuildContext map[string]PathContents - -func NewBuildContext() BuildContext { - return BuildContext{} -} - -func BuildContextFromTarball(reader io.Reader) (BuildContext, error) { - archive := tar.NewReader(reader) - bCtx := NewBuildContext() - - for true { - header, err := archive.Next() - if err != nil { - if err == io.EOF { - break - } - - return nil, fmt.Errorf("failed to parse provided tarball: %v", err) - } - - data := make([]byte, int(header.Size)) - read, err := archive.Read(data) - if err != nil { - return nil, fmt.Errorf("failed to parse read from provided tarball: %v", err) - } - - if read != int(header.Size) { - return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size) - } - - bCtx[header.Name] = FileContents{ - Data: data, - Mode: header.Mode, - UID: header.Uid, - GID: header.Gid, - } - } - - return bCtx, nil -} - -func (bCtx *BuildContext) ToTarball() (io.Reader, error) { - var err error - buffer := 
new(bytes.Buffer) - tarBuilder := tar.NewWriter(buffer) - defer tarBuilder.Close() - - for filepath, contents := range *bCtx { - fileHeader := &tar.Header{Name: filepath} - if contents == nil && !strings.HasSuffix(filepath, "/") { - return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath) - } - - if err := contents.UpdateHeader(fileHeader); err != nil { - return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err) - } - - var rawContents []byte - if contents != nil { - rawContents, err = contents.Get() - if err != nil { - return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err) - } - - fileHeader.Size = int64(len(rawContents)) - } - - if err := tarBuilder.WriteHeader(fileHeader); err != nil { - return nil, fmt.Errorf("failed to write tar header entry for %v: %w", filepath, err) - } - - if contents != nil { - if _, err := tarBuilder.Write(rawContents); err != nil { - return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err) - } - } - } - - return bytes.NewReader(buffer.Bytes()), nil -} - -type BuildOpt interface { - Apply(cfg *types.ImageBuildOptions) error -} - -type BuildRemove bool - -var _ BuildOpt = (*BuildRemove)(nil) - -func (u BuildRemove) Apply(cfg *types.ImageBuildOptions) error { - cfg.Remove = bool(u) - return nil -} - -type BuildForceRemove bool - -var _ BuildOpt = (*BuildForceRemove)(nil) - -func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error { - cfg.ForceRemove = bool(u) - return nil -} - -type BuildPullParent bool - -var _ BuildOpt = (*BuildPullParent)(nil) - -func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error { - cfg.PullParent = bool(u) - return nil -} - -type BuildArgs map[string]*string - -var _ BuildOpt = (*BuildArgs)(nil) - -func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error { - cfg.BuildArgs = u - return nil -} - -type BuildTags []string - -var _ BuildOpt = (*BuildTags)(nil) - -func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error { - cfg.Tags = u - return nil -} - -const containerfilePath = "_containerfile" - -func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { - var cfg types.ImageBuildOptions - - // Build container context tarball, provisioning containerfile in. - containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile)) - tar, err := containerContext.ToTarball() - if err != nil { - return nil, fmt.Errorf("failed to create build image context tarball: %w", err) - } - cfg.Dockerfile = "/" + containerfilePath - - // Apply all given options - for index, opt := range opts { - if err := opt.Apply(&cfg); err != nil { - return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err) - } - } - - resp, err := d.DockerAPI.ImageBuild(ctx, tar, cfg) - if err != nil { - return nil, fmt.Errorf("failed to build image: %v", err) - } - - output, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read image build output: %w", err) - } - - return output, nil -} - -func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error { - // XXX: currently we use the default options but we might want to allow - // modifying cfg.CopyUIDGID in the future. - var cfg types.CopyToContainerOptions - - // Convert our provided contents to a tarball to ship up. 
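BuildContext.ToTarball above is archive/tar over an in-memory buffer; the Docker API accepts any io.Reader shaped that way, for both image builds and CopyToContainer. A standalone sketch of the same construction, with hypothetical names:

```
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// tarballFromFiles builds an in-memory tarball the way ToTarball does,
// mapping path -> contents with a fixed file mode.
func tarballFromFiles(files map[string][]byte) (io.Reader, error) {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	for name, data := range files {
		hdr := &tar.Header{
			Name: name,
			Mode: 0o644,
			Size: int64(len(data)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, fmt.Errorf("writing header for %s: %w", name, err)
		}
		if _, err := tw.Write(data); err != nil {
			return nil, fmt.Errorf("writing body for %s: %w", name, err)
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return bytes.NewReader(buf.Bytes()), nil
}

func main() {
	r, err := tarballFromFiles(map[string][]byte{
		"_containerfile": []byte("FROM scratch\n"),
	})
	if err != nil {
		log.Fatal(err)
	}
	n, _ := io.Copy(io.Discard, r)
	fmt.Println("tarball bytes:", n)
}
```

Closing the writer before handing out the reader guarantees the tar footer is present in the returned bytes.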
- tar, err := contents.ToTarball() - if err != nil { - return fmt.Errorf("failed to build contents into tarball: %v", err) - } - - return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg) -} - -func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) { - reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source) - if err != nil { - return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err) - } - - result, err := BuildContextFromTarball(reader) - if err != nil { - return nil, nil, fmt.Errorf("failed to build archive from result: %v", err) - } - - return result, &stat, nil -} - -func (d *Runner) GetNetworkAndAddresses(container string) (map[string]string, error) { - response, err := d.DockerAPI.ContainerInspect(context.Background(), container) - if err != nil { - return nil, fmt.Errorf("failed to fetch container inspection data: %v", err) - } - - if response.NetworkSettings == nil || len(response.NetworkSettings.Networks) == 0 { - return nil, fmt.Errorf("container (%v) had no associated network settings: %v", container, response) - } - - ret := make(map[string]string) - ns := response.NetworkSettings.Networks - for network, data := range ns { - if data == nil { - continue - } - - ret[network] = data.IPAddress - } - - if len(ret) == 0 { - return nil, fmt.Errorf("no valid network data for container (%v): %v", container, response) - } - - return ret, nil -} diff --git a/helper/testhelpers/etcd/etcdhelper.go b/helper/testhelpers/etcd/etcdhelper.go index cb23ad9fb54b..dc8f796e1564 100644 --- a/helper/testhelpers/etcd/etcdhelper.go +++ b/helper/testhelpers/etcd/etcdhelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package etcd @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" clientv3 "go.etcd.io/etcd/client/v3" ) diff --git a/helper/testhelpers/fakegcsserver/fake-gcs-server.go b/helper/testhelpers/fakegcsserver/fake-gcs-server.go index 26ba503e9b6d..e3f6c7021e96 100644 --- a/helper/testhelpers/fakegcsserver/fake-gcs-server.go +++ b/helper/testhelpers/fakegcsserver/fake-gcs-server.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package fakegcsserver @@ -12,7 +12,7 @@ import ( "testing" "cloud.google.com/go/storage" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "google.golang.org/api/iterator" "google.golang.org/api/option" ) diff --git a/helper/testhelpers/generaterootkind_enumer.go b/helper/testhelpers/generaterootkind_enumer.go new file mode 100644 index 000000000000..496b4eb98e76 --- /dev/null +++ b/helper/testhelpers/generaterootkind_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=GenerateRootKind -trimprefix=GenerateRoot"; DO NOT EDIT. 
+ +package testhelpers + +import ( + "fmt" +) + +const _GenerateRootKindName = "RegularDRGenerateRecovery" + +var _GenerateRootKindIndex = [...]uint8{0, 7, 9, 25} + +func (i GenerateRootKind) String() string { + if i < 0 || i >= GenerateRootKind(len(_GenerateRootKindIndex)-1) { + return fmt.Sprintf("GenerateRootKind(%d)", i) + } + return _GenerateRootKindName[_GenerateRootKindIndex[i]:_GenerateRootKindIndex[i+1]] +} + +var _GenerateRootKindValues = []GenerateRootKind{0, 1, 2} + +var _GenerateRootKindNameToValueMap = map[string]GenerateRootKind{ + _GenerateRootKindName[0:7]: 0, + _GenerateRootKindName[7:9]: 1, + _GenerateRootKindName[9:25]: 2, +} + +// GenerateRootKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func GenerateRootKindString(s string) (GenerateRootKind, error) { + if val, ok := _GenerateRootKindNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to GenerateRootKind values", s) +} + +// GenerateRootKindValues returns all values of the enum +func GenerateRootKindValues() []GenerateRootKind { + return _GenerateRootKindValues +} + +// IsAGenerateRootKind returns "true" if the value is listed in the enum definition. "false" otherwise +func (i GenerateRootKind) IsAGenerateRootKind() bool { + for _, v := range _GenerateRootKindValues { + if i == v { + return true + } + } + return false +} diff --git a/helper/testhelpers/ldap/ldaphelper.go b/helper/testhelpers/ldap/ldaphelper.go index 15b405f790e3..fa6cf9ff999d 100644 --- a/helper/testhelpers/ldap/ldaphelper.go +++ b/helper/testhelpers/ldap/ldaphelper.go @@ -1,27 +1,38 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package ldap import ( + "bytes" "context" "fmt" + "runtime" + "strings" "testing" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/cap/ldap" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/ldaputil" ) func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ldaputil.ConfigEntry) { + // note: this image isn't supported on arm64 architecture in CI. + // but if you're running on Apple Silicon, feel free to comment out the code below locally. + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + + logsWriter := bytes.NewBuffer([]byte{}) + runner, err := docker.NewServiceRunner(docker.RunOptions{ - // Currently set to "michelvocks" until https://github.com/rroemhild/docker-test-openldap/pull/14 - // has been merged. 
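The generated enumer file above relies on a standard trick: every name is concatenated into one backing string and an index array marks the boundaries, so String() costs two slice bounds instead of a switch. A hand-written miniature of the same layout; Color is a hypothetical enum:

```
package main

import "fmt"

type Color int

const (
	Red Color = iota
	Green
	Blue
)

// One backing string plus boundary offsets, mirroring the generated
// _GenerateRootKindName / _GenerateRootKindIndex pair.
const colorName = "RedGreenBlue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(colorIndex)-1 {
		return fmt.Sprintf("Color(%d)", int(c))
	}
	return colorName[colorIndex[c]:colorIndex[c+1]]
}

func main() {
	fmt.Println(Red, Green, Blue, Color(7)) // Red Green Blue Color(7)
}
```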
- ImageRepo: "docker.mirror.hashicorp.services/michelvocks/docker-test-openldap", + ImageRepo: "ghcr.io/rroemhild/docker-test-openldap", ImageTag: version, ContainerName: "ldap", - Ports: []string{"389/tcp"}, + Ports: []string{"10389/tcp"}, // Env: []string{"LDAP_DEBUG_LEVEL=384"}, + LogStderr: logsWriter, + LogStdout: logsWriter, }) if err != nil { t.Fatalf("could not start local LDAP docker container: %s", err) @@ -36,31 +47,49 @@ func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ld cfg.GroupDN = "ou=people,dc=planetexpress,dc=com" cfg.GroupAttr = "cn" cfg.RequestTimeout = 60 + cfg.MaximumPageSize = 1000 - svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { - connURL := fmt.Sprintf("ldap://%s:%d", host, port) - cfg.Url = connURL - logger := hclog.New(nil) - client := ldaputil.Client{ - LDAP: ldaputil.NewLDAP(), - Logger: logger, - } + var started bool + + for i := 0; i < 3; i++ { + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + connURL := fmt.Sprintf("ldap://%s:%d", host, port) + cfg.Url = connURL + + client, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg)) + if err != nil { + return nil, err + } - conn, err := client.DialLDAP(cfg) + defer client.Close(ctx) + + _, err = client.Authenticate(ctx, "Philip J. Fry", "fry") + if err != nil { + return nil, err + } + + return docker.NewServiceURLParse(connURL) + }) if err != nil { - return nil, err + t.Logf("could not start local LDAP docker container: %s", err) + t.Log("Docker container logs: ") + t.Log(logsWriter.String()) + continue } - defer conn.Close() - if _, err := client.GetUserBindDN(cfg, conn, "Philip J. Fry"); err != nil { - return nil, err + started = true + cleanup = func() { + if t.Failed() { + t.Log(logsWriter.String()) + } + svc.Cleanup() } + break + } - return docker.NewServiceURLParse(connURL) - }) - if err != nil { - t.Fatalf("could not start local LDAP docker container: %s", err) + if !started { + t.FailNow() } - return svc.Cleanup, cfg + return cleanup, cfg } diff --git a/helper/testhelpers/logical/testing.go b/helper/testhelpers/logical/testing.go index f634be2f39b8..ad8149c1ec18 100644 --- a/helper/testhelpers/logical/testing.go +++ b/helper/testhelpers/logical/testing.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package testing diff --git a/helper/testhelpers/logical/testing_test.go b/helper/testhelpers/logical/testing_test.go index 9f2d74b72f10..a73b91ecd05a 100644 --- a/helper/testhelpers/logical/testing_test.go +++ b/helper/testhelpers/logical/testing_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package testing diff --git a/helper/testhelpers/minimal/minimal.go b/helper/testhelpers/minimal/minimal.go new file mode 100644 index 000000000000..5af4d6d6031c --- /dev/null +++ b/helper/testhelpers/minimal/minimal.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
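A sketch of a test consuming the reworked LDAP helper above (hypothetical test; the "latest" image tag is an assumption, and cfg comes back pre-populated for the Futurama test directory baked into the image):

```go
package example

import (
	"testing"

	ldaphelper "github.com/hashicorp/vault/helper/testhelpers/ldap"
)

func TestWithOpenLDAP(t *testing.T) {
	// "latest" is an illustrative tag for ghcr.io/rroemhild/docker-test-openldap.
	cleanup, cfg := ldaphelper.PrepareTestContainer(t, "latest")
	defer cleanup()

	// cfg is pre-populated: BindDN/BindPassword, UserDN, GroupDN, and the
	// container URL the startup probe authenticated against.
	t.Logf("LDAP listening at %s", cfg.Url)
}
```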
+// SPDX-License-Identifier: BUSL-1.1 + +package minimal + +import ( + "testing" + + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/audit" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/copystructure" +) + +// NewTestSoloCluster is a simpler version of NewTestCluster that only creates +// single-node clusters. It is intentionally minimalist; if you need something +// from vault.TestClusterOptions, use NewTestCluster instead. It should work fine +// with a nil config argument. There is no need to call Start or Cleanup or +// TestWaitActive on the resulting cluster. +func NewTestSoloCluster(t testing.TB, config *vault.CoreConfig) *vault.TestCluster { + logger := corehelpers.NewTestLogger(t) + + mycfg := &vault.CoreConfig{} + + if config != nil { + // It's rude to modify an input argument as a side effect + copy, err := copystructure.Copy(config) + if err != nil { + t.Fatal(err) + } + mycfg = copy.(*vault.CoreConfig) + } + if mycfg.Physical == nil { + // Don't use NewTransactionalInmem because that would enable replication, + // which we don't care about in our case (use NewTestCluster for that). + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + mycfg.Physical = inm + } + if mycfg.CredentialBackends == nil { + mycfg.CredentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } + } + if mycfg.LogicalBackends == nil { + mycfg.LogicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + } + } + if mycfg.AuditBackends == nil { + mycfg.AuditBackends = map[string]audit.Factory{ + "file": audit.NewFileBackend, + "socket": audit.NewSocketBackend, + "syslog": audit.NewSyslogBackend, + } + } + if mycfg.BuiltinRegistry == nil { + mycfg.BuiltinRegistry = builtinplugins.Registry + } + + cluster := vault.NewTestCluster(t, mycfg, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: http.Handler, + Logger: logger, + }) + t.Cleanup(cluster.Cleanup) + return cluster +} diff --git a/helper/testhelpers/minio/miniohelper.go b/helper/testhelpers/minio/miniohelper.go index d6aa8b088f8a..67d611e40a4c 100644 --- a/helper/testhelpers/minio/miniohelper.go +++ b/helper/testhelpers/minio/miniohelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package minio @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type Config struct { diff --git a/helper/testhelpers/mongodb/mongodbhelper.go b/helper/testhelpers/mongodb/mongodbhelper.go index b0330a9444bc..7ca214fcc081 100644 --- a/helper/testhelpers/mongodb/mongodbhelper.go +++ b/helper/testhelpers/mongodb/mongodbhelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
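A sketch of the intended call pattern for NewTestSoloCluster above (hypothetical test; nil config is explicitly supported, and cleanup is registered via t.Cleanup internally):

```go
package example

import (
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/minimal"
)

func TestSoloCluster(t *testing.T) {
	// No Start/Cleanup/TestWaitActive calls are needed.
	cluster := minimal.NewTestSoloCluster(t, nil)
	client := cluster.Cores[0].Client

	health, err := client.Sys().Health()
	if err != nil {
		t.Fatal(err)
	}
	if !health.Initialized {
		t.Fatal("expected an initialized, unsealed solo cluster")
	}
}
```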
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readpref" diff --git a/helper/testhelpers/mssql/mssqlhelper.go b/helper/testhelpers/mssql/mssqlhelper.go index f1abf10b4cb0..154caf259843 100644 --- a/helper/testhelpers/mssql/mssqlhelper.go +++ b/helper/testhelpers/mssql/mssqlhelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mssqlhelper @@ -9,9 +9,12 @@ import ( "fmt" "net/url" "os" + "runtime" + "strings" "testing" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/docker" ) const mssqlPassword = "yourStrong(!)Password" @@ -22,36 +25,44 @@ const mssqlPassword = "yourStrong(!)Password" const numRetries = 3 func PrepareMSSQLTestContainer(t *testing.T) (cleanup func(), retURL string) { + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if os.Getenv("MSSQL_URL") != "" { return func() {}, os.Getenv("MSSQL_URL") } + logger := corehelpers.NewTestLogger(t) + var err error for i := 0; i < numRetries; i++ { var svc *docker.Service - runner, err := docker.NewServiceRunner(docker.RunOptions{ + var runner *docker.Runner + runner, err = docker.NewServiceRunner(docker.RunOptions{ ContainerName: "sqlserver", ImageRepo: "mcr.microsoft.com/mssql/server", - ImageTag: "2017-latest-ubuntu", + ImageTag: "2022-latest", Env: []string{"ACCEPT_EULA=Y", "SA_PASSWORD=" + mssqlPassword}, Ports: []string{"1433/tcp"}, LogConsumer: func(s string) { - if t.Failed() { - t.Logf("container logs: %s", s) - } + logger.Info(s) }, }) if err != nil { - t.Fatalf("Could not start docker MSSQL: %s", err) + logger.Error("failed creating new service runner", "error", err.Error()) + continue } svc, err = runner.StartService(context.Background(), connectMSSQL) if err == nil { return svc.Cleanup, svc.Config.URL().String() } + + logger.Error("failed starting service", "error", err.Error()) } - t.Fatalf("Could not start docker MSSQL: %s", err) + t.Fatalf("Could not start docker MSSQL; last error: %v", err) return nil, "" } diff --git a/helper/testhelpers/mysql/mysqlhelper.go b/helper/testhelpers/mysql/mysqlhelper.go index 8be498e5107d..93b2cd551e05 100644 --- a/helper/testhelpers/mysql/mysqlhelper.go +++ b/helper/testhelpers/mysql/mysqlhelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysqlhelper @@ -8,10 +8,11 @@ import ( "database/sql" "fmt" "os" + "runtime" "strings" "testing" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type Config struct { @@ -26,6 +27,12 @@ func PrepareTestContainer(t *testing.T, legacy bool, pw string) (func(), string) return func() {}, os.Getenv("MYSQL_URL") } + // ARM64 is only supported on MySQL 8.0 and above. If we update + // our image and supported version to 8.0, we can unskip these tests.
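A sketch of a test consuming the MSSQL helper above (hypothetical test; MSSQL_URL short-circuits the container start, ARM hosts are skipped, and startup is retried up to numRetries times):

```go
package example

import (
	"testing"

	mssqlhelper "github.com/hashicorp/vault/helper/testhelpers/mssql"
)

func TestWithMSSQL(t *testing.T) {
	cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t)
	defer cleanup()

	// connURL is ready to hand to a database/sql driver or to a Vault
	// database secrets engine configuration.
	t.Logf("MSSQL reachable at %s", connURL)
}
```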
+ if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as MySQL 5.7 is not supported on ARM architectures") + } + imageVersion := "5.7" if legacy { imageVersion = "5.6" diff --git a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go index e9a006704492..2d9c2355399d 100644 --- a/helper/testhelpers/pluginhelpers/pluginhelpers.go +++ b/helper/testhelpers/pluginhelpers/pluginhelpers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Package pluginhelpers contains testhelpers that don't depend on package // vault, and thus can be used within vault (as well as elsewhere.) @@ -12,10 +12,11 @@ import ( "os/exec" "path" "path/filepath" + "strings" "sync" + "testing" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/go-testing-interface" ) var ( @@ -24,14 +25,16 @@ var ( ) type TestPlugin struct { - Name string - Typ consts.PluginType - Version string - FileName string - Sha256 string + Name string + Typ consts.PluginType + Version string + FileName string + Sha256 string + Image string + ImageSha256 string } -func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, string) { +func GetPlugin(t testing.TB, typ consts.PluginType) (string, string, string, string) { t.Helper() var pluginName string var pluginType string @@ -62,7 +65,7 @@ func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, stri // to mount a plugin, we need a working binary plugin, so we compile one here. // pluginVersion is used to override the plugin's self-reported version -func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin { +func CompilePlugin(t testing.TB, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin { t.Helper() pluginName, pluginType, pluginMain, pluginVersionLocation := GetPlugin(t, typ) @@ -73,15 +76,17 @@ func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, plu var pluginBytes []byte dir := "" - var err error pluginRootDir := "builtin" if typ == consts.PluginTypeDatabase { pluginRootDir = "plugins" } for { - dir, err = os.Getwd() - if err != nil { - t.Fatal(err) + // So that we can assign to dir without overshadowing the other + // err variables. + var getWdErr error + dir, getWdErr = os.Getwd() + if getWdErr != nil { + t.Fatal(getWdErr) } // detect if we are in a subdirectory or the root directory and compensate if _, err := os.Stat(pluginRootDir); os.IsNotExist(err) { @@ -111,6 +116,7 @@ func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, plu } line = append(line, "-o", pluginPath, pluginMain) cmd := exec.Command("go", line...) 
+ cmd.Env = append(os.Environ(), "CGO_ENABLED=0") cmd.Dir = dir output, err := cmd.CombinedOutput() if err != nil { @@ -124,15 +130,20 @@ func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, plu } // write the cached plugin if necessary - if _, err := os.Stat(pluginPath); os.IsNotExist(err) { - err = os.WriteFile(pluginPath, pluginBytes, 0o755) - } - if err != nil { - t.Fatal(err) + _, statErr := os.Stat(pluginPath) + if os.IsNotExist(statErr) { + err := os.WriteFile(pluginPath, pluginBytes, 0o755) + if err != nil { + t.Fatal(err) + } + } else { + if statErr != nil { + t.Fatal(statErr) + } } sha := sha256.New() - _, err = sha.Write(pluginBytes) + _, err := sha.Write(pluginBytes) if err != nil { t.Fatal(err) } @@ -144,3 +155,27 @@ func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, plu Sha256: fmt.Sprintf("%x", sha.Sum(nil)), } } + +func BuildPluginContainerImage(t testing.TB, plugin TestPlugin, pluginDir string) (image string, sha256 string) { + t.Helper() + ref := plugin.Name + if plugin.Version != "" { + ref += ":" + strings.TrimPrefix(plugin.Version, "v") + } else { + ref += ":latest" + } + args := []string{"build", "--tag=" + ref, "--build-arg=plugin=" + plugin.FileName, "--file=vault/testdata/Dockerfile", pluginDir} + cmd := exec.Command("docker", args...) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(fmt.Errorf("error running docker build: %v, output: %s", err, output)) + } + + cmd = exec.Command("docker", "images", ref, "--format={{ .ID }}", "--no-trunc") + id, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(fmt.Errorf("error running docker images: %v, output: %s", err, id)) + } + + return plugin.Name, strings.TrimSpace(strings.TrimPrefix(string(id), "sha256:")) +} diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go index c79c1f5d07f1..c6ced3190b25 100644 --- a/helper/testhelpers/postgresql/postgresqlhelper.go +++ b/helper/testhelpers/postgresql/postgresqlhelper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
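Taken together, CompilePlugin and BuildPluginContainerImage above yield a compiled test plugin plus a locally tagged container image for it. A sketch of the flow (hypothetical wrapper; it assumes a local docker daemon, the vault/testdata/Dockerfile referenced above, and being run from inside the vault repo tree so CompilePlugin can locate the plugin sources):

```go
package example

import (
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/pluginhelpers"
	"github.com/hashicorp/vault/sdk/helper/consts"
)

func buildTestPluginImage(t *testing.T) (string, string) {
	t.Helper()

	pluginDir := t.TempDir()

	// Compile a secrets plugin binary into pluginDir; plugin.Sha256 is what
	// you would register in Vault's plugin catalog for the binary form.
	plugin := pluginhelpers.CompilePlugin(t, consts.PluginTypeSecrets, "v1.0.0", pluginDir)

	// Tags <name>:<version-without-v> (or :latest) and returns the image
	// name plus the image ID's sha256 with the "sha256:" prefix trimmed.
	image, sha := pluginhelpers.BuildPluginContainerImage(t, plugin, pluginDir)
	return image, sha
}
```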
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package postgresql @@ -9,18 +9,53 @@ import ( "fmt" "net/url" "os" + "strconv" "testing" + "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/pluginutil" ) -func PrepareTestContainer(t *testing.T, version string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=database", +const ( + defaultPGImage = "docker.mirror.hashicorp.services/postgres" + defaultPGVersion = "13.4-buster" + defaultPGPass = "secret" +) + +func defaultRunOpts(t *testing.T) docker.RunOptions { + return docker.RunOptions{ + ContainerName: "postgres", + ImageRepo: defaultPGImage, + ImageTag: defaultPGVersion, + Env: []string{ + "POSTGRES_PASSWORD=" + defaultPGPass, + "POSTGRES_DB=database", + }, + Ports: []string{"5432/tcp"}, + DoNotAutoRemove: false, + OmitLogTimestamps: true, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, } +} + +func PrepareTestContainer(t *testing.T) (func(), string) { + _, cleanup, url, _ := prepareTestContainer(t, defaultRunOpts(t), defaultPGPass, true, false, false) + + return cleanup, url +} + +func PrepareTestContainerSelfManaged(t *testing.T) (func(), *url.URL) { + return prepareTestContainerSelfManaged(t, defaultRunOpts(t), defaultPGPass, true, false, false) +} - _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) +func PrepareTestContainerMultiHost(t *testing.T) (func(), string) { + _, cleanup, url, _ := prepareTestContainer(t, defaultRunOpts(t), defaultPGPass, true, false, true) return cleanup, url } @@ -28,65 +63,149 @@ func PrepareTestContainer(t *testing.T, version string) (func(), string) { // PrepareTestContainerWithVaultUser will setup a test container with a Vault // admin user configured so that we can safely call rotate-root without // rotating the root DB credentials -func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context, version string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=database", +func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context) (func(), string) { + runner, cleanup, url, id := prepareTestContainer(t, defaultRunOpts(t), defaultPGPass, true, false, false) + + cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"} + mustRunCommand(t, ctx, runner, id, cmd) + + return cleanup, url +} + +// PrepareTestContainerWithSSL will setup a test container with SSL enabled so +// that we can test client certificate authentication. 
+func PrepareTestContainerWithSSL( + t *testing.T, + sslMode string, + caCert certhelpers.Certificate, + clientCert certhelpers.Certificate, + useFallback bool, +) (func(), string) { + runOpts := defaultRunOpts(t) + runner, err := docker.NewServiceRunner(runOpts) + if err != nil { + t.Fatalf("Could not provision docker service runner: %s", err) } - runner, cleanup, url, id := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) + // first we connect with username/password because ssl is not enabled yet + svc, id, err := runner.StartNewService(context.Background(), true, false, connectPostgres(defaultPGPass, runOpts.ImageRepo, false)) + if err != nil { + t.Fatalf("Could not start docker Postgres: %s", err) + } - cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"} - _, err := runner.RunCmdInBackground(ctx, id, cmd) + // Create certificates for postgres authentication + serverCert := certhelpers.NewCert(t, + certhelpers.CommonName("server"), + certhelpers.DNS("localhost"), + certhelpers.Parent(caCert), + ) + + bCtx := docker.NewBuildContext() + bCtx["ca.crt"] = docker.PathContentsFromBytes(caCert.CombinedPEM()) + bCtx["server.crt"] = docker.PathContentsFromBytes(serverCert.CombinedPEM()) + bCtx["server.key"] = &docker.FileContents{ + Data: serverCert.PrivateKeyPEM(), + Mode: 0o600, + // postgres uid + UID: 999, + } + + // https://www.postgresql.org/docs/current/auth-pg-hba-conf.html + clientAuthConfig := "echo 'hostssl all all all cert clientcert=verify-ca' > /var/lib/postgresql/data/pg_hba.conf" + bCtx["ssl-conf.sh"] = docker.PathContentsFromString(clientAuthConfig) + pgConfig := ` +cat << EOF > /var/lib/postgresql/data/postgresql.conf +# PostgreSQL configuration file +listen_addresses = '*' +max_connections = 100 +shared_buffers = 128MB +dynamic_shared_memory_type = posix +max_wal_size = 1GB +min_wal_size = 80MB +ssl = on +ssl_ca_file = '/var/lib/postgresql/ca.crt' +ssl_cert_file = '/var/lib/postgresql/server.crt' +ssl_key_file= '/var/lib/postgresql/server.key' +EOF +` + bCtx["pg-conf.sh"] = docker.PathContentsFromString(pgConfig) + + err = runner.CopyTo(id, "/var/lib/postgresql/", bCtx) if err != nil { - t.Fatalf("Could not run command (%v) in container: %v", cmd, err) + t.Fatalf("failed to copy to container: %v", err) } - return cleanup, url + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // overwrite the postgresql.conf config file with our ssl settings + mustRunCommand(t, ctx, runner, id, + []string{"bash", "/var/lib/postgresql/pg-conf.sh"}) + + // overwrite the pg_hba.conf file and set it to require SSL for each connection + mustRunCommand(t, ctx, runner, id, + []string{"bash", "/var/lib/postgresql/ssl-conf.sh"}) + + // reload so the config changes take effect and ssl is enabled + mustRunCommand(t, ctx, runner, id, + []string{"psql", "-U", "postgres", "-c", "SELECT pg_reload_conf()"}) + + if sslMode == "disable" { + // return non-tls connection url + return svc.Cleanup, svc.Config.URL().String() + } + + sslConfig := getPostgresSSLConfig( + t, + svc.Config.URL().Host, + sslMode, + string(caCert.CombinedPEM()), + string(clientCert.CombinedPEM()), + string(clientCert.PrivateKeyPEM()), + useFallback, + ) + if err != nil { + svc.Cleanup() + t.Fatalf("failed to connect to postgres container via SSL: %v", err) + } + return svc.Cleanup, sslConfig.URL().String() } -func PrepareTestContainerWithPassword(t 
*testing.T, version, password string) (func(), string) { - env := []string{ +func PrepareTestContainerWithPassword(t *testing.T, password string) (func(), string) { + runOpts := defaultRunOpts(t) + runOpts.Env = []string{ "POSTGRES_PASSWORD=" + password, "POSTGRES_DB=database", } - _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, password, true, false, false, env) + _, cleanup, url, _ := prepareTestContainer(t, runOpts, password, true, false, false) return cleanup, url } -func PrepareTestContainerRepmgr(t *testing.T, name, version string, envVars []string) (*docker.Runner, func(), string, string) { - env := append(envVars, - "REPMGR_PARTNER_NODES=psql-repl-node-0,psql-repl-node-1", - "REPMGR_PRIMARY_HOST=psql-repl-node-0", - "REPMGR_PASSWORD=repmgrpass", - "POSTGRESQL_PASSWORD=secret") - - return prepareTestContainer(t, name, "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr", version, "secret", false, true, true, env) -} - -func prepareTestContainer(t *testing.T, name, repo, version, password string, - addSuffix, forceLocalAddr, doNotAutoRemove bool, envVars []string, +func prepareTestContainer(t *testing.T, runOpts docker.RunOptions, password string, addSuffix, forceLocalAddr, useFallback bool, ) (*docker.Runner, func(), string, string) { if os.Getenv("PG_URL") != "" { return nil, func() {}, "", os.Getenv("PG_URL") } - if version == "" { - version = "11" + runner, err := docker.NewServiceRunner(runOpts) + if err != nil { + t.Fatalf("Could not start docker Postgres: %s", err) } - runOpts := docker.RunOptions{ - ContainerName: name, - ImageRepo: repo, - ImageTag: version, - Env: envVars, - Ports: []string{"5432/tcp"}, - DoNotAutoRemove: doNotAutoRemove, + svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, runOpts.ImageRepo, useFallback)) + if err != nil { + t.Fatalf("Could not start docker Postgres: %s", err) } - if repo == "bitnami/postgresql-repmgr" { - runOpts.NetworkID = os.Getenv("POSTGRES_MULTIHOST_NET") + + return runner, svc.Cleanup, svc.Config.URL().String(), containerID +} + +func prepareTestContainerSelfManaged(t *testing.T, runOpts docker.RunOptions, password string, addSuffix, forceLocalAddr, useFallback bool, +) (func(), *url.URL) { + if os.Getenv("PG_URL") != "" { + return func() {}, nil } runner, err := docker.NewServiceRunner(runOpts) @@ -94,20 +213,61 @@ func prepareTestContainer(t *testing.T, name, repo, version, password string, t.Fatalf("Could not start docker Postgres: %s", err) } - svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, repo)) + svc, _, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, runOpts.ImageRepo, useFallback)) if err != nil { t.Fatalf("Could not start docker Postgres: %s", err) } - return runner, svc.Cleanup, svc.Config.URL().String(), containerID + return svc.Cleanup, svc.Config.URL() +} + +func getPostgresSSLConfig(t *testing.T, host, sslMode, caCert, clientCert, clientKey string, useFallback bool) docker.ServiceConfig { + if useFallback { + // set the first host to a bad address so we can test the fallback logic + host = "localhost:55," + host + } + + u := url.URL{} + + if ok, _ := strconv.ParseBool(os.Getenv(pluginutil.PluginUsePostgresSSLInline)); ok { + // TODO: remove this when we remove the underlying feature in a future SDK version + u = url.URL{ + Scheme: "postgres", + 
User: url.User("postgres"), + Host: host, + Path: "postgres", + RawQuery: url.Values{ + "sslmode": {sslMode}, + "sslinline": {"true"}, + "sslrootcert": {caCert}, + "sslcert": {clientCert}, + "sslkey": {clientKey}, + }.Encode(), + } + } else { + u = url.URL{ + Scheme: "postgres", + User: url.User("postgres"), + Host: host, + Path: "postgres", + RawQuery: url.Values{"sslmode": {sslMode}}.Encode(), + } + } + + return docker.NewServiceURL(u) } -func connectPostgres(password, repo string) docker.ServiceAdapter { +func connectPostgres(password, repo string, useFallback bool) docker.ServiceAdapter { return func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + hostAddr := fmt.Sprintf("%s:%d", host, port) + if useFallback { + // set the first host to a bad address so we can test the fallback logic + hostAddr = "localhost:55," + hostAddr + } u := url.URL{ Scheme: "postgres", User: url.UserPassword("postgres", password), - Host: fmt.Sprintf("%s:%d", host, port), + Host: hostAddr, Path: "postgres", RawQuery: "sslmode=disable", } @@ -136,3 +296,14 @@ func RestartContainer(t *testing.T, ctx context.Context, runner *docker.Runner, t.Fatalf("Could not restart docker Postgres: %s", err) } } + +func mustRunCommand(t *testing.T, ctx context.Context, runner *docker.Runner, containerID string, cmd []string) { + t.Helper() + _, stderr, retcode, err := runner.RunCmdWithOutput(ctx, containerID, cmd) + if err != nil { + t.Fatalf("Could not run command (%v) in container: %v", cmd, err) + } + if retcode != 0 || len(stderr) != 0 { + t.Fatalf("exit code: %v, stderr: %v", retcode, string(stderr)) + } +} diff --git a/helper/testhelpers/replication/testcluster.go b/helper/testhelpers/replication/testcluster.go new file mode 100644 index 000000000000..32297c1b7b25 --- /dev/null +++ b/helper/testhelpers/replication/testcluster.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package replication + +import ( + "context" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" +) + +// SetCorePerf returns a ReplicationSet using NewTestCluster, +// i.e. core-based rather than subprocess- or docker-based clusters. +// The set will contain two clusters A and C connected using perf replication. +func SetCorePerf(t *testing.T, conf *vault.CoreConfig, opts *vault.TestClusterOptions) *testcluster.ReplicationSet { + r := NewReplicationSetCore(t, conf, opts, teststorage.InmemBackendSetup) + t.Cleanup(r.Cleanup) + + // By default NewTestCluster will mount a kv under secret/. This isn't + // done by docker-based clusters, so remove this to make us more like that. 
+ require.Nil(t, r.Clusters["A"].Nodes()[0].APIClient().Sys().Unmount("secret")) + + err := r.StandardPerfReplication(context.Background()) + if err != nil { + t.Fatal(err) + } + return r +} + +func NewReplicationSetCore(t *testing.T, conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup teststorage.ClusterSetupMutator) *testcluster.ReplicationSet { + r := &testcluster.ReplicationSet{ + Clusters: map[string]testcluster.VaultCluster{}, + Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()), + } + + r.Builder = func(ctx context.Context, name string, baseLogger hclog.Logger) (testcluster.VaultCluster, error) { + conf, opts := teststorage.ClusterSetup(conf, opts, setup) + opts.Logger = baseLogger.Named(name) + return vault.NewTestCluster(t, conf, opts), nil + } + + a, err := r.Builder(context.TODO(), "A", r.Logger) + if err != nil { + t.Fatal(err) + } + r.Clusters["A"] = a + + return r +} diff --git a/helper/testhelpers/seal/sealhelper.go b/helper/testhelpers/seal/sealhelper.go index f4d5aa629262..474367faa4ec 100644 --- a/helper/testhelpers/seal/sealhelper.go +++ b/helper/testhelpers/seal/sealhelper.go @@ -1,30 +1,29 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package sealhelper import ( "path" "strconv" + "testing" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/testhelpers/teststorage" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" - "github.com/mitchellh/go-testing-interface" ) type TransitSealServer struct { *vault.TestCluster } -func NewTransitSealServer(t testing.T, idx int) *TransitSealServer { +func NewTransitSealServer(t testing.TB, idx int) *TransitSealServer { conf := &vault.CoreConfig{ LogicalBackends: map[string]logical.Factory{ "transit": transit.Factory, @@ -33,7 +32,7 @@ func NewTransitSealServer(t testing.T, idx int) *TransitSealServer { opts := &vault.TestClusterOptions{ NumCores: 1, HandlerFunc: http.Handler, - Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(idx)), + Logger: corehelpers.NewTestLogger(t).Named("transit-seal" + strconv.Itoa(idx)), } teststorage.InmemBackendSetup(conf, opts) cluster := vault.NewTestCluster(t, conf, opts) @@ -48,7 +47,7 @@ func NewTransitSealServer(t testing.T, idx int) *TransitSealServer { return &TransitSealServer{cluster} } -func (tss *TransitSealServer) MakeKey(t testing.T, key string) { +func (tss *TransitSealServer) MakeKey(t testing.TB, key string) { client := tss.Cores[0].Client if _, err := client.Logical().Write(path.Join("transit", "keys", key), nil); err != nil { t.Fatal(err) @@ -60,7 +59,7 @@ func (tss *TransitSealServer) MakeKey(t testing.T, key string) { } } -func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, error) { +func (tss *TransitSealServer) MakeSeal(t testing.TB, key string) (vault.Seal, error) { client := tss.Cores[0].Client wrapperConfig := map[string]string{ "address": client.Address(), @@ -69,12 +68,14 @@ func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, err "key_name": key, "tls_ca_cert": tss.CACertPEMFile, } - transitSeal, _, err := 
configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + transitSealWrapper, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } - return vault.NewAutoSeal(&seal.Access{ - Wrapper: transitSeal, - }) + access, err := seal.NewAccessFromWrapper(tss.Logger, transitSealWrapper, vault.SealConfigTypeTransit.String()) + if err != nil { + return nil, err + } + return vault.NewAutoSeal(access), nil } diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index c2d437abf279..e7f8ed30a068 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package testhelpers @@ -9,12 +9,11 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "math/rand" - "net/url" "os" "strings" - "sync/atomic" + "testing" "time" "github.com/armon/go-metrics" @@ -25,9 +24,9 @@ import ( "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/xor" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/go-testing-interface" ) +//go:generate enumer -type=GenerateRootKind -trimprefix=GenerateRoot type GenerateRootKind int const ( @@ -37,7 +36,7 @@ const ( ) // GenerateRoot generates a root token on the target cluster. -func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string { +func GenerateRoot(t testing.TB, cluster *vault.TestCluster, kind GenerateRootKind) string { t.Helper() token, err := GenerateRootWithError(t, cluster, kind) if err != nil { @@ -46,7 +45,7 @@ func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind return token } -func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) { +func GenerateRootWithError(t testing.TB, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) { t.Helper() // If recovery keys supported, use those to perform root token generation instead var keys [][]byte @@ -56,6 +55,9 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind Generat keys = cluster.BarrierKeys } client := cluster.Cores[0].Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() var err error var status *api.GenerateRootStatusResponse @@ -116,14 +118,14 @@ func RandomWithPrefix(name string) string { return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) } -func EnsureCoresSealed(t testing.T, c *vault.TestCluster) { +func EnsureCoresSealed(t testing.TB, c *vault.TestCluster) { t.Helper() for _, core := range c.Cores { EnsureCoreSealed(t, core) } } -func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) { +func EnsureCoreSealed(t testing.TB, core *vault.TestClusterCore) { t.Helper() core.Seal(t) timeout := time.Now().Add(60 * time.Second) @@ -138,7 +140,7 @@ func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) { } } -func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) { +func EnsureCoresUnsealed(t testing.TB, c *vault.TestCluster) { t.Helper() for i, core := range c.Cores { err := AttemptUnsealCore(c, core) @@ -148,7 +150,7 @@ func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) { } } -func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClusterCore) { +func EnsureCoreUnsealed(t testing.TB, c *vault.TestCluster, core 
*vault.TestClusterCore) { t.Helper() err := AttemptUnsealCore(c, core) if err != nil { @@ -177,6 +179,10 @@ func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error } client := core.Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() + client.Sys().ResetUnsealProcess() for j := 0; j < len(c.BarrierKeys); j++ { statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) @@ -202,17 +208,17 @@ func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error return nil } -func EnsureStableActiveNode(t testing.T, cluster *vault.TestCluster) { +func EnsureStableActiveNode(t testing.TB, cluster *vault.TestCluster) { t.Helper() deriveStableActiveCore(t, cluster) } -func DeriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { +func DeriveStableActiveCore(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore { t.Helper() return deriveStableActiveCore(t, cluster) } -func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { +func deriveStableActiveCore(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore { t.Helper() activeCore := DeriveActiveCore(t, cluster) minDuration := time.NewTimer(3 * time.Second) @@ -241,11 +247,14 @@ func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.Test return activeCore } -func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { +func DeriveActiveCore(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore { t.Helper() for i := 0; i < 60; i++ { for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -259,11 +268,14 @@ func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestCluste return nil } -func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestClusterCore { +func DeriveStandbyCores(t testing.TB, cluster *vault.TestCluster) []*vault.TestClusterCore { t.Helper() cores := make([]*vault.TestClusterCore, 0, 2) for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -275,7 +287,7 @@ func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestCl return cores } -func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) { +func WaitForNCoresUnsealed(t testing.TB, cluster *vault.TestCluster, n int) { t.Helper() for i := 0; i < 30; i++ { unsealed := 0 @@ -294,7 +306,7 @@ func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) { t.Fatalf("%d cores were not unsealed", n) } -func SealCores(t testing.T, cluster *vault.TestCluster) { +func SealCores(t testing.TB, cluster *vault.TestCluster) { t.Helper() for _, core := range cluster.Cores { if err := core.Shutdown(); err != nil { @@ -313,7 +325,7 @@ func SealCores(t testing.T, cluster *vault.TestCluster) { } } -func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) { +func WaitForNCoresSealed(t testing.TB, cluster *vault.TestCluster, n int) { t.Helper() for i := 0; i < 60; i++ { sealed := 0 @@ -332,7 +344,7 @@ func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) { t.Fatalf("%d cores were not sealed", n) } -func 
WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { +func WaitForActiveNode(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore { t.Helper() for i := 0; i < 60; i++ { for _, core := range cluster.Cores { @@ -348,7 +360,7 @@ func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClust return nil } -func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) { +func WaitForStandbyNode(t testing.TB, core *vault.TestClusterCore) { t.Helper() for i := 0; i < 30; i++ { if isLeader, _, clusterAddr, _ := core.Core.Leader(); isLeader != true && clusterAddr != "" { @@ -364,7 +376,7 @@ func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) { t.Fatalf("node did not become standby") } -func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]byte { +func RekeyCluster(t testing.TB, cluster *vault.TestCluster, recovery bool) [][]byte { t.Helper() cluster.Logger.Info("rekeying cluster", "recovery", recovery) client := cluster.Cores[0].Client @@ -422,68 +434,6 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by return newKeys } -// TestRaftServerAddressProvider is a ServerAddressProvider that uses the -// ClusterAddr() of each node to provide raft addresses. -// -// Note that TestRaftServerAddressProvider should only be used in cases where -// cores that are part of a raft configuration have already had -// startClusterListener() called (via either unsealing or raft joining). -type TestRaftServerAddressProvider struct { - Cluster *vault.TestCluster -} - -func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) { - for _, core := range p.Cluster.Cores { - if core.NodeID == string(id) { - parsed, err := url.Parse(core.ClusterAddr()) - if err != nil { - return "", err - } - - return raftlib.ServerAddress(parsed.Host), nil - } - } - - return "", errors.New("could not find cluster addr") -} - -func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { - addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} - - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - leader := cluster.Cores[0] - - // Seal the leader so we can install an address provider - { - EnsureCoreSealed(t, leader) - leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leader) - vault.TestWaitActive(t, leader.Core) - } - - leaderInfos := []*raft.LeaderJoinInfo{ - { - LeaderAPIAddr: leader.Client.Address(), - TLSConfig: leader.TLSConfig(), - }, - } - - // Join followers - for i := 1; i < len(cluster.Cores); i++ { - core := cluster.Cores[i] - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) - if err != nil { - t.Fatal(err) - } - - cluster.UnsealCore(t, core) - } - - WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) -} - // HardcodedServerAddressProvider is a ServerAddressProvider that uses // a hardcoded map of raft node addresses. 
// @@ -555,7 +505,7 @@ func RaftAppliedIndex(core *vault.TestClusterCore) uint64 { return core.UnderlyingRawStorage.(*raft.RaftBackend).AppliedIndex() } -func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) { +func WaitForRaftApply(t testing.TB, core *vault.TestClusterCore, index uint64) { t.Helper() backend := core.UnderlyingRawStorage.(*raft.RaftBackend) @@ -571,7 +521,7 @@ func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) { } // AwaitLeader waits for one of the cluster's nodes to become leader. -func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) { +func AwaitLeader(t testing.TB, cluster *vault.TestCluster) (int, error) { timeout := time.Now().Add(60 * time.Second) for { if time.Now().After(timeout) { @@ -595,7 +545,7 @@ func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) { return 0, fmt.Errorf("timeout waiting leader") } -func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} { +func GenerateDebugLogs(t testing.TB, client *api.Client) chan struct{} { t.Helper() stopCh := make(chan struct{}) @@ -634,7 +584,7 @@ func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} { // from the map by removing entries whose keys are in the raft configuration. // Remaining entries result in an error return so that the caller can poll for // an expected configuration. -func VerifyRaftPeers(t testing.T, client *api.Client, expected map[string]bool) error { +func VerifyRaftPeers(t testing.TB, client *api.Client, expected map[string]bool) error { t.Helper() resp, err := client.Logical().Read("sys/storage/raft/configuration") @@ -691,7 +641,7 @@ func SysMetricsReq(client *api.Client, cluster *vault.TestCluster, unauth bool) if err != nil { return nil, err } - bodyBytes, err := ioutil.ReadAll(resp.Response.Body) + bodyBytes, err := io.ReadAll(resp.Response.Body) if err != nil { return nil, err } @@ -770,7 +720,17 @@ func SetNonRootToken(client *api.Client) error { // RetryUntilAtCadence runs f until it returns a nil result or the timeout is reached. // If a nil result hasn't been obtained by timeout, calls t.Fatal. -func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func() error) { +func RetryUntilAtCadence(t testing.TB, timeout, sleepTime time.Duration, f func() error) { + t.Helper() + fail := func(err error) { + t.Fatalf("did not complete before deadline, err: %v", err) + } + RetryUntilAtCadenceWithHandler(t, timeout, sleepTime, fail, f) +} + +// RetryUntilAtCadenceWithHandler runs f until it returns a nil result or the timeout is reached. +// If a nil result hasn't been obtained by timeout, onFailure is called. +func RetryUntilAtCadenceWithHandler(t testing.TB, timeout, sleepTime time.Duration, onFailure func(error), f func() error) { t.Helper() deadline := time.Now().Add(timeout) var err error @@ -780,27 +740,28 @@ func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func() } time.Sleep(sleepTime) } - t.Fatalf("did not complete before deadline, err: %v", err) + onFailure(err) } -// RetryUntil runs f until it returns a nil result or the timeout is reached. +// RetryUntil runs f with a 100ms pause between calls, until f returns a nil result +// or the timeout is reached. // If a nil result hasn't been obtained by timeout, calls t.Fatal. -func RetryUntil(t testing.T, timeout time.Duration, f func() error) { +// NOTE: See RetryUntilAtCadence if you want to specify a different wait/sleep +// duration between calls. 
+func RetryUntil(t testing.TB, timeout time.Duration, f func() error) { t.Helper() - deadline := time.Now().Add(timeout) - var err error - for time.Now().Before(deadline) { - if err = f(); err == nil { - return - } - time.Sleep(100 * time.Millisecond) - } - t.Fatalf("did not complete before deadline, err: %v", err) + RetryUntilAtCadence(t, timeout, 100*time.Millisecond, f) +} + +// CreateEntityAndAlias clones an existing client and creates an entity/alias, uses userpass mount path +// It returns the cloned client, entityID, and aliasID. +func CreateEntityAndAlias(t testing.TB, client *api.Client, mountAccessor, entityName, aliasName string) (*api.Client, string, string) { + return CreateEntityAndAliasWithinMount(t, client, mountAccessor, "userpass", entityName, aliasName) } -// CreateEntityAndAlias clones an existing client and creates an entity/alias. +// CreateEntityAndAliasWithinMount clones an existing client and creates an entity/alias, within the specified mountPath // It returns the cloned client, entityID, and aliasID. -func CreateEntityAndAlias(t testing.T, client *api.Client, mountAccessor, entityName, aliasName string) (*api.Client, string, string) { +func CreateEntityAndAliasWithinMount(t testing.TB, client *api.Client, mountAccessor, mountPath, entityName, aliasName string) (*api.Client, string, string) { t.Helper() userClient, err := client.Clone() if err != nil { @@ -828,7 +789,8 @@ func CreateEntityAndAlias(t testing.T, client *api.Client, mountAccessor, entity if aliasID == "" { t.Fatal("Alias ID not present in response") } - _, err = client.Logical().WriteWithContext(context.Background(), fmt.Sprintf("auth/userpass/users/%s", aliasName), map[string]interface{}{ + path := fmt.Sprintf("auth/%s/users/%s", mountPath, aliasName) + _, err = client.Logical().WriteWithContext(context.Background(), path, map[string]interface{}{ "password": "testpassword", }) if err != nil { @@ -840,7 +802,7 @@ func CreateEntityAndAlias(t testing.T, client *api.Client, mountAccessor, entity // SetupTOTPMount enables the totp secrets engine by mounting it. This requires // that the test cluster has a totp backend available. -func SetupTOTPMount(t testing.T, client *api.Client) { +func SetupTOTPMount(t testing.TB, client *api.Client) { t.Helper() // Mount the TOTP backend mountInfo := &api.MountInput{ @@ -852,7 +814,7 @@ func SetupTOTPMount(t testing.T, client *api.Client) { } // SetupTOTPMethod configures the TOTP secrets engine with a provided config map. -func SetupTOTPMethod(t testing.T, client *api.Client, config map[string]interface{}) string { +func SetupTOTPMethod(t testing.TB, client *api.Client, config map[string]interface{}) string { t.Helper() resp1, err := client.Logical().Write("identity/mfa/method/totp", config) @@ -871,7 +833,7 @@ func SetupTOTPMethod(t testing.T, client *api.Client, config map[string]interfac // SetupMFALoginEnforcement configures a single enforcement method using the // provided config map. "name" field is required in the config map. -func SetupMFALoginEnforcement(t testing.T, client *api.Client, config map[string]interface{}) { +func SetupMFALoginEnforcement(t testing.TB, client *api.Client, config map[string]interface{}) { t.Helper() enfName, ok := config["name"] if !ok { @@ -886,7 +848,7 @@ func SetupMFALoginEnforcement(t testing.T, client *api.Client, config map[string // SetupUserpassMountAccessor sets up userpass auth and returns its mount // accessor. This requires that the test cluster has a "userpass" auth method // available. 
-func SetupUserpassMountAccessor(t testing.T, client *api.Client) string { +func SetupUserpassMountAccessor(t testing.TB, client *api.Client) string { t.Helper() // Enable Userpass authentication err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ @@ -909,7 +871,7 @@ // RegisterEntityInTOTPEngine registers an entity with a methodID and returns // the generated name. -func RegisterEntityInTOTPEngine(t testing.T, client *api.Client, entityID, methodID string) string { +func RegisterEntityInTOTPEngine(t testing.TB, client *api.Client, entityID, methodID string) string { t.Helper() totpGenName := fmt.Sprintf("%s-%s", entityID, methodID) secret, err := client.Logical().WriteWithContext(context.Background(), "identity/mfa/method/totp/admin-generate", map[string]interface{}{ @@ -943,7 +905,7 @@ func RegisterEntityInTOTPEngine(t testing.T, client *api.Client, entityID, metho } // GetTOTPCodeFromEngine requests a TOTP code from the specified enginePath. -func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) string { +func GetTOTPCodeFromEngine(t testing.TB, client *api.Client, enginePath string) string { t.Helper() totpPath := fmt.Sprintf("totp/code/%s", enginePath) secret, err := client.Logical().ReadWithContext(context.Background(), totpPath) @@ -958,7 +920,7 @@ func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) s // SetupLoginMFATOTP sets up a TOTP MFA using some basic configuration and // returns all relevant information to the client. -func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) { +func SetupLoginMFATOTP(t testing.TB, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) { t.Helper() // Mount the totp secrets engine SetupTOTPMount(t, client) @@ -994,7 +956,7 @@ func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitP return entityClient, entityID, methodID } -func SkipUnlessEnvVarsSet(t testing.T, envVars []string) { +func SkipUnlessEnvVarsSet(t testing.TB, envVars []string) { t.Helper() for _, i := range envVars { @@ -1003,3 +965,40 @@ } } } + +// WaitForNodesExcludingSelectedStandbys is a variation on WaitForActiveNodeAndStandbys. +// It waits for the active node before waiting for standby nodes; however, +// it will not wait for cores with indexes that match those specified as arguments. +// Whilst you could specify index 0 which is likely to be the leader node, the function +// checks for the leader first regardless of the indexes to skip, so it would be redundant to do so. +// The intention/use case for this function is to allow a cluster to start and become active with one +// or more nodes not joined, so that we can test scenarios where a node joins later. +// e.g. 4 nodes in the cluster, only 3 nodes in cluster 'active', 1 node can be joined later in tests.
+func WaitForNodesExcludingSelectedStandbys(t testing.TB, cluster *vault.TestCluster, indexesToSkip ...int) { + WaitForActiveNode(t, cluster) + + contains := func(elems []int, e int) bool { + for _, v := range elems { + if v == e { + return true + } + } + + return false + } + for i, core := range cluster.Cores { + if contains(indexesToSkip, i) { + continue + } + + if standby, _ := core.Core.Standby(); standby { + WaitForStandbyNode(t, core) + } + } +} + +// IsLocalOrRegressionTests returns true when the tests are running locally (not in CI), or when +// the regression test env var (VAULT_REGRESSION_TESTS) is provided. +func IsLocalOrRegressionTests() bool { + return os.Getenv("CI") == "" || os.Getenv("VAULT_REGRESSION_TESTS") == "true" +} diff --git a/helper/testhelpers/testhelpers_oss.go b/helper/testhelpers/testhelpers_oss.go index fc55e9b52c73..e7e6ff8ebafd 100644 --- a/helper/testhelpers/testhelpers_oss.go +++ b/helper/testhelpers/testhelpers_oss.go @@ -1,18 +1,19 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise package testhelpers import ( + "testing" + "github.com/hashicorp/vault/vault" - "github.com/mitchellh/go-testing-interface" ) // WaitForActiveNodeAndStandbys does nothing more than wait for the active node // on OSS. On enterprise it waits for perf standbys to be healthy too. -func WaitForActiveNodeAndStandbys(t testing.T, cluster *vault.TestCluster) { +func WaitForActiveNodeAndStandbys(t testing.TB, cluster *vault.TestCluster) { WaitForActiveNode(t, cluster) for _, core := range cluster.Cores { if standby, _ := core.Core.Standby(); standby { diff --git a/helper/testhelpers/teststorage/consul/consul.go b/helper/testhelpers/teststorage/consul/consul.go index bfea5ddbb4ae..cb5cec92d561 100644 --- a/helper/testhelpers/teststorage/consul/consul.go +++ b/helper/testhelpers/teststorage/consul/consul.go @@ -1,20 +1,20 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul import ( + "sync" + "testing" realtesting "testing" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/testhelpers/consul" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" physConsul "github.com/hashicorp/vault/physical/consul" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/go-testing-interface" ) -func MakeConsulBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { +func MakeConsulBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle { cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true) consulConf := map[string]string{ @@ -33,5 +33,93 @@ func MakeConsulBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendB } func ConsulBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { - opts.PhysicalFactory = teststorage.SharedPhysicalFactory(MakeConsulBackend) + m := &consulContainerManager{} + opts.PhysicalFactory = m.Backend +} + +// consulContainerManager exposes Backend which matches the PhysicalFactory func +// type. When called, it will ensure that a separate Consul container is started +// for each distinct vault cluster that calls it and ensures that each Vault +// core gets a separate Consul backend instance since that contains state +// related to lock sessions. 
The whole test framework doesn't have a concept of +// "cluster names" outside of the prefix attached to the logger, and other +// backend factories (mostly via SharedPhysicalFactory) currently implicitly rely +// on being called in a sequence of core 0, 1, 2, ... on one cluster and then +// core 0, 1, 2, ... on the next, and so on. Refactoring lots of things to make +// first-class cluster identifiers a thing seems like a heavy lift given that we +// already rely on the sequence of calls everywhere else anyway, so we do the same +// here: each time the Backend method is called with coreIdx == 0 we create a +// whole new Consul and assume subsequent non-0 index cores are in the same +// cluster. +type consulContainerManager struct { + mu sync.Mutex + current *consulContainerBackendFactory +} + +func (m *consulContainerManager) Backend(t testing.TB, coreIdx int, + logger hclog.Logger, conf map[string]interface{}, +) *vault.PhysicalBackendBundle { + m.mu.Lock() + if coreIdx == 0 || m.current == nil { + // Create a new consul container factory + m.current = &consulContainerBackendFactory{} + } + f := m.current + m.mu.Unlock() + + return f.Backend(t, coreIdx, logger, conf) +} + +type consulContainerBackendFactory struct { + mu sync.Mutex + refCount int + cleanupFn func() + config map[string]string +} + +func (f *consulContainerBackendFactory) Backend(t testing.TB, coreIdx int, + logger hclog.Logger, conf map[string]interface{}, +) *vault.PhysicalBackendBundle { + f.mu.Lock() + defer f.mu.Unlock() + + if f.refCount == 0 { + f.startContainerLocked(t) + logger.Debug("started consul container", "clusterID", conf["cluster_id"], + "address", f.config["address"]) + } + + f.refCount++ + consulBackend, err := physConsul.NewConsulBackend(f.config, logger.Named("consul")) + if err != nil { + t.Fatal(err) + } + return &vault.PhysicalBackendBundle{ + Backend: consulBackend, + Cleanup: f.cleanup, + } +} + +func (f *consulContainerBackendFactory) startContainerLocked(t testing.TB) { + cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true) + f.config = map[string]string{ + "address": config.Address(), + "token": config.Token, + "max_parallel": "32", + } + f.cleanupFn = cleanup +} + +func (f *consulContainerBackendFactory) cleanup() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.refCount < 1 || f.cleanupFn == nil { + return + } + f.refCount-- + if f.refCount == 0 { + f.cleanupFn() + f.cleanupFn = nil + } } diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go index a45208648862..4a39e8f98eb6 100644 --- a/helper/testhelpers/teststorage/teststorage.go +++ b/helper/testhelpers/teststorage/teststorage.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc.
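A sketch of how the refcounted factory above is consumed: ConsulBackendSetup installs consulContainerManager.Backend as the PhysicalFactory, so each cluster gets one Consul container shared by its cores and torn down by the last core's cleanup (hypothetical test; only the functions and option fields shown in this diff are assumed):

```go
package example

import (
	"testing"

	consulstorage "github.com/hashicorp/vault/helper/testhelpers/teststorage/consul"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/vault"
)

func TestWithConsulStorage(t *testing.T) {
	conf := &vault.CoreConfig{}
	opts := &vault.TestClusterOptions{
		NumCores:    3,
		HandlerFunc: vaulthttp.Handler,
	}

	// Wires up a fresh Consul container per cluster: core 0 starts it,
	// cores 1..n reuse it via the refcounted factory.
	consulstorage.ConsulBackendSetup(conf, opts)

	cluster := vault.NewTestCluster(t, conf, opts)
	defer cluster.Cleanup()
}
```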
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package teststorage import ( + "context" "fmt" "io/ioutil" + "math/rand" "os" + "testing" "time" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/audit" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/namespace" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" physFile "github.com/hashicorp/vault/sdk/physical/file" "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/go-testing-interface" ) -func MakeInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { +func MakeInmemBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle { inm, err := inmem.NewTransactionalInmem(nil, logger) if err != nil { t.Fatal(err) @@ -36,7 +44,18 @@ func MakeInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBu } } -func MakeInmemNonTransactionalBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { +func MakeLatentInmemBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + jitter := r.Intn(20) + latency := time.Duration(r.Intn(15)) * time.Millisecond + + pbb := MakeInmemBackend(t, logger) + latencyInjector := physical.NewTransactionalLatencyInjector(pbb.Backend, latency, jitter, logger) + pbb.Backend = latencyInjector + return pbb +} + +func MakeInmemNonTransactionalBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle { inm, err := inmem.NewInmem(nil, logger) if err != nil { t.Fatal(err) @@ -52,7 +71,7 @@ func MakeInmemNonTransactionalBackend(t testing.T, logger hclog.Logger) *vault.P } } -func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { +func MakeFileBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle { path, err := ioutil.TempDir("", "vault-integ-file-") if err != nil { t.Fatal(err) @@ -82,7 +101,7 @@ func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBun } } -func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}) *vault.PhysicalBackendBundle { +func MakeRaftBackend(t testing.TB, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) raftDir, err := ioutil.TempDir("", "vault-raft-") if err != nil { @@ -95,10 +114,25 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma logger.Info("raft dir", "dir", raftDir) + backend, err := makeRaftBackend(logger, nodeID, raftDir, extraConf, bridge) + if err != nil { + cleanupFunc() + t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: backend, + Cleanup: cleanupFunc, + } +} + +func makeRaftBackend(logger hclog.Logger, nodeID, raftDir string, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) (physical.Backend, error) { conf := map[string]string{ - "path": raftDir, - "node_id": nodeID, - "performance_multiplier": "8", + "path": raftDir, + "node_id": nodeID, + 
"performance_multiplier": "8", + "autopilot_reconcile_interval": "300ms", + "autopilot_update_interval": "100ms", } for k, v := range extraConf { val, ok := v.(string) @@ -109,21 +143,20 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma backend, err := raft.NewRaftBackend(conf, logger.Named("raft")) if err != nil { - cleanupFunc() - t.Fatal(err) + return nil, err } - - return &vault.PhysicalBackendBundle{ - Backend: backend, - Cleanup: cleanupFunc, + if bridge != nil { + backend.(*raft.RaftBackend).SetServerAddressProvider(bridge) } + + return backend, nil } // RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend // and the physical.Backend provided in PhysicalBackendBundler as the storage // backend. -func RaftHAFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - return func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { +func RaftHAFactory(f PhysicalBackendBundler) func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { // Call the factory func to create the storage backend physFactory := SharedPhysicalFactory(f) bundle := physFactory(t, coreIdx, logger, nil) @@ -168,10 +201,10 @@ func RaftHAFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logg } } -type PhysicalBackendBundler func(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle +type PhysicalBackendBundler func(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle -func SharedPhysicalFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - return func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { +func SharedPhysicalFactory(f PhysicalBackendBundler) func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { if coreIdx == 0 { return f(t, logger) } @@ -185,6 +218,10 @@ func InmemBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemBackend) } +func InmemLatentBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = SharedPhysicalFactory(MakeLatentInmemBackend) +} + func InmemNonTransactionalBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemNonTransactionalBackend) } @@ -193,19 +230,55 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeFileBackend) } +func RaftClusterJoinNodes(t testing.TB, cluster *vault.TestCluster) { + leader := cluster.Cores[0] + + leaderInfos := []*raft.LeaderJoinInfo{ + { + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig(), + }, + } + + // Join followers + for i := 1; i < len(cluster.Cores); i++ { + core := cluster.Cores[i] + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) + if err != nil { + t.Fatal(err) + } + + cluster.UnsealCore(t, core) 
+ } +} + func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.KeepStandbysSealed = true - opts.PhysicalFactory = MakeRaftBackend - opts.SetupFunc = func(t testing.T, c *vault.TestCluster) { + var bridge *raft.ClusterAddrBridge + opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + // The same PhysicalFactory can be shared across multiple clusters. + // The coreIdx == 0 check ensures that each time a new cluster is setup, + // when setting up its first node we create a new ClusterAddrBridge. + if !opts.InmemClusterLayers && opts.ClusterLayers == nil && coreIdx == 0 { + bridge = raft.NewClusterAddrBridge() + } + bundle := MakeRaftBackend(t, coreIdx, logger, conf, bridge) + bundle.MutateCoreConfig = func(conf *vault.CoreConfig) { + logger.Trace("setting bridge", "idx", coreIdx, "bridge", fmt.Sprintf("%p", bridge)) + conf.ClusterAddrBridge = bridge + } + return bundle + } + opts.SetupFunc = func(t testing.TB, c *vault.TestCluster) { if opts.NumCores != 1 { - testhelpers.RaftClusterJoinNodes(t, c) + RaftClusterJoinNodes(t, c) time.Sleep(15 * time.Second) } } } func RaftHASetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, bundler PhysicalBackendBundler) { - opts.KeepStandbysSealed = true + opts.InmemClusterLayers = true opts.PhysicalFactory = RaftHAFactory(bundler) } @@ -217,6 +290,9 @@ func ClusterSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup } localOpts := vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{}, + }, } if opts != nil { localOpts = *opts @@ -225,5 +301,28 @@ func ClusterSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup setup = InmemBackendSetup } setup(&localConf, &localOpts) + if localConf.CredentialBackends == nil { + localConf.CredentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } + } + if localConf.LogicalBackends == nil { + localConf.LogicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + } + } + if localConf.AuditBackends == nil { + localConf.AuditBackends = map[string]audit.Factory{ + "file": audit.NewFileBackend, + "socket": audit.NewSocketBackend, + "syslog": audit.NewSyslogBackend, + "noop": audit.NoopAuditFactory(nil), + } + } + return &localConf, &localOpts } diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index ff9fd2b55668..db18dfb8c293 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
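For orientation, a hedged sketch of how a test consumes the setup helpers above (the test itself is hypothetical; ClusterSetup returns the mutated config/options pair, and RaftBackendSetup installs the PhysicalFactory and SetupFunc shown in this file):

	package example

	import (
		"testing"

		"github.com/hashicorp/vault/helper/testhelpers/teststorage"
		vaulthttp "github.com/hashicorp/vault/http"
		"github.com/hashicorp/vault/vault"
	)

	func TestWithRaftBackend(t *testing.T) {
		// Passing non-nil opts replaces ClusterSetup's defaults, so set
		// HandlerFunc explicitly.
		conf, opts := teststorage.ClusterSetup(nil, &vault.TestClusterOptions{
			HandlerFunc: vaulthttp.Handler,
			NumCores:    3,
		}, teststorage.RaftBackendSetup)
		cluster := vault.NewTestCluster(t, conf, opts)
		cluster.Start()
		defer cluster.Cleanup()

		// RaftBackendSetup's SetupFunc joins and unseals the followers, so by
		// now all three cores form one raft cluster behind the leader's client.
		client := cluster.Cores[0].Client
		_ = client // ... exercise the cluster here ...
	}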
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package teststorage @@ -7,13 +7,12 @@ import ( "fmt" "io/ioutil" "os" + "testing" hclog "github.com/hashicorp/go-hclog" - raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/go-testing-interface" ) // ReusableStorage is a physical backend that can be re-used across @@ -30,7 +29,7 @@ type ReusableStorage struct { // Cleanup should be called after a TestCluster is no longer // needed -- generally in a defer, just before the call to // cluster.Cleanup(). - Cleanup func(t testing.T, cluster *vault.TestCluster) + Cleanup func(t testing.TB, cluster *vault.TestCluster) } // StorageCleanup is a function that should be called once -- at the very end @@ -40,12 +39,12 @@ type StorageCleanup func() // MakeReusableStorage makes a physical backend that can be re-used across // multiple test clusters in sequence. -func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { +func MakeReusableStorage(t testing.TB, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { storage := ReusableStorage{ IsRaft: false, Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { - opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { if coreIdx == 0 { // We intentionally do not clone the backend's Cleanup func, // because we don't want it to be run until the entire test has @@ -60,7 +59,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica }, // No-op - Cleanup: func(t testing.T, cluster *vault.TestCluster) {}, + Cleanup: func(t testing.TB, cluster *vault.TestCluster) {}, } cleanup := func() { @@ -74,7 +73,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica // MakeReusableRaftStorage makes a physical raft backend that can be re-used // across multiple test clusters in sequence. -func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) { +func MakeReusableRaftStorage(t testing.TB, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) { raftDirs := make([]string, numCores) for i := 0; i < numCores; i++ { raftDirs[i] = makeRaftDir(t) @@ -86,13 +85,13 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, add Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { conf.DisablePerformanceStandby = true opts.KeepStandbysSealed = true - opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider, false) + opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], false) } }, // Close open files being used by raft. 
- Cleanup: func(t testing.T, cluster *vault.TestCluster) { + Cleanup: func(t testing.TB, cluster *vault.TestCluster) { for i := 0; i < len(cluster.Cores); i++ { CloseRaftStorage(t, cluster, i) } @@ -109,14 +108,14 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, add } // CloseRaftStorage closes open files being used by raft. -func CloseRaftStorage(t testing.T, cluster *vault.TestCluster, idx int) { +func CloseRaftStorage(t testing.TB, cluster *vault.TestCluster, idx int) { raftStorage := cluster.Cores[idx].UnderlyingRawStorage.(*raft.RaftBackend) if err := raftStorage.Close(); err != nil { t.Fatal(err) } } -func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { +func MakeReusableRaftHAStorage(t testing.TB, logger hclog.Logger, numCores int, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { raftDirs := make([]string, numCores) for i := 0; i < numCores; i++ { raftDirs[i] = makeRaftDir(t) @@ -124,9 +123,10 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b storage := ReusableStorage{ Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.InmemClusterLayers = true opts.KeepStandbysSealed = true - opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], nil, true) + opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], true) return &vault.PhysicalBackendBundle{ Backend: bundle.Backend, @@ -136,7 +136,7 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b }, // Close open files being used by raft. 
- Cleanup: func(t testing.T, cluster *vault.TestCluster) { + Cleanup: func(t testing.TB, cluster *vault.TestCluster) { for _, core := range cluster.Cores { raftStorage := core.UnderlyingHAStorage.(*raft.RaftBackend) if err := raftStorage.Close(); err != nil { @@ -159,7 +159,7 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b return storage, cleanup } -func makeRaftDir(t testing.T) string { +func makeRaftDir(t testing.TB) string { raftDir, err := ioutil.TempDir("", "vault-raft-") if err != nil { t.Fatal(err) @@ -168,25 +168,13 @@ func makeRaftDir(t testing.T) string { return raftDir } -func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle { +func makeReusableRaftBackend(t testing.TB, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) - conf := map[string]string{ - "path": raftDir, - "node_id": nodeID, - "performance_multiplier": "8", - "autopilot_reconcile_interval": "300ms", - "autopilot_update_interval": "100ms", - } - - backend, err := raft.NewRaftBackend(conf, logger) + backend, err := makeRaftBackend(logger, nodeID, raftDir, nil, nil) if err != nil { t.Fatal(err) } - if addressProvider != nil { - backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - } - bundle := new(vault.PhysicalBackendBundle) if ha { diff --git a/helper/timeutil/timeutil.go b/helper/timeutil/timeutil.go index 89daab7d4024..30b894d9cca9 100644 --- a/helper/timeutil/timeutil.go +++ b/helper/timeutil/timeutil.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package timeutil @@ -17,6 +17,20 @@ func StartOfPreviousMonth(t time.Time) time.Time { return time.Date(year, month, 1, 0, 0, 0, 0, t.Location()).AddDate(0, -1, 0) } +func StartOfDay(t time.Time) time.Time { + year, month, day := t.Date() + return time.Date(year, month, day, 0, 0, 0, 0, t.Location()) +} + +// IsCurrentDay checks if :t: is in the current day, as defined by :compare: +// generally, pass in time.Now().UTC() as :compare: +func IsCurrentDay(t, compare time.Time) bool { + thisDayStart := StartOfDay(compare) + queryDayStart := StartOfDay(t) + + return queryDayStart.Equal(thisDayStart) +} + func StartOfMonth(t time.Time) time.Time { year, month, _ := t.Date() return time.Date(year, month, 1, 0, 0, 0, 0, t.Location()) @@ -62,8 +76,8 @@ func IsCurrentMonth(t, compare time.Time) bool { return queryMonthStart.Equal(thisMonthStart) } -// GetMostRecentContinuousMonths finds the start time of the most -// recent set of continguous months. +// GetMostRecentContiguousMonths finds the start time of the most +// recent set of continuous months. // // For example, if the most recent start time is Aug 15, then that range is just 1 month // If the recent start times are Aug 1 and July 1 and June 15, then that range is @@ -142,3 +156,36 @@ func SkipAtEndOfMonth(t *testing.T) { t.Skip("too close to end of month") } } + +// This interface allows unit tests to substitute in a simulated Clock. 
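+// For example (an illustrative sketch; fakeClock is hypothetical and would
+// live in a test, not in this package), a unit test can pin "now" while
+// leaving tickers and timers real:
+//
+//	type fakeClock struct{ fixed time.Time }
+//
+//	func (f fakeClock) Now() time.Time                         { return f.fixed }
+//	func (f fakeClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }
+//	func (f fakeClock) NewTimer(d time.Duration) *time.Timer   { return time.NewTimer(d) }
+//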
+type Clock interface {
+ Now() time.Time
+ NewTicker(time.Duration) *time.Ticker
+ NewTimer(time.Duration) *time.Timer
+}
+
+type DefaultClock struct{}
+
+var _ Clock = (*DefaultClock)(nil)
+
+func (_ DefaultClock) Now() time.Time {
+ return time.Now()
+}
+
+func (_ DefaultClock) NewTicker(d time.Duration) *time.Ticker {
+ return time.NewTicker(d)
+}
+
+func (_ DefaultClock) NewTimer(d time.Duration) *time.Timer {
+ return time.NewTimer(d)
+}
+
+// NormalizeToYear returns date normalized to the latest date
+// within one year of normal. Assumes the date argument is
+// some date before normal.
+func NormalizeToYear(date, normal time.Time) time.Time {
+ for date.AddDate(1, 0, 0).Compare(normal) <= 0 {
+ date = date.AddDate(1, 0, 0)
+ }
+ return date
+}
diff --git a/helper/timeutil/timeutil_test.go b/helper/timeutil/timeutil_test.go
index b9ccdbd5ba22..7692fd280d66 100644
--- a/helper/timeutil/timeutil_test.go
+++ b/helper/timeutil/timeutil_test.go
@@ -1,5 +1,5 @@
// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
package timeutil
@@ -7,6 +7,8 @@ import (
"reflect"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
func TestTimeutil_StartOfPreviousMonth(t *testing.T) {
@@ -223,6 +225,47 @@ func TestTimeutil_IsCurrentMonth(t *testing.T) {
}
}
+// TestTimeutil_IsCurrentDay checks whether each test time falls on the current day.
+func TestTimeutil_IsCurrentDay(t *testing.T) {
+ now := time.Now()
+ testCases := []struct {
+ input time.Time
+ expected bool
+ }{
+ {
+ input: now,
+ expected: true,
+ },
+ {
+ input: StartOfDay(now).AddDate(0, 0, -1),
+ expected: false,
+ },
+ {
+ input: StartOfDay(now).AddDate(-1, 0, 0),
+ expected: false,
+ },
+ {
+ input: StartOfDay(now).Add(1 * time.Second),
+ expected: true,
+ },
+ {
+ input: StartOfDay(now).Add(-1 * time.Second),
+ expected: false,
+ },
+ {
+ input: StartOfDay(now).Add(86400), // Add's units are nanoseconds, so this is only 86.4µs into the day (a full day would be 86400 * time.Second)
+ expected: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ result := IsCurrentDay(tc.input, now)
+ if result != tc.expected {
+ t.Errorf("invalid result. expected %t for %v", tc.expected, tc.input)
+ }
+ }
+}
+
func TestTimeUtil_ContiguousMonths(t *testing.T) {
testCases := []struct {
input []time.Time
@@ -326,3 +369,94 @@ func TestTimeUtil_ParseTimeFromPath(t *testing.T) {
}
}
}
+
+// TestTimeUtil_NormalizeToYear exercises NormalizeToYear, which returns the
+// input date normalized to the latest matching date within one year of the
+// normal date.
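+// The leap-year rows in the table below follow from Go's AddDate
+// normalization: 2025 is not a leap year, so Feb 29, 2024 plus one year
+// normalizes to Mar 1, 2025. An illustrative snippet (hedged;
+// time.Time.Compare, which NormalizeToYear uses, requires Go 1.20+):
+//
+//	d := time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC)
+//	n := time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC)
+//	fmt.Println(NormalizeToYear(d, n)) // 2025-03-01 00:00:00 +0000 UTC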
+func TestTimeUtil_NormalizeToYear(t *testing.T) { + testCases := []struct { + inputDate time.Time + normalDate time.Time + expectedNormalizedDate time.Time + }{ + { + inputDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 10, 1, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2025, 9, 29, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2025, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2025, 10, 1, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2025, 9, 29, 0, 0, 0, 0, time.UTC), + }, + // inputDate more than 2 years prior to normal date + { + inputDate: time.Date(2022, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 6, 15, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2023, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2022, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 9, 28, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2023, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2022, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2022, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 9, 30, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2020, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 12, 1, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2024, 9, 29, 0, 0, 0, 0, time.UTC), + }, + // leap year test cases + { + inputDate: time.Date(2020, 9, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2024, 9, 28, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2023, 9, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2025, 2, 28, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2025, 3, 2, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2028, 2, 28, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2027, 3, 1, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2028, 2, 29, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2027, 3, 1, 0, 0, 0, 0, time.UTC), + }, + { + inputDate: time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC), + normalDate: time.Date(2028, 3, 1, 0, 0, 0, 0, time.UTC), + expectedNormalizedDate: time.Date(2028, 3, 1, 0, 0, 0, 0, time.UTC), + }, + } + for _, tc := range testCases { + normalizedDate := NormalizeToYear(tc.inputDate, tc.normalDate) + require.Equal(t, tc.expectedNormalizedDate, normalizedDate) + } +} diff --git a/helper/useragent/useragent.go b/helper/useragent/useragent.go index 30852772177c..0becfe9e9fd6 100644 --- 
a/helper/useragent/useragent.go
+++ b/helper/useragent/useragent.go
@@ -1,5 +1,5 @@
// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
package useragent
@@ -32,3 +32,79 @@ func String() string {
return fmt.Sprintf("Vault/%s (+%s; %s)",
versionFunc(), projectURL, rt)
}
+
+// AgentString returns the consistent user-agent string for Vault Agent.
+//
+// e.g. Vault Agent/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentString() string {
+ return fmt.Sprintf("Vault Agent/%s (+%s; %s)",
+ versionFunc(), projectURL, rt)
+}
+
+// AgentTemplatingString returns the consistent user-agent string for Vault Agent Templating.
+//
+// e.g. Vault Agent Templating/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentTemplatingString() string {
+ return fmt.Sprintf("Vault Agent Templating/%s (+%s; %s)",
+ versionFunc(), projectURL, rt)
+}
+
+// AgentProxyString returns the consistent user-agent string for Vault Agent API Proxying.
+//
+// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentProxyString() string {
+ return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s)",
+ versionFunc(), projectURL, rt)
+}
+
+// AgentProxyStringWithProxiedUserAgent returns the consistent user-agent
+// string for Vault Agent API Proxying, keeping the User-Agent of the proxied
+// client as an extension to this UserAgent.
+//
+// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent
+func AgentProxyStringWithProxiedUserAgent(proxiedUserAgent string) string {
+ return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s); %s",
+ versionFunc(), projectURL, rt, proxiedUserAgent)
+}
+
+// AgentAutoAuthString returns the consistent user-agent string for Vault Agent Auto-Auth.
+//
+// e.g. Vault Agent Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentAutoAuthString() string {
+ return fmt.Sprintf("Vault Agent Auto-Auth/%s (+%s; %s)",
+ versionFunc(), projectURL, rt)
+}
+
+// ProxyString returns the consistent user-agent string for Vault Proxy.
+//
+// e.g. Vault Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyString() string {
+ return fmt.Sprintf("Vault Proxy/%s (+%s; %s)",
+ versionFunc(), projectURL, rt)
+}
+
+// ProxyAPIProxyString returns the consistent user-agent string for Vault Proxy API Proxying.
+//
+// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyAPIProxyString() string {
+ return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s)",
+ versionFunc(), projectURL, rt)
+}
+
+// ProxyStringWithProxiedUserAgent returns the consistent user-agent
+// string for Vault Proxy API Proxying, keeping the User-Agent of the proxied
+// client as an extension to this UserAgent.
+//
+// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent
+func ProxyStringWithProxiedUserAgent(proxiedUserAgent string) string {
+ return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s); %s",
+ versionFunc(), projectURL, rt, proxiedUserAgent)
+}
+
+// ProxyAutoAuthString returns the consistent user-agent string for Vault Proxy Auto-Auth.
+//
+// e.g.
Vault Proxy Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1) +func ProxyAutoAuthString() string { + return fmt.Sprintf("Vault Proxy Auto-Auth/%s (+%s; %s)", + versionFunc(), projectURL, rt) +} diff --git a/helper/useragent/useragent_test.go b/helper/useragent/useragent_test.go index 795a7ba60101..f58363a8e913 100644 --- a/helper/useragent/useragent_test.go +++ b/helper/useragent/useragent_test.go @@ -1,10 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package useragent import ( "testing" + + "github.com/stretchr/testify/require" ) func TestUserAgent(t *testing.T) { @@ -15,7 +17,124 @@ func TestUserAgent(t *testing.T) { act := String() exp := "Vault/1.2.3 (+https://vault-test.com; go5.0)" - if exp != act { - t.Errorf("expected %q to be %q", act, exp) - } + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgent tests the AgentString() function works +// as expected +func TestUserAgent_VaultAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentString() + + exp := "Vault Agent/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentTemplating tests the AgentTemplatingString() function works +// as expected +func TestUserAgent_VaultAgentTemplating(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentTemplatingString() + + exp := "Vault Agent Templating/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentProxy tests the AgentProxyString() function works +// as expected +func TestUserAgent_VaultAgentProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentProxyString() + + exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentProxyWithProxiedUserAgent tests the AgentProxyStringWithProxiedUserAgent() +// function works as expected +func TestUserAgent_VaultAgentProxyWithProxiedUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + userAgent := "my-user-agent" + + act := AgentProxyStringWithProxiedUserAgent(userAgent) + + exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentAutoAuth tests the AgentAutoAuthString() function works +// as expected +func TestUserAgent_VaultAgentAutoAuth(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentAutoAuthString() + + exp := "Vault Agent Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxy tests the ProxyString() function works +// as expected +func TestUserAgent_VaultProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := ProxyString() + + exp := "Vault Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyAPIProxy tests the ProxyAPIProxyString() function works +// as expected +func TestUserAgent_VaultProxyAPIProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string 
{ return "1.2.3" } + + act := ProxyAPIProxyString() + + exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyWithProxiedUserAgent tests the ProxyStringWithProxiedUserAgent() +// function works as expected +func TestUserAgent_VaultProxyWithProxiedUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + userAgent := "my-user-agent" + + act := ProxyStringWithProxiedUserAgent(userAgent) + + exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyAutoAuth tests the ProxyAPIProxyString() function works +// as expected +func TestUserAgent_VaultProxyAutoAuth(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := ProxyAutoAuthString() + + exp := "Vault Proxy Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) } diff --git a/helper/versions/version.go b/helper/versions/version.go index 9eb8077b8923..590e25ec0ada 100644 --- a/helper/versions/version.go +++ b/helper/versions/version.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package versions diff --git a/helper/versions/version_test.go b/helper/versions/version_test.go index 85b46cdd3fbf..c6d31f4dc00e 100644 --- a/helper/versions/version_test.go +++ b/helper/versions/version_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package versions diff --git a/http/assets.go b/http/assets.go index b60a594942c7..f1f080c27284 100644 --- a/http/assets.go +++ b/http/assets.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build ui diff --git a/http/assets_stub.go b/http/assets_stub.go index e1b4daf3991e..de29ee972100 100644 --- a/http/assets_stub.go +++ b/http/assets_stub.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !ui diff --git a/http/auth_token_test.go b/http/auth_token_test.go index d96e18383ac7..92bf67e00fa9 100644 --- a/http/auth_token_test.go +++ b/http/auth_token_test.go @@ -1,14 +1,45 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
package http
import (
"strings"
"testing"
+ "time"
+ "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/sdk/helper/logging"
+ "github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ rootLeasePolicies = `
+path "sys/internal/ui/*" {
+capabilities = ["create", "read", "update", "delete", "list"]
+}
+
+path "auth/token/*" {
+capabilities = ["create", "update", "read", "list"]
+}
+
+path "kv/foo*" {
+ capabilities = ["create", "read", "update", "delete", "list"]
+}
+`
+
+ dummy = `
+path "/ns1/sys/leases/*" {
+ capabilities = ["sudo", "create", "read", "update", "delete", "list"]
+}
+
+path "/ns1/auth/token/*" {
+ capabilities = ["sudo", "create", "read", "update", "delete", "list"]
+}
+`
)
func TestAuthTokenCreate(t *testing.T) {
@@ -207,3 +238,106 @@ func TestAuthTokenRenew(t *testing.T) {
t.Error("expected lease to be renewable")
}
}
+
+// TestToken_InvalidTokenError checks that an InvalidToken error is only returned
+// for tokens that have (1) exceeded the token TTL and (2) exceeded the number of uses
+func TestToken_InvalidTokenError(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: logging.NewVaultLogger(hclog.Trace),
+ }
+
+ // Init new test cluster
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ cores := cluster.Cores
+ vault.TestWaitActive(t, cores[0].Core)
+
+ client := cores[0].Client
+
+ // Add policy
+ if err := client.Sys().PutPolicy("root-lease-policy", rootLeasePolicies); err != nil {
+ t.Fatal(err)
+ }
+ // Add a dummy policy
+ if err := client.Sys().PutPolicy("dummy", dummy); err != nil {
+ t.Fatal(err)
+ }
+
+ rootToken := client.Token()
+
+ // Enable kv secrets and mount initial secrets
+ err := client.Sys().Mount("kv", &api.MountInput{Type: "kv"})
+ require.NoError(t, err)
+
+ writeSecretsToMount(t, client, "kv/foo", map[string]interface{}{
+ "user": "admin",
+ "password": "password",
+ })
+
+ // Create a token that has a TTL of 5s
+ tokenCreateRequest := &api.TokenCreateRequest{
+ Policies: []string{"root-lease-policy"},
+ TTL: "5s",
+ }
+ secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest)
+ token := secret.Auth.ClientToken
+ client.SetToken(token)
+
+ // Verify that token works to read from kv mount
+ _, err = client.Logical().Read("kv/foo")
+ require.NoError(t, err)
+
+ time.Sleep(time.Second * 5)
+
+ // Verify that token is expired and shows an "invalid token" error
+ _, err = client.Logical().Read("kv/foo")
+ require.ErrorContains(t, err, logical.ErrInvalidToken.Error())
+ require.ErrorContains(t, err, logical.ErrPermissionDenied.Error())
+
+ // Create a second token with a token use limit
+ client.SetToken(rootToken)
+ tokenCreateRequest = &api.TokenCreateRequest{
+ Policies: []string{"root-lease-policy"},
+ NumUses: 5,
+ }
+
+ secret, err = client.Auth().Token().CreateOrphan(tokenCreateRequest)
+ token = secret.Auth.ClientToken
+ client.SetToken(token)
+
+ for i := 0; i < 5; i++ {
+ _, err = client.Logical().Read("kv/foo")
+ require.NoError(t, err)
+ }
+ // Verify that the number of uses is exceeded so the "invalid token" error is displayed
+ _, err = client.Logical().Read("kv/foo")
+ require.ErrorContains(t, err, logical.ErrInvalidToken.Error())
+ require.ErrorContains(t, err, logical.ErrPermissionDenied.Error())
+
+ // Create a third token that will have incorrect policy access for the subsequent request
+ client.SetToken(rootToken)
+ tokenCreateRequest = &api.TokenCreateRequest{
+ Policies: []string{"dummy"},
+ }
+
+ secret, err = client.Auth().Token().CreateOrphan(tokenCreateRequest)
+ token = secret.Auth.ClientToken
+ client.SetToken(token)
+
+ // Incorrect policy access should only return an ErrPermissionDenied error
+ _, err = client.Logical().Read("kv/foo")
+ require.ErrorContains(t, err, logical.ErrPermissionDenied.Error())
+ require.NotContains(t, err.Error(), logical.ErrInvalidToken.Error())
+}
+
+func writeSecretsToMount(t *testing.T, client *api.Client, mountPath string, data map[string]interface{}) {
+ _, err := client.Logical().Write(mountPath, data)
+ require.NoError(t, err)
+}
diff --git a/http/cors.go b/http/cors.go
index 7e8c311e624f..2689a007dbec 100644
--- a/http/cors.go
+++ b/http/cors.go
@@ -1,5 +1,5 @@
// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
package http
@@ -18,6 +18,7 @@ var allowedMethods = []string{
http.MethodOptions,
http.MethodPost,
http.MethodPut,
+ http.MethodPatch,
"LIST", // LIST is not an official HTTP method, but Vault supports it.
}
diff --git a/http/custom_header_test.go b/http/custom_header_test.go
index 289379a84cb6..36227cc781dc 100644
--- a/http/custom_header_test.go
+++ b/http/custom_header_test.go
@@ -1,5 +1,5 @@
// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
package http
diff --git a/http/events.go b/http/events.go
deleted file mode 100644
index 072fcd60ea04..000000000000
--- a/http/events.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package http
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/sdk/logical"
- "github.com/hashicorp/vault/vault"
- "github.com/hashicorp/vault/vault/eventbus"
- "nhooyr.io/websocket"
-)
-
-type eventSubscribeArgs struct {
- ctx context.Context
- logger hclog.Logger
- events *eventbus.EventBus
- ns *namespace.Namespace
- pattern string
- conn *websocket.Conn
- json bool
-}
-
-// handleEventsSubscribeWebsocket runs forever, returning a websocket error code and reason
-// only if the connection closes or there was an error.
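A short aside on TestToken_InvalidTokenError above: both failure modes hinge on two api.TokenCreateRequest fields and can be reproduced with nothing but the API client. A hedged snippet (client is an *api.Client as in the test; error handling elided):

	// Expires 5 seconds after creation, regardless of use.
	ttlLimited, _ := client.Auth().Token().CreateOrphan(&api.TokenCreateRequest{
		Policies: []string{"root-lease-policy"},
		TTL:      "5s",
	})
	// Torn down automatically once it has authenticated 5 requests.
	useLimited, _ := client.Auth().Token().CreateOrphan(&api.TokenCreateRequest{
		Policies: []string{"root-lease-policy"},
		NumUses:  5,
	})
	_, _ = ttlLimited, useLimited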
-func handleEventsSubscribeWebsocket(args eventSubscribeArgs) (websocket.StatusCode, string, error) { - ctx := args.ctx - logger := args.logger - ch, cancel, err := args.events.Subscribe(ctx, args.ns, args.pattern) - if err != nil { - logger.Info("Error subscribing", "error", err) - return websocket.StatusUnsupportedData, "Error subscribing", nil - } - defer cancel() - - for { - select { - case <-ctx.Done(): - logger.Info("Websocket context is done, closing the connection") - return websocket.StatusNormalClosure, "", nil - case message := <-ch: - logger.Debug("Sending message to websocket", "message", message.Payload) - var messageBytes []byte - var messageType websocket.MessageType - if args.json { - var ok bool - messageBytes, ok = message.Format("cloudevents-json") - if !ok { - logger.Warn("Could not get cloudevents JSON format") - return 0, "", errors.New("could not get cloudevents JSON format") - } - messageType = websocket.MessageText - } else { - messageBytes, err = proto.Marshal(message.Payload.(*logical.EventReceived)) - messageType = websocket.MessageBinary - } - if err != nil { - logger.Warn("Could not serialize websocket event", "error", err) - return 0, "", err - } - err = args.conn.Write(ctx, messageType, messageBytes) - if err != nil { - return 0, "", err - } - } - } -} - -func handleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logger := core.Logger().Named("events-subscribe") - logger.Debug("Got request to", "url", r.URL, "version", r.Proto) - - ctx := r.Context() - - // ACL check - _, _, err := core.CheckToken(ctx, req, false) - if err != nil { - if errors.Is(err, logical.ErrPermissionDenied) { - respondError(w, http.StatusForbidden, logical.ErrPermissionDenied) - return - } - logger.Debug("Error validating token", "error", err) - respondError(w, http.StatusInternalServerError, fmt.Errorf("error validating token")) - return - } - - ns, err := namespace.FromContext(ctx) - if err != nil { - logger.Info("Could not find namespace", "error", err) - respondError(w, http.StatusInternalServerError, fmt.Errorf("could not find namespace")) - return - } - - prefix := "/v1/sys/events/subscribe/" - if ns.ID != namespace.RootNamespaceID { - prefix = fmt.Sprintf("/v1/%ssys/events/subscribe/", ns.Path) - } - pattern := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, prefix)) - if pattern == "" { - respondError(w, http.StatusBadRequest, fmt.Errorf("did not specify eventType to subscribe to")) - return - } - - json := false - jsonRaw := r.URL.Query().Get("json") - if jsonRaw != "" { - var err error - json, err = strconv.ParseBool(jsonRaw) - if err != nil { - respondError(w, http.StatusBadRequest, fmt.Errorf("invalid parameter for JSON: %v", jsonRaw)) - return - } - } - - conn, err := websocket.Accept(w, r, nil) - if err != nil { - logger.Info("Could not accept as websocket", "error", err) - respondError(w, http.StatusInternalServerError, fmt.Errorf("could not accept as websocket")) - return - } - - // we don't expect any incoming messages - ctx = conn.CloseRead(ctx) - // start the pinger - go func() { - for { - time.Sleep(30 * time.Second) // not too aggressive, but keep the HTTP connection alive - err := conn.Ping(ctx) - if err != nil { - return - } - } - }() - - closeStatus, closeReason, err := handleEventsSubscribeWebsocket(eventSubscribeArgs{ctx, logger, core.Events(), ns, pattern, conn, json}) - if err != nil { - closeStatus = websocket.CloseStatus(err) - if closeStatus == -1 { - 
closeStatus = websocket.StatusInternalError - } - closeReason = fmt.Sprintf("Internal error: %v", err) - logger.Debug("Error from websocket handler", "error", err) - } - // Close() will panic if the reason is greater than this length - if len(closeReason) > 123 { - logger.Debug("Truncated close reason", "closeReason", closeReason) - closeReason = closeReason[:123] - } - err = conn.Close(closeStatus, closeReason) - if err != nil { - logger.Debug("Error closing websocket", "error", err) - } - }) -} diff --git a/http/events_stubs_oss.go b/http/events_stubs_oss.go new file mode 100644 index 000000000000..c1a4a673598f --- /dev/null +++ b/http/events_stubs_oss.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entHandleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler { + return nil +} diff --git a/http/events_test.go b/http/events_test.go deleted file mode 100644 index b5ce0a1a3581..000000000000 --- a/http/events_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "nhooyr.io/websocket" -) - -// TestEventsSubscribe tests the websocket endpoint for subscribing to events -// by generating some events. 
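A note on events_stubs_oss.go added above: it follows the repository's stubmaker pattern, in which a !enterprise build-tagged file supplies a no-op hook so shared code can call the hook unconditionally while the enterprise build links in the real handler. A generic hedged sketch of the pattern (package and names hypothetical):

	//go:build !enterprise

	package somepkg

	import "net/http"

	// entFeatureHandler returns nil in community-edition builds; callers treat
	// nil as "no enterprise handler" and fall back to default behavior.
	func entFeatureHandler() http.Handler { return nil }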
-func TestEventsSubscribe(t *testing.T) { - core := vault.TestCore(t) - ln, addr := TestServer(t, core) - defer ln.Close() - - // unseal the core - keys, token := vault.TestCoreInit(t, core) - for _, key := range keys { - _, err := core.Unseal(key) - if err != nil { - t.Fatal(err) - } - } - - stop := atomic.Bool{} - - const eventType = "abc" - - // send some events - go func() { - for !stop.Load() { - id, err := uuid.GenerateUUID() - if err != nil { - core.Logger().Info("Error generating UUID, exiting sender", "error", err) - } - pluginInfo := &logical.EventPluginInfo{ - MountPath: "secret", - } - err = core.Events().SendInternal(namespace.RootContext(context.Background()), namespace.RootNamespace, pluginInfo, logical.EventType(eventType), &logical.EventData{ - Id: id, - Metadata: nil, - EntityIds: nil, - Note: "testing", - }) - if err != nil { - core.Logger().Info("Error sending event, exiting sender", "error", err) - } - time.Sleep(100 * time.Millisecond) - } - }() - - t.Cleanup(func() { - stop.Store(true) - }) - - ctx := context.Background() - wsAddr := strings.Replace(addr, "http", "ws", 1) - - testCases := []struct { - json bool - }{{true}, {false}} - - for _, testCase := range testCases { - url := fmt.Sprintf("%s/v1/sys/events/subscribe/%s?json=%v", wsAddr, eventType, testCase.json) - conn, _, err := websocket.Dial(ctx, url, &websocket.DialOptions{ - HTTPHeader: http.Header{"x-vault-token": []string{token}}, - }) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - conn.Close(websocket.StatusNormalClosure, "") - }) - - _, msg, err := conn.Read(ctx) - if err != nil { - t.Fatal(err) - } - if testCase.json { - event := map[string]interface{}{} - err = json.Unmarshal(msg, &event) - if err != nil { - t.Fatal(err) - } - t.Log(string(msg)) - data := event["data"].(map[string]interface{}) - if actualType := data["event_type"].(string); actualType != eventType { - t.Fatalf("Expeced event type %s, got %s", eventType, actualType) - } - pluginInfo, ok := data["plugin_info"].(map[string]interface{}) - if !ok || pluginInfo == nil { - t.Fatalf("No plugin_info object: %v", data) - } - mountPath, ok := pluginInfo["mount_path"].(string) - if !ok || mountPath != "secret" { - t.Fatalf("Wrong mount_path: %v", data) - } - innerEvent := data["event"].(map[string]interface{}) - if innerEvent["id"].(string) != event["id"].(string) { - t.Fatalf("IDs don't match, expected %s, got %s", innerEvent["id"].(string), event["id"].(string)) - } - if innerEvent["note"].(string) != "testing" { - t.Fatalf("Expected 'testing', got %s", innerEvent["note"].(string)) - } - - checkRequiredCloudEventsFields(t, event) - } - } -} - -func checkRequiredCloudEventsFields(t *testing.T, event map[string]interface{}) { - t.Helper() - for _, attr := range []string{"id", "source", "specversion", "type"} { - if v, ok := event[attr]; !ok { - t.Errorf("Missing attribute %s", attr) - } else if str, ok := v.(string); !ok { - t.Errorf("Expected %s to be string but got %T", attr, v) - } else if str == "" { - t.Errorf("%s was empty string", attr) - } - } -} - -// TestEventsSubscribeAuth tests that unauthenticated and unauthorized subscriptions -// fail correctly. -func TestEventsSubscribeAuth(t *testing.T) { - core := vault.TestCore(t) - ln, addr := TestServer(t, core) - defer ln.Close() - - // unseal the core - keys, root := vault.TestCoreInit(t, core) - for _, key := range keys { - _, err := core.Unseal(key) - if err != nil { - t.Fatal(err) - } - } - - var nonPrivilegedToken string - // Fetch a valid non privileged token. 
- { - config := api.DefaultConfig() - config.Address = addr - - client, err := api.NewClient(config) - if err != nil { - t.Fatal(err) - } - client.SetToken(root) - - secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{Policies: []string{"default"}}) - if err != nil { - t.Fatal(err) - } - if secret.Auth.ClientToken == "" { - t.Fatal("Failed to fetch a non privileged token") - } - nonPrivilegedToken = secret.Auth.ClientToken - } - - ctx := context.Background() - wsAddr := strings.Replace(addr, "http", "ws", 1) - - // Get a 403 with no token. - _, resp, err := websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", nil) - if err == nil { - t.Error("Expected websocket error but got none") - } - if resp == nil || resp.StatusCode != http.StatusForbidden { - t.Errorf("Expected 403 but got %+v", resp) - } - - // Get a 403 with a non privileged token. - _, resp, err = websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", &websocket.DialOptions{ - HTTPHeader: http.Header{"x-vault-token": []string{nonPrivilegedToken}}, - }) - if err == nil { - t.Error("Expected websocket error but got none") - } - if resp == nil || resp.StatusCode != http.StatusForbidden { - t.Errorf("Expected 403 but got %+v", resp) - } -} diff --git a/http/forwarded_for_test.go b/http/forwarded_for_test.go index 89bc62acc265..ce6a5144706f 100644 --- a/http/forwarded_for_test.go +++ b/http/forwarded_for_test.go @@ -1,11 +1,13 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http import ( "bytes" + "encoding/base64" "net/http" + "net/url" "strings" "testing" @@ -255,4 +257,80 @@ func TestHandler_XForwardedFor(t *testing.T) { t.Fatalf("bad body: %s", buf.String()) } }) + + // Next: test an invalid certificate being sent + t.Run("reject_bad_cert_in_header", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(goodAddr) + listenerConfig.XForwardedForClientCertHeader = "X-Forwarded-Tls-Client-Cert" + listenerConfig.XForwardedForClientCertHeaderDecoders = "URL,BASE64" + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "5.6.7.8") + req.Headers.Set("x-forwarded-tls-client-cert", 
`BAD_TEXTMIIDtTCCAp2gAwIBAgIUf%2BjhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL%0ABQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw%0AMTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq%0AhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS%0ATRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn%2B1PswtivhKi%2BeLtgWkUF9cFYFGn%0ASgMld6ZWRhNheZhA6ZfQmeM%2FBF2pa5HK2SDF36ljgjL9T%2BnWrru2Uv0BCoHzLAmi%0AYYMiIWplidMmMO5NTRG3k%2B3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5%0AdonyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf%2FGLcUVG%0AB%2B5%2BAAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7%2BmCzz%2BanqiJfyr2nwIDAQABo4H1%0AMIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm%2B%2Be%0AHpyM3p708bgZJuRYEdX1o%2BUwHwYDVR0jBBgwFoAUncSzT%2F6HMexyuiU9%2F7EgHu%2Bo%0Ak5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x%0AOjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A%0AAAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br%0AaS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy%2BSgMIrwfs%0AX1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4%0AaYqNKFWrRaBRAaaYZ%2FO1ApRTOrXqRx9Eqr0H1BXLsoAq%2BmWassL8sf6siae%2BCpwA%0AKqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU%2BidkuqfV2h1BQKgSEhFDABjFdTCN%0AQDAHsEHsi2M4%2FjRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj%0AxqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc%2FJ9DIQM%2BXmk%3D`) + resp, err := client.RawRequest(req) + if err == nil { + t.Fatal("expected error") + } + defer resp.Body.Close() + buf := bytes.NewBuffer(nil) + buf.ReadFrom(resp.Body) + if !strings.Contains(buf.String(), "failed to base64 decode the client certificate: ") { + t.Fatalf("bad body: %v", buf.String()) + } + }) + + // Next: test a valid (unverified) certificate being sent + t.Run("pass_cert", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(base64.StdEncoding.EncodeToString(r.TLS.PeerCertificates[0].Raw))) + }) + listenerConfig := getListenerConfigForMarshalerTest(goodAddr) + listenerConfig.XForwardedForClientCertHeader = "X-Forwarded-Tls-Client-Cert" + listenerConfig.XForwardedForClientCertHeaderDecoders = "URL,BASE64" + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "5.6.7.8") + testcertificate := 
`MIIDtTCCAp2gAwIBAgIUf%2BjhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL%0ABQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw%0AMTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq%0AhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS%0ATRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn%2B1PswtivhKi%2BeLtgWkUF9cFYFGn%0ASgMld6ZWRhNheZhA6ZfQmeM%2FBF2pa5HK2SDF36ljgjL9T%2BnWrru2Uv0BCoHzLAmi%0AYYMiIWplidMmMO5NTRG3k%2B3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5%0AdonyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf%2FGLcUVG%0AB%2B5%2BAAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7%2BmCzz%2BanqiJfyr2nwIDAQABo4H1%0AMIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm%2B%2Be%0AHpyM3p708bgZJuRYEdX1o%2BUwHwYDVR0jBBgwFoAUncSzT%2F6HMexyuiU9%2F7EgHu%2Bo%0Ak5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x%0AOjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A%0AAAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br%0AaS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy%2BSgMIrwfs%0AX1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4%0AaYqNKFWrRaBRAaaYZ%2FO1ApRTOrXqRx9Eqr0H1BXLsoAq%2BmWassL8sf6siae%2BCpwA%0AKqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU%2BidkuqfV2h1BQKgSEhFDABjFdTCN%0AQDAHsEHsi2M4%2FjRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj%0AxqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc%2FJ9DIQM%2BXmk%3D` + req.Headers.Set("x-forwarded-tls-client-cert", testcertificate) + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + buf := bytes.NewBuffer(nil) + buf.ReadFrom(resp.Body) + testcertificate, _ = url.QueryUnescape(testcertificate) + if !strings.Contains(buf.String(), strings.ReplaceAll(testcertificate, "\n", "")) { + t.Fatalf("bad body: %v vs %v", buf.String(), testcertificate) + } + }) } diff --git a/http/forwarding_bench_test.go b/http/forwarding_bench_test.go index 0c3f5e2a286d..311dc0bc66e8 100644 --- a/http/forwarding_bench_test.go +++ b/http/forwarding_bench_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
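The "URL,BASE64" value for XForwardedForClientCertHeaderDecoders in the tests above names a decode pipeline: URL-unescape first, then base64-decode, then parse the resulting DER bytes. A standalone hedged sketch of that pipeline (decodeForwardedClientCert is hypothetical; the real logic lives behind WrapForwardedForHandler):

	import (
		"crypto/x509"
		"encoding/base64"
		"fmt"
		"net/url"
		"strings"
	)

	func decodeForwardedClientCert(headerValue string) (*x509.Certificate, error) {
		// URL decoder: reverse percent-escaping (%2B, %0A, ...).
		unescaped, err := url.QueryUnescape(headerValue)
		if err != nil {
			return nil, fmt.Errorf("failed to url decode the client certificate: %w", err)
		}
		// BASE64 decoder: the header carries the DER body without PEM armor;
		// drop the unescaped newlines before decoding.
		der, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(unescaped, "\n", ""))
		if err != nil {
			return nil, fmt.Errorf("failed to base64 decode the client certificate: %w", err)
		}
		return x509.ParseCertificate(der)
	}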
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -14,7 +14,6 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/builtin/logical/transit" - "github.com/hashicorp/vault/helper/benchhelpers" "github.com/hashicorp/vault/helper/forwarding" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -32,7 +31,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { }, } - cluster := vault.NewTestCluster(benchhelpers.TBtoT(b), coreConfig, &vault.TestClusterOptions{ + cluster := vault.NewTestCluster(b, coreConfig, &vault.TestClusterOptions{ HandlerFunc: Handler, Logger: logging.NewVaultLoggerWithWriter(ioutil.Discard, log.Error), }) @@ -42,7 +41,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { // make it easy to get access to the active core := cores[0].Core - vault.TestWaitActive(benchhelpers.TBtoT(b), core) + vault.TestWaitActive(b, core) handler := cores[0].Handler host := fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[0].Listeners[0].Address.Port) @@ -59,7 +58,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { } req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), - bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) + bytes.NewBufferString("{\"type\": \"transit\"}")) if err != nil { b.Fatal(err) } @@ -92,7 +91,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { numOps++ } - doReq(b, "POST", host+"keys/test1", bytes.NewBuffer([]byte("{}"))) + doReq(b, "POST", host+"keys/test1", bytes.NewBufferString("{}")) keyUrl := host + "encrypt/test1" reqBuf := []byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64)) diff --git a/http/forwarding_test.go b/http/forwarding_test.go index 51cc2c0e0181..89606b24f2b1 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
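The benchmark hunk above shows the payoff of this diff's recurring testing.T-to-testing.TB migration: with the standard library's testing.TB interface, *testing.T and *testing.B both satisfy the helper's parameter, so shims like benchhelpers.TBtoT become unnecessary. A minimal hedged sketch (hypothetical helper and tests):

	package example

	import "testing"

	// startCluster accepts testing.TB, so tests and benchmarks share it.
	func startCluster(tb testing.TB) {
		tb.Helper()
		tb.Log("shared by tests and benchmarks")
	}

	func TestCluster(t *testing.T) { startCluster(t) }

	func BenchmarkCluster(b *testing.B) {
		for i := 0; i < b.N; i++ {
			startCluster(b)
		}
	}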
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -17,8 +17,6 @@ import ( "testing" "time" - "golang.org/x/net/http2" - cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/api" credCert "github.com/hashicorp/vault/builtin/credential/cert" @@ -27,6 +25,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/keysutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" + "golang.org/x/net/http2" ) func TestHTTP_Fallback_Bad_Address(t *testing.T) { @@ -179,7 +178,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // core.Logger().Printf("[TRACE] mounting transit") req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), - bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) + bytes.NewBufferString("{\"type\": \"transit\"}")) if err != nil { t.Fatal(err) } @@ -272,7 +271,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) for _, chosenHost := range hosts { for _, chosenKey := range keys { // Try to write the key to make sure it exists - _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}"))) + _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBufferString("{}")) if err != nil { panic(err) } @@ -283,7 +282,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) chosenHost = hosts[id%len(hosts)] chosenKey = fmt.Sprintf("key-%t-%d", parallel, id) - _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}"))) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBufferString("{}")) if err != nil { panic(err) } @@ -320,7 +319,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // Encrypt our plaintext and store the result case "encrypt": // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) - resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64)))) + resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBufferString(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64))) if err != nil { panic(err) } @@ -347,7 +346,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) } // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) - resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct)))) + resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBufferString(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct))) if err != nil { panic(err) } @@ -376,7 +375,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // Rotate to a new key version case "rotate": // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) - _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}"))) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBufferString("{}")) if err != nil { panic(err) } @@ -413,7 +412,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion) - _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", 
bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion)))) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBufferString(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion))) if err != nil { panic(err) } @@ -472,7 +471,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { } req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/auth/cert", cores[0].Listeners[0].Address.Port), - bytes.NewBuffer([]byte("{\"type\": \"cert\"}"))) + bytes.NewBufferString("{\"type\": \"cert\"}")) if err != nil { t.Fatal(err) } diff --git a/http/handler.go b/http/handler.go index a91a0514ffd0..ceaaf34b457f 100644 --- a/http/handler.go +++ b/http/handler.go @@ -1,17 +1,19 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http import ( "bytes" "context" + "crypto/x509" + "encoding/base64" "encoding/json" + "encoding/pem" "errors" "fmt" "io" "io/fs" - "io/ioutil" "mime" "net" "net/http" @@ -23,19 +25,21 @@ import ( "strings" "time" - "github.com/NYTimes/gziphandler" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/http/priority" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/limits" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/pathmanager" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" + gziphandler "github.com/klauspost/compress/gzhttp" ) const ( @@ -84,6 +88,7 @@ var ( // the always forward list perfStandbyAlwaysForwardPaths = pathmanager.New() alwaysRedirectPaths = pathmanager.New() + websocketPaths = pathmanager.New() injectDataIntoTopRoutes = []string{ "/v1/sys/audit", @@ -109,7 +114,9 @@ var ( "/v1/sys/rotate", "/v1/sys/wrapping/wrap", } - + websocketRawPaths = []string{ + "sys/events/subscribe", + } oidcProtectedPathRegex = regexp.MustCompile(`^identity/oidc/provider/\w(([\w-.]+)?\w)?/userinfo$`) ) @@ -119,6 +126,8 @@ func init() { "sys/storage/raft/snapshot-force", "!sys/storage/raft/snapshot-auto/config", }) + websocketPaths.AddPaths(websocketRawPaths) + alwaysRedirectPaths.AddPaths(websocketRawPaths) } type HandlerAnchor struct{} @@ -144,6 +153,10 @@ func handler(props *vault.HandlerProperties) http.Handler { // Create the muxer to handle the actual endpoints mux := http.NewServeMux() + var chrootNamespace string + if props.ListenerConfig != nil { + chrootNamespace = props.ListenerConfig.ChrootNamespace + } switch { case props.RecoveryMode: @@ -154,17 +167,23 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy)) default: // Handle non-forwarded paths - mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core)) - mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core, chrootNamespace)) + mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core, chrootNamespace)) mux.Handle("/v1/sys/init", handleSysInit(core)) - mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core)) + mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core, + WithRedactClusterName(props.ListenerConfig.RedactClusterName), + 
WithRedactVersion(props.ListenerConfig.RedactVersion))) + mux.Handle("/v1/sys/seal-backend-status", handleSysSealBackendStatus(core)) mux.Handle("/v1/sys/seal", handleSysSeal(core)) mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core))) mux.Handle("/v1/sys/unseal", handleSysUnseal(core)) - mux.Handle("/v1/sys/leader", handleSysLeader(core)) - mux.Handle("/v1/sys/health", handleSysHealth(core)) - mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/leader", handleSysLeader(core, + WithRedactAddresses(props.ListenerConfig.RedactAddresses))) + mux.Handle("/v1/sys/health", handleSysHealth(core, + WithRedactClusterName(props.ListenerConfig.RedactClusterName), + WithRedactVersion(props.ListenerConfig.RedactVersion))) + mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core, chrootNamespace)) mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy)))) mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core, @@ -180,10 +199,10 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/internal/ui/feature-flags", handleSysInternalFeatureFlags(core)) for _, path := range injectDataIntoTopRoutes { - mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core))) + mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core, chrootNamespace))) } - mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core))) - mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core))) + mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core, chrootNamespace))) + mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core, chrootNamespace))) if core.UIEnabled() { if uiBuiltIn { mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))) @@ -200,7 +219,7 @@ func handler(props *vault.HandlerProperties) http.Handler { if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess { mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core)) } else { - mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core, chrootNamespace)) } if props.ListenerConfig != nil && props.ListenerConfig.Profiling.UnauthenticatedPProfAccess { @@ -213,31 +232,51 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/pprof/symbol", http.HandlerFunc(pprof.Symbol)) mux.Handle("/v1/sys/pprof/trace", http.HandlerFunc(pprof.Trace)) } else { - mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core, chrootNamespace)) } if props.ListenerConfig != nil && props.ListenerConfig.InFlightRequestLogging.UnauthenticatedInFlightAccess { mux.Handle("/v1/sys/in-flight-req", handleUnAuthenticatedInFlightRequest(core)) } else { - mux.Handle("/v1/sys/in-flight-req", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/in-flight-req", handleLogicalNoForward(core, chrootNamespace)) } - additionalRoutes(mux, core) + entAdditionalRoutes(mux, core) } - // Wrap the handler in another handler to trigger all help paths. 
- helpWrappedHandler := wrapHelpHandler(mux, core) - corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core) - quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core) - genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props) + // Build up a chain of wrapping handlers. + wrappedHandler := wrapHelpHandler(mux, core) + wrappedHandler = wrapCORSHandler(wrappedHandler, core) + wrappedHandler = rateLimitQuotaWrapping(wrappedHandler, core) + wrappedHandler = entWrapGenericHandler(core, wrappedHandler, props) + wrappedHandler = wrapMaxRequestSizeHandler(wrappedHandler, props) + wrappedHandler = priority.WrapRequestPriorityHandler(wrappedHandler) - // Wrap the handler with PrintablePathCheckHandler to check for non-printable - // characters in the request path. - printablePathCheckHandler := genericWrappedHandler + // Unless the DisablePrintableCheck listener setting is true, add an extra + // wrapping handler that checks for non-printable characters in the + // request path. if !props.DisablePrintableCheck { - printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil) + wrappedHandler = cleanhttp.PrintablePathCheckHandler(wrappedHandler, nil) + } + + // If the DisableReplicationStatusEndpoints setting is true, add an extra + // wrapping handler that gives each request a context value indicating + // that the replication status endpoints are disabled. + if props.ListenerConfig != nil && props.ListenerConfig.DisableReplicationStatusEndpoints { + wrappedHandler = disableReplicationStatusEndpointWrapping(wrappedHandler) } - return printablePathCheckHandler + // If any of the redaction settings are true, add an extra wrapping + // handler that gives each request a context carrying those redaction + // settings. + if props.ListenerConfig != nil && (props.ListenerConfig.RedactAddresses || props.ListenerConfig.RedactClusterName || props.ListenerConfig.RedactVersion) { + wrappedHandler = redactionSettingsWrapping(wrappedHandler, props.ListenerConfig.RedactVersion, props.ListenerConfig.RedactAddresses, props.ListenerConfig.RedactClusterName) + } + + if props.ListenerConfig != nil && props.ListenerConfig.DisableRequestLimiter { + wrappedHandler = wrapRequestLimiterHandler(wrappedHandler, props) + } + + return wrappedHandler } type copyResponseWriter struct { @@ -273,16 +312,16 @@ func (w *copyResponseWriter) WriteHeader(code int) { func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { origBody := new(bytes.Buffer) - reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody)) + reader := io.NopCloser(io.TeeReader(r.Body, origBody)) r.Body = reader - req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) + req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), w, r) if err != nil || status != 0 { respondError(w, status, err) return } - if origBody != nil { - r.Body = ioutil.NopCloser(origBody) - } + + r.Body = io.NopCloser(origBody) + input := &logical.LogInput{ Request: req, } @@ -294,17 +333,16 @@ func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler { cw := newCopyResponseWriter(w) h.ServeHTTP(cw, r) data := make(map[string]interface{}) - err = jsonutil.DecodeJSON(cw.body.Bytes(), &data) - if err != nil { - // best effort, ignore - } + + // Decode the captured response body on a best-effort basis; the returned + // error is intentionally ignored.
+ jsonutil.DecodeJSON(cw.body.Bytes(), &data) + httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()} input.Response = logical.HTTPResponseToLogicalResponse(httpResp) err = core.AuditLogger().AuditResponse(r.Context(), input) if err != nil { respondError(w, status, err) } - return }) } @@ -313,23 +351,18 @@ func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler { // are performed. func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler { var maxRequestDuration time.Duration - var maxRequestSize int64 if props.ListenerConfig != nil { maxRequestDuration = props.ListenerConfig.MaxRequestDuration - maxRequestSize = props.ListenerConfig.MaxRequestSize } if maxRequestDuration == 0 { maxRequestDuration = vault.DefaultMaxRequestDuration } - if maxRequestSize == 0 { - maxRequestSize = DefaultMaxRequestSize - } - // Swallow this error since we don't want to pollute the logs and we also don't want to // return an HTTP error here. This information is best effort. hostname, _ := os.Hostname() - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var hf func(w http.ResponseWriter, r *http.Request) + hf = func(w http.ResponseWriter, r *http.Request) { // This block needs to be here so that upon sending SIGHUP, custom response // headers are also reloaded into the handlers. var customHeaders map[string][]*logical.CustomHeader @@ -353,17 +386,15 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr ctx := r.Context() var cancelFunc context.CancelFunc // Add our timeout, but not for the monitor or events endpoints, as they are streaming - if strings.HasSuffix(r.URL.Path, "sys/monitor") || strings.Contains(r.URL.Path, "sys/events") { + // Request URL path for sys/monitor looks like /v1/sys/monitor + // Request URL paths for event subscriptions look like /v1/sys/events/subscribe/{eventType}. 
Example: /v1/sys/events/subscribe/kv* + if r.URL.Path == "/v1/sys/monitor" || strings.HasPrefix(r.URL.Path, "/v1/sys/events/subscribe") { ctx, cancelFunc = context.WithCancel(ctx) } else { ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration) } - // if maxRequestSize < 0, no need to set context value - // Add a size limiter if desired - if maxRequestSize > 0 { - ctx = context.WithValue(ctx, "max_request_size", maxRequestSize) - } - ctx = context.WithValue(ctx, "original_request_path", r.URL.Path) + + ctx = logical.CreateContextOriginalRequestPath(ctx, r.URL.Path) r = r.WithContext(ctx) r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace)) @@ -379,18 +410,48 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr nw.Header().Set("X-Vault-Hostname", hostname) } + // Extract the namespace from the header before we modify it + ns := r.Header.Get(consts.NamespaceHeaderName) switch { case strings.HasPrefix(r.URL.Path, "/v1/"): - newR, status := adjustRequest(core, r) + // Setting the namespace in the header to be included in the error message + newR, status, err := adjustRequest(core, props.ListenerConfig, r) if status != 0 { - respondError(nw, status, nil) + respondError(nw, status, err) cancelFunc() return } r = newR case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/": - default: + // RFC 5785 + case strings.HasPrefix(r.URL.Path, "/.well-known/"): + perfStandby := core.PerfStandby() + standby, err := core.Standby() + if err != nil { + core.Logger().Warn("error resolving standby status handling .well-known path", "error", err) + } else if standby && !perfStandby { + // Standby nodes, not performance standbys, don't start plugins + // so registration can not happen, instead redirect to active + respondStandby(core, w, r) + cancelFunc() + return + } else { + redir, err := core.GetWellKnownRedirect(r.Context(), r.URL.Path) + if err != nil { + core.Logger().Warn("error resolving potential API redirect", "error", err) + } else { + if redir != "" { + newReq := r.Clone(ctx) + // Save the original path for audit logging. + newReq.RequestURI = newReq.URL.Path + newReq.URL.Path = redir + hf(w, newReq) + cancelFunc() + return + } + } + } respondError(nw, http.StatusNotFound, nil) cancelFunc() return @@ -399,7 +460,12 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr // The uuid for the request is going to be generated when a logical // request is generated. 
But, here we generate one to be able to track // in-flight requests, and use that to update the req data with clientID - inFlightReqID, err := uuid.GenerateUUID() + reqIDGen := props.RequestIDGenerator + if reqIDGen == nil { + // By default use a UUID + reqIDGen = uuid.GenerateUUID + } + inFlightReqID, err := reqIDGen() if err != nil { respondError(nw, http.StatusInternalServerError, fmt.Errorf("failed to generate an identifier for the in-flight request")) } @@ -434,7 +500,6 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr }() // Setting the namespace in the header to be included in the error message - ns := r.Header.Get(consts.NamespaceHeaderName) if ns != "" { nw.Header().Set(consts.NamespaceHeaderName, ns) } @@ -442,8 +507,8 @@ h.ServeHTTP(nw, r) cancelFunc() - return - }) + } + return http.HandlerFunc(hf) } func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler { @@ -451,6 +516,8 @@ hopSkips := l.XForwardedForHopSkips authorizedAddrs := l.XForwardedForAuthorizedAddrs rejectNotAuthz := l.XForwardedForRejectNotAuthorized + clientCertHeader := l.XForwardedForClientCertHeader + clientCertHeaderDecoders := l.XForwardedForClientCertHeaderDecoders return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")] if !headersOK || len(headers) == 0 { @@ -533,26 +600,64 @@ } r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port) + + // Import the client certificate forwarded by the reverse proxy. + // There should be only one instance of the header, but looping allows for more flexibility + clientCertHeaders, clientCertHeadersOK := r.Header[textproto.CanonicalMIMEHeaderKey(clientCertHeader)] + if clientCertHeadersOK && len(clientCertHeaders) > 0 { + var client_certs []*x509.Certificate + for _, header := range clientCertHeaders { + // Multiple certs should be comma delimited + vals := strings.Split(header, ",") + for _, v := range vals { + actions := strings.Split(clientCertHeaderDecoders, ",") + for _, action := range actions { + switch action { + case "URL": + decoded, err := url.QueryUnescape(v) + if err != nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("failed to url unescape the client certificate: %w", err)) + return + } + v = decoded + case "BASE64": + decoded, err := base64.StdEncoding.DecodeString(v) + if err != nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("failed to base64 decode the client certificate: %w", err)) + return + } + v = string(decoded[:]) + case "DER": + // pem.Decode has no error return; a nil block means the input was not valid PEM + decoded, _ := pem.Decode([]byte(v)) + if decoded == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("failed to convert the client certificate to DER format")) + return + } + v = string(decoded.Bytes[:]) + default: + respondError(w, http.StatusBadRequest, fmt.Errorf("unknown decode option specified: %s", action)) + return + } + } + + cert, err := x509.ParseCertificate([]byte(v)) + if err != nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse the client certificate: %w", err)) + return + } + client_certs = append(client_certs, cert) + } + } + if r.TLS == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("server must use TLS for certificate authentication")) + return + } else 
{ + r.TLS.PeerCertificates = append(client_certs, r.TLS.PeerCertificates...) + } + } h.ServeHTTP(w, r) - return }) } -// stripPrefix is a helper to strip a prefix from the path. It will -// return false from the second return value if it the prefix doesn't exist. -func stripPrefix(prefix, path string) (string, bool) { - if !strings.HasPrefix(path, prefix) { - return "", false - } - - path = path[len(prefix):] - if path == "" { - return "", false - } - - return path, true -} - func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { header := w.Header() @@ -562,12 +667,12 @@ func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler { respondError(w, http.StatusInternalServerError, err) return } - if userHeaders != nil { - for k := range userHeaders { - v := userHeaders.Get(k) - header.Set(k, v) - } + + for k := range userHeaders { + v := userHeaders.Get(k) + header.Set(k, v) } + h.ServeHTTP(w, req) }) } @@ -579,7 +684,6 @@ func handleUI(h http.Handler) http.Handler { // here. req.URL.Path = strings.TrimSuffix(req.URL.Path, "/") h.ServeHTTP(w, req) - return }) } @@ -657,8 +761,7 @@ func handleUIStub() http.Handler { func handleUIRedirect() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - http.Redirect(w, req, "/ui/", 307) - return + http.Redirect(w, req, "/ui/", http.StatusTemporaryRedirect) }) } @@ -706,38 +809,21 @@ func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, // Limit the maximum number of bytes to MaxRequestSize to protect // against an indefinite amount of data being read. reader := r.Body - ctx := r.Context() - maxRequestSize := ctx.Value("max_request_size") - if maxRequestSize != nil { - max, ok := maxRequestSize.(int64) - if !ok { - return nil, errors.New("could not parse max_request_size from request context") - } - if max > 0 { - // MaxBytesReader won't do all the internal stuff it must unless it's - // given a ResponseWriter that implements the internal http interface - // requestTooLarger. So we let it have access to the underlying - // ResponseWriter. - inw := w - if myw, ok := inw.(logical.WrappingResponseWriter); ok { - inw = myw.Wrapped() - } - reader = http.MaxBytesReader(inw, r.Body, max) - } - } + var origBody io.ReadWriter + if perfStandby { // Since we're checking PerfStandby here we key on origBody being nil // or not later, so we need to always allocate so it's non-nil origBody = new(bytes.Buffer) - reader = ioutil.NopCloser(io.TeeReader(reader, origBody)) + reader = io.NopCloser(io.TeeReader(reader, origBody)) } err := jsonutil.DecodeJSONFromReader(reader, out) if err != nil && err != io.EOF { return nil, fmt.Errorf("failed to parse JSON input: %w", err) } if origBody != nil { - return ioutil.NopCloser(origBody), err + return io.NopCloser(origBody), err } return nil, err } @@ -746,16 +832,6 @@ func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, // // A nil map will be returned if the format is empty or invalid. 
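Both `parseJSONRequest` above and `parseFormRequest` just below lose their per-request `max_request_size` context plumbing in this diff; size enforcement moves up into the `wrapMaxRequestSizeHandler` added to the handler chain earlier. That wrapper's body is not part of this diff, so the following is only a sketch of the usual shape of such a middleware, assuming it is built on `http.MaxBytesReader`; the function name and wiring are illustrative, not the actual implementation:

```go
// Sketch only: stands in for the unshown wrapMaxRequestSizeHandler.
func wrapMaxRequestSize(next http.Handler, max int64) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if max > 0 {
			// MaxBytesReader makes Read fail once max bytes have been
			// consumed, so an oversized body surfaces as a parse error
			// downstream instead of being buffered in full.
			r.Body = http.MaxBytesReader(w, r.Body, max)
		}
		next.ServeHTTP(w, r)
	})
}
```

The new `TestHandler_MaxRequestSize` and `TestHandler_MaxRequestSize_Memory` tests later in this diff assert exactly that behavior: a body far over the 1024-byte limit fails with a JSON parsing error, without the server allocating the full payload.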
func parseFormRequest(r *http.Request) (map[string]interface{}, error) { - maxRequestSize := r.Context().Value("max_request_size") - if maxRequestSize != nil { - max, ok := maxRequestSize.(int64) - if !ok { - return nil, errors.New("could not parse max_request_size from request context") - } - if max > 0 { - r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max)) - } - } if err := r.ParseForm(); err != nil { return nil, err } @@ -790,6 +866,7 @@ func forwardBasedOnHeaders(core *vault.Core, r *http.Request) (bool, error) { return false, fmt.Errorf("forwarding via header %s disabled in configuration", VaultForwardHeaderName) } if rawForward == "active-node" { + core.Logger().Trace("request will be routed based on the 'active-node' header") return true, nil } return false, nil @@ -831,7 +908,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle respondError(w, http.StatusBadRequest, err) return } - path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + path := trimPath(ns, r.URL.Path) if !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path) { handler.ServeHTTP(w, r) return @@ -863,20 +940,19 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle } forwardRequest(core, w, r) - return }) } func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { if r.Header.Get(vault.IntNoForwardingHeaderName) != "" { - respondStandby(core, w, r.URL) + respondStandby(core, w, r) return } if r.Header.Get(NoRequestForwardingHeaderName) != "" { // Forwarding explicitly disabled, fall back to previous behavior core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request") - respondStandby(core, w, r.URL) + respondStandby(core, w, r) return } @@ -885,9 +961,25 @@ func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { respondError(w, http.StatusBadRequest, err) return } - path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) - if alwaysRedirectPaths.HasPath(path) { - respondStandby(core, w, r.URL) + path := trimPath(ns, r.URL.Path) + redirect := alwaysRedirectPaths.HasPath(path) + // websocket paths are special, because they can contain a namespace + // in front of them. This isn't an issue on perf standbys where the + // namespace manager will know all the namespaces, so we will have + // already extracted it from the path. 
But regular standbys don't have + // knowledge of the namespaces, so we need + // to add an extra check + if !redirect && !core.PerfStandby() { + for _, websocketPath := range websocketRawPaths { + if strings.Contains(path, websocketPath) { + redirect = true + break + } + } + } + if redirect { + core.Logger().Trace("cannot forward request (path included in always redirect paths), falling back to redirection to standby") + respondStandby(core, w, r) return } @@ -897,20 +989,20 @@ func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { statusCode, header, retBytes, err := core.ForwardRequest(r) if err != nil { if err == vault.ErrCannotForward { - core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back") + core.Logger().Trace("cannot forward request (possibly disabled on active node), falling back to redirection to standby") } else { core.Logger().Error("forward request error", "error", err) } // Fall back to redirection - respondStandby(core, w, r.URL) + respondStandby(core, w, r) return } - if header != nil { - for k, v := range header { - w.Header()[k] = v - } + core.Logger().Trace("request forwarded", "statusCode", statusCode) + + for k, v := range header { + w.Header()[k] = v } w.WriteHeader(statusCode) @@ -920,15 +1012,54 @@ func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { // request is a helper to perform a request and properly exit in the // case of an error. func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) { + lim := &limits.HTTPLimiter{ + Method: rawReq.Method, + PathLimited: r.PathLimited, + LookupFunc: core.GetRequestLimiter, + } + lsnr, ok := lim.Acquire(rawReq.Context()) + if !ok { + resp := &logical.Response{} + logical.RespondWithStatusCode(resp, r, http.StatusServiceUnavailable) + respondError(w, http.StatusServiceUnavailable, limits.ErrCapacity) + return resp, false, false + } + + // To guard against leaking RequestListener slots, we should ignore Limiter + // measurements on panic. OnIgnore will check to see if a RequestListener + // slot has been acquired and not released, which could happen on + // recoverable panics. + defer lsnr.OnIgnore() + resp, err := core.HandleRequest(rawReq.Context(), r) - if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) { + + // Do the limiter measurement + if err != nil { + lsnr.OnDropped() + } else { + lsnr.OnSuccess() + } + + if r.LastRemoteWAL() > 0 && !core.EntWaitUntilWALShipped(rawReq.Context(), r.LastRemoteWAL()) { if resp == nil { resp = &logical.Response{} } resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.") } + + // We need to rely on string comparison here because the error could be + // returned from an RPC client call with a non-ReplicatedResponse return + // value (see: PersistAlias). In these cases, the error we get back will + // contain the non-wrapped error message string we're looking for. We would + // love to clean up all error wrapping to be consistent in Vault but we + // considered that too high risk for now. 
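The point of the comment above is that sentinel matching only works while an error's wrap chain is intact, and the chain does not survive being flattened to a string at an RPC boundary. A standalone illustration, where `errOverloaded` is a stand-in for `consts.ErrOverloaded` rather than the real value:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Stand-in sentinel; the real value lives in sdk/helper/consts.
var errOverloaded = errors.New("overloaded, try again later")

func main() {
	wrapped := fmt.Errorf("rpc error: %w", errOverloaded)
	// An error that crossed an RPC boundary is often re-created from
	// its message text, losing the wrap chain:
	flattened := errors.New(wrapped.Error())

	fmt.Println(errors.Is(wrapped, errOverloaded))   // true: chain intact
	fmt.Println(errors.Is(flattened, errOverloaded)) // false: chain lost
	// String matching survives the round-trip, hence strings.Contains:
	fmt.Println(strings.Contains(flattened.Error(), errOverloaded.Error())) // true
}
```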
+ if err != nil && strings.Contains(err.Error(), consts.ErrOverloaded.Error()) { + logical.RespondWithStatusCode(resp, r, http.StatusServiceUnavailable) + respondError(w, http.StatusServiceUnavailable, err) + return resp, false, false + } if errwrap.Contains(err, consts.ErrStandby.Error()) { - respondStandby(core, w, rawReq.URL) + respondStandby(core, w, rawReq) return resp, false, false } if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) { @@ -977,7 +1108,8 @@ func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *l } // respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby -func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) { +func respondStandby(core *vault.Core, w http.ResponseWriter, r *http.Request) { + reqURL := r.URL // Request the leader address _, redirectAddr, _, err := core.Leader() if err != nil { @@ -1014,6 +1146,25 @@ func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) { RawQuery: reqURL.RawQuery, } + ctx := r.Context() + ns, err := namespace.FromContext(ctx) + if err != nil { + respondError(w, http.StatusBadRequest, err) + } + // WebSockets schemas are ws or wss + if websocketPaths.HasPath(trimPath(ns, reqURL.Path)) { + if finalURL.Scheme == "http" { + finalURL.Scheme = "ws" + } else { + finalURL.Scheme = "wss" + } + } + + originalPath, ok := logical.ContextOriginalRequestPathValue(ctx) + if ok { + finalURL.Path = originalPath + } + // Ensure there is a scheme, default to https if finalURL.Scheme == "" { finalURL.Scheme = "https" @@ -1265,3 +1416,8 @@ func respondOIDCPermissionDenied(w http.ResponseWriter) { enc := json.NewEncoder(w) enc.Encode(oidcResponse) } + +// trimPath removes the /v1/ prefix and the namespace from the path +func trimPath(ns *namespace.Namespace, path string) string { + return ns.TrimmedPath(path[len("/v1/"):]) +} diff --git a/http/handler_stubs_oss.go b/http/handler_stubs_oss.go new file mode 100644 index 000000000000..f59a637eb58a --- /dev/null +++ b/http/handler_stubs_oss.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func adjustRequest(c *vault.Core, listener *configutil.Listener, r *http.Request) (*http.Request, int, error) { + return r, 0, nil +} + +func handleEntPaths(nsPath string, core *vault.Core, r *http.Request) http.Handler { + return nil +} diff --git a/http/handler_test.go b/http/handler_test.go index 244fe4177257..dde4acd3a8df 100644 --- a/http/handler_test.go +++ b/http/handler_test.go @@ -1,19 +1,21 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http import ( + "bytes" "context" "crypto/tls" "encoding/json" "errors" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/textproto" "net/url" "reflect" + "runtime" "strings" "testing" @@ -21,9 +23,11 @@ import ( "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" ) func TestHandler_parseMFAHandler(t *testing.T) { @@ -113,6 +117,28 @@ func TestHandler_parseMFAHandler(t *testing.T) { } } +// TestHandler_CORS_Patch verifies that http PATCH is included in the list of +// allowed request methods +func TestHandler_CORS_Patch(t *testing.T) { + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + corsConfig := core.CORSConfig() + err := corsConfig.Enable(context.Background(), []string{addr}, nil) + require.NoError(t, err) + req, err := http.NewRequest(http.MethodOptions, addr+"/v1/sys/seal-status", nil) + require.NoError(t, err) + + req.Header.Set("Origin", addr) + req.Header.Set("Access-Control-Request-Method", http.MethodPatch) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) +} + func TestHandler_cors(t *testing.T) { core, _, _ := vault.TestCoreUnsealed(t) ln, addr := TestServer(t, core) @@ -403,6 +429,7 @@ func TestSysMounts_headerAuth(t *testing.T) { "lease_duration": json.Number("0"), "wrap_info": nil, "warnings": nil, + "mount_type": "system", "auth": nil, "data": map[string]interface{}{ "secret/": map[string]interface{}{ @@ -591,8 +618,9 @@ func TestSysMounts_headerAuth_Wrapped(t *testing.T) { "wrap_info": map[string]interface{}{ "ttl": json.Number("60"), }, - "warnings": nil, - "auth": nil, + "warnings": nil, + "auth": nil, + "mount_type": "", } testResponseStatus(t, resp, 200) @@ -804,6 +832,7 @@ func testNonPrintable(t *testing.T, disable bool) { props := &vault.HandlerProperties{ Core: core, DisablePrintableCheck: disable, + ListenerConfig: &configutil.Listener{}, } TestServerWithListenerAndProperties(t, ln, addr, core, props) defer ln.Close() @@ -856,7 +885,7 @@ func TestHandler_Parse_Form(t *testing.T) { if err != nil { t.Fatal(err) } - req.Body = ioutil.NopCloser(strings.NewReader(values.Encode())) + req.Body = io.NopCloser(strings.NewReader(values.Encode())) req.Header.Set("x-vault-token", cluster.RootToken) req.Header.Set("content-type", "application/x-www-form-urlencoded") resp, err := c.Do(req) @@ -887,3 +916,59 @@ func TestHandler_Parse_Form(t *testing.T) { t.Fatal(diff) } } + +// TestHandler_MaxRequestSize verifies that a request larger than the +// MaxRequestSize fails +func TestHandler_MaxRequestSize(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + MaxRequestSize: 1024, + }, + }, + HandlerFunc: Handler, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + _, err := client.KVv2("secret").Put(context.Background(), "foo", map[string]interface{}{ + "bar": strings.Repeat("a", 1025), + }) + + require.ErrorContains(t, err, "error parsing JSON") +} + +// 
TestHandler_MaxRequestSize_Memory sets the max request size to 1024 bytes, +// and creates a 1MB request. The test verifies that less than 1MB of memory is +// allocated when the request is sent. This test shouldn't be run in parallel, +// because it modifies GOMAXPROCS +func TestHandler_MaxRequestSize_Memory(t *testing.T) { + ln, addr := TestListener(t) + core, _, token := vault.TestCoreUnsealed(t) + TestServerWithListenerAndProperties(t, ln, addr, core, &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + Address: addr, + MaxRequestSize: 1024, + }, + }) + defer ln.Close() + + data := bytes.Repeat([]byte{0x1}, 1024*1024) + + req, err := http.NewRequest("POST", addr+"/v1/sys/unseal", bytes.NewReader(data)) + require.NoError(t, err) + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + var start, end runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&start) + client.Do(req) + runtime.ReadMemStats(&end) + require.Less(t, end.TotalAlloc-start.TotalAlloc, uint64(1024*1024)) +} diff --git a/http/help.go b/http/help.go index 64085f1e38b6..24ff14a8e96f 100644 --- a/http/help.go +++ b/http/help.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -40,7 +40,7 @@ func handleHelp(core *vault.Core, w http.ResponseWriter, r *http.Request) { respondError(w, http.StatusNotFound, errors.New("Missing /v1/ prefix in path. Use vault path-help command to retrieve API help for paths")) return } - path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + path := trimPath(ns, r.URL.Path) req := &logical.Request{ Operation: logical.HelpOperation, diff --git a/http/help_test.go b/http/help_test.go index d02c26a9521e..5fa96e50ddad 100644 --- a/http/help_test.go +++ b/http/help_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/http_test.go b/http/http_test.go index 5e51ce7d0fe3..addd423b6181 100644 --- a/http/http_test.go +++ b/http/http_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -14,7 +14,7 @@ import ( "testing" "time" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" ) @@ -25,41 +25,61 @@ func testHttpGet(t *testing.T, token string, addr string) *http.Response { loggedToken = "" } t.Logf("Token is %s", loggedToken) - return testHttpData(t, "GET", token, addr, nil, false, 0) + return testHttpData(t, "GET", token, addr, "", nil, false, 0, false) } func testHttpDelete(t *testing.T, token string, addr string) *http.Response { - return testHttpData(t, "DELETE", token, addr, nil, false, 0) + return testHttpData(t, "DELETE", token, addr, "", nil, false, 0, false) } // Go 1.8+ clients redirect automatically which breaks our 307 standby testing func testHttpDeleteDisableRedirect(t *testing.T, token string, addr string) *http.Response { - return testHttpData(t, "DELETE", token, addr, nil, true, 0) + return testHttpData(t, "DELETE", token, addr, "", nil, true, 0, false) } func testHttpPostWrapped(t *testing.T, token string, addr string, body interface{}, wrapTTL time.Duration) *http.Response { - return testHttpData(t, "POST", token, addr, body, false, wrapTTL) + return testHttpData(t, "POST", token, addr, "", body, false, wrapTTL, false) } func testHttpPost(t *testing.T, token string, addr string, body interface{}) *http.Response { - return testHttpData(t, "POST", token, addr, body, false, 0) + return testHttpData(t, "POST", token, addr, "", body, false, 0, false) +} + +func testHttpPostBinaryData(t *testing.T, token string, addr string, body interface{}) *http.Response { + return testHttpData(t, "POST", token, addr, "", body, false, 0, true) +} + +func testHttpPostNamespace(t *testing.T, token string, addr string, namespace string, body interface{}) *http.Response { + return testHttpData(t, "POST", token, addr, namespace, body, false, 0, false) } func testHttpPut(t *testing.T, token string, addr string, body interface{}) *http.Response { - return testHttpData(t, "PUT", token, addr, body, false, 0) + return testHttpData(t, "PUT", token, addr, "", body, false, 0, false) +} + +func testHttpPutBinaryData(t *testing.T, token string, addr string, body interface{}) *http.Response { + return testHttpData(t, "PUT", token, addr, "", body, false, 0, true) } // Go 1.8+ clients redirect automatically which breaks our 307 standby testing func testHttpPutDisableRedirect(t *testing.T, token string, addr string, body interface{}) *http.Response { - return testHttpData(t, "PUT", token, addr, body, true, 0) + return testHttpData(t, "PUT", token, addr, "", body, true, 0, false) } -func testHttpData(t *testing.T, method string, token string, addr string, body interface{}, disableRedirect bool, wrapTTL time.Duration) *http.Response { +func testHttpData(t *testing.T, method string, token string, addr string, namespace string, body interface{}, disableRedirect bool, wrapTTL time.Duration, binaryBody bool) *http.Response { bodyReader := new(bytes.Buffer) if body != nil { - enc := json.NewEncoder(bodyReader) - if err := enc.Encode(body); err != nil { - t.Fatalf("err:%s", err) + if binaryBody { + bodyAsBytes, ok := body.([]byte) + if !ok { + t.Fatalf("binary body was true, but body was not a []byte was %T", body) + } + bodyReader = bytes.NewBuffer(bodyAsBytes) + } else { + enc := json.NewEncoder(bodyReader) + if err := enc.Encode(body); err != nil { + t.Fatalf("err:%s", err) + } } 
} @@ -78,6 +98,9 @@ func testHttpData(t *testing.T, method string, token string, addr string, body i if wrapTTL > 0 { req.Header.Set("X-Vault-Wrap-TTL", wrapTTL.String()) } + if namespace != "" { + req.Header.Set("X-Vault-Namespace", namespace) + } if len(token) != 0 { req.Header.Set(consts.AuthHeaderName, token) diff --git a/http/logical.go b/http/logical.go index 6da2e7dc7f88..20076ae29c70 100644 --- a/http/logical.go +++ b/http/logical.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -16,8 +16,8 @@ import ( "strings" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/experiments" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -45,13 +45,12 @@ func (b *bufferedReader) Close() error { const MergePatchContentTypeHeader = "application/merge-patch+json" -func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { +func buildLogicalRequestNoAuth(perfStandby bool, ra *vault.RouterAccess, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { ns, err := namespace.FromContext(r.Context()) if err != nil { return nil, nil, http.StatusBadRequest, nil } - path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) - + path := trimPath(ns, r.URL.Path) var data map[string]interface{} var origBody io.ReadCloser var passHTTPReq bool @@ -110,7 +109,9 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. // is der encoded) we don't want to parse it. Instead, we will simply // add the HTTP request to the logical request object for later consumption. contentType := r.Header.Get("Content-Type") - if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" || isOcspRequest(contentType) { + + if (ra != nil && ra.IsBinaryPath(r.Context(), path)) || + path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" { passHTTPReq = true origBody = r.Body } else { @@ -183,12 +184,19 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. } data = parseQuery(r.URL.Query()) - - case "OPTIONS", "HEAD": + case "HEAD": + op = logical.HeaderOperation + data = parseQuery(r.URL.Query()) + case "OPTIONS": default: return nil, nil, http.StatusMethodNotAllowed, nil } + // RFC 5785 Redirect, keep the request for auditing purposes + if r.URL.Path != r.RequestURI { + passHTTPReq = true + } + requestId, err := uuid.GenerateUUID() if err != nil { return nil, nil, http.StatusInternalServerError, fmt.Errorf("failed to generate identifier for the request: %w", err) @@ -203,6 +211,10 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. Headers: r.Header, } + if ra != nil && ra.IsLimitedPath(r.Context(), path) { + req.PathLimited = true + } + if passHTTPReq { req.HTTPRequest = r } @@ -213,15 +225,6 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. 
return req, origBody, 0, nil } -func isOcspRequest(contentType string) bool { - contentType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return false - } - - return contentType == "application/ocsp-request" -} - func buildLogicalPath(r *http.Request) (string, int, error) { ns, err := namespace.FromContext(r.Context()) if err != nil { @@ -261,11 +264,12 @@ return path, 0, nil } -func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { - req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) +func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request, chrootNamespace string) (*logical.Request, io.ReadCloser, int, error) { + req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), w, r) if err != nil || status != 0 { return nil, nil, status, err } + req.ChrootNamespace = chrootNamespace req.SetRequiredState(r.Header.Values(VaultIndexHeaderName)) requestAuth(r, req) @@ -294,27 +298,27 @@ // - Perf standby and token with limited use count. // - Perf standby and token re-validation needed (e.g. due to invalid token). // - Perf standby and control group error. -func handleLogical(core *vault.Core) http.Handler { - return handleLogicalInternal(core, false, false) +func handleLogical(core *vault.Core, chrootNamespace string) http.Handler { + return handleLogicalInternal(core, false, false, chrootNamespace) } // handleLogicalWithInjector returns a handler for processing logical requests // that also have their logical response data injected at the top-level payload. // All forwarding behavior remains the same as `handleLogical`. -func handleLogicalWithInjector(core *vault.Core) http.Handler { - return handleLogicalInternal(core, true, false) +func handleLogicalWithInjector(core *vault.Core, chrootNamespace string) http.Handler { + return handleLogicalInternal(core, true, false, chrootNamespace) } // handleLogicalNoForward returns a handler for processing logical local-only // requests. These types of requests are never forwarded, and return a // `vault.ErrCannotForwardLocalOnly` error if forwarding is attempted. -func handleLogicalNoForward(core *vault.Core) http.Handler { - return handleLogicalInternal(core, false, true) +func handleLogicalNoForward(core *vault.Core, chrootNamespace string) http.Handler { + return handleLogicalInternal(core, false, true, chrootNamespace) } func handleLogicalRecovery(raw *vault.RawBackend, token *atomic.String) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req, _, statusCode, err := buildLogicalRequestNoAuth(false, w, r) + req, _, statusCode, err := buildLogicalRequestNoAuth(false, nil, w, r) if err != nil || statusCode != 0 { respondError(w, statusCode, err) return @@ -342,31 +346,38 @@ // handleLogicalInternal is a common helper that returns a handler for // processing logical requests. The behavior depends on the various boolean // toggles. Refer to usage on functions for possible behaviors.
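With `isOcspRequest` removed, raw request bodies are no longer special-cased by content type. Instead, plugins declare binary paths and the HTTP layer consults them via `core.RouterAccess().IsBinaryPath` (the `ra.IsBinaryPath` check in `buildLogicalRequestNoAuth` above). A sketch of the declaration side, mirroring the backend that `TestLogical_BinaryPath` constructs later in this diff (`operations` is the handler map defined there):

```go
// Mirrors the test backend in TestLogical_BinaryPath below.
b := &framework.Backend{
	BackendType: logical.TypeLogical,
	Paths: []*framework.Path{
		{Pattern: "binary", Operations: operations},
		{Pattern: "binary/" + framework.MatchAllRegex("test"), Operations: operations},
	},
	// Requests to these paths keep their raw body (passHTTPReq), the
	// way the raft snapshot endpoints always have, instead of going
	// through parseJSONRequest.
	PathsSpecial: &logical.Paths{
		Binary: []string{"binary", "binary/*"},
	},
}
```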
-func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForward bool) http.Handler { +func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForward bool, chrootNamespace string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req, origBody, statusCode, err := buildLogicalRequest(core, w, r) + req, origBody, statusCode, err := buildLogicalRequest(core, w, r, chrootNamespace) if err != nil || statusCode != 0 { respondError(w, statusCode, err) return } // Websockets need to be handled at HTTP layer instead of logical requests. - if core.IsExperimentEnabled(experiments.VaultExperimentEventsAlpha1) { - ns, err := namespace.FromContext(r.Context()) - if err != nil { - respondError(w, http.StatusInternalServerError, err) - return - } - nsPath := ns.Path - if ns.ID == namespace.RootNamespaceID { - nsPath = "" - } - if strings.HasPrefix(r.URL.Path, fmt.Sprintf("/v1/%ssys/events/subscribe/", nsPath)) { - handler := handleEventsSubscribe(core, req) + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + trimmedPath := trimPath(ns, r.URL.Path) + + nsPath := ns.Path + if ns.ID == namespace.RootNamespaceID { + nsPath = "" + } + if websocketPaths.HasPath(trimmedPath) { + handler := entHandleEventsSubscribe(core, req) + if handler != nil { handler.ServeHTTP(w, r) return } } + handler := handleEntPaths(nsPath, core, r) + if handler != nil { + handler.ServeHTTP(w, r) + return + } // Make the internal request. We attach the connection info // as well in case this is an authentication request that requires @@ -375,6 +386,9 @@ func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForw // success. resp, ok, needsForward := request(core, w, r, req) switch { + case errwrap.Contains(resp.Error(), consts.ErrOverloaded.Error()): + respondError(w, http.StatusServiceUnavailable, consts.ErrOverloaded) + return case needsForward && noForward: respondError(w, http.StatusBadRequest, vault.ErrCannotForwardLocalOnly) return @@ -447,7 +461,7 @@ func respondLogical(core *vault.Core, w http.ResponseWriter, r *http.Request, re } } - adjustResponse(core, w, req) + entAdjustResponse(core, w, req) // Respond respondOk(w, ret) diff --git a/http/logical_test.go b/http/logical_test.go index e4b137c10290..65501281851f 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -11,30 +11,30 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "os" "reflect" "strconv" "strings" "testing" "time" + "github.com/go-test/deep" + log "github.com/hashicorp/go-hclog" kv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" - auditFile "github.com/hashicorp/vault/builtin/audit/file" + "github.com/hashicorp/vault/audit" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/physical/inmem" - - "github.com/go-test/deep" - log "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" ) func TestLogical(t *testing.T) { @@ -63,9 +63,10 @@ func TestLogical(t *testing.T) { "data": map[string]interface{}{ "data": "bar", }, - "auth": nil, - "wrap_info": nil, - "warnings": nilWarnings, + "auth": nil, + "wrap_info": nil, + "warnings": nilWarnings, + "mount_type": "kv", } testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) @@ -180,9 +181,10 @@ func TestLogical_StandbyRedirect(t *testing.T) { "entity_id": "", "type": "service", }, - "warnings": nilWarnings, - "wrap_info": nil, - "auth": nil, + "warnings": nilWarnings, + "wrap_info": nil, + "auth": nil, + "mount_type": "token", } testResponseStatus(t, resp, 200) @@ -221,6 +223,7 @@ func TestLogical_CreateToken(t *testing.T) { "renewable": false, "lease_duration": json.Number("0"), "data": nil, + "mount_type": "token", "wrap_info": nil, "auth": map[string]interface{}{ "policies": []interface{}{"root"}, @@ -319,7 +322,7 @@ func TestLogical_ListSuffix(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - lreq, _, status, err := buildLogicalRequest(core, nil, req) + lreq, _, status, err := buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -334,7 +337,7 @@ func TestLogical_ListSuffix(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - lreq, _, status, err = buildLogicalRequest(core, nil, req) + lreq, _, status, err = buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -349,12 +352,12 @@ func TestLogical_ListSuffix(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - _, _, status, err = buildLogicalRequestNoAuth(core.PerfStandby(), nil, req) + _, _, status, err = buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), nil, req) if err != nil || status != 0 { t.Fatal(err) } - lreq, _, status, err = buildLogicalRequest(core, nil, req) + lreq, _, status, err = buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -366,6 +369,94 @@ func TestLogical_ListSuffix(t *testing.T) { } } +// TestLogical_BinaryPath tests the legacy behavior passing in binary data to a +// path that isn't explicitly marked by a plugin as a binary path to fail, along +// with making sure we pass through when marked as 
a binary path +func TestLogical_BinaryPath(t *testing.T) { + t.Parallel() + + testHandler := func(ctx context.Context, l *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return nil, nil + } + operations := map[logical.Operation]framework.OperationHandler{ + logical.PatchOperation: &framework.PathOperation{Callback: testHandler}, + logical.UpdateOperation: &framework.PathOperation{Callback: testHandler}, + } + + conf := &vault.CoreConfig{ + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + LogicalBackends: map[string]logical.Factory{ + "bintest": func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + b := new(framework.Backend) + b.BackendType = logical.TypeLogical + b.Paths = []*framework.Path{ + {Pattern: "binary", Operations: operations}, + {Pattern: "binary/" + framework.MatchAllRegex("test"), Operations: operations}, + } + b.PathsSpecial = &logical.Paths{Binary: []string{"binary", "binary/*"}} + err := b.Setup(ctx, config) + return b, err + }, + }, + } + + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + mountReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: token, + Path: "sys/mounts/bintest", + Data: map[string]interface{}{ + "type": "bintest", + }, + } + mountResp, err := core.HandleRequest(namespace.RootContext(nil), mountReq) + if err != nil { + t.Fatalf("failed mounting bin-test engine: %v", err) + } + if mountResp.IsError() { + t.Fatalf("failed mounting bin-test error in response: %v", mountResp.Error()) + } + + tests := []struct { + name string + op string + url string + expectedReturn int + }{ + {name: "PUT non-binary", op: "PUT", url: addr + "/v1/bintest/non-binary", expectedReturn: http.StatusBadRequest}, + {name: "POST non-binary", op: "POST", url: addr + "/v1/bintest/non-binary", expectedReturn: http.StatusBadRequest}, + {name: "PUT binary", op: "PUT", url: addr + "/v1/bintest/binary", expectedReturn: http.StatusNoContent}, + {name: "POST binary", op: "POST", url: addr + "/v1/bintest/binary/sub-path", expectedReturn: http.StatusNoContent}, + } + for _, test := range tests { + t.Run(test.name, func(st *testing.T) { + var resp *http.Response + switch test.op { + case "PUT": + resp = testHttpPutBinaryData(st, token, test.url, make([]byte, 100)) + case "POST": + resp = testHttpPostBinaryData(st, token, test.url, make([]byte, 100)) + default: + t.Fatalf("unsupported operation: %s", test.op) + } + testResponseStatus(st, resp, test.expectedReturn) + if test.expectedReturn != http.StatusNoContent { + all, err := io.ReadAll(resp.Body) + if err != nil { + st.Fatalf("failed reading error response body: %v", err) + } + if !strings.Contains(string(all), "error parsing JSON") { + st.Fatalf("error response body did not contain expected error: %v", all) + } + } + }) + } +} + func TestLogical_ListWithQueryParameters(t *testing.T) { core, _, rootToken := vault.TestCoreUnsealed(t) @@ -425,7 +516,7 @@ func TestLogical_ListWithQueryParameters(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - lreq, _, status, err := buildLogicalRequest(core, nil, req) + lreq, _, status, err := buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -464,12 +555,12 @@ func TestLogical_RespondWithStatusCode(t *testing.T) { t.Fatalf("Bad Status code: %d", w.Code) } - bodyRaw, err := ioutil.ReadAll(w.Body) + bodyRaw, err := 
io.ReadAll(w.Body) if err != nil { t.Fatal(err) } - expected := `{"request_id":"id","lease_id":"","renewable":false,"lease_duration":0,"data":{"test-data":"foo"},"wrap_info":null,"warnings":null,"auth":null}` + expected := `{"request_id":"id","lease_id":"","renewable":false,"lease_duration":0,"data":{"test-data":"foo"},"wrap_info":null,"warnings":null,"auth":null,"mount_type":""}` if string(bodyRaw[:]) != strings.Trim(expected, "\n") { t.Fatalf("bad response: %s", string(bodyRaw[:])) @@ -478,10 +569,10 @@ func TestLogical_RespondWithStatusCode(t *testing.T) { func TestLogical_Audit_invalidWrappingToken(t *testing.T) { // Create a noop audit backend - noop := corehelpers.TestNoopAudit(t, nil) + noop := audit.TestNoopAudit(t, "noop/", nil) c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{ AuditBackends: map[string]audit.Factory{ - "noop": func(ctx context.Context, config *audit.BackendConfig) (audit.Backend, error) { + "noop": func(config *audit.BackendConfig, _ audit.HeaderFormatter) (audit.Backend, error) { return noop, nil }, }, @@ -490,7 +581,6 @@ func TestLogical_Audit_invalidWrappingToken(t *testing.T) { defer ln.Close() // Enable the audit backend - resp := testHttpPost(t, root, addr+"/v1/sys/audit/noop", map[string]interface{}{ "type": "noop", }) @@ -591,7 +681,7 @@ func TestLogical_AuditPort(t *testing.T) { "kv": kv.VersionedKVFactory, }, AuditBackends: map[string]audit.Factory{ - "file": auditFile.Factory, + "file": audit.NewFileBackend, }, } @@ -655,9 +745,12 @@ func TestLogical_AuditPort(t *testing.T) { decoder := json.NewDecoder(auditLogFile) - var auditRecord map[string]interface{} count := 0 - for decoder.Decode(&auditRecord) == nil { + for decoder.More() { + var auditRecord map[string]interface{} + err := decoder.Decode(&auditRecord) + require.NoError(t, err) + count += 1 // Skip the first line @@ -760,3 +853,204 @@ func TestLogical_ErrRelativePath(t *testing.T) { t.Errorf("expected response for write to include %q", logical.ErrRelativePath.Error()) } } + +func testBuiltinPluginMetadataAuditLog(t *testing.T, log map[string]interface{}, expectedMountClass string) { + t.Helper() + + if mountClass, ok := log["mount_class"].(string); !ok { + t.Fatalf("mount_class should be a string, not %T", log["mount_class"]) + } else if mountClass != expectedMountClass { + t.Fatalf("bad: mount_class should be %s, not %s", expectedMountClass, mountClass) + } + + // Requests have 'mount_running_version' but Responses have 'mount_running_plugin_version' + runningVersionRaw, runningVersionRawOK := log["mount_running_version"] + runningPluginVersionRaw, runningPluginVersionRawOK := log["mount_running_plugin_version"] + if !runningVersionRawOK && !runningPluginVersionRawOK { + t.Fatalf("mount_running_version/mount_running_plugin_version should be present") + } else if runningVersionRawOK { + if _, ok := runningVersionRaw.(string); !ok { + t.Fatalf("mount_running_version should be string, not %T", runningVersionRaw) + } + } else if _, ok := runningPluginVersionRaw.(string); !ok { + t.Fatalf("mount_running_plugin_version should be string, not %T", runningPluginVersionRaw) + } + + if _, ok := log["mount_running_sha256"].(string); ok { + t.Fatalf("mount_running_sha256 should be nil, not %T", log["mount_running_sha256"]) + } + + if mountIsExternalPlugin, ok := log["mount_is_external_plugin"].(bool); ok && mountIsExternalPlugin { + t.Fatalf("mount_is_external_plugin should be nil or false, not %T", log["mount_is_external_plugin"]) + } +} + +// 
TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth tests that we have plugin metadata of a builtin auth plugin +// in audit log when it is enabled +func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth(t *testing.T) { + coreConfig := &vault.CoreConfig{ + AuditBackends: map[string]audit.Factory{ + "file": audit.NewFileBackend, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + // Enable the audit backend + tempDir := t.TempDir() + auditLogFile, err := os.CreateTemp(tempDir, "") + if err != nil { + t.Fatal(err) + } + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + require.NoError(t, err) + + _, err = c.Logical().Write("auth/token/create", map[string]interface{}{ + "ttl": "10s", + }) + require.NoError(t, err) + + // Disable audit now we're done performing operations + err = c.Sys().DisableAudit("file") + require.NoError(t, err) + + // Check the audit trail on request and response + decoder := json.NewDecoder(auditLogFile) + for decoder.More() { + var auditRecord map[string]interface{} + err := decoder.Decode(&auditRecord) + require.NoError(t, err) + + if req, ok := auditRecord["request"]; ok { + auditRequest, ok := req.(map[string]interface{}) + require.True(t, ok) + + path, ok := auditRequest["path"].(string) + require.True(t, ok) + + if path != "auth/token/create" { + continue + } + + testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeCredential.String()) + } + + // Should never have a response without a corresponding request. 
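
Both metadata tests scan the audit trail with the same streaming idiom: a file audit device emits one JSON object per line, and json.Decoder's More() walks those top-level values until EOF without any manual line splitting. A minimal, self-contained sketch of that idiom, using invented sample records shaped like the ones asserted on here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for a file audit device's output: newline-delimited JSON,
	// one record per line. These sample records are invented.
	auditLog := strings.NewReader(`{"type":"request","request":{"path":"auth/token/create"}}
{"type":"response","request":{"path":"auth/token/create"},"response":{"mount_class":"auth"}}
`)

	decoder := json.NewDecoder(auditLog)
	// More() reports whether another top-level JSON value follows, so the
	// loop ends cleanly at EOF while a malformed record still surfaces as
	// a Decode error (the tests assert this with require.NoError).
	for decoder.More() {
		var record map[string]interface{}
		if err := decoder.Decode(&record); err != nil {
			panic(err)
		}
		if req, ok := record["request"].(map[string]interface{}); ok {
			fmt.Println("path:", req["path"])
		}
	}
}
```
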
+ if resp, ok := auditRecord["response"]; ok { + auditResponse, ok := resp.(map[string]interface{}) + require.True(t, ok) + + testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeCredential.String()) + } + } +} + +// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret tests that we have plugin metadata of a builtin secret plugin +// in audit log when it is enabled +func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + AuditBackends: map[string]audit.Factory{ + "file": audit.NewFileBackend, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + if err := c.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + // Enable the audit backend + tempDir := t.TempDir() + auditLogFile, err := os.CreateTemp(tempDir, "") + require.NoError(t, err) + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + require.NoError(t, err) + + { + writeData := map[string]interface{}{ + "data": map[string]interface{}{ + "bar": "a", + }, + } + corehelpers.RetryUntil(t, 10*time.Second, func() error { + resp, err := c.Logical().Write("kv/data/foo", writeData) + if err != nil { + t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) + } + return nil + }) + } + + // Disable audit now we're done performing operations + err = c.Sys().DisableAudit("file") + require.NoError(t, err) + + // Check the audit trail on request and response + decoder := json.NewDecoder(auditLogFile) + for decoder.More() { + var auditRecord map[string]interface{} + err := decoder.Decode(&auditRecord) + require.NoError(t, err) + + if req, ok := auditRecord["request"]; ok { + auditRequest, ok := req.(map[string]interface{}) + require.True(t, ok) + + path, ok := auditRequest["path"] + require.True(t, ok) + + if path != "kv/data/foo" { + continue + } + + testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeSecrets.String()) + } + + if resp, ok := auditRecord["response"]; ok { + auditResponse, ok := resp.(map[string]interface{}) + require.True(t, ok) + + testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeSecrets.String()) + } + } +} diff --git a/http/options.go b/http/options.go new file mode 100644 index 000000000000..b1200c018e72 --- /dev/null +++ b/http/options.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package http + +// ListenerConfigOption is how listenerConfigOptions are passed as arguments. +type ListenerConfigOption func(*listenerConfigOptions) error + +// listenerConfigOptions are used to represent configuration of listeners for http handlers. +type listenerConfigOptions struct { + withRedactionValue string + withRedactAddresses bool + withRedactClusterName bool + withRedactVersion bool +} + +// getDefaultOptions returns listenerConfigOptions with their default values. +func getDefaultOptions() listenerConfigOptions { + return listenerConfigOptions{ + withRedactionValue: "", // Redacted values will be set to an empty string by default. 
+ } +} + +// getOpts applies each supplied ListenerConfigOption and returns the fully configured listenerConfigOptions. +// Each ListenerConfigOption is applied in the order it appears in the argument list, so it is +// possible to supply the same ListenerConfigOption numerous times and the 'last write wins'. +func getOpts(opt ...ListenerConfigOption) (listenerConfigOptions, error) { + opts := getDefaultOptions() + for _, o := range opt { + if o == nil { + continue + } + if err := o(&opts); err != nil { + return listenerConfigOptions{}, err + } + } + return opts, nil +} + +// WithRedactionValue provides a ListenerConfigOption to represent the value used to redact +// values which require redaction. +func WithRedactionValue(r string) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactionValue = r + return nil + } +} + +// WithRedactAddresses provides a ListenerConfigOption to represent whether redaction of addresses is required. +func WithRedactAddresses(r bool) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactAddresses = r + return nil + } +} + +// WithRedactClusterName provides a ListenerConfigOption to represent whether redaction of cluster names is required. +func WithRedactClusterName(r bool) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactClusterName = r + return nil + } +} + +// WithRedactVersion provides a ListenerConfigOption to represent whether redaction of the version is required. +func WithRedactVersion(r bool) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactVersion = r + return nil + } +} diff --git a/http/options_test.go b/http/options_test.go new file mode 100644 index 000000000000..5d52a6e42dc8 --- /dev/null +++ b/http/options_test.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package http + +import ( + "net/http" + "strings" + "testing" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/version" + "github.com/stretchr/testify/require" +) + +// TestOptions_Default ensures that the default values are as expected. +func TestOptions_Default(t *testing.T) { + opts := getDefaultOptions() + require.NotNil(t, opts) + require.Equal(t, "", opts.withRedactionValue) +} + +// TestOptions_WithRedactionValue ensures that we set the correct value to use for +// redaction when required. +func TestOptions_WithRedactionValue(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + ExpectedValue string + IsErrorExpected bool + }{ + "empty": { + Value: "", + ExpectedValue: "", + IsErrorExpected: false, + }, + "whitespace": { + Value: "  ", + ExpectedValue: "  ", + IsErrorExpected: false, + }, + "value": { + Value: "*****", + ExpectedValue: "*****", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactionValue(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactionValue) + } + }) + } +} + +// TestOptions_WithRedactAddresses ensures that the option works as intended.
+func TestOptions_WithRedactAddresses(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactAddresses(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactAddresses) + }) + } +} + +// TestOptions_WithRedactClusterName ensures that the option works as intended. +func TestOptions_WithRedactClusterName(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactClusterName(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactClusterName) + }) + } +} + +// TestOptions_WithRedactVersion ensures that the option works as intended. +func TestOptions_WithRedactVersion(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactVersion(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactVersion) + }) + } +} + +// TestRedactVersionListener tests that the version will be redacted +// from e.g. sys/health and the OpenAPI response if `redact_version` +// is set on the listener. 
+func TestRedactVersionListener(t *testing.T) { + conf := &vault.CoreConfig{ + EnableUI: false, + EnableRaw: true, + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + + // Setup listener without redaction + ln, addr := TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + RedactVersion: false, + }, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + testRedactVersionEndpoints(t, addr, token, version.Version) + + // Setup listener with redaction + ln, addr = TestListener(t) + props.ListenerConfig.RedactVersion = true + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + testRedactVersionEndpoints(t, addr, token, "") +} + +// testRedactVersionEndpoints tests that the endpoints containing versions +// contain the expected version +func testRedactVersionEndpoints(t *testing.T, addr, token, expectedVersion string) { + client := cleanhttp.DefaultClient() + req, err := http.NewRequest("GET", addr+"/v1/auth/token?help=1", nil) + require.NoError(t, err) + + req.Header.Set(consts.AuthHeaderName, token) + resp, err := client.Do(req) + require.NoError(t, err) + + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + testResponseBody(t, resp, &actual) + + require.NotNil(t, actual["openapi"]) + openAPI, ok := actual["openapi"].(map[string]interface{}) + require.True(t, ok) + + require.NotNil(t, openAPI["info"]) + info, ok := openAPI["info"].(map[string]interface{}) + require.True(t, ok) + + require.NotNil(t, info["version"]) + version, ok := info["version"].(string) + require.True(t, ok) + require.Equal(t, expectedVersion, version) + + req, err = http.NewRequest("GET", addr+"/v1/sys/internal/specs/openapi", nil) + require.NoError(t, err) + + req.Header.Set(consts.AuthHeaderName, "") + resp, err = client.Do(req) + require.NoError(t, err) + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + require.NotNil(t, actual["info"]) + info, ok = actual["info"].(map[string]interface{}) + require.True(t, ok) + + require.NotNil(t, info["version"]) + version, ok = info["version"].(string) + require.True(t, ok) + require.Equal(t, expectedVersion, version) + + req, err = http.NewRequest("GET", addr+"/v1/sys/health", nil) + require.NoError(t, err) + + req.Header.Set(consts.AuthHeaderName, "") + resp, err = client.Do(req) + require.NoError(t, err) + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + require.NotNil(t, actual["version"]) + version, ok = actual["version"].(string) + require.True(t, ok) + + // sys/health is special and uses a different format from the OpenAPI endpoints: + // version.GetVersion().VersionNumber() instead of version.Version. + // We use a prefix check so the comparison works anyway. + // In practice, version.GetVersion().VersionNumber() will give something like 1.17.0-beta1 + // and version.Version gives something like 1.17.0 + require.Truef(t, strings.HasPrefix(version, expectedVersion), "version was not as expected, version=%s, expectedVersion=%s", + version, expectedVersion) +} diff --git a/http/plugin_test.go b/http/plugin_test.go index b0d85be6d173..8a1b35d98dbf 100644 --- a/http/plugin_test.go +++ b/http/plugin_test.go @@ -1,11 +1,12 @@ // Copyright (c) HashiCorp, Inc.
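
The new options.go is a standard functional-options API: exported With* constructors close over a private listenerConfigOptions struct, and getOpts applies them in argument order so the last write wins and nil options are skipped. A trimmed, self-contained re-implementation (two options instead of four) to show how a caller composes them; everything here mirrors the diff except the main function, which is illustrative:

```go
package main

import "fmt"

// Minimal re-implementation of the pattern from http/options.go.
type listenerConfigOptions struct {
	withRedactionValue  string
	withRedactAddresses bool
}

type ListenerConfigOption func(*listenerConfigOptions) error

func getOpts(opt ...ListenerConfigOption) (listenerConfigOptions, error) {
	opts := listenerConfigOptions{} // defaults: empty redaction value
	for _, o := range opt {
		if o == nil {
			continue
		}
		if err := o(&opts); err != nil {
			return listenerConfigOptions{}, err
		}
	}
	return opts, nil
}

func WithRedactionValue(r string) ListenerConfigOption {
	return func(o *listenerConfigOptions) error {
		o.withRedactionValue = r
		return nil
	}
}

func WithRedactAddresses(r bool) ListenerConfigOption {
	return func(o *listenerConfigOptions) error {
		o.withRedactAddresses = r
		return nil
	}
}

func main() {
	// Options apply in order, so the later WithRedactionValue overrides the
	// earlier one ('last write wins'), and nil options are skipped.
	opts, err := getOpts(
		WithRedactAddresses(true),
		WithRedactionValue("<redacted>"),
		nil,
		WithRedactionValue("*****"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts) // {withRedactionValue:***** withRedactAddresses:true}
}
```
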
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http import ( "encoding/json" - "io/ioutil" + "fmt" + "io" "os" "reflect" "sync" @@ -14,7 +15,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" bplugin "github.com/hashicorp/vault/builtin/plugin" - "github.com/hashicorp/vault/helper/benchhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" @@ -25,7 +26,8 @@ import ( "github.com/hashicorp/vault/vault" ) -func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) { +func getPluginClusterAndCore(t *testing.T, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) { + t.Helper() inm, err := inmem.NewTransactionalInmem(nil, logger) if err != nil { t.Fatal(err) @@ -35,27 +37,27 @@ func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluste t.Fatal(err) } + pluginDir := corehelpers.MakeTestPluginDir(t) coreConfig := &vault.CoreConfig{ Physical: inm, HAPhysical: inmha.(physical.HABackend), LogicalBackends: map[string]logical.Factory{ "plugin": bplugin.Factory, }, + PluginDirectory: pluginDir, } - cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), coreConfig, &vault.TestClusterOptions{ + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ HandlerFunc: Handler, - Logger: logger.Named("testclusteroptions"), }) cluster.Start() cores := cluster.Cores core := cores[0] - os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - - vault.TestWaitActive(benchhelpers.TBtoT(t), core.Core) - vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", []string{}, "") + vault.TestWaitActive(t, core.Core) + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", + []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}) // Mount the mock plugin err = core.Client.Sys().Mount("mock", &api.MountInput{ @@ -145,7 +147,7 @@ func TestPlugin_MockRawResponse(t *testing.T) { t.Fatal(err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { t.Fatal(err) } diff --git a/http/priority/priority.go b/http/priority/priority.go new file mode 100644 index 000000000000..b2731554823b --- /dev/null +++ b/http/priority/priority.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package priority + +import ( + "context" + "net/http" + "strconv" + + "github.com/hashicorp/vault/sdk/helper/parseutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // VaultAOPForceRejectHeaderName is the name of an HTTP header that is used primarily + // for testing (it's not documented publicly). If set to "true" in a request + // that is subject to any form of Adaptive Overload Protection, the request + // will be rejected as if there is an overload. This is useful for + // deterministically testing the error handling plumbing as there are many + // possible code paths that need to be tested. + VaultAOPForceRejectHeaderName = "X-Vault-AOP-Force-Reject" +) + +// Priorities are limited to 256 levels to keep the state space small making +// enforcement data structures much more efficient. 
+type AOPWritePriority uint8 + +const ( + // AlwaysDrop is intended for testing only and will cause the request to be + // rejected with a 503 even if the server is not overloaded. + AlwaysDrop AOPWritePriority = 0 + + // StandardHTTP is the default AOPWritePriority for HTTP requests. + StandardHTTP AOPWritePriority = 128 + + // NeverDrop is used to mark a request such that it will never be rejected. + // This is currently used as an administrative priority used for requests on + // paths which require sudo capabilities. + NeverDrop AOPWritePriority = 255 +) + +// String returns the string representation of the AOPWritePriority. +func (p AOPWritePriority) String() string { + return strconv.FormatUint(uint64(p), 8) +} + +// StringToAOPWritePriority converts a string to an AOPWritePriority. +func StringToAOPWritePriority(s string) AOPWritePriority { + // Just swallow the error and fall back to the standard priority + p, err := strconv.ParseUint(s, 8, 8) + if err != nil { + return StandardHTTP + } + return AOPWritePriority(p) +} + +// WrapRequestPriorityHandler provides special handling for headers with +// X-Vault-AOP-Force-Reject set to `true`. This is useful for testing status +// codes and return values related to Adaptive Overload Protection without +// overloading Vault. +func WrapRequestPriorityHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if raw := req.Header.Get(VaultAOPForceRejectHeaderName); raw != "" { + if fail, _ := parseutil.ParseBool(raw); fail { + // Make the request fail as if Vault was overloaded. We don't + // explicitly error out here, but rather attach some context + // indicating that the PID controller should perform a + // rejection. This allows us to test errors propagated from the + // WAL backend. + req = req.WithContext(ContextWithRequestPriority(req.Context(), AlwaysDrop)) + } + } + handler.ServeHTTP(w, req) + }) +} + +// ContextWithRequestPriority returns a new context derived from ctx with the +// given priority set. +func ContextWithRequestPriority(ctx context.Context, priority AOPWritePriority) context.Context { + if _, ok := ctx.Value(logical.CtxKeyInFlightRequestPriority{}).(AOPWritePriority); ok { + return ctx + } + + return context.WithValue(ctx, logical.CtxKeyInFlightRequestPriority{}, priority) +} diff --git a/http/sys_audit_test.go b/http/sys_audit_test.go index 2ec4ffc30c51..d620a291e775 100644 --- a/http/sys_audit_test.go +++ b/http/sys_audit_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -32,6 +32,7 @@ func TestSysAudit(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "noop/": map[string]interface{}{ "path": "noop/", @@ -83,6 +84,7 @@ func TestSysDisableAudit(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{}, } @@ -117,6 +119,7 @@ func TestSysAuditHash(t *testing.T) { "renewable": false, "lease_duration": json.Number("0"), "wrap_info": nil, + "mount_type": "system", "warnings": nil, "auth": nil, "data": map[string]interface{}{ diff --git a/http/sys_auth_test.go b/http/sys_auth_test.go index 3bd0a009dda8..fe6c4e27390f 100644 --- a/http/sys_auth_test.go +++ b/http/sys_auth_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
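
WrapRequestPriorityHandler only tags the request context; rejection happens downstream wherever overload protection reads the priority back out. A self-contained sketch of that round trip, substituting strconv.ParseBool for parseutil.ParseBool and a local context key for logical.CtxKeyInFlightRequestPriority so it runs without Vault's internals:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

type AOPWritePriority uint8

const (
	AlwaysDrop   AOPWritePriority = 0
	StandardHTTP AOPWritePriority = 128
)

type ctxKeyPriority struct{}

const forceRejectHeader = "X-Vault-AOP-Force-Reject"

// WrapRequestPriorityHandler tags requests carrying the force-reject header
// so that downstream overload protection treats them as droppable.
func WrapRequestPriorityHandler(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if raw := req.Header.Get(forceRejectHeader); raw != "" {
			if fail, _ := strconv.ParseBool(raw); fail {
				ctx := context.WithValue(req.Context(), ctxKeyPriority{}, AlwaysDrop)
				req = req.WithContext(ctx)
			}
		}
		handler.ServeHTTP(w, req)
	})
}

func main() {
	// Downstream handler: reject AlwaysDrop requests as if overloaded.
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if p, ok := r.Context().Value(ctxKeyPriority{}).(AOPWritePriority); ok && p == AlwaysDrop {
			http.Error(w, "overloaded", http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	})

	srv := httptest.NewServer(WrapRequestPriorityHandler(inner))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	req.Header.Set(forceRejectHeader, "true")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 503
}
```
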
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -31,6 +31,7 @@ func TestSysAuth(t *testing.T) { "renewable": false, "lease_duration": json.Number("0"), "wrap_info": nil, + "mount_type": "system", "warnings": nil, "auth": nil, "data": map[string]interface{}{ @@ -115,6 +116,7 @@ func TestSysEnableAuth(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -236,6 +238,7 @@ func TestSysDisableAuth(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "token/": map[string]interface{}{ "config": map[string]interface{}{ @@ -326,6 +329,7 @@ func TestSysTuneAuth_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -371,6 +375,7 @@ func TestSysTuneAuth_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -409,6 +414,7 @@ func TestSysTuneAuth_showUIMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -447,6 +453,7 @@ func TestSysTuneAuth_showUIMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -513,6 +520,7 @@ func TestSysRemountAuth(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "bar/": map[string]interface{}{ "description": "foo", diff --git a/http/sys_config_cors_test.go b/http/sys_config_cors_test.go index 2f4a29a49e06..3f69888f81b1 100644 --- a/http/sys_config_cors_test.go +++ b/http/sys_config_cors_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -59,6 +59,7 @@ func TestSysConfigCors(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "enabled": true, "allowed_origins": []interface{}{addr}, diff --git a/http/sys_config_state_test.go b/http/sys_config_state_test.go index 889121837e27..8081aaf642c9 100644 --- a/http/sys_config_state_test.go +++ b/http/sys_config_state_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
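
The long runs of one-line test changes above all add the same thing: a mount_type key in the response envelope, which deep.Equal comparisons must include or they fail. A minimal sketch of that comparison style with an invented payload, decoding through UseNumber so numeric fields come back as json.Number the way these expectations assume:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/go-test/deep"
)

func main() {
	// Invented sample of the response envelope; note the new mount_type key.
	body := `{"request_id":"id","lease_id":"","renewable":false,"lease_duration":0,` +
		`"data":{},"wrap_info":null,"warnings":null,"auth":null,"mount_type":"system"}`

	var actual map[string]interface{}
	dec := json.NewDecoder(strings.NewReader(body))
	dec.UseNumber() // numbers decode as json.Number, matching the tests' expectations
	if err := dec.Decode(&actual); err != nil {
		panic(err)
	}

	expected := map[string]interface{}{
		"request_id":     "id",
		"lease_id":       "",
		"renewable":      false,
		"lease_duration": json.Number("0"),
		"data":           map[string]interface{}{},
		"wrap_info":      nil,
		"warnings":       nil,
		"auth":           nil,
		"mount_type":     "system", // omit this and deep.Equal reports a diff
	}

	if diff := deep.Equal(actual, expected); len(diff) > 0 {
		fmt.Println("mismatch:", diff)
	} else {
		fmt.Println("match")
	}
}
```
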
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -9,70 +9,197 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/vault" ) func TestSysConfigState_Sanitized(t *testing.T) { - var resp *http.Response - - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) - - resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") - testResponseStatus(t, resp, 200) - - var actual map[string]interface{} - var expected map[string]interface{} - - configResp := map[string]interface{}{ - "api_addr": "", - "cache_size": json.Number("0"), - "cluster_addr": "", - "cluster_cipher_suites": "", - "cluster_name": "", - "default_lease_ttl": json.Number("0"), - "default_max_request_duration": json.Number("0"), - "disable_cache": false, - "disable_clustering": false, - "disable_indexing": false, - "disable_mlock": false, - "disable_performance_standby": false, - "disable_printable_check": false, - "disable_sealwrap": false, - "experiments": nil, - "raw_storage_endpoint": false, - "detect_deadlocks": "", - "introspection_endpoint": false, - "disable_sentinel_trace": false, - "enable_ui": false, - "log_format": "", - "log_level": "", - "max_lease_ttl": json.Number("0"), - "pid_file": "", - "plugin_directory": "", - "plugin_file_uid": json.Number("0"), - "plugin_file_permissions": json.Number("0"), - "enable_response_header_hostname": false, - "enable_response_header_raft_node_id": false, - "log_requests_level": "", + cases := []struct { + name string + storageConfig *server.Storage + haStorageConfig *server.Storage + expectedStorageOutput map[string]interface{} + expectedHAStorageOutput map[string]interface{} + }{ + { + name: "raft storage", + storageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, no HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, raft HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": 
"http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + }, } - expected = map[string]interface{}{ - "lease_id": "", - "renewable": false, - "lease_duration": json.Number("0"), - "wrap_info": nil, - "warnings": nil, - "auth": nil, - "data": configResp, - } + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var resp *http.Response + confRaw := &server.Config{ + Storage: tc.storageConfig, + HAStorage: tc.haStorageConfig, + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1", + }, + }, + }, + } + + conf := &vault.CoreConfig{ + RawConfig: confRaw, + } + + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + var expected map[string]interface{} + + configResp := map[string]interface{}{ + "api_addr": "", + "cache_size": json.Number("0"), + "cluster_addr": "", + "cluster_cipher_suites": "", + "cluster_name": "", + "default_lease_ttl": json.Number("0"), + "default_max_request_duration": json.Number("0"), + "disable_cache": false, + "disable_clustering": false, + "disable_indexing": false, + "disable_mlock": false, + "disable_performance_standby": false, + "disable_printable_check": false, + "disable_sealwrap": false, + "experiments": nil, + "raw_storage_endpoint": false, + "detect_deadlocks": "", + "introspection_endpoint": false, + "disable_sentinel_trace": false, + "enable_ui": false, + "log_format": "", + "log_level": "", + "max_lease_ttl": json.Number("0"), + "pid_file": "", + "plugin_directory": "", + "plugin_tmpdir": "", + "plugin_file_uid": json.Number("0"), + "plugin_file_permissions": json.Number("0"), + "enable_response_header_hostname": false, + "enable_response_header_raft_node_id": false, + "log_requests_level": "", + "listeners": []interface{}{ + map[string]interface{}{ + "config": nil, + "type": "tcp", + }, + }, + "storage": tc.expectedStorageOutput, + "administrative_namespace_path": "", + "imprecise_lease_role_tracking": false, + } + + if tc.expectedHAStorageOutput != nil { + configResp["ha_storage"] = tc.expectedHAStorageOutput + } + + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": configResp, + "mount_type": "system", + } - testResponseBody(t, resp, &actual) - expected["request_id"] = actual["request_id"] + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] - if diff := deep.Equal(actual, expected); len(diff) > 0 { - t.Fatalf("bad mismatch response body: diff: %v", diff) + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad mismatch response body: diff: %v", diff) + } + }) } } diff --git a/http/sys_feature_flags.go b/http/sys_feature_flags.go index 9f654b7febda..9e7244da750b 100644 --- a/http/sys_feature_flags.go +++ b/http/sys_feature_flags.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_generate_root.go b/http/sys_generate_root.go index 7f953e4d449a..ffe0c14ec154 100644 --- a/http/sys_generate_root.go +++ b/http/sys_generate_root.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_generate_root_test.go b/http/sys_generate_root_test.go index dbd7796315a6..2b00926f67cf 100644 --- a/http/sys_generate_root_test.go +++ b/http/sys_generate_root_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -231,7 +231,7 @@ func testCoreUnsealedWithAudit(t *testing.T, records **[][]byte) (*vault.Core, [ conf := &vault.CoreConfig{ BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), AuditBackends: map[string]audit.Factory{ - "noop": corehelpers.NoopAuditFactory(records), + "noop": audit.NoopAuditFactory(records), }, } core, keys, token := vault.TestCoreUnsealedWithConfig(t, conf) diff --git a/http/sys_health.go b/http/sys_health.go index b3f29d4dd5e6..0ed428d3d8ce 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -12,16 +12,17 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/version" ) -func handleSysHealth(core *vault.Core) http.Handler { +func handleSysHealth(core *vault.Core, opt ...ListenerConfigOption) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.Method { case "GET": - handleSysHealthGet(core, w, r) + handleSysHealthGet(core, w, r, opt...) case "HEAD": handleSysHealthHead(core, w, r) default: @@ -43,7 +44,7 @@ func fetchStatusCode(r *http.Request, field string) (int, bool, bool) { return statusCode, false, true } -func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { +func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request, opt ...ListenerConfigOption) { code, body, err := getSysHealth(core, r) if err != nil { core.Logger().Error("error checking health", "error", err) @@ -56,6 +57,29 @@ func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request return } + var tokenPresent bool + token := r.Header.Get(consts.AuthHeaderName) + + if token != "" { + // We don't care about the error, we just want to know if the token exists + lock := core.HALock() + lock.Lock() + tokenEntry, err := core.LookupToken(r.Context(), token) + lock.Unlock() + tokenPresent = err == nil && tokenEntry != nil + } + opts, _ := getOpts(opt...) 
+ + if !tokenPresent { + if opts.withRedactVersion { + body.Version = opts.withRedactionValue + } + + if opts.withRedactClusterName { + body.ClusterName = opts.withRedactionValue + } + } + w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) @@ -194,11 +218,17 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro ReplicationDRMode: replicationState.GetDRString(), ServerTimeUTC: time.Now().UTC().Unix(), Version: version.GetVersion().VersionNumber(), + Enterprise: constants.IsEnterprise, ClusterName: clusterName, ClusterID: clusterID, + ClockSkewMillis: core.ActiveNodeClockSkewMillis(), + EchoDurationMillis: core.EchoDuration().Milliseconds(), + } + if standby { + body.ReplicationPrimaryCanaryAgeMillis = core.GetReplicationLagMillisIgnoreErrs() } - licenseState, err := vault.LicenseSummary(core) + licenseState, err := core.EntGetLicenseState() if err != nil { return http.StatusInternalServerError, nil, err } @@ -214,7 +244,7 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro } if init && !sealed && !standby { - body.LastWAL = vault.LastWAL(core) + body.LastWAL = core.EntLastWAL() } return code, body, nil @@ -227,16 +257,20 @@ type HealthResponseLicense struct { } type HealthResponse struct { - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - Standby bool `json:"standby"` - PerformanceStandby bool `json:"performance_standby"` - ReplicationPerformanceMode string `json:"replication_performance_mode"` - ReplicationDRMode string `json:"replication_dr_mode"` - ServerTimeUTC int64 `json:"server_time_utc"` - Version string `json:"version"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - LastWAL uint64 `json:"last_wal,omitempty"` - License *HealthResponseLicense `json:"license,omitempty"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + Enterprise bool `json:"enterprise"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` + License *HealthResponseLicense `json:"license,omitempty"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` + ReplicationPrimaryCanaryAgeMillis int64 `json:"replication_primary_canary_age_ms"` } diff --git a/http/sys_health_test.go b/http/sys_health_test.go index 9761ec16c98c..dcc3473b0636 100644 --- a/http/sys_health_test.go +++ b/http/sys_health_test.go @@ -1,15 +1,18 @@ // Copyright (c) HashiCorp, Inc. 
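
The redaction branch added to handleSysHealthGet is easy to reason about in isolation: redaction only applies when no valid token accompanied the request, each field is gated by its own listener option, and the omitempty tag on cluster_name means the default empty redaction value removes that key entirely. A self-contained sketch of that logic with a trimmed stand-in for HealthResponse:

```go
package main

import (
	"encoding/json"
	"os"
)

// healthResponse is a trimmed stand-in for http.HealthResponse; only the
// fields the redaction path touches are kept.
type healthResponse struct {
	Version     string `json:"version"`
	ClusterName string `json:"cluster_name,omitempty"`
	Sealed      bool   `json:"sealed"`
}

// redactHealth mirrors the handler's logic: skip redaction entirely for
// authenticated callers, otherwise overwrite each field whose listener
// option requests it.
func redactHealth(body *healthResponse, tokenPresent, redactVersion, redactClusterName bool, redactionValue string) {
	if tokenPresent {
		return
	}
	if redactVersion {
		body.Version = redactionValue
	}
	if redactClusterName {
		body.ClusterName = redactionValue
	}
}

func main() {
	body := &healthResponse{Version: "1.15.0", ClusterName: "vault-cluster-abc", Sealed: false}

	// Unauthenticated caller with redact_version and redact_cluster_name set,
	// using the default redaction value of "" (so cluster_name drops out via omitempty).
	redactHealth(body, false, true, true, "")

	json.NewEncoder(os.Stdout).Encode(body) // {"version":"","sealed":false}
}
```
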
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http import ( - "io/ioutil" + "io" "net/http" "net/url" - "reflect" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" ) @@ -19,71 +22,54 @@ func TestSysHealth_get(t *testing.T) { ln, addr := TestServer(t, core) defer ln.Close() - resp, err := http.Get(addr + "/v1/sys/health") + // Test without the client first since we want to verify the response code + raw, err := http.Get(addr + "/v1/sys/health") if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 501) - var actual map[string]interface{} - expected := map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": false, - "sealed": true, - "standby": true, - "performance_standby": false, + // Test with the client because it's a bit easier to work with structs + config := api.DefaultConfig() + config.Address = addr + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) } - testResponseStatus(t, resp, 501) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] + + resp, err := client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + + expected := &api.HealthResponse{ + Enterprise: constants.IsEnterprise, + Initialized: false, + Sealed: true, + Standby: true, + PerformanceStandby: false, + ReplicationPerformanceMode: consts.ReplicationUnknown.GetPerformanceString(), + ReplicationDRMode: consts.ReplicationUnknown.GetDRString(), } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + ignore := cmpopts.IgnoreFields(*expected, "ClusterName", "ClusterID", "ServerTimeUTC", "Version") + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } keys, _ := vault.TestCoreInit(t, core) - resp, err = http.Get(addr + "/v1/sys/health") + raw, err = http.Get(addr + "/v1/sys/health") if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 503) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": true, - "sealed": true, - "standby": true, - "performance_standby": false, - } - testResponseStatus(t, resp, 503) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - 
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Initialized = true + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } for _, key := range keys { @@ -91,37 +77,22 @@ func TestSysHealth_get(t *testing.T) { t.Fatalf("unseal err: %s", err) } } - resp, err = http.Get(addr + "/v1/sys/health") + raw, err = http.Get(addr + "/v1/sys/health") if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 200) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationPerformanceDisabled.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationDRDisabled.GetDRString(), - "initialized": true, - "sealed": false, - "standby": false, - "performance_standby": false, - } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Sealed = false + expected.Standby = false + expected.ReplicationPerformanceMode = consts.ReplicationPerformanceDisabled.GetPerformanceString() + expected.ReplicationDRMode = consts.ReplicationDRDisabled.GetDRString() + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } } @@ -134,73 +105,53 @@ func TestSysHealth_customcodes(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - resp, err := http.Get(queryurl.String()) + raw, err := http.Get(queryurl.String()) if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 581) - var actual map[string]interface{} - expected := map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": false, - "sealed": true, - "standby": true, - "performance_standby": false, + // Test with the client because it's a bit easier to work with structs + config := api.DefaultConfig() + config.Address = addr + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) } - testResponseStatus(t, resp, 581) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] + resp, err := client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + + expected := &api.HealthResponse{ + Enterprise: constants.IsEnterprise, + Initialized: false, + Sealed: true, + Standby: true, + PerformanceStandby: false, + ReplicationPerformanceMode: consts.ReplicationUnknown.GetPerformanceString(), + ReplicationDRMode: consts.ReplicationUnknown.GetDRString(), } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + ignore := 
cmpopts.IgnoreFields(*expected, "ClusterName", "ClusterID", "ServerTimeUTC", "Version") + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } keys, _ := vault.TestCoreInit(t, core) - resp, err = http.Get(queryurl.String()) + raw, err = http.Get(queryurl.String()) if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 523) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": true, - "sealed": true, - "standby": true, - "performance_standby": false, - } - testResponseStatus(t, resp, 523) - testResponseBody(t, resp, &actual) - - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Initialized = true + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } for _, key := range keys { @@ -208,37 +159,22 @@ func TestSysHealth_customcodes(t *testing.T) { t.Fatalf("unseal err: %s", err) } } - resp, err = http.Get(queryurl.String()) + raw, err = http.Get(queryurl.String()) if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 202) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationPerformanceDisabled.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationDRDisabled.GetDRString(), - "initialized": true, - "sealed": false, - "standby": false, - "performance_standby": false, - } - testResponseStatus(t, resp, 202) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Sealed = false + expected.Standby = false + expected.ReplicationPerformanceMode = consts.ReplicationPerformanceDisabled.GetPerformanceString() + expected.ReplicationDRMode = consts.ReplicationDRDisabled.GetDRString() + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } } @@ -270,7 +206,7 @@ func TestSysHealth_head(t *testing.T) { t.Fatalf("HEAD %v expected code %d, got %d.", queryurl, tt.code, resp.StatusCode) } - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("err on %v: %s", queryurl, err) } diff --git a/http/sys_hostinfo_test.go b/http/sys_hostinfo_test.go index 756841e724f7..2df641ea8403 100644 --- a/http/sys_hostinfo_test.go +++ b/http/sys_hostinfo_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
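
The sys_health test rewrite swaps hand-maintained expected maps for typed api.HealthResponse values compared with cmp.Diff, using cmpopts.IgnoreFields to skip nondeterministic fields instead of copying them from actual into expected. A minimal sketch of that idiom with an invented stand-in struct:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// HealthResponse is a stand-in with just enough fields to show the idiom.
type HealthResponse struct {
	Initialized   bool
	Sealed        bool
	Version       string
	ServerTimeUTC int64
}

func main() {
	got := &HealthResponse{Initialized: true, Sealed: true, Version: "1.15.0", ServerTimeUTC: 1700000000}
	want := &HealthResponse{Initialized: true, Sealed: true}

	// IgnoreFields skips the nondeterministic fields, replacing the old
	// pattern of hand-copying them from the actual response into expected.
	ignore := cmpopts.IgnoreFields(*want, "Version", "ServerTimeUTC")
	if diff := cmp.Diff(got, want, ignore); diff != "" {
		fmt.Println("mismatch (-got +want):", diff)
	} else {
		fmt.Println("match")
	}
}
```
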
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_in_flight_requests.go b/http/sys_in_flight_requests.go index bdf3ebaf9d20..a31ae2ffc2f8 100644 --- a/http/sys_in_flight_requests.go +++ b/http/sys_in_flight_requests.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_in_flight_requests_test.go b/http/sys_in_flight_requests_test.go index 93c92c539835..880a9ad61560 100644 --- a/http/sys_in_flight_requests_test.go +++ b/http/sys_in_flight_requests_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_init.go b/http/sys_init.go index 905916bef628..fee10d972a44 100644 --- a/http/sys_init.go +++ b/http/sys_init.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -167,11 +167,11 @@ func validateInitParameters(core *vault.Core, req InitRequest) error { switch core.SealAccess().RecoveryKeySupported() { case true: if len(barrierFlags) > 0 { - return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(barrierFlags, ","), core.SealAccess().BarrierType()) + return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(barrierFlags, ","), core.SealAccess().BarrierSealConfigType()) } default: if len(recoveryFlags) > 0 { - return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(recoveryFlags, ","), core.SealAccess().BarrierType()) + return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(recoveryFlags, ","), core.SealAccess().BarrierSealConfigType()) } } diff --git a/http/sys_init_test.go b/http/sys_init_test.go index 620db9d733ff..e9957f9a8f83 100644 --- a/http/sys_init_test.go +++ b/http/sys_init_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -10,9 +10,8 @@ import ( "strconv" "testing" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/builtin/logical/transit" - "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" @@ -153,12 +152,8 @@ func TestSysInit_Put_ValidateParams(t *testing.T) { } func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { - testSeal := seal.NewTestSeal(nil) - autoSeal, err := vault.NewAutoSeal(testSeal) - if err != nil { - t.Fatal(err) - } - autoSeal.SetType("transit") + testSeal, _ := seal.NewTestSeal(&seal.TestSealOpts{Name: "transit"}) + autoSeal := vault.NewAutoSeal(testSeal) // Create the transit server. 
conf := &vault.CoreConfig{ @@ -170,7 +165,7 @@ func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { opts := &vault.TestClusterOptions{ NumCores: 1, HandlerFunc: Handler, - Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(0)), + Logger: corehelpers.NewTestLogger(t).Named("transit-seal" + strconv.Itoa(0)), } cluster := vault.NewTestCluster(t, conf, opts) cluster.Start() @@ -191,7 +186,8 @@ func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { testResponseStatus(t, resp, http.StatusBadRequest) body := map[string][]string{} testResponseBody(t, resp, &body) - if body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type transit" { + if body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type transit" && + body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type test-auto" { t.Fatal(body) } } diff --git a/http/sys_internal_test.go b/http/sys_internal_test.go index 11d9376248c5..0be213672acd 100644 --- a/http/sys_internal_test.go +++ b/http/sys_internal_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -33,6 +33,7 @@ func TestSysInternal_UIMounts(t *testing.T) { "auth": map[string]interface{}{}, "secret": map[string]interface{}{}, }, + "mount_type": "", } testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] @@ -62,6 +63,7 @@ func TestSysInternal_UIMounts(t *testing.T) { "auth": nil, "lease_id": "", "renewable": false, + "mount_type": "", "lease_duration": json.Number("0"), "data": map[string]interface{}{ "secret": map[string]interface{}{ diff --git a/http/sys_leader.go b/http/sys_leader.go index 6b39c4401af3..b6e0f55e9325 100644 --- a/http/sys_leader.go +++ b/http/sys_leader.go @@ -1,32 +1,52 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http import ( "net/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) // This endpoint is needed to answer queries before Vault unseals // or becomes the leader. -func handleSysLeader(core *vault.Core) http.Handler { +func handleSysLeader(core *vault.Core, opt ...ListenerConfigOption) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.Method { case "GET": - handleSysLeaderGet(core, w, r) + handleSysLeaderGet(core, w, r, opt...) 
default: respondError(w, http.StatusMethodNotAllowed, nil) } }) } -func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { - resp, err := core.GetLeaderStatus() +func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request, opt ...ListenerConfigOption) { + var tokenPresent bool + token := r.Header.Get(consts.AuthHeaderName) + ctx := r.Context() + + if token != "" { + // We don't care about the error, we just want to know if token exists + lock := core.HALock() + lock.Lock() + tokenEntry, err := core.LookupToken(ctx, token) + lock.Unlock() + tokenPresent = err == nil && tokenEntry != nil + } + + if tokenPresent { + ctx = logical.CreateContextRedactionSettings(r.Context(), false, false, false) + } + + resp, err := core.GetLeaderStatus(ctx) if err != nil { respondError(w, http.StatusInternalServerError, err) return } + respondOk(w, resp) } diff --git a/http/sys_leader_test.go b/http/sys_leader_test.go index 3292b7f2407b..e495e118703a 100644 --- a/http/sys_leader_test.go +++ b/http/sys_leader_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_lease_test.go b/http/sys_lease_test.go index 6b069ca37ced..2b1025b06ad9 100644 --- a/http/sys_lease_test.go +++ b/http/sys_lease_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -7,11 +7,17 @@ import ( "testing" "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) func TestSysRenew(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, coreConfig) ln, addr := TestServer(t, core) defer ln.Close() TestServerAuth(t, addr, token) diff --git a/http/sys_metrics.go b/http/sys_metrics.go index 2bb819b34fd8..27cb45f2560d 100644 --- a/http/sys_metrics.go +++ b/http/sys_metrics.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_metrics_test.go b/http/sys_metrics_test.go index 167347b4f700..9500bff031b5 100644 --- a/http/sys_metrics_test.go +++ b/http/sys_metrics_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -7,10 +7,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/armon/go-metrics" "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/vault" ) diff --git a/http/sys_monitor_test.go b/http/sys_monitor_test.go index 5d428c419a9c..9eea00f867f2 100644 --- a/http/sys_monitor_test.go +++ b/http/sys_monitor_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
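
handleSysLeaderGet now follows the same gate as the health handler: look up the caller's token purely for presence, and only then build a context whose redaction settings are all disabled. A hedged, self-contained sketch of the shape of that flow; lookupToken, the context key, and the header name here stand in for core.LookupToken, logical.CreateContextRedactionSettings, and consts.AuthHeaderName, which this sketch does not import:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKeyRedactionSettings struct{}

type redactionSettings struct {
	RedactVersion, RedactAddresses, RedactClusterName bool
}

// lookupToken is a hypothetical stand-in for core.LookupToken.
func lookupToken(ctx context.Context, token string) bool {
	return token == "valid-token" // placeholder check for the sketch
}

// contextForLeaderRequest mirrors the handler's flow: errors from lookup are
// ignored because only the token's presence matters for redaction.
func contextForLeaderRequest(r *http.Request) context.Context {
	ctx := r.Context()

	var tokenPresent bool
	if token := r.Header.Get("X-Vault-Token"); token != "" {
		tokenPresent = lookupToken(ctx, token)
	}

	if tokenPresent {
		// Authenticated callers see unredacted leader status.
		return context.WithValue(ctx, ctxKeyRedactionSettings{}, redactionSettings{})
	}
	return ctx
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:8200/v1/sys/leader", nil)
	req.Header.Set("X-Vault-Token", "valid-token")

	ctx := contextForLeaderRequest(req)
	settings, ok := ctx.Value(ctxKeyRedactionSettings{}).(redactionSettings)
	fmt.Println(ok, settings) // true {false false false}
}
```
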
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -97,7 +97,8 @@ func TestSysMonitorStreamingLogs(t *testing.T) { } jsonLog := &jsonlog{} - timeCh := time.After(5 * time.Second) + // default timeout is 90 seconds + timeCh := time.After(120 * time.Second) for { select { @@ -119,7 +120,7 @@ func TestSysMonitorStreamingLogs(t *testing.T) { return } case <-timeCh: - t.Fatal("Failed to get a DEBUG message after 5 seconds") + t.Fatal("Failed to get a DEBUG message after 120 seconds") } } }) diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go index 384f5bf810bf..9db7fed3749f 100644 --- a/http/sys_mount_test.go +++ b/http/sys_mount_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) @@ -34,6 +35,7 @@ func TestSysMounts(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -215,6 +217,7 @@ func TestSysMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -529,6 +532,7 @@ func TestSysRemount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "bar/": map[string]interface{}{ "description": "foo", @@ -742,6 +746,7 @@ func TestSysUnmount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -930,6 +935,7 @@ func TestSysTuneMount_Options(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foo", "default_lease_ttl": json.Number("2764800"), @@ -968,6 +974,7 @@ func TestSysTuneMount_Options(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foo", "default_lease_ttl": json.Number("2764800"), @@ -988,7 +995,12 @@ func TestSysTuneMount_Options(t *testing.T) { } func TestSysTuneMount(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, coreConfig) ln, addr := TestServer(t, core) defer ln.Close() TestServerAuth(t, addr, token) @@ -1009,6 +1021,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -1248,6 +1261,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -1446,6 +1460,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foo", 
"default_lease_ttl": json.Number("259196400"), @@ -1484,6 +1499,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foobar", "default_lease_ttl": json.Number("40"), @@ -1581,6 +1597,7 @@ func TestSysTuneMount_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1627,6 +1644,7 @@ func TestSysTuneMount_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1665,6 +1683,7 @@ func TestSysTuneMount_listingVisibility(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1702,6 +1721,7 @@ func TestSysTuneMount_listingVisibility(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1748,6 +1768,7 @@ func TestSysTuneMount_passthroughRequestHeaders(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1787,6 +1808,7 @@ func TestSysTuneMount_passthroughRequestHeaders(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1831,6 +1853,7 @@ func TestSysTuneMount_allowedManagedKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1870,6 +1893,7 @@ func TestSysTuneMount_allowedManagedKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), diff --git a/http/sys_mounts_test.go b/http/sys_mounts_test.go index 5f2218514ec0..4f597f474a26 100644 --- a/http/sys_mounts_test.go +++ b/http/sys_mounts_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_policy_test.go b/http/sys_policy_test.go index 1ab1e85bb7ac..bf797cede8c3 100644 --- a/http/sys_policy_test.go +++ b/http/sys_policy_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -27,6 +27,7 @@ func TestSysPolicies(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "policies": []interface{}{"default", "root"}, "keys": []interface{}{"default", "root"}, @@ -58,6 +59,7 @@ func TestSysReadPolicy(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "name": "root", "rules": "", @@ -94,6 +96,7 @@ func TestSysWritePolicy(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "policies": []interface{}{"default", "foo", "root"}, "keys": []interface{}{"default", "foo", "root"}, @@ -143,6 +146,7 @@ func TestSysDeletePolicy(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "policies": []interface{}{"default", "root"}, "keys": []interface{}{"default", "root"}, diff --git a/http/sys_raft.go b/http/sys_raft.go index 1e00ebe5d90c..e209f0a6f4ea 100644 --- a/http/sys_raft.go +++ b/http/sys_raft.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_rekey.go b/http/sys_rekey.go index c05dc8397653..a43da4f1dfe4 100644 --- a/http/sys_rekey.go +++ b/http/sys_rekey.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -20,7 +20,7 @@ func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { standby, _ := core.Standby() if standby { - respondStandby(core, w, r.URL) + respondStandby(core, w, r) return } @@ -155,7 +155,7 @@ func handleSysRekeyUpdate(core *vault.Core, recovery bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { standby, _ := core.Standby() if standby { - respondStandby(core, w, r.URL) + respondStandby(core, w, r) return } @@ -228,7 +228,7 @@ func handleSysRekeyVerify(core *vault.Core, recovery bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { standby, _ := core.Standby() if standby { - respondStandby(core, w, r.URL) + respondStandby(core, w, r) return } diff --git a/http/sys_rekey_test.go b/http/sys_rekey_test.go index eaef4dd7a1f3..e39664447693 100644 --- a/http/sys_rekey_test.go +++ b/http/sys_rekey_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http diff --git a/http/sys_rotate_test.go b/http/sys_rotate_test.go index dfc28a257c55..6be03ce3759a 100644 --- a/http/sys_rotate_test.go +++ b/http/sys_rotate_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -30,6 +30,7 @@ func TestSysRotate(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "term": json.Number("2"), }, diff --git a/http/sys_seal.go b/http/sys_seal.go index 5d32828e70f9..4852d57d5e4c 100644 --- a/http/sys_seal.go +++ b/http/sys_seal.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package http

@@ -18,7 +18,7 @@ import (

 func handleSysSeal(core *vault.Core) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		req, _, statusCode, err := buildLogicalRequest(core, w, r)
+		req, _, statusCode, err := buildLogicalRequest(core, w, r, "")
 		if err != nil || statusCode != 0 {
 			respondError(w, statusCode, err)
 			return
@@ -48,7 +48,7 @@ func handleSysSeal(core *vault.Core) http.Handler {

 func handleSysStepDown(core *vault.Core) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		req, _, statusCode, err := buildLogicalRequest(core, w, r)
+		req, _, statusCode, err := buildLogicalRequest(core, w, r, "")
 		if err != nil || statusCode != 0 {
 			respondError(w, statusCode, err)
 			return
@@ -152,25 +152,62 @@ func handleSysUnseal(core *vault.Core) http.Handler {
 	})
 }

-func handleSysSealStatus(core *vault.Core) http.Handler {
+func handleSysSealStatus(core *vault.Core, opt ...ListenerConfigOption) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if r.Method != "GET" {
 			respondError(w, http.StatusMethodNotAllowed, nil)
 			return
 		}

-		handleSysSealStatusRaw(core, w, r)
+		handleSysSealStatusRaw(core, w, r, opt...)
 	})
 }

-func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-	status, err := core.GetSealStatus(ctx)
+func handleSysSealBackendStatus(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != "GET" {
+			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
+		}
+
+		handleSysSealBackendStatusRaw(core, w, r)
+	})
+}
+
+func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request, opt ...ListenerConfigOption) {
+	ctx := r.Context()
+
+	var tokenPresent bool
+	token := r.Header.Get(consts.AuthHeaderName)
+	if token != "" {
+		// We don't care about the error, we just want to know if the token exists
+		lock := core.HALock()
+		lock.Lock()
+		tokenEntry, err := core.LookupToken(ctx, token)
+		lock.Unlock()
+		tokenPresent = err == nil && tokenEntry != nil
+	}
+
+	// If there is no valid token then we will redact the specified values
+	if tokenPresent {
+		ctx = logical.CreateContextRedactionSettings(ctx, false, false, false)
+	}
+
+	status, err := core.GetSealStatus(ctx, true)
 	if err != nil {
 		respondError(w, http.StatusInternalServerError, err)
 		return
 	}
+
 	respondOk(w, status)
+}
+
+func handleSysSealBackendStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+	status, err := core.GetSealBackendStatus(ctx)
+	if err != nil {
+		respondError(w, http.StatusInternalServerError, err)
+		return
+	}
 	respondOk(w, status)
 }
diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go
index cb8fc8bb746c..694dc971c7ea 100644
--- a/http/sys_seal_test.go
+++ b/http/sys_seal_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package http @@ -15,11 +15,17 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" + "github.com/stretchr/testify/assert" ) func TestSysSealStatus(t *testing.T) { @@ -67,80 +73,6 @@ func TestSysSealStatus(t *testing.T) { } } -func TestSysSealStatus_Warnings(t *testing.T) { - core := vault.TestCore(t) - vault.TestCoreInit(t, core) - ln, addr := TestServer(t, core) - defer ln.Close() - - // Manually configure DisableSSCTokens to be true - core.GetCoreConfigInternal().DisableSSCTokens = true - - resp, err := http.Get(addr + "/v1/sys/seal-status") - if err != nil { - t.Fatalf("err: %s", err) - } - - var actual map[string]interface{} - expected := map[string]interface{}{ - "sealed": true, - "t": json.Number("3"), - "n": json.Number("3"), - "progress": json.Number("0"), - "nonce": "", - "type": "shamir", - "recovery_seal": false, - "initialized": true, - "migration": false, - "build_date": version.BuildDate, - } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) - if actual["version"] == nil { - t.Fatalf("expected version information") - } - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] - } - actualWarnings := actual["warnings"] - if actualWarnings == nil { - t.Fatalf("expected warnings about SSCToken disabling") - } - - actualWarningsArray, ok := actualWarnings.([]interface{}) - if !ok { - t.Fatalf("expected warnings about SSCToken disabling were not in the right format") - } - if len(actualWarningsArray) != 1 { - t.Fatalf("too many warnings were given") - } - actualWarning, ok := actualWarningsArray[0].(string) - if !ok { - t.Fatalf("expected warning about SSCToken disabling was not in the right format") - } - - expectedWarning := "Server Side Consistent Tokens are disabled, due to the " + - "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS environment variable being set. " + - "It is not recommended to run Vault for an extended period of time with this configuration." - if actualWarning != expectedWarning { - t.Fatalf("actual warning was not as expected. 
Expected %s, but got %s", expectedWarning, actualWarning)
-	}
-
-	expected["warnings"] = actual["warnings"]
-
-	if diff := deep.Equal(actual, expected); diff != nil {
-		t.Fatal(diff)
-	}
-}
-
 func TestSysSealStatus_uninit(t *testing.T) {
 	core := vault.TestCore(t)
 	ln, addr := TestServer(t, core)
@@ -630,3 +562,64 @@ func TestSysStepDown(t *testing.T) {
 	resp := testHttpPut(t, token, addr+"/v1/sys/step-down", nil)
 	testResponseStatus(t, resp, 204)
 }
+
+// TestSysSealStatusRedaction tests that the response from a
+// request to sys/seal-status is redacted only if no valid token
+// is provided with the request
+func TestSysSealStatusRedaction(t *testing.T) {
+	conf := &vault.CoreConfig{
+		EnableUI:        false,
+		EnableRaw:       true,
+		BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(),
+		AuditBackends: map[string]audit.Factory{
+			"file": audit.NewFileBackend,
+		},
+	}
+	core, _, token := vault.TestCoreUnsealedWithConfig(t, conf)
+
+	// Setup new custom listener
+	ln, addr := TestListener(t)
+	props := &vault.HandlerProperties{
+		Core: core,
+		ListenerConfig: &configutil.Listener{
+			RedactVersion: true,
+		},
+	}
+	TestServerWithListenerAndProperties(t, ln, addr, core, props)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	client := cleanhttp.DefaultClient()
+
+	// Check seal-status
+	req, err := http.NewRequest("GET", addr+"/v1/sys/seal-status", nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	req.Header.Set(consts.AuthHeaderName, token)
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	testResponseStatus(t, resp, 200)
+
+	// Verify that version exists when provided a valid token
+	var actual map[string]interface{}
+	testResponseBody(t, resp, &actual)
+	assert.NotEmpty(t, actual["version"])
+
+	// Verify that version is redacted when no token is provided
+	req, err = http.NewRequest("GET", addr+"/v1/sys/seal-status", nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	req.Header.Set(consts.AuthHeaderName, "")
+	resp, err = client.Do(req)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	testResponseStatus(t, resp, 200)
+	testResponseBody(t, resp, &actual)
+	assert.Empty(t, actual["version"])
+}
diff --git a/http/sys_wrapping_test.go b/http/sys_wrapping_test.go
index c991bd230480..d059e5830f4b 100644
--- a/http/sys_wrapping_test.go
+++ b/http/sys_wrapping_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package http
diff --git a/http/testing.go b/http/testing.go
index 95153991e766..5797e4dc5b86 100644
--- a/http/testing.go
+++ b/http/testing.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package http
diff --git a/http/unwrapping_raw_body_test.go b/http/unwrapping_raw_body_test.go
index e1ad0df9c297..de145486dbaa 100644
--- a/http/unwrapping_raw_body_test.go
+++ b/http/unwrapping_raw_body_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package http
diff --git a/http/util.go b/http/util.go
index f714efa54f3b..3aec7fb0f8e9 100644
--- a/http/util.go
+++ b/http/util.go
@@ -1,41 +1,60 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1

 package http

 import (
 	"bytes"
-	"errors"
+	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"strings"

-	"github.com/hashicorp/vault/sdk/logical"
-
+	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/limits"
+	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/hashicorp/vault/vault"
 	"github.com/hashicorp/vault/vault/quotas"
 )

-var (
-	adjustRequest = func(c *vault.Core, r *http.Request) (*http.Request, int) {
-		return r, 0
-	}
+var nonVotersAllowed = false

-	genericWrapping = func(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler {
-		// Wrap the help wrapped handler with another layer with a generic
-		// handler
-		return wrapGenericHandler(core, in, props)
-	}
-
-	additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {}
+func wrapMaxRequestSizeHandler(handler http.Handler, props *vault.HandlerProperties) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var maxRequestSize int64
+		if props.ListenerConfig != nil {
+			maxRequestSize = props.ListenerConfig.MaxRequestSize
+		}
+		if maxRequestSize == 0 {
+			maxRequestSize = DefaultMaxRequestSize
+		}
+		ctx := r.Context()
+		originalBody := r.Body
+		if maxRequestSize > 0 {
+			r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
+		}
+		ctx = logical.CreateContextOriginalBody(ctx, originalBody)
+		r = r.WithContext(ctx)

-	nonVotersAllowed = false
+		handler.ServeHTTP(w, r)
+	})
+}

-	adjustResponse = func(core *vault.Core, w http.ResponseWriter, req *logical.Request) {}
-)
+func wrapRequestLimiterHandler(handler http.Handler, props *vault.HandlerProperties) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		request := r.WithContext(
+			context.WithValue(
+				r.Context(),
+				limits.CtxKeyDisableRequestLimiter{},
+				props.ListenerConfig.DisableRequestLimiter,
+			),
+		)
+		handler.ServeHTTP(w, request)
+	})
+}

 func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -55,25 +74,46 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler
 		}
 		mountPath := strings.TrimPrefix(core.MatchingMount(r.Context(), path), ns.Path)

-		// Clone body, so we do not close the request body reader
-		bodyBytes, err := ioutil.ReadAll(r.Body)
-		if err != nil {
-			respondError(w, http.StatusInternalServerError, errors.New("failed to read request body"))
-			return
-		}
-		r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
-
-		quotaResp, err := core.ApplyRateLimitQuota(r.Context(), &quotas.Request{
+		quotaReq := &quotas.Request{
 			Type:          quotas.TypeRateLimit,
 			Path:          path,
 			MountPath:     mountPath,
-			Role:          core.DetermineRoleFromLoginRequestFromBytes(mountPath, bodyBytes, r.Context()),
 			NamespacePath: ns.Path,
 			ClientAddress: parseRemoteIPAddress(r),
-		})
+		}
+
+		// This checks if any role-based quota is required (LCQ or RLQ).
+		requiresResolveRole, err := core.ResolveRoleForQuotas(r.Context(), quotaReq)
+		if err != nil {
+			core.Logger().Error("failed to lookup quotas", "path", path, "error", err)
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+
+		// If any role-based quotas are enabled for this namespace/mount, just
+		// do the role resolution once here.
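+		// The body-preservation pattern below, shown in isolation, looks
+		// roughly like this ('detectRole' is a hypothetical stand-in for the
+		// reader-consuming call, not a real function in this package):
+		//
+		//	var buf bytes.Buffer
+		//	role := detectRole(io.TeeReader(r.Body, &buf)) // reads the body, copying consumed bytes into buf
+		//	r.Body = io.NopCloser(&buf)                    // restore what was consumed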
+		if requiresResolveRole {
+			buf := bytes.Buffer{}
+			teeReader := io.TeeReader(r.Body, &buf)
+			role := core.DetermineRoleFromLoginRequestFromReader(r.Context(), mountPath, teeReader)
+
+			// Reset the body if it was read
+			if buf.Len() > 0 {
+				r.Body = io.NopCloser(&buf)
+				originalBody, ok := logical.ContextOriginalBodyValue(r.Context())
+				if ok {
+					r = r.WithContext(logical.CreateContextOriginalBody(r.Context(), newMultiReaderCloser(&buf, originalBody)))
+				}
+			}
+			// add an entry to the context to prevent recalculating request role unnecessarily
+			r = r.WithContext(context.WithValue(r.Context(), logical.CtxKeyRequestRole{}, role))
+			quotaReq.Role = role
+		}
+
+		quotaResp, err := core.ApplyRateLimitQuota(r.Context(), quotaReq)
 		if err != nil {
 			core.Logger().Error("failed to apply quota", "path", path, "error", err)
-			respondError(w, http.StatusUnprocessableEntity, err)
+			respondError(w, http.StatusInternalServerError, err)
 			return
 		}

@@ -92,7 +132,7 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler
 		}

 		if core.RateLimitAuditLoggingEnabled() {
-			req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
+			req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), w, r)
 			if err != nil || status != 0 {
 				respondError(w, status, err)
 				return
@@ -115,6 +155,22 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler
 	})
 }

+func disableReplicationStatusEndpointWrapping(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		request := r.WithContext(logical.CreateContextDisableReplicationStatusEndpoints(r.Context(), true))
+
+		h.ServeHTTP(w, request)
+	})
+}
+
+func redactionSettingsWrapping(h http.Handler, redactVersion, redactAddresses, redactClusterName bool) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		request := r.WithContext(logical.CreateContextRedactionSettings(r.Context(), redactVersion, redactAddresses, redactClusterName))
+
+		h.ServeHTTP(w, request)
+	})
+}
+
 func parseRemoteIPAddress(r *http.Request) string {
 	ip, _, err := net.SplitHostPort(r.RemoteAddr)
 	if err != nil {
@@ -123,3 +179,25 @@ func parseRemoteIPAddress(r *http.Request) string {

 	return ip
 }
+
+type multiReaderCloser struct {
+	readers []io.Reader
+	io.Reader
+}
+
+func newMultiReaderCloser(readers ...io.Reader) *multiReaderCloser {
+	return &multiReaderCloser{
+		readers: readers,
+		Reader:  io.MultiReader(readers...),
+	}
+}
+
+func (m *multiReaderCloser) Close() error {
+	// Accumulate into a *multierror.Error and return ErrorOrNil so that a
+	// successful close of every underlying reader yields a genuinely nil error.
+	var merr *multierror.Error
+	for _, r := range m.readers {
+		if c, ok := r.(io.Closer); ok {
+			merr = multierror.Append(merr, c.Close())
+		}
+	}
+	return merr.ErrorOrNil()
+}
diff --git a/http/util_stubs_oss.go b/http/util_stubs_oss.go
new file mode 100644
index 000000000000..7bffec7924e7
--- /dev/null
+++ b/http/util_stubs_oss.go
@@ -0,0 +1,26 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entWrapGenericHandler(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler { + // Wrap the help wrapped handler with another layer with a generic + // handler + return wrapGenericHandler(core, in, props) +} + +func entAdditionalRoutes(mux *http.ServeMux, core *vault.Core) {} + +func entAdjustResponse(core *vault.Core, w http.ResponseWriter, req *logical.Request) { +} diff --git a/internal/go118_sha1_patch.go b/internal/go118_sha1_patch.go deleted file mode 100644 index fc2ccf238266..000000000000 --- a/internal/go118_sha1_patch.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package internal - -import ( - "fmt" - "os" - "sync" - _ "unsafe" // for go:linkname - - goversion "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/version" -) - -const sha1PatchVersionsBefore = "1.12.0" - -var patchSha1 sync.Once - -//go:linkname debugAllowSHA1 crypto/x509.debugAllowSHA1 -var debugAllowSHA1 bool - -// PatchSha1 patches Go 1.18+ to allow certificates with signatures containing SHA-1 hashes to be allowed. -// It is safe to call this function multiple times. -// This is necessary to allow Vault 1.10 and 1.11 to work with Go 1.18+ without breaking backwards compatibility -// with these certificates. See https://go.dev/doc/go1.18#sha1 and -// https://developer.hashicorp.com/vault/docs/deprecation/faq#q-what-is-the-impact-of-removing-support-for-x-509-certificates-with-signatures-that-use-sha-1 -// for more details. -// TODO: remove when Vault <=1.11 is no longer supported -func PatchSha1() { - patchSha1.Do(func() { - // for Go 1.19.4 and later - godebug := os.Getenv("GODEBUG") - if godebug != "" { - godebug += "," - } - godebug += "x509sha1=1" - os.Setenv("GODEBUG", godebug) - - // for Go 1.19.3 and earlier, patch the variable - patchBefore, err := goversion.NewSemver(sha1PatchVersionsBefore) - if err != nil { - panic(err) - } - - patch := false - v, err := goversion.NewSemver(version.GetVersion().Version) - if err == nil { - patch = v.LessThan(patchBefore) - } else { - fmt.Fprintf(os.Stderr, "Cannot parse version %s; going to apply SHA-1 deprecation patch workaround\n", version.GetVersion().Version) - patch = true - } - - if patch { - debugAllowSHA1 = true - } - }) -} diff --git a/internal/observability/event/errors.go b/internal/observability/event/errors.go new file mode 100644 index 000000000000..a8ad7516fd7c --- /dev/null +++ b/internal/observability/event/errors.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "errors" +) + +var ErrInvalidParameter = errors.New("invalid parameter") diff --git a/internal/observability/event/event_type.go b/internal/observability/event/event_type.go new file mode 100644 index 000000000000..16a2f7674bb8 --- /dev/null +++ b/internal/observability/event/event_type.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/go-uuid"
+)
+
+// EventType represents the event's type
+type EventType string
+
+const (
+	AuditType EventType = "audit" // AuditType represents audit events
+)
+
+// Validate ensures that EventType is one of the set of allowed event types.
+func (t EventType) Validate() error {
+	switch t {
+	case AuditType:
+		return nil
+	default:
+		return fmt.Errorf("invalid event type %q: %w", t, ErrInvalidParameter)
+	}
+}
+
+// GenerateNodeID generates a new UUID and casts it to the eventlogger.NodeID
+// type.
+func GenerateNodeID() (eventlogger.NodeID, error) {
+	id, err := uuid.GenerateUUID()
+
+	return eventlogger.NodeID(id), err
+}
+
+// String returns the string version of an EventType.
+func (t EventType) String() string {
+	return string(t)
+}
+
+// AsEventType returns the EventType in a format for eventlogger.
+func (t EventType) AsEventType() eventlogger.EventType {
+	return eventlogger.EventType(t.String())
+}
diff --git a/internal/observability/event/event_type_test.go b/internal/observability/event/event_type_test.go
new file mode 100644
index 000000000000..ce8e238dfec0
--- /dev/null
+++ b/internal/observability/event/event_type_test.go
@@ -0,0 +1,51 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestEventType_Validate exercises the Validate method for EventType.
+func TestEventType_Validate(t *testing.T) {
+	tests := map[string]struct {
+		Value         string
+		IsValid       bool
+		ExpectedError string
+	}{
+		"audit": {
+			Value:   "audit",
+			IsValid: true,
+		},
+		"empty": {
+			Value:         "",
+			IsValid:       false,
+			ExpectedError: "invalid event type \"\": invalid parameter",
+		},
+		"random": {
+			Value:         "random",
+			IsValid:       false,
+			ExpectedError: "invalid event type \"random\": invalid parameter",
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			eventType := EventType(tc.Value)
+			err := eventType.Validate()
+			switch {
+			case tc.IsValid:
+				require.NoError(t, err)
+			case !tc.IsValid:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedError)
+			}
+		})
+	}
+}
diff --git a/internal/observability/event/node_metrics_counter.go b/internal/observability/event/node_metrics_counter.go
new file mode 100644
index 000000000000..980906137634
--- /dev/null
+++ b/internal/observability/event/node_metrics_counter.go
@@ -0,0 +1,77 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*MetricsCounter)(nil)
+
+// MetricsCounter offers a way for nodes to emit metrics which increment a counter by 1.
+type MetricsCounter struct {
+	Name    string
+	Node    eventlogger.Node
+	labeler Labeler
+}
+
+// Labeler provides a way to inject the logic required to determine labels based
+// on the state of the eventlogger.Event being returned and the error resulting
+// from processing by the underlying eventlogger.Node.
+type Labeler interface {
+	Labels(*eventlogger.Event, error) []string
+}
+
+// NewMetricsCounter should be used to create the MetricsCounter.
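+// A minimal usage sketch, assuming an eventlogger.Node 'sink' and a Labeler
+// 'labeler' are already in hand (both names below are illustrative):
+//
+//	counter, err := NewMetricsCounter("audit-sink", sink, labeler)
+//	if err != nil {
+//		// handle the invalid-parameter error
+//	}
+//
+// 'counter' can then stand in for 'sink' wherever the node is registered.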
+func NewMetricsCounter(name string, node eventlogger.Node, labeler Labeler) (*MetricsCounter, error) { + name = strings.TrimSpace(name) + if name == "" { + return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter) + } + + if node == nil || reflect.ValueOf(node).IsNil() { + return nil, fmt.Errorf("node is required: %w", ErrInvalidParameter) + } + + if labeler == nil || reflect.ValueOf(labeler).IsNil() { + return nil, fmt.Errorf("labeler is required: %w", ErrInvalidParameter) + } + + return &MetricsCounter{ + Name: name, + Node: node, + labeler: labeler, + }, nil +} + +// Process will process the event using the underlying eventlogger.Node, and then +// use the configured Labeler to provide a label which is used to increment a metric by 1. +func (m MetricsCounter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + // NOTE: We don't provide an 'op' here, as we're just wrapping the underlying node. + var err error + + // Process the node first + e, err = m.Node.Process(ctx, e) + + // Provide the results to the Labeler. + metrics.IncrCounter(m.labeler.Labels(e, err), 1) + + return e, err +} + +// Reopen attempts to reopen the underlying eventlogger.Node. +func (m MetricsCounter) Reopen() error { + return m.Node.Reopen() +} + +// Type returns the type for the underlying eventlogger.Node. +func (m MetricsCounter) Type() eventlogger.NodeType { + return m.Node.Type() +} diff --git a/internal/observability/event/node_metrics_counter_test.go b/internal/observability/event/node_metrics_counter_test.go new file mode 100644 index 000000000000..ac1679723123 --- /dev/null +++ b/internal/observability/event/node_metrics_counter_test.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "context" + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/stretchr/testify/require" +) + +var ( + _ eventlogger.Node = (*testEventLoggerNode)(nil) + _ Labeler = (*testMetricsCounter)(nil) +) + +// TestNewMetricsCounter ensures that NewMetricsCounter operates as intended and +// can validate the input parameters correctly, returning the right error message +// when required. +func TestNewMetricsCounter(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + node eventlogger.Node + labeler Labeler + isErrorExpected bool + expectedErrorMessage string + }{ + "happy": { + name: "foo", + node: &testEventLoggerNode{}, + labeler: &testMetricsCounter{}, + isErrorExpected: false, + }, + "no-name": { + node: nil, + labeler: nil, + isErrorExpected: true, + expectedErrorMessage: "name is required: invalid parameter", + }, + "no-node": { + name: "foo", + node: nil, + isErrorExpected: true, + expectedErrorMessage: "node is required: invalid parameter", + }, + "no-labeler": { + name: "foo", + node: &testEventLoggerNode{}, + labeler: nil, + isErrorExpected: true, + expectedErrorMessage: "labeler is required: invalid parameter", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + m, err := NewMetricsCounter(tc.name, tc.node, tc.labeler) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + default: + require.NoError(t, err) + require.NotNil(t, m) + } + }) + } +} + +// testEventLoggerNode is for testing and implements the eventlogger.Node interface. 
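+// Its Process method deliberately returns (nil, nil), so tests exercise only
+// the MetricsCounter wrapper around it.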
+type testEventLoggerNode struct{}
+
+func (t testEventLoggerNode) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	return nil, nil
+}
+
+func (t testEventLoggerNode) Reopen() error {
+	return nil
+}
+
+func (t testEventLoggerNode) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeSink
+}
+
+// testMetricsCounter is for testing and implements the event.Labeler interface.
+type testMetricsCounter struct{}
+
+func (m *testMetricsCounter) Labels(_ *eventlogger.Event, err error) []string {
+	return []string{""}
+}
diff --git a/internal/observability/event/options.go b/internal/observability/event/options.go
new file mode 100644
index 000000000000..7e419d559516
--- /dev/null
+++ b/internal/observability/event/options.go
@@ -0,0 +1,218 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-uuid"
+)
+
+// Option is how Options are passed as arguments.
+type Option func(*options) error
+
+// Options are used to represent configuration for an Event.
+type options struct {
+	withID          string
+	withNow         time.Time
+	withFacility    string
+	withTag         string
+	withSocketType  string
+	withMaxDuration time.Duration
+	withFileMode    *os.FileMode
+	withLogger      hclog.Logger
+}
+
+// getDefaultOptions returns Options with their default values.
+func getDefaultOptions() options {
+	fileMode := os.FileMode(0o600)
+
+	return options{
+		withNow:         time.Now(),
+		withFacility:    "AUTH",
+		withTag:         "vault",
+		withSocketType:  "tcp",
+		withMaxDuration: 2 * time.Second,
+		withFileMode:    &fileMode,
+	}
+}
+
+// getOpts applies all the supplied Options and returns the configured options.
+// Each Option is applied in the order it appears in the argument list, so it is
+// possible to supply the same Option numerous times and the 'last write wins'.
+func getOpts(opt ...Option) (options, error) {
+	opts := getDefaultOptions()
+	for _, o := range opt {
+		if o == nil {
+			continue
+		}
+		if err := o(&opts); err != nil {
+			return options{}, err
+		}
+	}
+	return opts, nil
+}
+
+// ValidateOptions can be used to validate options before they are required.
+func ValidateOptions(opt ...Option) error {
+	_, err := getOpts(opt...)
+
+	return err
+}
+
+// NewID is a slightly modified version of Boundary's NewID, reimplemented here
+// to avoid the circular dependency with the errors package that importing
+// boundary/internal/db would cause.
+func NewID(prefix string) (string, error) {
+	if prefix == "" {
+		return "", fmt.Errorf("missing prefix: %w", ErrInvalidParameter)
+	}
+
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		return "", fmt.Errorf("unable to generate ID: %w", err)
+	}
+
+	return fmt.Sprintf("%s_%s", prefix, id), nil
+}
+
+// WithID provides an optional ID.
+func WithID(id string) Option {
+	return func(o *options) error {
+		var err error

+		id := strings.TrimSpace(id)
+		switch {
+		case id == "":
+			err = fmt.Errorf("id cannot be empty: %w", ErrInvalidParameter)
+		default:
+			o.withID = id
+		}
+
+		return err
+	}
+}
+
+// WithNow provides an option to represent 'now'.
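+// For illustration (t1 and t2 standing in for any non-zero time.Time values),
+// Options compose left to right, so getOpts(WithNow(t1), WithNow(t2)) yields
+// opts.withNow == t2, per the 'last write wins' rule described on getOpts.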
+func WithNow(now time.Time) Option { + return func(o *options) error { + var err error + + switch { + case now.IsZero(): + err = fmt.Errorf("cannot specify 'now' to be the zero time instant: %w", ErrInvalidParameter) + default: + o.withNow = now + } + + return err + } +} + +// WithFacility provides an Option to represent a 'facility' for a syslog sink. +func WithFacility(facility string) Option { + return func(o *options) error { + facility = strings.TrimSpace(facility) + + if facility != "" { + o.withFacility = facility + } + + return nil + } +} + +// WithTag provides an Option to represent a 'tag' for a syslog sink. +func WithTag(tag string) Option { + return func(o *options) error { + tag = strings.TrimSpace(tag) + + if tag != "" { + o.withTag = tag + } + + return nil + } +} + +// WithSocketType provides an Option to represent the socket type for a socket sink. +func WithSocketType(socketType string) Option { + return func(o *options) error { + socketType = strings.TrimSpace(socketType) + + if socketType != "" { + o.withSocketType = socketType + } + + return nil + } +} + +// WithMaxDuration provides an Option to represent the max duration for writing to a socket. +func WithMaxDuration(duration string) Option { + return func(o *options) error { + duration = strings.TrimSpace(duration) + + if duration == "" { + return nil + } + + parsed, err := parseutil.ParseDurationSecond(duration) + if err != nil { + return fmt.Errorf("unable to parse max duration: %w: %w", ErrInvalidParameter, err) + } + + o.withMaxDuration = parsed + + return nil + } +} + +// WithFileMode provides an Option to represent a file mode for a file sink. +// Supplying an empty string or whitespace will prevent this Option from being +// applied, but it will not return an error in those circumstances. +func WithFileMode(mode string) Option { + return func(o *options) error { + // If supplied file mode is empty, just return early without setting anything. + // We can assume that this Option was called by something that didn't + // parse the incoming value, perhaps from a config map etc. + mode = strings.TrimSpace(mode) + if mode == "" { + return nil + } + + // By now we believe we have something that the caller really intended to + // be parsed into a file mode. + raw, err := strconv.ParseUint(mode, 8, 32) + + switch { + case err != nil: + return fmt.Errorf("unable to parse file mode: %w: %w", ErrInvalidParameter, err) + default: + m := os.FileMode(raw) + o.withFileMode = &m + } + + return nil + } +} + +// WithLogger provides an Option to supply a logger which will be used to write logs. +// NOTE: If no logger is supplied then logging may not be possible. +func WithLogger(l hclog.Logger) Option { + return func(o *options) error { + if l != nil && !reflect.ValueOf(l).IsNil() { + o.withLogger = l + } + + return nil + } +} diff --git a/internal/observability/event/options_test.go b/internal/observability/event/options_test.go new file mode 100644 index 000000000000..2b6a1fe3ae8f --- /dev/null +++ b/internal/observability/event/options_test.go @@ -0,0 +1,460 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "os" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +// TestOptions_WithNow exercises WithNow option to ensure it performs as expected. 
+func TestOptions_WithNow(t *testing.T) { + tests := map[string]struct { + Value time.Time + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue time.Time + }{ + "default-time": { + Value: time.Time{}, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant: invalid parameter", + }, + "valid-time": { + Value: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + IsErrorExpected: false, + ExpectedValue: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + opts := &options{} + applyOption := WithNow(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withNow) + } + }) + } +} + +// TestOptions_WithID exercises WithID option to ensure it performs as expected. +func TestOptions_WithID(t *testing.T) { + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty: invalid parameter", + }, + "whitespace": { + Value: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty: invalid parameter", + }, + "valid": { + Value: "test", + IsErrorExpected: false, + ExpectedValue: "test", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithID(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withID) + } + }) + } +} + +// TestOptions_Default exercises getDefaultOptions to assert the default values. +func TestOptions_Default(t *testing.T) { + opts := getDefaultOptions() + require.NotNil(t, opts) + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) + require.Equal(t, "AUTH", opts.withFacility) + require.Equal(t, "vault", opts.withTag) + require.Equal(t, 2*time.Second, opts.withMaxDuration) +} + +// TestOptions_Opts exercises getOpts with various Option values. 
+func TestOptions_Opts(t *testing.T) { + tests := map[string]struct { + opts []Option + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedID string + IsNowExpected bool + ExpectedNow time.Time + }{ + "nil-options": { + opts: nil, + IsErrorExpected: false, + IsNowExpected: true, + }, + "empty-options": { + opts: []Option{}, + IsErrorExpected: false, + IsNowExpected: true, + }, + "with-multiple-valid-id": { + opts: []Option{ + WithID("qwerty"), + WithID("juan"), + }, + IsErrorExpected: false, + ExpectedID: "juan", + IsNowExpected: true, + }, + "with-multiple-valid-now": { + opts: []Option{ + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + WithNow(time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local)), + }, + IsErrorExpected: false, + ExpectedNow: time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local), + IsNowExpected: false, + }, + "with-multiple-valid-then-invalid-now": { + opts: []Option{ + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + WithNow(time.Time{}), + }, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant: invalid parameter", + }, + "with-multiple-valid-options": { + opts: []Option{ + WithID("qwerty"), + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + }, + IsErrorExpected: false, + ExpectedID: "qwerty", + ExpectedNow: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts, err := getOpts(tc.opts...) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NotNil(t, opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedID, opts.withID) + switch { + case tc.IsNowExpected: + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) + default: + require.Equal(t, tc.ExpectedNow, opts.withNow) + } + + } + }) + } +} + +// TestOptions_WithFacility exercises WithFacility Option to ensure it performs as expected. +func TestOptions_WithFacility(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue string + }{ + "empty": { + Value: "", + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + ExpectedValue: "", + }, + "value": { + Value: "juan", + ExpectedValue: "juan", + }, + "spacey-value": { + Value: " juan ", + ExpectedValue: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithFacility(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withFacility) + }) + } +} + +// TestOptions_WithTag exercises WithTag Option to ensure it performs as expected. 
+func TestOptions_WithTag(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue string + }{ + "empty": { + Value: "", + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + ExpectedValue: "", + }, + "value": { + Value: "juan", + ExpectedValue: "juan", + }, + "spacey-value": { + Value: " juan ", + ExpectedValue: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithTag(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withTag) + }) + } +} + +// TestOptions_WithSocketType exercises WithSocketType Option to ensure it performs as expected. +func TestOptions_WithSocketType(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue string + }{ + "empty": { + Value: "", + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + ExpectedValue: "", + }, + "value": { + Value: "juan", + ExpectedValue: "juan", + }, + "spacey-value": { + Value: " juan ", + ExpectedValue: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithSocketType(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withSocketType) + }) + } +} + +// TestOptions_WithMaxDuration exercises WithMaxDuration Option to ensure it performs as expected. +func TestOptions_WithMaxDuration(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue time.Duration + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty-gives-default": { + Value: "", + }, + "whitespace-give-default": { + Value: " ", + }, + "bad-value": { + Value: "juan", + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse max duration: invalid parameter: time: invalid duration \"juan\"", + }, + "bad-spacey-value": { + Value: " juan ", + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse max duration: invalid parameter: time: invalid duration \"juan\"", + }, + "duration-2s": { + Value: "2s", + ExpectedValue: 2 * time.Second, + }, + "duration-2m": { + Value: "2m", + ExpectedValue: 2 * time.Minute, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithMaxDuration(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withMaxDuration) + } + }) + } +} + +// TestOptions_WithFileMode exercises WithFileMode Option to ensure it performs as expected. 
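+// Note: WithFileMode parses its argument as an octal string via
+// strconv.ParseUint(mode, 8, 32), so "0007" becomes os.FileMode(0o007).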
+func TestOptions_WithFileMode(t *testing.T) { + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + IsNilExpected bool + ExpectedValue os.FileMode + }{ + "empty": { + Value: "", + IsErrorExpected: false, + IsNilExpected: true, + }, + "whitespace": { + Value: " ", + IsErrorExpected: false, + IsNilExpected: true, + }, + "nonsense": { + Value: "juan", + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse file mode: invalid parameter: strconv.ParseUint: parsing \"juan\": invalid syntax", + }, + "zero": { + Value: "0000", + IsErrorExpected: false, + ExpectedValue: os.FileMode(0o000), + }, + "valid": { + Value: "0007", + IsErrorExpected: false, + ExpectedValue: os.FileMode(0o007), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithFileMode(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + switch { + case tc.IsNilExpected: + // Optional Option 'not supplied' (i.e. was whitespace/empty string) + require.Nil(t, opts.withFileMode) + default: + // Dereference the pointer, so we can examine the file mode. + require.Equal(t, tc.ExpectedValue, *opts.withFileMode) + } + } + }) + } +} + +// TestOptions_WithLogger exercises WithLogger Option to ensure it performs as expected. +func TestOptions_WithLogger(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + value hclog.Logger + isNilExpected bool + }{ + "nil-pointer": { + value: nil, + isNilExpected: true, + }, + "logger": { + value: hclog.NewNullLogger(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + opts := &options{} + applyOption := WithLogger(tc.value) + err := applyOption(opts) + require.NoError(t, err) + if tc.isNilExpected { + require.Nil(t, opts.withLogger) + } else { + require.NotNil(t, opts.withLogger) + } + }) + } +} diff --git a/internal/observability/event/pipeline_reader.go b/internal/observability/event/pipeline_reader.go new file mode 100644 index 000000000000..f35672f8efa6 --- /dev/null +++ b/internal/observability/event/pipeline_reader.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import "github.com/hashicorp/eventlogger" + +// PipelineReader surfaces information required for pipeline registration. +type PipelineReader interface { + // EventType should return the event type to be used for pipeline registration. + EventType() eventlogger.EventType + + // HasFiltering should determine if filter nodes are used by this pipeline. + HasFiltering() bool + + // Name for the pipeline which should be used for the eventlogger.PipelineID. + Name() string + + // Nodes should return the nodes which should be used by the framework to process events. + Nodes() map[eventlogger.NodeID]eventlogger.Node + + // NodeIDs should return the IDs of the nodes, in the order they are required. + NodeIDs() []eventlogger.NodeID +} diff --git a/internal/observability/event/sink_file.go b/internal/observability/event/sink_file.go new file mode 100644 index 000000000000..5385eac1df0f --- /dev/null +++ b/internal/observability/event/sink_file.go @@ -0,0 +1,238 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/go-hclog"
+)
+
+// defaultFileMode is the default file permissions (read/write for the owner only).
+const (
+	defaultFileMode = 0o600
+	devnull         = "/dev/null"
+)
+
+var _ eventlogger.Node = (*FileSink)(nil)
+
+// FileSink is a sink node which handles writing events to file.
+type FileSink struct {
+	file           *os.File
+	fileLock       sync.RWMutex
+	fileMode       os.FileMode
+	path           string
+	requiredFormat string
+	logger         hclog.Logger
+}
+
+// NewFileSink should be used to create a new FileSink.
+// Accepted options: WithFileMode.
+func NewFileSink(path string, format string, opt ...Option) (*FileSink, error) {
+	// Parse and check path
+	p := strings.TrimSpace(path)
+	if p == "" {
+		return nil, fmt.Errorf("path is required: %w", ErrInvalidParameter)
+	}
+
+	opts, err := getOpts(opt...)
+	if err != nil {
+		return nil, err
+	}
+
+	mode := os.FileMode(defaultFileMode)
+	// If we got an optional file mode supplied and our path isn't a special keyword
+	// then we should use the supplied file mode, or maintain the existing file mode.
+	switch {
+	case path == devnull:
+	case opts.withFileMode == nil:
+	case *opts.withFileMode == 0: // Maintain the existing file's mode when set to "0000".
+		fileInfo, err := os.Stat(path)
+		if err != nil {
+			return nil, fmt.Errorf("unable to determine existing file mode: %w", err)
+		}
+		mode = fileInfo.Mode()
+	default:
+		mode = *opts.withFileMode
+	}
+
+	sink := &FileSink{
+		file:           nil,
+		fileLock:       sync.RWMutex{},
+		fileMode:       mode,
+		requiredFormat: format,
+		path:           p,
+		logger:         opts.withLogger,
+	}
+
+	// Ensure that the file can be successfully opened for writing;
+	// otherwise it will be too late to catch later without problems
+	// (ref: https://github.com/hashicorp/vault/issues/550)
+	if err := sink.open(); err != nil {
+		return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", sink.path, err)
+	}
+
+	return sink, nil
+}
+
+// Process handles writing the event to the file sink.
+func (s *FileSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	defer func() {
+		// If the context is errored (cancelled), and we were planning to return
+		// an error, let's also log it (if we have a logger) in case the error
+		// doesn't propagate via the eventlogger's status channel.
+		if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil {
+			s.logger.Error("file sink error", "context", err, "error", retErr)
+		}
+	}()
+
+	if e == nil {
+		return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
+	}
+
+	// '/dev/null' path means we just do nothing and pretend we're done.
+	if s.path == devnull {
+		return nil, nil
+	}
+
+	formatted, found := e.Format(s.requiredFormat)
+	if !found {
+		return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter)
+	}
+
+	err := s.log(ctx, formatted)
+	if err != nil {
+		return nil, fmt.Errorf("error writing file for sink %q: %w", s.path, err)
+	}
+
+	// return nil for the event to indicate the pipeline is complete.
+	return nil, nil
+}
+
+// Reopen handles closing and reopening the file.
+func (s *FileSink) Reopen() error {
+	// '/dev/null' path means we just do nothing and pretend we're done.
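+	// (Process short-circuits on the same '/dev/null' path for the same reason.)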
+ if s.path == devnull { + return nil + } + + s.fileLock.Lock() + defer s.fileLock.Unlock() + + if s.file == nil { + return s.open() + } + + err := s.file.Close() + // Set to nil here so that even if we error out, on the next access open() will be tried. + s.file = nil + if err != nil { + return fmt.Errorf("unable to close file for re-opening on sink %q: %w", s.path, err) + } + + return s.open() +} + +// Type describes the type of this node (sink). +func (s *FileSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} + +// open attempts to open a file at the sink's path, with the sink's fileMode permissions +// if one is not already open. +// It doesn't have any locking and relies on calling functions of FileSink to +// handle this (e.g. log and Reopen methods). +func (s *FileSink) open() error { + if s.file != nil { + return nil + } + + if err := os.MkdirAll(filepath.Dir(s.path), s.fileMode); err != nil { + return fmt.Errorf("unable to create file %q: %w", s.path, err) + } + + var err error + s.file, err = os.OpenFile(s.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, s.fileMode) + if err != nil { + return fmt.Errorf("unable to open file for sink %q: %w", s.path, err) + } + + // Change the file mode in case the log file already existed. + // We special case '/dev/null' since we can't chmod it, and bypass if the mode is zero. + switch s.path { + case devnull: + default: + if s.fileMode != 0 { + err = os.Chmod(s.path, s.fileMode) + if err != nil { + return fmt.Errorf("unable to change file permissions '%v' for sink %q: %w", s.fileMode, s.path, err) + } + } + } + + return nil +} + +// log writes the buffer to the file. +// NOTE: We attempt to acquire a lock on the file in order to write, but will +// yield if the context is 'done'. +func (s *FileSink) log(ctx context.Context, data []byte) error { + // Wait for the lock, but ensure we check for a cancelled context as soon as + // we have it, as there's no point in continuing if we're cancelled. + s.fileLock.Lock() + defer s.fileLock.Unlock() + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + reader := bytes.NewReader(data) + + if err := s.open(); err != nil { + return fmt.Errorf("unable to open file for sink %q: %w", s.path, err) + } + + if _, err := reader.WriteTo(s.file); err == nil { + return nil + } + + // Otherwise, opportunistically try to re-open the FD, once per call (1 retry attempt). + err := s.file.Close() + if err != nil { + return fmt.Errorf("unable to close file for sink %q: %w", s.path, err) + } + + s.file = nil + + if err := s.open(); err != nil { + return fmt.Errorf("unable to re-open file for sink %q: %w", s.path, err) + } + + _, err = reader.Seek(0, io.SeekStart) + if err != nil { + return fmt.Errorf("unable to seek to start of file for sink %q: %w", s.path, err) + } + + _, err = reader.WriteTo(s.file) + if err != nil { + return fmt.Errorf("unable to re-write to file for sink %q: %w", s.path, err) + } + + return nil +} diff --git a/internal/observability/event/sink_file_test.go b/internal/observability/event/sink_file_test.go new file mode 100644 index 000000000000..6f4b721b07d8 --- /dev/null +++ b/internal/observability/event/sink_file_test.go @@ -0,0 +1,354 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
+	"github.com/stretchr/testify/require"
+)
+
+// TestFileSink_Type ensures that the node is a 'sink' type.
+func TestFileSink_Type(t *testing.T) {
+	f, err := NewFileSink(filepath.Join(t.TempDir(), "vault.log"), "json")
+	require.NoError(t, err)
+	require.NotNil(t, f)
+	require.Equal(t, eventlogger.NodeTypeSink, f.Type())
+}
+
+// TestNewFileSink tests creation of a FileSink.
+func TestNewFileSink(t *testing.T) {
+	tests := map[string]struct {
+		ShouldUseAbsolutePath bool // When true, Path is used as-is (and should include the filename); otherwise it is joined to a temp dir.
+		Path                  string
+		Format                string
+		Options               []Option
+		IsErrorExpected       bool
+		ExpectedErrorMessage  string
+		// Expected values of the FileSink
+		ExpectedFileMode os.FileMode
+		ExpectedFormat   string
+		ExpectedPath     string
+		ExpectedPrefix   string
+	}{
+		"default-values": {
+			ShouldUseAbsolutePath: true,
+			IsErrorExpected:       true,
+			ExpectedErrorMessage:  "path is required: invalid parameter",
+		},
+		"spacey-path": {
+			ShouldUseAbsolutePath: true,
+			Path:                  " ",
+			Format:                "json",
+			IsErrorExpected:       true,
+			ExpectedErrorMessage:  "path is required: invalid parameter",
+		},
+		"valid-path-and-format": {
+			Path:             "vault.log",
+			Format:           "json",
+			IsErrorExpected:  false,
+			ExpectedFileMode: defaultFileMode,
+			ExpectedFormat:   "json",
+			ExpectedPrefix:   "",
+		},
+		"file-mode-not-default-or-zero": {
+			Path:             "vault.log",
+			Format:           "json",
+			Options:          []Option{WithFileMode("0007")},
+			IsErrorExpected:  false,
+			ExpectedFormat:   "json",
+			ExpectedPrefix:   "",
+			ExpectedFileMode: 0o007,
+		},
+		"prefix": {
+			Path:             "vault.log",
+			Format:           "json",
+			Options:          []Option{WithFileMode("0007")},
+			IsErrorExpected:  false,
+			ExpectedPrefix:   "bleep",
+			ExpectedFormat:   "json",
+			ExpectedFileMode: 0o007,
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			// t.Parallel()
+
+			// If we need a real directory as a path, use a temp dir, keeping track
+			// of it so we can compare paths against the new sink.
+			var tempDir string
+			tempPath := tc.Path
+			if !tc.ShouldUseAbsolutePath {
+				tempDir = t.TempDir()
+				tempPath = filepath.Join(tempDir, tempPath)
+			}
+
+			sink, err := NewFileSink(tempPath, tc.Format, tc.Options...)
+
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+				require.Nil(t, sink)
+			default:
+				require.NoError(t, err)
+				require.NotNil(t, sink)
+
+				// Assert properties are correct.
+				require.Equal(t, tc.ExpectedFormat, sink.requiredFormat)
+				require.Equal(t, tc.ExpectedFileMode, sink.fileMode)
+
+				switch {
+				case tc.ShouldUseAbsolutePath:
+					require.Equal(t, tc.ExpectedPath, sink.path)
+				default:
+					require.Equal(t, tempPath, sink.path)
+				}
+			}
+		})
+	}
+}
+
+// TestFileSink_Reopen tests that the sink reopens files as expected when requested.
+// Special keyword paths such as '/dev/null' are ignored (the file audit device's
+// file_path docs list the full set of special keywords).
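+// A file mode option of "0000" means 'keep the existing file's mode', so the
+// case exercising it pre-creates a file for the sink to stat.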
+// see: https://developer.hashicorp.com/vault/docs/audit/file#file_path
+func TestFileSink_Reopen(t *testing.T) {
+	tests := map[string]struct {
+		Path                  string
+		ShouldUseAbsolutePath bool
+		ShouldCreateFile      bool
+		ShouldIgnoreFileMode  bool
+		Options               []Option
+		IsErrorExpected       bool
+		ExpectedErrorMessage  string
+		ExpectedFileMode      os.FileMode
+	}{
+		// Should be ignored by Reopen
+		"devnull": {
+			Path:                  "/dev/null",
+			ShouldUseAbsolutePath: true,
+			ShouldIgnoreFileMode:  true,
+		},
+		"happy": {
+			Path:             "vault.log",
+			ExpectedFileMode: os.FileMode(defaultFileMode),
+		},
+		"filemode-existing": {
+			Path:             "vault.log",
+			ShouldCreateFile: true,
+			Options:          []Option{WithFileMode("0000")},
+			ExpectedFileMode: os.FileMode(defaultFileMode),
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			// If we need a real directory as a path, use a temp dir, keeping track
+			// of it so we can compare paths against the new sink.
+			var tempDir string
+			tempPath := tc.Path
+			if !tc.ShouldUseAbsolutePath {
+				tempDir = t.TempDir()
+				tempPath = filepath.Join(tempDir, tc.Path)
+			}
+
+			// If the file mode is 0 then we will need a pre-created file to stat.
+			// Only do this for paths that are not 'special keywords'.
+			if tc.ShouldCreateFile && tc.Path != devnull {
+				f, err := os.OpenFile(tempPath, os.O_CREATE, defaultFileMode)
+				require.NoError(t, err)
+				defer func() {
+					err = os.Remove(f.Name())
+					require.NoError(t, err)
+				}()
+			}
+
+			sink, err := NewFileSink(tempPath, "json", tc.Options...)
+			require.NoError(t, err)
+			require.NotNil(t, sink)
+
+			err = sink.Reopen()
+
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+			default:
+				require.NoError(t, err)
+				info, err := os.Stat(tempPath)
+				require.NoError(t, err)
+				require.NotNil(t, info)
+				if !tc.ShouldIgnoreFileMode {
+					require.Equal(t, tc.ExpectedFileMode, info.Mode())
+				}
+			}
+		})
+	}
+}
+
+// TestFileSink_Process ensures that Process behaves as expected.
+func TestFileSink_Process(t *testing.T) {
+	tests := map[string]struct {
+		ShouldUseAbsolutePath bool
+		Path                  string
+		ShouldCreateFile      bool
+		Format                string
+		ShouldIgnoreFormat    bool
+		Data                  string
+		ShouldUseNilEvent     bool
+		IsErrorExpected       bool
+		ExpectedErrorMessage  string
+	}{
+		"devnull": {
+			ShouldUseAbsolutePath: true,
+			Path:                  devnull,
+			Format:                "json",
+			Data:                  "foo",
+			IsErrorExpected:       false,
+		},
+		"no-formatted-data": {
+			ShouldCreateFile:     true,
+			Path:                 "juan.log",
+			Format:               "json",
+			Data:                 "foo",
+			ShouldIgnoreFormat:   true,
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "unable to retrieve event formatted as \"json\": invalid parameter",
+		},
+		"nil": {
+			Path:                 "foo.log",
+			Format:               "json",
+			Data:                 "foo",
+			ShouldUseNilEvent:    true,
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "event is nil: invalid parameter",
+		},
+		"happy-path": {
+			Path:             "juan.log",
+			ShouldCreateFile: true,
+			Format:           "json",
+			Data:             "{\"foo\": \"bar\"}",
+			IsErrorExpected:  false,
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			// Use a temp dir for most tests, unless we're deliberately exercising
+			// an absolute or special path.
+			var tempDir string
+			tempPath := tc.Path
+			if !tc.ShouldUseAbsolutePath {
+				tempDir = t.TempDir()
+				tempPath = filepath.Join(tempDir, tc.Path)
+			}
+
+			// Create a file if we will need it there before Process kicks off.
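+			// (os.O_CREATE with no access-mode flags is enough: the test only needs
+			// the file to exist before the sink writes to it.)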
+			if tc.ShouldCreateFile && tc.Path != devnull {
+				f, err := os.OpenFile(tempPath, os.O_CREATE, defaultFileMode)
+				require.NoError(t, err)
+				defer func() {
+					err = os.Remove(f.Name())
+					require.NoError(t, err)
+				}()
+			}
+
+			// Set up a sink
+			sink, err := NewFileSink(tempPath, tc.Format)
+			require.NoError(t, err)
+			require.NotNil(t, sink)
+
+			// Generate a fake event
+			ctx := namespace.RootContext(nil)
+
+			event := &eventlogger.Event{
+				Type:      "audit",
+				CreatedAt: time.Now(),
+				Formatted: make(map[string][]byte),
+				Payload:   struct{ ID string }{ID: "123"},
+			}
+
+			if !tc.ShouldIgnoreFormat {
+				event.FormattedAs(tc.Format, []byte(tc.Data))
+			}
+
+			if tc.ShouldUseNilEvent {
+				event = nil
+			}
+
+			// The actual exercising of the sink.
+			event, err = sink.Process(ctx, event)
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+				require.Nil(t, event)
+			default:
+				require.NoError(t, err)
+				require.Nil(t, event)
+			}
+		})
+	}
+}
+
+// TestFileSink_log_cancelledContext tests that 'log' is context aware and won't
+// just wait for the lock on the file sink forever.
+func TestFileSink_log_cancelledContext(t *testing.T) {
+	tempDir := t.TempDir()
+	tempPath := filepath.Join(tempDir, "juan.log")
+	data := []byte("{\"foo\": \"bar\"}")
+
+	sink, err := NewFileSink(tempPath, "json")
+	require.NoError(t, err)
+	require.NotNil(t, sink)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(func() { cancel() })
+
+	// Manually acquire the lock so that the 'log' method cannot.
+	sink.fileLock.Lock()
+
+	var done atomic.Bool
+	go func() {
+		err = sink.log(ctx, data)
+		done.Store(true)
+	}()
+
+	// There should be no error yet, since 'log' is still blocked waiting for the lock.
+	require.NoError(t, err)
+
+	// Manually cancel the context and unlock to let the waiting 'log' in.
+	cancel()
+	sink.fileLock.Unlock()
+
+	// Allow a little time for 'log' to return and set err.
+	corehelpers.RetryUntil(t, 3*time.Second, func() error {
+		if done.Load() {
+			return nil
+		}
+
+		return fmt.Errorf("logging still not done")
+	})
+
+	// We expect that the error now has context cancelled in it.
+	require.True(t, errors.Is(err, context.Canceled))
+}
diff --git a/internal/observability/event/sink_noop.go b/internal/observability/event/sink_noop.go
new file mode 100644
index 000000000000..165fd700f5bf
--- /dev/null
+++ b/internal/observability/event/sink_noop.go
@@ -0,0 +1,36 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*NoopSink)(nil)
+
+// NoopSink is a sink node which ignores everything it receives.
+type NoopSink struct{}
+
+// NewNoopSink should be used to create a new NoopSink.
+func NewNoopSink() *NoopSink {
+	return &NoopSink{}
+}
+
+// Process is a no-op and always returns a nil event and nil error.
+func (_ *NoopSink) Process(_ context.Context, _ *eventlogger.Event) (*eventlogger.Event, error) {
+	// return nil for the event to indicate the pipeline is complete.
+	return nil, nil
+}
+
+// Reopen is a no-op and always returns nil.
+func (_ *NoopSink) Reopen() error {
+	return nil
+}
+
+// Type describes the type of this node (sink).
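+// (Every eventlogger pipeline must terminate in a sink node; NoopSink lets a
+// pipeline that is registered only for its side effects satisfy that contract.
+// A rough registration sketch, assuming eventlogger's Broker API:
+//
+//	broker, _ := eventlogger.NewBroker()
+//	_ = broker.RegisterNode("noop-sink", NewNoopSink())
+// )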
+func (_ *NoopSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} diff --git a/internal/observability/event/sink_socket.go b/internal/observability/event/sink_socket.go new file mode 100644 index 000000000000..dc88a78b7109 --- /dev/null +++ b/internal/observability/event/sink_socket.go @@ -0,0 +1,220 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "context" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" +) + +var _ eventlogger.Node = (*SocketSink)(nil) + +// SocketSink is a sink node which handles writing events to socket. +type SocketSink struct { + requiredFormat string + address string + socketType string + maxDuration time.Duration + socketLock sync.RWMutex + connection net.Conn + logger hclog.Logger +} + +// NewSocketSink should be used to create a new SocketSink. +// Accepted options: WithMaxDuration and WithSocketType. +func NewSocketSink(address string, format string, opt ...Option) (*SocketSink, error) { + address = strings.TrimSpace(address) + if address == "" { + return nil, fmt.Errorf("address is required: %w", ErrInvalidParameter) + } + + format = strings.TrimSpace(format) + if format == "" { + return nil, fmt.Errorf("format is required: %w", ErrInvalidParameter) + } + + opts, err := getOpts(opt...) + if err != nil { + return nil, err + } + + sink := &SocketSink{ + requiredFormat: format, + address: address, + socketType: opts.withSocketType, + maxDuration: opts.withMaxDuration, + socketLock: sync.RWMutex{}, + connection: nil, + logger: opts.withLogger, + } + + return sink, nil +} + +// Process handles writing the event to the socket. +func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + defer func() { + // If the context is errored (cancelled), and we were planning to return + // an error, let's also log (if we have a logger) in case the eventlogger's + // status channel and errors propagated. + if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil { + s.logger.Error("socket sink error", "context", err, "error", retErr) + } + }() + + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + formatted, found := e.Format(s.requiredFormat) + if !found { + return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter) + } + + // Wait for the lock, but ensure we check for a cancelled context as soon as + // we have it, as there's no point in continuing if we're cancelled. + s.socketLock.Lock() + defer s.socketLock.Unlock() + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + // Try writing and return early if successful. + err := s.write(ctx, formatted) + if err == nil { + return nil, nil + } + + // We will try to reconnect and retry a single write. + reconErr := s.reconnect(ctx) + switch { + case reconErr != nil: + // Add the reconnection error to the existing error. + err = multierror.Append(err, reconErr) + default: + err = s.write(ctx, formatted) + } + + // Format the error nicely if we need to return one. + if err != nil { + err = fmt.Errorf("error writing to socket %q: %w", s.address, err) + } + + // return nil for the event to indicate the pipeline is complete. 
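+	// (At this point at most two writes have been attempted: the initial one and,
+	// if reconnection succeeded, a single retry; err holds the final outcome.)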
+	return nil, err
+}
+
+// Reopen handles reopening the connection for the socket sink.
+func (s *SocketSink) Reopen() error {
+	s.socketLock.Lock()
+	defer s.socketLock.Unlock()
+
+	// Reopen has no caller-supplied context, and 'connect' requires a non-nil one.
+	err := s.reconnect(context.Background())
+	if err != nil {
+		return fmt.Errorf("error reconnecting %q: %w", s.address, err)
+	}
+
+	return nil
+}
+
+// Type describes the type of this node (sink).
+func (_ *SocketSink) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeSink
+}
+
+// connect attempts to establish a connection using the socketType and address.
+// NOTE: connect is context aware and will not attempt to connect if the context is 'done'.
+func (s *SocketSink) connect(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	// If we're already connected, there's nothing to do ('reconnect' handles
+	// disconnecting first when a fresh connection is wanted).
+	if s.connection != nil {
+		return nil
+	}
+
+	timeoutContext, cancel := context.WithTimeout(ctx, s.maxDuration)
+	defer cancel()
+
+	dialer := net.Dialer{}
+	conn, err := dialer.DialContext(timeoutContext, s.socketType, s.address)
+	if err != nil {
+		return fmt.Errorf("error connecting to %q address %q: %w", s.socketType, s.address, err)
+	}
+
+	s.connection = conn
+
+	return nil
+}
+
+// disconnect attempts to close and clear an existing connection.
+func (s *SocketSink) disconnect() error {
+	// If we're already disconnected, we can return early.
+	if s.connection == nil {
+		return nil
+	}
+
+	err := s.connection.Close()
+	if err != nil {
+		return fmt.Errorf("error closing connection to %q address %q: %w", s.socketType, s.address, err)
+	}
+	s.connection = nil
+
+	return nil
+}
+
+// reconnect attempts to disconnect and then connect to the configured socketType and address.
+func (s *SocketSink) reconnect(ctx context.Context) error {
+	err := s.disconnect()
+	if err != nil {
+		return err
+	}
+
+	err = s.connect(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// write attempts to write the specified data using the established connection.
+func (s *SocketSink) write(ctx context.Context, data []byte) error {
+	// Ensure we're connected.
+	err := s.connect(ctx)
+	if err != nil {
+		return err
+	}
+
+	err = s.connection.SetWriteDeadline(time.Now().Add(s.maxDuration))
+	if err != nil {
+		return fmt.Errorf("unable to set write deadline: %w", err)
+	}
+
+	_, err = s.connection.Write(data)
+	if err != nil {
+		return fmt.Errorf("unable to write to socket: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/observability/event/sink_socket_test.go b/internal/observability/event/sink_socket_test.go
new file mode 100644
index 000000000000..f0685e52e461
--- /dev/null
+++ b/internal/observability/event/sink_socket_test.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestNewSocketSink ensures that we validate the input arguments and can create
+// the SocketSink if everything goes to plan.
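+// The 'happy' case below also pins the option defaults: socket type "tcp" and a
+// max duration of 2s. Both can be overridden, e.g. (illustrative values only):
+//
+//	sink, _ := NewSocketSink("127.0.0.1:9090", "json",
+//		WithSocketType("udp"), WithMaxDuration("5s"))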
+func TestNewSocketSink(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		address        string
+		format         string
+		opts           []Option
+		want           *SocketSink
+		wantErr        bool
+		expectedErrMsg string
+	}{
+		"address-empty": {
+			address:        "",
+			wantErr:        true,
+			expectedErrMsg: "address is required: invalid parameter",
+		},
+		"address-whitespace": {
+			address:        " ",
+			wantErr:        true,
+			expectedErrMsg: "address is required: invalid parameter",
+		},
+		"format-empty": {
+			address:        "addr",
+			format:         "",
+			wantErr:        true,
+			expectedErrMsg: "format is required: invalid parameter",
+		},
+		"format-whitespace": {
+			address:        "addr",
+			format:         " ",
+			wantErr:        true,
+			expectedErrMsg: "format is required: invalid parameter",
+		},
+		"bad-max-duration": {
+			address:        "addr",
+			format:         "json",
+			opts:           []Option{WithMaxDuration("bar")},
+			wantErr:        true,
+			expectedErrMsg: "unable to parse max duration: invalid parameter: time: invalid duration \"bar\"",
+		},
+		"happy": {
+			address: "wss://foo",
+			format:  "json",
+			want: &SocketSink{
+				requiredFormat: "json",
+				address:        "wss://foo",
+				socketType:     "tcp",           // defaults to tcp
+				maxDuration:    2 * time.Second, // defaults to 2 secs
+			},
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			got, err := NewSocketSink(tc.address, tc.format, tc.opts...)
+
+			if tc.wantErr {
+				require.Error(t, err)
+				require.EqualError(t, err, tc.expectedErrMsg)
+				require.Nil(t, got)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.want, got)
+			}
+		})
+	}
+}
diff --git a/internal/observability/event/sink_stdout.go b/internal/observability/event/sink_stdout.go
new file mode 100644
index 000000000000..1c0508f80da6
--- /dev/null
+++ b/internal/observability/event/sink_stdout.go
@@ -0,0 +1,70 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*StdoutSink)(nil)
+
+// StdoutSink is a structure that implements the eventlogger.Node interface
+// as a Sink node that writes the events to the standard output stream.
+type StdoutSink struct {
+	requiredFormat string
+}
+
+// NewStdoutSinkNode creates a new StdoutSink that will write the events
+// it processes to the standard output stream, in the specified format.
+func NewStdoutSinkNode(format string) (*StdoutSink, error) {
+	format = strings.TrimSpace(format)
+	if format == "" {
+		return nil, fmt.Errorf("format is required: %w", ErrInvalidParameter)
+	}
+
+	return &StdoutSink{
+		requiredFormat: format,
+	}, nil
+}
+
+// Process persists the provided eventlogger.Event to the standard output stream.
+func (s *StdoutSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	if e == nil {
+		return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
+	}
+
+	formatted, found := e.Format(s.requiredFormat)
+	if !found {
+		return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter)
+	}
+
+	_, err := os.Stdout.Write(formatted)
+	if err != nil {
+		return nil, fmt.Errorf("error writing to stdout: %w", err)
+	}
+
+	// Return nil, nil to indicate the pipeline is complete.
+	return nil, nil
+}
+
+// Reopen is a no-op for the StdoutSink type.
+func (s *StdoutSink) Reopen() error {
+	return nil
+}
+
+// Type returns the eventlogger.NodeTypeSink constant.
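+// (Unlike FileSink, StdoutSink writes to os.Stdout without any locking, so it is
+// best suited to development or single-pipeline use.)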
+func (s *StdoutSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} diff --git a/internal/observability/event/sink_syslog.go b/internal/observability/event/sink_syslog.go new file mode 100644 index 000000000000..147b87089034 --- /dev/null +++ b/internal/observability/event/sink_syslog.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + gsyslog "github.com/hashicorp/go-syslog" +) + +var _ eventlogger.Node = (*SyslogSink)(nil) + +// SyslogSink is a sink node which handles writing events to syslog. +type SyslogSink struct { + requiredFormat string + syslogger gsyslog.Syslogger + logger hclog.Logger +} + +// NewSyslogSink should be used to create a new SyslogSink. +// Accepted options: WithFacility and WithTag. +func NewSyslogSink(format string, opt ...Option) (*SyslogSink, error) { + format = strings.TrimSpace(format) + if format == "" { + return nil, fmt.Errorf("format is required: %w", ErrInvalidParameter) + } + + opts, err := getOpts(opt...) + if err != nil { + return nil, err + } + + logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, opts.withFacility, opts.withTag) + if err != nil { + return nil, fmt.Errorf("error creating syslogger: %w", err) + } + + syslog := &SyslogSink{ + requiredFormat: format, + syslogger: logger, + logger: opts.withLogger, + } + + return syslog, nil +} + +// Process handles writing the event to the syslog. +func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + defer func() { + // If the context is errored (cancelled), and we were planning to return + // an error, let's also log (if we have a logger) in case the eventlogger's + // status channel and errors propagated. + if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil { + s.logger.Error("syslog sink error", "context", err, "error", retErr) + } + }() + + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + formatted, found := e.Format(s.requiredFormat) + if !found { + return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter) + } + + _, err := s.syslogger.Write(formatted) + if err != nil { + return nil, fmt.Errorf("error writing to syslog: %w", err) + } + + // return nil for the event to indicate the pipeline is complete. + return nil, nil +} + +// Reopen is a no-op for a syslog sink. +func (_ *SyslogSink) Reopen() error { + return nil +} + +// Type describes the type of this node (sink). +func (_ *SyslogSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} diff --git a/internal/observability/event/sink_syslog_test.go b/internal/observability/event/sink_syslog_test.go new file mode 100644 index 000000000000..519ae5197c6d --- /dev/null +++ b/internal/observability/event/sink_syslog_test.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestNewSyslogSink ensures that we validate the input arguments and can create +// the SyslogSink if everything goes to plan. 
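+// Note: the happy path relies on gsyslog.NewLogger being able to reach a local
+// syslog daemon, so it may fail on platforms or minimal containers without one.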
+func TestNewSyslogSink(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + format string + opts []Option + want *SyslogSink + wantErr bool + expectedErrMsg string + }{ + "format-empty": { + format: "", + wantErr: true, + expectedErrMsg: "format is required: invalid parameter", + }, + "format-whitespace": { + format: " ", + wantErr: true, + expectedErrMsg: "format is required: invalid parameter", + }, + "happy": { + format: "json", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := NewSyslogSink(tc.format, tc.opts...) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Nil(t, got) + } else { + require.NoError(t, err) + require.NotNil(t, got) + } + }) + } +} diff --git a/internalshared/configutil/config.go b/internalshared/configutil/config.go index 99777229f0d4..7ca2c32c509d 100644 --- a/internalshared/configutil/config.go +++ b/internalshared/configutil/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil @@ -41,18 +41,20 @@ type SharedConfig struct { // LogFormat specifies the log format. Valid values are "standard" and // "json". The values are case-insenstive. If no log format is specified, // then standard format will be used. + LogFile string `hcl:"log_file"` LogFormat string `hcl:"log_format"` LogLevel string `hcl:"log_level"` - LogFile string `hcl:"log_file"` - LogRotateDuration string `hcl:"log_rotate_duration"` LogRotateBytes int `hcl:"log_rotate_bytes"` LogRotateBytesRaw interface{} `hcl:"log_rotate_bytes"` + LogRotateDuration string `hcl:"log_rotate_duration"` LogRotateMaxFiles int `hcl:"log_rotate_max_files"` LogRotateMaxFilesRaw interface{} `hcl:"log_rotate_max_files"` PidFile string `hcl:"pid_file"` ClusterName string `hcl:"cluster_name"` + + AdministrativeNamespacePath string `hcl:"administrative_namespace_path"` } func ParseConfig(d string) (*SharedConfig, error) { @@ -99,7 +101,7 @@ func ParseConfig(d string) (*SharedConfig, error) { if o := list.Filter("seal"); len(o.Items) > 0 { result.found("seal", "Seal") - if err := parseKMS(&result.Seals, o, "seal", 3); err != nil { + if err := parseKMS(&result.Seals, o, "seal", 5); err != nil { return nil, fmt.Errorf("error parsing 'seal': %w", err) } } @@ -120,9 +122,17 @@ func ParseConfig(d string) (*SharedConfig, error) { if o := list.Filter("listener"); len(o.Items) > 0 { result.found("listener", "Listener") - if err := ParseListeners(&result, o); err != nil { + listeners, err := ParseListeners(o) + if err != nil { return nil, fmt.Errorf("error parsing 'listener': %w", err) } + // Update the shared config + result.Listeners = listeners + + // Track which types of listener were found. 
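+			// (For example, a config declaring one tcp and one unix listener will
+			// record both "tcp" and "unix" as found.)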
+ for _, l := range result.Listeners { + result.found(l.Type.String(), l.Type.String()) + } } if o := list.Filter("user_lockout"); len(o.Items) > 0 { @@ -167,16 +177,27 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { } result := map[string]interface{}{ - "disable_mlock": c.DisableMlock, - - "default_max_request_duration": c.DefaultMaxRequestDuration, - - "log_level": c.LogLevel, - "log_format": c.LogFormat, - - "pid_file": c.PidFile, + "default_max_request_duration": c.DefaultMaxRequestDuration, + "disable_mlock": c.DisableMlock, + "log_level": c.LogLevel, + "log_format": c.LogFormat, + "pid_file": c.PidFile, + "cluster_name": c.ClusterName, + "administrative_namespace_path": c.AdministrativeNamespacePath, + } - "cluster_name": c.ClusterName, + // Optional log related settings + if c.LogFile != "" { + result["log_file"] = c.LogFile + } + if c.LogRotateBytes != 0 { + result["log_rotate_bytes"] = c.LogRotateBytes + } + if c.LogRotateDuration != "" { + result["log_rotate_duration"] = c.LogRotateDuration + } + if c.LogRotateMaxFiles != 0 { + result["log_rotate_max_files"] = c.LogRotateMaxFiles } // Sanitize listeners @@ -215,7 +236,12 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { cleanSeal := map[string]interface{}{ "type": s.Type, "disabled": s.Disabled, + "name": s.Name, } + if s.Priority > 0 { + cleanSeal["priority"] = s.Priority + } + sanitizedSeals = append(sanitizedSeals, cleanSeal) } result["seals"] = sanitizedSeals @@ -253,6 +279,7 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { "lease_metrics_epsilon": c.Telemetry.LeaseMetricsEpsilon, "num_lease_metrics_buckets": c.Telemetry.NumLeaseMetricsTimeBuckets, "add_lease_metrics_namespace_labels": c.Telemetry.LeaseMetricsNameSpaceLabels, + "add_mount_point_rollback_metrics": c.Telemetry.RollbackMetricsIncludeMountPoint, } result["telemetry"] = sanitizedTelemetry } diff --git a/internalshared/configutil/config_test.go b/internalshared/configutil/config_test.go new file mode 100644 index 000000000000..4362f92284e5 --- /dev/null +++ b/internalshared/configutil/config_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package configutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type mapValue[T any] struct { + Value T + IsFound bool +} + +type expectedLogFields struct { + File mapValue[string] + Format mapValue[string] + Level mapValue[string] + RotateBytes mapValue[int] + RotateDuration mapValue[string] + RotateMaxFiles mapValue[int] +} + +// TestSharedConfig_Sanitized_LogFields ensures that 'log related' shared config +// is sanitized as expected. 
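+// Sanitized only emits the log_file and log_rotate_* keys when they hold
+// non-zero values, so the assertions below check for absence as well as presence.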
+func TestSharedConfig_Sanitized_LogFields(t *testing.T) { + tests := map[string]struct { + Value *SharedConfig + IsNil bool + Expected expectedLogFields + }{ + "nil": { + Value: nil, + IsNil: true, + }, + "empty": { + Value: &SharedConfig{}, + IsNil: false, + Expected: expectedLogFields{ + Format: mapValue[string]{IsFound: true, Value: ""}, + Level: mapValue[string]{IsFound: true, Value: ""}, + }, + }, + "only-log-level-and-format": { + Value: &SharedConfig{ + LogFormat: "json", + LogLevel: "warn", + }, + IsNil: false, + Expected: expectedLogFields{ + Format: mapValue[string]{IsFound: true, Value: "json"}, + Level: mapValue[string]{IsFound: true, Value: "warn"}, + }, + }, + "valid-log-fields": { + Value: &SharedConfig{ + LogFile: "vault.log", + LogFormat: "json", + LogLevel: "warn", + LogRotateBytes: 1024, + LogRotateDuration: "30m", + LogRotateMaxFiles: -1, + }, + IsNil: false, + Expected: expectedLogFields{ + File: mapValue[string]{IsFound: true, Value: "vault.log"}, + Format: mapValue[string]{IsFound: true, Value: "json"}, + Level: mapValue[string]{IsFound: true, Value: "warn"}, + RotateBytes: mapValue[int]{IsFound: true, Value: 1024}, + RotateDuration: mapValue[string]{IsFound: true, Value: "30m"}, + RotateMaxFiles: mapValue[int]{IsFound: true, Value: -1}, + }, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + cfg := tc.Value.Sanitized() + switch { + case tc.IsNil: + require.Nil(t, cfg) + default: + require.NotNil(t, cfg) + + // Log file + val, found := cfg["log_file"] + switch { + case tc.Expected.File.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.File.Value, val) + default: + require.Nil(t, val) + } + + // Log format + val, found = cfg["log_format"] + switch { + case tc.Expected.Format.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.Format.Value, val) + default: + require.Nil(t, val) + } + + // Log level + val, found = cfg["log_level"] + switch { + case tc.Expected.Level.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.Level.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate bytes + val, found = cfg["log_rotate_bytes"] + switch { + case tc.Expected.RotateBytes.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateBytes.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate duration + val, found = cfg["log_rotate_duration"] + switch { + case tc.Expected.RotateDuration.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateDuration.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate max files + val, found = cfg["log_rotate_max_files"] + switch { + case tc.Expected.RotateMaxFiles.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateMaxFiles.Value, val) + default: + require.Nil(t, val) + } + } + }) + } +} diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go index 3fd4bb948713..76e2b198a60e 100644 --- a/internalshared/configutil/config_util.go +++ b/internalshared/configutil/config_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise diff --git a/internalshared/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go index f0e5fcc04270..84ad39fb6a24 100644 --- a/internalshared/configutil/encrypt_decrypt.go +++ b/internalshared/configutil/encrypt_decrypt.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go index 19bf6858338b..3b4609e4549a 100644 --- a/internalshared/configutil/encrypt_decrypt_test.go +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil @@ -46,7 +46,7 @@ telemetry { } first := true - locs := decryptRegex.FindAllIndex([]byte(out), -1) + locs := decryptRegex.FindAllStringIndex(out, -1) for _, match := range locs { matchBytes := []byte(out)[match[0]:match[1]] matchBytes = bytes.TrimSuffix(bytes.TrimPrefix(matchBytes, []byte("{{decrypt(")), []byte(")}}")) diff --git a/internalshared/configutil/entropymode_enumer.go b/internalshared/configutil/entropymode_enumer.go new file mode 100644 index 000000000000..6b804001c481 --- /dev/null +++ b/internalshared/configutil/entropymode_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer -type=EntropyMode -trimprefix=Entropy"; DO NOT EDIT. + +package configutil + +import ( + "fmt" +) + +const _EntropyModeName = "UnknownAugmentation" + +var _EntropyModeIndex = [...]uint8{0, 7, 19} + +func (i EntropyMode) String() string { + if i < 0 || i >= EntropyMode(len(_EntropyModeIndex)-1) { + return fmt.Sprintf("EntropyMode(%d)", i) + } + return _EntropyModeName[_EntropyModeIndex[i]:_EntropyModeIndex[i+1]] +} + +var _EntropyModeValues = []EntropyMode{0, 1} + +var _EntropyModeNameToValueMap = map[string]EntropyMode{ + _EntropyModeName[0:7]: 0, + _EntropyModeName[7:19]: 1, +} + +// EntropyModeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func EntropyModeString(s string) (EntropyMode, error) { + if val, ok := _EntropyModeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to EntropyMode values", s) +} + +// EntropyModeValues returns all values of the enum +func EntropyModeValues() []EntropyMode { + return _EntropyModeValues +} + +// IsAEntropyMode returns "true" if the value is listed in the enum definition. "false" otherwise +func (i EntropyMode) IsAEntropyMode() bool { + for _, v := range _EntropyModeValues { + if i == v { + return true + } + } + return false +} diff --git a/internalshared/configutil/env_var_util.go b/internalshared/configutil/env_var_util.go new file mode 100644 index 000000000000..4c2986d18408 --- /dev/null +++ b/internalshared/configutil/env_var_util.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package configutil + +import ( + "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2" +) + +var ( + AliCloudKMSEnvVars = map[string]string{ + "ALICLOUD_REGION": "region", + "ALICLOUD_DOMAIN": "domain", + "ALICLOUD_ACCESS_KEY": "access_key", + "ALICLOUD_SECRET_KEY": "secret_key", + alicloudkms.EnvVaultAliCloudKmsSealKeyId: "kms_key_id", + alicloudkms.EnvAliCloudKmsWrapperKeyId: "kms_key_id", + } + + AWSKMSEnvVars = map[string]string{ + "AWS_REGION": "region", + "AWS_DEFAULT_REGION": "region", + "AWS_ACCESS_KEY_ID": "access_key", + "AWS_SESSION_TOKEN": "session_token", + "AWS_SECRET_ACCESS_KEY": "secret_key", + awskms.EnvVaultAwsKmsSealKeyId: "kms_key_id", + awskms.EnvAwsKmsWrapperKeyId: "kms_key_id", + "AWS_KMS_ENDPOINT": "endpoint", + } + + AzureEnvVars = map[string]string{ + "AZURE_TENANT_ID": "tenant_id", + "AZURE_CLIENT_ID": "client_id", + "AZURE_CLIENT_SECRET": "client_secret", + "AZURE_ENVIRONMENT": "environment", + "AZURE_AD_RESOURCE": "resource", + azurekeyvault.EnvAzureKeyVaultWrapperKeyName: "key_name", + azurekeyvault.EnvVaultAzureKeyVaultKeyName: "key_name", + azurekeyvault.EnvAzureKeyVaultWrapperVaultName: "vault_name", + azurekeyvault.EnvVaultAzureKeyVaultVaultName: "vault_name", + } + + GCPCKMSEnvVars = map[string]string{ + gcpckms.EnvGcpCkmsWrapperCredsPath: "credentials", + "GOOGLE_APPLICATION_CREDENTIALS": "credentials", + gcpckms.EnvGcpCkmsWrapperProject: "project", + gcpckms.EnvGcpCkmsWrapperLocation: "region", + gcpckms.EnvVaultGcpCkmsSealCryptoKey: "crypto_key", + gcpckms.EnvGcpCkmsWrapperCryptoKey: "crypto_key", + gcpckms.EnvGcpCkmsWrapperKeyRing: "key_ring", + gcpckms.EnvVaultGcpCkmsSealKeyRing: "key_ring", + } + + OCIKMSEnvVars = map[string]string{ + ocikms.EnvOciKmsWrapperCryptoEndpoint: "crypto_endpoint", + ocikms.EnvVaultOciKmsSealCryptoEndpoint: "crypto_endpoint", + ocikms.EnvOciKmsWrapperKeyId: "key_id", + ocikms.EnvVaultOciKmsSealKeyId: "key_id", + ocikms.EnvOciKmsWrapperManagementEndpoint: "management_endpoint", + ocikms.EnvVaultOciKmsSealManagementEndpoint: "management_endpoint", + } + + TransitEnvVars = map[string]string{ + "VAULT_ADDR": "address", + "VAULT_TOKEN": "token", + "VAULT_NAMESPACE": "namespace", + "VAULT_CACERT": "tls_ca_cert", + "VAULT_CLIENT_CERT": "tls_client_cert", + "VAULT_CLIENT_KEY": "tls_client_key", + "VAULT_TLS_SERVER_NAME": "tls_server_name", + "VAULT_SKIP_VERIFY": "tls_skip_verify", + transit.EnvVaultTransitSealKeyName: "key_name", + transit.EnvTransitWrapperKeyName: "key_name", + transit.EnvTransitWrapperMountPath: "mount_path", + transit.EnvVaultTransitSealMountPath: "mount_path", + transit.EnvTransitWrapperDisableRenewal: "disable_renewal", + transit.EnvVaultTransitSealDisableRenewal: "disable_renewal", + } + + // TransitPrioritizeConfigValues are the variables where file config takes precedence over env vars in transit seals + TransitPrioritizeConfigValues = []string{ + "token", + "address", + } + + // TransitTLSConfigVars are the TLS config variables for transit seals + // if one of them is set in file config, transit seals use the file config for all TLS values and ignore env vars + // otherwise they use the env vars for TLS config + TransitTLSConfigVars = []string{ + 
"tls_ca_cert", + "tls_client_cert", + "tls_client_key", + "tls_server_name", + "tls_skip_verify", + } +) diff --git a/internalshared/configutil/hcp_link.go b/internalshared/configutil/hcp_link.go index fd8d6b6ca853..0f130961d1af 100644 --- a/internalshared/configutil/hcp_link.go +++ b/internalshared/configutil/hcp_link.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/configutil/http_response_headers.go b/internalshared/configutil/http_response_headers.go index b808f9e6522c..cbc71bccfac3 100644 --- a/internalshared/configutil/http_response_headers.go +++ b/internalshared/configutil/http_response_headers.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/configutil/kms.go b/internalshared/configutil/kms.go index 025018124984..f0948118dd95 100644 --- a/internalshared/configutil/kms.go +++ b/internalshared/configutil/kms.go @@ -1,17 +1,21 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil import ( "context" "crypto/rand" + "errors" "fmt" "io" + "os" + "regexp" "strings" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" wrapping "github.com/hashicorp/go-kms-wrapping/v2" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2" @@ -24,24 +28,36 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" ) var ( ConfigureWrapper = configureWrapper CreateSecureRandomReaderFunc = createSecureRandomReader + GetEnvConfigFunc = getEnvConfig ) -// Entropy contains Entropy configuration for the server +//go:generate enumer -type=EntropyMode -trimprefix=Entropy + +// EntropyMode contains Entropy configuration for the server type EntropyMode int const ( EntropyUnknown EntropyMode = iota EntropyAugmentation + + KmsRenameDisabledSuffix = "-disabled" ) type Entropy struct { - Mode EntropyMode + Mode EntropyMode + SealName string +} + +type EntropySourcerInfo struct { + Sourcer entropy.Sourcer + Name string } // KMS contains KMS configuration for the server @@ -55,6 +71,9 @@ type KMS struct { Disabled bool Config map[string]string + + Priority int `hcl:"priority"` + Name string `hcl:"name"` } func (k *KMS) GoString() string { @@ -63,7 +82,7 @@ func (k *KMS) GoString() string { func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int) error { if len(list.Items) > maxKMS { - return fmt.Errorf("only two or less %q blocks are permitted", blockName) + return fmt.Errorf("only %d or less %q blocks are permitted", maxKMS, blockName) } seals := make([]*KMS, 0, len(list.Items)) @@ -102,6 +121,36 @@ func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int delete(m, "disabled") } + var priority int + if v, ok := m["priority"]; ok { + priority, err = parseutil.SafeParseInt(v) + if err != nil { + return multierror.Prefix(fmt.Errorf("unable to parse 'priority' in kms type %q: %w", key, err), fmt.Sprintf("%s.%s", blockName, key)) + } + delete(m, "priority") + + if priority < 1 { + return multierror.Prefix(fmt.Errorf("invalid priority in kms type %q: %d", key, priority), 
fmt.Sprintf("%s.%s", blockName, key)) + } + } + + name := strings.ToLower(key) + // ensure that seals of the same type will have unique names for seal migration + if disabled { + name += KmsRenameDisabledSuffix + } + if v, ok := m["name"]; ok { + name, ok = v.(string) + if !ok { + return multierror.Prefix(fmt.Errorf("unable to parse 'name' in kms type %q: unexpected type %T", key, v), fmt.Sprintf("%s.%s", blockName, key)) + } + delete(m, "name") + + if !regexp.MustCompile("^[a-zA-Z0-9-_]+$").MatchString(name) { + return multierror.Prefix(errors.New("'name' field can only include alphanumeric characters, hyphens, and underscores"), fmt.Sprintf("%s.%s", blockName, key)) + } + } + strMap := make(map[string]string, len(m)) for k, v := range m { s, err := parseutil.ParseString(v) @@ -115,6 +164,8 @@ func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int Type: strings.ToLower(key), Purpose: purpose, Disabled: disabled, + Priority: priority, + Name: name, } if len(strMap) > 0 { seal.Config = strMap @@ -168,6 +219,19 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin var kmsInfo map[string]string var err error + envConfig := GetEnvConfigFunc(configKMS) + if len(envConfig) > 0 && configKMS.Config == nil { + configKMS.Config = make(map[string]string) + } + // transit is a special case, because some config values take precedence over env vars + if configKMS.Type == wrapping.WrapperTypeTransit.String() { + mergeTransitConfig(configKMS.Config, envConfig) + } else { + for name, val := range envConfig { + configKMS.Config[name] = val + } + } + switch wrapping.WrapperType(configKMS.Type) { case wrapping.WrapperTypeShamir: return nil, nil @@ -235,7 +299,7 @@ func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[st func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := alicloudkms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -255,7 +319,7 @@ func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, ma var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := awskms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, awskms.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -275,7 +339,7 @@ var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, m func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := azurekeyvault.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, azurekeyvault.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) 
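+	// (WithDisallowEnvVars stops the wrapper itself from consulting the
+	// environment; env values are now gathered once via GetEnvConfigFunc and
+	// merged into kms.Config before this call.)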
if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -293,7 +357,7 @@ func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrappe func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := gcpckms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -312,7 +376,7 @@ func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := ocikms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { return nil, nil, err } @@ -328,7 +392,17 @@ func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[ var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := transit.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + var prefix string + if p, ok := kms.Config["key_id_prefix"]; ok { + prefix = p + } else { + prefix = kms.Name + } + if !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" + } + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config), + transit.WithKeyIdPrefix(prefix))...) 
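+	// (The key ID prefix defaults to the seal's name so that multiple transit
+	// seals can coexist without their key IDs colliding; a key_id_prefix config
+	// value overrides it.)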
if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -347,6 +421,75 @@ var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrappe return wrapper, info, nil } -func createSecureRandomReader(conf *SharedConfig, wrapper wrapping.Wrapper) (io.Reader, error) { +func createSecureRandomReader(_ *SharedConfig, _ []*EntropySourcerInfo, _ hclog.Logger) (io.Reader, error) { return rand.Reader, nil } + +func getEnvConfig(kms *KMS) map[string]string { + envValues := make(map[string]string) + + var wrapperEnvVars map[string]string + switch wrapping.WrapperType(kms.Type) { + case wrapping.WrapperTypeAliCloudKms: + wrapperEnvVars = AliCloudKMSEnvVars + case wrapping.WrapperTypeAwsKms: + wrapperEnvVars = AWSKMSEnvVars + case wrapping.WrapperTypeAzureKeyVault: + wrapperEnvVars = AzureEnvVars + case wrapping.WrapperTypeGcpCkms: + wrapperEnvVars = GCPCKMSEnvVars + case wrapping.WrapperTypeOciKms: + wrapperEnvVars = OCIKMSEnvVars + case wrapping.WrapperTypeTransit: + wrapperEnvVars = TransitEnvVars + default: + return nil + } + + for envVar, configName := range wrapperEnvVars { + val := os.Getenv(envVar) + if val != "" { + envValues[configName] = val + } + } + + return envValues +} + +func mergeTransitConfig(config map[string]string, envConfig map[string]string) { + useFileTlsConfig := false + for _, varName := range TransitTLSConfigVars { + if _, ok := config[varName]; ok { + useFileTlsConfig = true + break + } + } + + if useFileTlsConfig { + for _, varName := range TransitTLSConfigVars { + delete(envConfig, varName) + } + } + + for varName, val := range envConfig { + // for some values, file config takes precedence + if strutil.StrListContains(TransitPrioritizeConfigValues, varName) && config[varName] != "" { + continue + } + + config[varName] = val + } +} + +func (k *KMS) Clone() *KMS { + ret := &KMS{ + UnusedKeys: k.UnusedKeys, + Type: k.Type, + Purpose: k.Purpose, + Config: k.Config, + Name: k.Name, + Disabled: k.Disabled, + Priority: k.Priority, + } + return ret +} diff --git a/internalshared/configutil/kms_test.go b/internalshared/configutil/kms_test.go new file mode 100644 index 000000000000..9eb19a3e3d7d --- /dev/null +++ b/internalshared/configutil/kms_test.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package configutil + +import ( + "os" + "reflect" + "testing" +) + +func Test_getEnvConfig(t *testing.T) { + tests := []struct { + name string + kms *KMS + envVars map[string]string + want map[string]string + }{ + { + "AliCloud wrapper", + &KMS{ + Type: "alicloudkms", + Priority: 1, + }, + map[string]string{"ALICLOUD_REGION": "test_region", "ALICLOUD_DOMAIN": "test_domain", "ALICLOUD_ACCESS_KEY": "test_access_key", "ALICLOUD_SECRET_KEY": "test_secret_key", "VAULT_ALICLOUDKMS_SEAL_KEY_ID": "test_key_id"}, + map[string]string{"region": "test_region", "domain": "test_domain", "access_key": "test_access_key", "secret_key": "test_secret_key", "kms_key_id": "test_key_id"}, + }, + { + "AWS KMS wrapper", + &KMS{ + Type: "awskms", + Priority: 1, + }, + map[string]string{"AWS_REGION": "test_region", "AWS_ACCESS_KEY_ID": "test_access_key", "AWS_SECRET_ACCESS_KEY": "test_secret_key", "VAULT_AWSKMS_SEAL_KEY_ID": "test_key_id"}, + map[string]string{"region": "test_region", "access_key": "test_access_key", "secret_key": "test_secret_key", "kms_key_id": "test_key_id"}, + }, + { + "Azure KeyVault wrapper", + &KMS{ + Type: "azurekeyvault", + Priority: 1, + }, + map[string]string{"AZURE_TENANT_ID": "test_tenant_id", "AZURE_CLIENT_ID": "test_client_id", "AZURE_CLIENT_SECRET": "test_client_secret", "AZURE_ENVIRONMENT": "test_environment", "VAULT_AZUREKEYVAULT_VAULT_NAME": "test_vault_name", "VAULT_AZUREKEYVAULT_KEY_NAME": "test_key_name"}, + map[string]string{"tenant_id": "test_tenant_id", "client_id": "test_client_id", "client_secret": "test_client_secret", "environment": "test_environment", "vault_name": "test_vault_name", "key_name": "test_key_name"}, + }, + { + "GCP CKMS wrapper", + &KMS{ + Type: "gcpckms", + Priority: 1, + }, + map[string]string{"GOOGLE_CREDENTIALS": "test_credentials", "GOOGLE_PROJECT": "test_project", "GOOGLE_REGION": "test_region", "VAULT_GCPCKMS_SEAL_KEY_RING": "test_key_ring", "VAULT_GCPCKMS_SEAL_CRYPTO_KEY": "test_crypto_key"}, + map[string]string{"credentials": "test_credentials", "project": "test_project", "region": "test_region", "key_ring": "test_key_ring", "crypto_key": "test_crypto_key"}, + }, + { + "OCI KMS wrapper", + &KMS{ + Type: "ocikms", + Priority: 1, + }, + map[string]string{"VAULT_OCIKMS_SEAL_KEY_ID": "test_key_id", "VAULT_OCIKMS_CRYPTO_ENDPOINT": "test_crypto_endpoint", "VAULT_OCIKMS_MANAGEMENT_ENDPOINT": "test_management_endpoint"}, + map[string]string{"key_id": "test_key_id", "crypto_endpoint": "test_crypto_endpoint", "management_endpoint": "test_management_endpoint"}, + }, + { + "Transit wrapper", + &KMS{ + Type: "transit", + Priority: 1, + }, + map[string]string{"VAULT_ADDR": "test_address", "VAULT_TOKEN": "test_token", "VAULT_TRANSIT_SEAL_KEY_NAME": "test_key_name", "VAULT_TRANSIT_SEAL_MOUNT_PATH": "test_mount_path"}, + map[string]string{"address": "test_address", "token": "test_token", "key_name": "test_key_name", "mount_path": "test_mount_path"}, + }, + { + "Environment vars not set", + &KMS{ + Type: "awskms", + Priority: 1, + }, + map[string]string{}, + map[string]string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for envName, envVal := range tt.envVars { + if err := os.Setenv(envName, envVal); err != nil { + t.Errorf("error setting environment vars for test: %s", err) + } + } + + if got := GetEnvConfigFunc(tt.kms); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getEnvConfig() = %v, want %v", got, tt.want) + } + + for env := range tt.envVars { + if err := os.Unsetenv(env); err != 
nil { + t.Errorf("error unsetting environment vars for test: %s", err) + } + } + }) + } +} diff --git a/internalshared/configutil/lint.go b/internalshared/configutil/lint.go index 24b968e6b96b..3831564549c7 100644 --- a/internalshared/configutil/lint.go +++ b/internalshared/configutil/lint.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/configutil/listener.go b/internalshared/configutil/listener.go index 5418f6643f6c..b9ed168abf7e 100644 --- a/internalshared/configutil/listener.go +++ b/internalshared/configutil/listener.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil @@ -19,8 +19,17 @@ import ( "github.com/hashicorp/go-sockaddr/template" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/namespace" ) +const ( + TCP ListenerType = "tcp" + Unix ListenerType = "unix" +) + +// ListenerType represents the supported types of listener. +type ListenerType string + type ListenerTelemetry struct { UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` UnauthenticatedMetricsAccess bool `hcl:"-"` @@ -44,7 +53,7 @@ type Listener struct { UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` RawConfig map[string]interface{} - Type string + Type ListenerType Purpose []string `hcl:"-"` PurposeRaw interface{} `hcl:"purpose"` Role string `hcl:"role"` @@ -85,14 +94,16 @@ type Listener struct { ProxyProtocolAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"` ProxyProtocolAuthorizedAddrsRaw interface{} `hcl:"proxy_protocol_authorized_addrs,alias:ProxyProtocolAuthorizedAddrs"` - XForwardedForAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"` - XForwardedForAuthorizedAddrsRaw interface{} `hcl:"x_forwarded_for_authorized_addrs,alias:XForwardedForAuthorizedAddrs"` - XForwardedForHopSkips int64 `hcl:"-"` - XForwardedForHopSkipsRaw interface{} `hcl:"x_forwarded_for_hop_skips,alias:XForwardedForHopSkips"` - XForwardedForRejectNotPresent bool `hcl:"-"` - XForwardedForRejectNotPresentRaw interface{} `hcl:"x_forwarded_for_reject_not_present,alias:XForwardedForRejectNotPresent"` - XForwardedForRejectNotAuthorized bool `hcl:"-"` - XForwardedForRejectNotAuthorizedRaw interface{} `hcl:"x_forwarded_for_reject_not_authorized,alias:XForwardedForRejectNotAuthorized"` + XForwardedForAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"` + XForwardedForAuthorizedAddrsRaw interface{} `hcl:"x_forwarded_for_authorized_addrs,alias:XForwardedForAuthorizedAddrs"` + XForwardedForHopSkips int64 `hcl:"-"` + XForwardedForHopSkipsRaw interface{} `hcl:"x_forwarded_for_hop_skips,alias:XForwardedForHopSkips"` + XForwardedForRejectNotPresent bool `hcl:"-"` + XForwardedForRejectNotPresentRaw interface{} `hcl:"x_forwarded_for_reject_not_present,alias:XForwardedForRejectNotPresent"` + XForwardedForRejectNotAuthorized bool `hcl:"-"` + XForwardedForRejectNotAuthorizedRaw interface{} `hcl:"x_forwarded_for_reject_not_authorized,alias:XForwardedForRejectNotAuthorized"` + XForwardedForClientCertHeader string `hcl:"x_forwarded_for_client_cert_header,alias:XForwardedForClientCertHeader"` + XForwardedForClientCertHeaderDecoders string `hcl:"x_forwarded_for_client_cert_header_decoders,alias:XForwardedForClientCertHeaderDecoders"` SocketMode string `hcl:"socket_mode"` SocketUser string `hcl:"socket_user"` @@ -100,6 +111,8 @@ type Listener struct { AgentAPI *AgentAPI `hcl:"agent_api"` + 
ProxyAPI *ProxyAPI `hcl:"proxy_api"` + Telemetry ListenerTelemetry `hcl:"telemetry"` Profiling ListenerProfiling `hcl:"profiling"` InFlightRequestLogging ListenerInFlightRequestLogging `hcl:"inflight_requests_logging"` @@ -116,6 +129,26 @@ type Listener struct { // Custom Http response headers CustomResponseHeaders map[string]map[string]string `hcl:"-"` CustomResponseHeadersRaw interface{} `hcl:"custom_response_headers"` + + // ChrootNamespace will prepend the specified namespace to requests + ChrootNamespaceRaw interface{} `hcl:"chroot_namespace"` + ChrootNamespace string `hcl:"-"` + + // Per-listener redaction configuration + RedactAddressesRaw any `hcl:"redact_addresses"` + RedactAddresses bool `hcl:"-"` + RedactClusterNameRaw any `hcl:"redact_cluster_name"` + RedactClusterName bool `hcl:"-"` + RedactVersionRaw any `hcl:"redact_version"` + RedactVersion bool `hcl:"-"` + + // DisableReplicationStatusEndpoint disables the unauthenticated replication status endpoints + DisableReplicationStatusEndpointsRaw interface{} `hcl:"disable_replication_status_endpoints"` + DisableReplicationStatusEndpoints bool `hcl:"-"` + + // DisableRequestLimiter allows per-listener disabling of the Request Limiter. + DisableRequestLimiterRaw any `hcl:"disable_request_limiter"` + DisableRequestLimiter bool `hcl:"-"` } // AgentAPI allows users to select which parts of the Agent API they want enabled. @@ -123,6 +156,11 @@ type AgentAPI struct { EnableQuit bool `hcl:"enable_quit"` } +// ProxyAPI allows users to select which parts of the Vault Proxy API they want enabled. +type ProxyAPI struct { + EnableQuit bool `hcl:"enable_quit"` +} + func (l *Listener) GoString() string { return fmt.Sprintf("*%#v", *l) } @@ -132,317 +170,543 @@ func (l *Listener) Validate(path string) []ConfigError { return append(results, ValidateUnusedFields(l.Profiling.UnusedKeys, path)...) } -func ParseListeners(result *SharedConfig, list *ast.ObjectList) error { - var err error - result.Listeners = make([]*Listener, 0, len(list.Items)) +// ParseSingleIPTemplate is used as a helper function to parse out a single IP +// address from a config parameter. +// If the input doesn't appear to contain the 'template' format, +// it will return the specified input unchanged. +func ParseSingleIPTemplate(ipTmpl string) (string, error) { + r := regexp.MustCompile("{{.*?}}") + if !r.MatchString(ipTmpl) { + return ipTmpl, nil + } + + out, err := template.Parse(ipTmpl) + if err != nil { + return "", fmt.Errorf("unable to parse address template %q: %v", ipTmpl, err) + } + + ips := strings.Split(out, " ") + switch len(ips) { + case 0: + return "", errors.New("no addresses found, please configure one") + case 1: + return strings.TrimSpace(ips[0]), nil + default: + return "", fmt.Errorf("multiple addresses found (%q), please configure one", out) + } +} + +// ParseListeners attempts to parse the AST list of objects into listeners. 
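A caller-side sketch of how the refactored ParseListeners can be driven from raw HCL, assuming the hashicorp/hcl v1 API this file already imports; exampleParseListeners is a hypothetical helper, not part of the change:

```go
package configutil

import (
	"errors"
	"fmt"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
)

// exampleParseListeners is a hypothetical caller showing how the refactored
// ParseListeners can be fed from raw HCL text.
func exampleParseListeners(cfg string) ([]*Listener, error) {
	file, err := hcl.Parse(cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}

	objList, ok := file.Node.(*ast.ObjectList)
	if !ok {
		return nil, errors.New("config does not contain a root object list")
	}

	// Keep only the "listener" blocks, mirroring how the server config
	// plumbs its AST into ParseListeners.
	return ParseListeners(objList.Filter("listener"))
}
```

A config such as `listener "tcp" { address = "127.0.0.1:8200" }` would then yield a single *Listener whose Type resolves to "tcp" via the block label fallback.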
+func ParseListeners(list *ast.ObjectList) ([]*Listener, error) { + listeners := make([]*Listener, len(list.Items)) + for i, item := range list.Items { - var l Listener - if err := hcl.DecodeObject(&l, item.Val); err != nil { - return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) - } - if rendered, err := ParseSingleIPTemplate(l.Address); err != nil { - return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) - } else { - l.Address = rendered + l, err := parseListener(item) + if err != nil { + return nil, multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) } - if rendered, err := ParseSingleIPTemplate(l.ClusterAddress); err != nil { - return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) - } else { - l.ClusterAddress = rendered + listeners[i] = l + } + + return listeners, nil +} + +// parseListener attempts to parse the AST object into a listener. +func parseListener(item *ast.ObjectItem) (*Listener, error) { + var l *Listener + var err error + + // Decode the current item + if err = hcl.DecodeObject(&l, item.Val); err != nil { + return nil, err + } + + // Parse and update address if required. + if l.Address, err = ParseSingleIPTemplate(l.Address); err != nil { + return nil, err + } + + // Parse and update cluster address if required. + if l.ClusterAddress, err = ParseSingleIPTemplate(l.ClusterAddress); err != nil { + return nil, err + } + + // Get the values for sanitizing + var m map[string]interface{} + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return nil, err + } + l.RawConfig = m + + // Parse type, but supply a fallback if type wasn't set. + var fallbackType string + if len(item.Keys) == 1 { + fallbackType = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + + if err = l.parseType(fallbackType); err != nil { + return nil, err + } + + // Parse out each set of settings for the listener. + for _, parser := range []func() error{ + l.parseRequestSettings, + l.parseTLSSettings, + l.parseHTTPTimeoutSettings, + l.parseProxySettings, + l.parseForwardedForSettings, + l.parseTelemetrySettings, + l.parseProfilingSettings, + l.parseInFlightRequestSettings, + l.parseCORSSettings, + l.parseHTTPHeaderSettings, + l.parseChrootNamespaceSettings, + l.parseRedactionSettings, + l.parseDisableReplicationStatusEndpointSettings, + l.parseDisableRequestLimiter, + } { + err := parser() + if err != nil { + return nil, err } + } - // Hacky way, for now, to get the values we want for sanitizing - var m map[string]interface{} - if err := hcl.DecodeObject(&m, item.Val); err != nil { - return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) + return l, nil +} + +// Normalize returns the lower case string version of a listener type. +func (t ListenerType) Normalize() ListenerType { + return ListenerType(strings.ToLower(string(t))) +} + +// String returns the string version of a listener type. +func (t ListenerType) String() string { + return string(t.Normalize()) +} + +// parseAndClearBool parses a raw setting as a bool configuration parameter. If +// the raw value is successfully parsed, the parsedSetting argument is set to it +// and the rawSetting argument is cleared. Otherwise, the rawSetting argument is +// left unchanged and an error is returned. 
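The raw/parsed two-field convention that the parseAndClear* helpers service can be shown in miniature. A sketch, assuming parseutil.ParseBool from go-secure-stdlib (the same helper this file uses) and an illustrative struct:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

// setting mirrors the two-field convention used throughout the Listener
// struct: HCL decodes into the *Raw field, parsing moves a typed value
// across and clears the raw side so sanitized output never re-reports it.
type setting struct {
	EnabledRaw interface{}
	Enabled    bool
}

func main() {
	s := setting{EnabledRaw: "true"}
	if s.EnabledRaw != nil {
		v, err := parseutil.ParseBool(s.EnabledRaw)
		if err != nil {
			panic(err)
		}
		s.Enabled = v
		// Cleared only on success; on error the raw value stays in place.
		s.EnabledRaw = nil
	}
	fmt.Println(s.Enabled, s.EnabledRaw) // true <nil>
}
```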
+func parseAndClearBool(rawSetting *interface{}, parsedSetting *bool) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseBool(*rawSetting) + if err != nil { + return err } - l.RawConfig = m - - // Base values - { - switch { - case l.Type != "": - case len(item.Keys) == 1: - l.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) - default: - return multierror.Prefix(errors.New("listener type must be specified"), fmt.Sprintf("listeners.%d:", i)) - } - - l.Type = strings.ToLower(l.Type) - switch l.Type { - case "tcp", "unix": - result.found(l.Type, l.Type) - default: - return multierror.Prefix(fmt.Errorf("unsupported listener type %q", l.Type), fmt.Sprintf("listeners.%d:", i)) - } - - if l.PurposeRaw != nil { - if l.Purpose, err = parseutil.ParseCommaStringSlice(l.PurposeRaw); err != nil { - return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in listener type %q: %w", l.Type, err), fmt.Sprintf("listeners.%d:", i)) - } - for i, v := range l.Purpose { - l.Purpose[i] = strings.ToLower(v) - } - - l.PurposeRaw = nil - } - - switch l.Role { - case "default", "metrics_only", "": - result.found(l.Type, l.Type) - default: - return multierror.Prefix(fmt.Errorf("unsupported listener role %q", l.Role), fmt.Sprintf("listeners.%d:", i)) - } + + *rawSetting = nil + } + + return nil +} + +// parseAndClearString parses a raw setting as a string configuration parameter. +// If the raw value is successfully parsed, the parsedSetting argument is set to +// it and the rawSetting argument is cleared. Otherwise, the rawSetting argument +// is left unchanged and an error is returned. +func parseAndClearString(rawSetting *interface{}, parsedSetting *string) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseString(*rawSetting) + if err != nil { + return err } - // Request Parameters - { - if l.MaxRequestSizeRaw != nil { - if l.MaxRequestSize, err = parseutil.ParseInt(l.MaxRequestSizeRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing max_request_size: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.MaxRequestSizeRaw = nil - } - - if l.MaxRequestDurationRaw != nil { - if l.MaxRequestDuration, err = parseutil.ParseDurationSecond(l.MaxRequestDurationRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing max_request_duration: %w", err), fmt.Sprintf("listeners.%d", i)) - } - if l.MaxRequestDuration < 0 { - return multierror.Prefix(errors.New("max_request_duration cannot be negative"), fmt.Sprintf("listeners.%d", i)) - } - - l.MaxRequestDurationRaw = nil - } - - if l.RequireRequestHeaderRaw != nil { - if l.RequireRequestHeader, err = parseutil.ParseBool(l.RequireRequestHeaderRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for require_request_header: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.RequireRequestHeaderRaw = nil - } + *rawSetting = nil + } + + return nil +} + +// parseAndClearInt parses a raw setting as an integer configuration parameter. +// If the raw value is successfully parsed, the parsedSetting argument is set to +// it and the rawSetting argument is cleared. Otherwise, the rawSetting argument +// is left unchanged and an error is returned. 
+func parseAndClearInt(rawSetting *interface{}, parsedSetting *int64) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseInt(*rawSetting) + if err != nil { + return err } - // TLS Parameters - { - if l.TLSDisableRaw != nil { - if l.TLSDisable, err = parseutil.ParseBool(l.TLSDisableRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_disable: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.TLSDisableRaw = nil - } - - if l.TLSCipherSuitesRaw != "" { - if l.TLSCipherSuites, err = tlsutil.ParseCiphers(l.TLSCipherSuitesRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_cipher_suites: %w", err), fmt.Sprintf("listeners.%d", i)) - } - } - - if l.TLSRequireAndVerifyClientCertRaw != nil { - if l.TLSRequireAndVerifyClientCert, err = parseutil.ParseBool(l.TLSRequireAndVerifyClientCertRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_require_and_verify_client_cert: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.TLSRequireAndVerifyClientCertRaw = nil - } - - if l.TLSDisableClientCertsRaw != nil { - if l.TLSDisableClientCerts, err = parseutil.ParseBool(l.TLSDisableClientCertsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_disable_client_certs: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.TLSDisableClientCertsRaw = nil - } + *rawSetting = nil + } + + return nil +} + +// parseAndClearDurationSecond parses a raw setting as a time duration +// configuration parameter. If the raw value is successfully parsed, the +// parsedSetting argument is set to it and the rawSetting argument is cleared. +// Otherwise, the rawSetting argument is left unchanged and an error is +// returned. +func parseAndClearDurationSecond(rawSetting *interface{}, parsedSetting *time.Duration) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseDurationSecond(*rawSetting) + if err != nil { + return err } - // HTTP timeouts - { - if l.HTTPReadTimeoutRaw != nil { - if l.HTTPReadTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_read_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } + *rawSetting = nil + } - l.HTTPReadTimeoutRaw = nil - } + return nil +} - if l.HTTPReadHeaderTimeoutRaw != nil { - if l.HTTPReadHeaderTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadHeaderTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_read_header_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseDisableReplicationStatusEndpointSettings attempts to parse the raw +// disable_replication_status_endpoints setting. The receiving Listener's +// DisableReplicationStatusEndpoints field will be set with the successfully +// parsed value. 
+func (l *Listener) parseDisableReplicationStatusEndpointSettings() error { + if l.Type != TCP { + return nil + } - l.HTTPReadHeaderTimeoutRaw = nil - } + if err := parseAndClearBool(&l.DisableReplicationStatusEndpointsRaw, &l.DisableReplicationStatusEndpoints); err != nil { + return fmt.Errorf("invalid value for disable_replication_status_endpoints: %w", err) + } - if l.HTTPWriteTimeoutRaw != nil { - if l.HTTPWriteTimeout, err = parseutil.ParseDurationSecond(l.HTTPWriteTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_write_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } + return nil +} - l.HTTPWriteTimeoutRaw = nil - } +// parseDisableRequestLimiter attempts to parse the raw disable_request_limiter +// setting. The receiving Listener's DisableRequestLimiter field will be set +// with the successfully parsed value; otherwise an error is returned. +func (l *Listener) parseDisableRequestLimiter() error { + if err := parseAndClearBool(&l.DisableRequestLimiterRaw, &l.DisableRequestLimiter); err != nil { + return fmt.Errorf("invalid value for disable_request_limiter: %w", err) + } - if l.HTTPIdleTimeoutRaw != nil { - if l.HTTPIdleTimeout, err = parseutil.ParseDurationSecond(l.HTTPIdleTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_idle_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } + return nil +} - l.HTTPIdleTimeoutRaw = nil - } - } +// parseChrootNamespaceSettings attempts to parse the raw listener chroot namespace settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseChrootNamespaceSettings() error { + var ( + err error + setting string + ) - // Proxy Protocol config - { - if l.ProxyProtocolAuthorizedAddrsRaw != nil { - if l.ProxyProtocolAuthorizedAddrs, err = parseutil.ParseAddrs(l.ProxyProtocolAuthorizedAddrsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing proxy_protocol_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - switch l.ProxyProtocolBehavior { - case "allow_authorized", "deny_authorized": - if len(l.ProxyProtocolAuthorizedAddrs) == 0 { - return multierror.Prefix(errors.New("proxy_protocol_behavior set to allow or deny only authorized addresses but no proxy_protocol_authorized_addrs value"), fmt.Sprintf("listeners.%d", i)) - } - } - - l.ProxyProtocolAuthorizedAddrsRaw = nil - } - } + err = parseAndClearString(&l.ChrootNamespaceRaw, &setting) + if err != nil { + return fmt.Errorf("invalid value for chroot_namespace: %w", err) + } - // X-Forwarded-For config - { - if l.XForwardedForAuthorizedAddrsRaw != nil { - if l.XForwardedForAuthorizedAddrs, err = parseutil.ParseAddrs(l.XForwardedForAuthorizedAddrsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i)) - } + l.ChrootNamespace = namespace.Canonicalize(setting) - l.XForwardedForAuthorizedAddrsRaw = nil - } + return nil +} - if l.XForwardedForHopSkipsRaw != nil { - if l.XForwardedForHopSkips, err = parseutil.ParseInt(l.XForwardedForHopSkipsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_hop_skips: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseType attempts to sanitize and validate the type set on the listener. +// If the listener has no type set, the fallback value will be used. +// The state of the listener will be modified. 
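A compact restatement of that resolution order, with resolveType as a hypothetical standalone analogue of parseType:

```go
package main

import (
	"fmt"
	"strings"
)

// resolveType is a hypothetical standalone analogue of parseType: an explicit
// type wins, otherwise the block label (listener "tcp" { ... }) is used as a
// fallback, and the result is lower-cased before validation.
func resolveType(explicit, fallback string) (string, error) {
	raw := explicit
	if raw == "" {
		raw = fallback
	}
	if raw == "" {
		return "", fmt.Errorf("listener type must be specified")
	}
	switch t := strings.ToLower(raw); t {
	case "tcp", "unix":
		return t, nil
	default:
		return "", fmt.Errorf("unsupported listener type %q", t)
	}
}

func main() {
	fmt.Println(resolveType("TCP", ""))  // tcp <nil>
	fmt.Println(resolveType("", "uNiX")) // unix <nil>
	fmt.Println(resolveType("foo", ""))  // prints the "unsupported listener type" error
}
```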
+func (l *Listener) parseType(fallback string) error { + switch { + case l.Type != "": + case fallback != "": + default: + return errors.New("listener type must be specified") + } - if l.XForwardedForHopSkips < 0 { - return multierror.Prefix(fmt.Errorf("x_forwarded_for_hop_skips cannot be negative but set to %d", l.XForwardedForHopSkips), fmt.Sprintf("listeners.%d", i)) - } + // Use type if available, otherwise fall back. + rawType := l.Type + if rawType == "" { + rawType = ListenerType(fallback) + } - l.XForwardedForHopSkipsRaw = nil - } + parsedType := rawType.Normalize() - if l.XForwardedForRejectNotAuthorizedRaw != nil { - if l.XForwardedForRejectNotAuthorized, err = parseutil.ParseBool(l.XForwardedForRejectNotAuthorizedRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_authorized: %w", err), fmt.Sprintf("listeners.%d", i)) - } + // Sanity check the values + switch parsedType { + case TCP, Unix: + default: + return fmt.Errorf("unsupported listener type %q", parsedType) + } + + l.Type = parsedType - l.XForwardedForRejectNotAuthorizedRaw = nil - } + return nil +} - if l.XForwardedForRejectNotPresentRaw != nil { - if l.XForwardedForRejectNotPresent, err = parseutil.ParseBool(l.XForwardedForRejectNotPresentRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_present: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseRequestSettings attempts to parse the raw listener request settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseRequestSettings() error { + if err := parseAndClearInt(&l.MaxRequestSizeRaw, &l.MaxRequestSize); err != nil { + return fmt.Errorf("error parsing max_request_size: %w", err) + } - l.XForwardedForRejectNotPresentRaw = nil - } + if l.MaxRequestDurationRaw != nil { + maxRequestDuration, err := parseutil.ParseDurationSecond(l.MaxRequestDurationRaw) + if err != nil { + return fmt.Errorf("error parsing max_request_duration: %w", err) } - // Telemetry - { - if l.Telemetry.UnauthenticatedMetricsAccessRaw != nil { - if l.Telemetry.UnauthenticatedMetricsAccess, err = parseutil.ParseBool(l.Telemetry.UnauthenticatedMetricsAccessRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for telemetry.unauthenticated_metrics_access: %w", err), fmt.Sprintf("listeners.%d", i)) - } + if maxRequestDuration < 0 { + return errors.New("max_request_duration cannot be negative") + } + + l.MaxRequestDuration = maxRequestDuration + l.MaxRequestDurationRaw = nil + } + + if err := parseAndClearBool(&l.RequireRequestHeaderRaw, &l.RequireRequestHeader); err != nil { + return fmt.Errorf("invalid value for require_request_header: %w", err) + } + + if err := parseAndClearBool(&l.DisableRequestLimiterRaw, &l.DisableRequestLimiter); err != nil { + return fmt.Errorf("invalid value for disable_request_limiter: %w", err) + } - l.Telemetry.UnauthenticatedMetricsAccessRaw = nil - } + return nil +} + +// parseTLSSettings attempts to parse the raw listener TLS settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. 
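The tls_cipher_suites string ultimately becomes a []uint16 of IDs matching crypto/tls constants; the listener tests expect TLS_RSA_WITH_RC4_128_SHA to resolve exactly this way. A sketch of that mapping using the standard library's suite tables, as a stand-in illustration rather than the tlsutil.ParseCiphers implementation:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// Build a name-to-ID lookup from the standard library's cipher suite tables
// (TLS_RSA_WITH_RC4_128_SHA lives in the insecure table).
func main() {
	byName := map[string]uint16{}
	for _, cs := range tls.CipherSuites() {
		byName[cs.Name] = cs.ID
	}
	for _, cs := range tls.InsecureCipherSuites() {
		byName[cs.Name] = cs.ID
	}

	id, ok := byName["TLS_RSA_WITH_RC4_128_SHA"]
	fmt.Printf("found=%v id=0x%04x matches constant: %v\n", ok, id, id == tls.TLS_RSA_WITH_RC4_128_SHA)
}
```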
+func (l *Listener) parseTLSSettings() error { + if err := parseAndClearBool(&l.TLSDisableRaw, &l.TLSDisable); err != nil { + return fmt.Errorf("invalid value for tls_disable: %w", err) + } + + if l.TLSCipherSuitesRaw != "" { + tlsCipherSuites, err := tlsutil.ParseCiphers(l.TLSCipherSuitesRaw) + if err != nil { + return fmt.Errorf("invalid value for tls_cipher_suites: %w", err) } + l.TLSCipherSuites = tlsCipherSuites + } + + if err := parseAndClearBool(&l.TLSRequireAndVerifyClientCertRaw, &l.TLSRequireAndVerifyClientCert); err != nil { + return fmt.Errorf("invalid value for tls_require_and_verify_client_cert: %w", err) + } + + if err := parseAndClearBool(&l.TLSDisableClientCertsRaw, &l.TLSDisableClientCerts); err != nil { + return fmt.Errorf("invalid value for tls_disable_client_certs: %w", err) + } + + // Clear raw values after successful parsing. + l.TLSCipherSuitesRaw = "" + + return nil +} - // Profiling - { - if l.Profiling.UnauthenticatedPProfAccessRaw != nil { - if l.Profiling.UnauthenticatedPProfAccess, err = parseutil.ParseBool(l.Profiling.UnauthenticatedPProfAccessRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for profiling.unauthenticated_pprof_access: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseHTTPHeaderSettings attempts to parse the raw listener HTTP header settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseHTTPHeaderSettings() error { + // Custom response headers are only supported by TCP listeners. + // Clear raw data and return early if it was something else. + if l.Type != TCP { + l.CustomResponseHeadersRaw = nil + return nil + } + + // if CustomResponseHeadersRaw is nil, we still need to set the default headers + customHeadersMap, err := ParseCustomResponseHeaders(l.CustomResponseHeadersRaw) + if err != nil { + return fmt.Errorf("failed to parse custom_response_headers: %w", err) + } - l.Profiling.UnauthenticatedPProfAccessRaw = nil - } + l.CustomResponseHeaders = customHeadersMap + l.CustomResponseHeadersRaw = nil + + return nil +} + +// parseHTTPTimeoutSettings attempts to parse the raw listener HTTP timeout settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseHTTPTimeoutSettings() error { + if err := parseAndClearDurationSecond(&l.HTTPReadTimeoutRaw, &l.HTTPReadTimeout); err != nil { + return fmt.Errorf("error parsing http_read_timeout: %w", err) + } + + if err := parseAndClearDurationSecond(&l.HTTPReadHeaderTimeoutRaw, &l.HTTPReadHeaderTimeout); err != nil { + return fmt.Errorf("error parsing http_read_header_timeout: %w", err) + } + + if err := parseAndClearDurationSecond(&l.HTTPWriteTimeoutRaw, &l.HTTPWriteTimeout); err != nil { + return fmt.Errorf("error parsing http_write_timeout: %w", err) + } + + if err := parseAndClearDurationSecond(&l.HTTPIdleTimeoutRaw, &l.HTTPIdleTimeout); err != nil { + return fmt.Errorf("error parsing http_idle_timeout: %w", err) + } + + return nil +} + +// parseProxySettings attempts to parse the raw listener proxy settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. 
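The accepted proxy_protocol_behavior values, restated as a tiny validator; validBehavior is a hypothetical extraction of the switch in the function below:

```go
package main

import "fmt"

// validBehavior is a hypothetical extraction of the proxy_protocol_behavior
// switch. Note that "allow_authorized" and "deny_unauthorized" rely on
// proxy_protocol_authorized_addrs having parsed successfully;
// parseutil.ParseAddrs yields one or more addresses or an error, so no
// separate length check is needed.
func validBehavior(b string) error {
	switch b {
	case "allow_authorized", "deny_unauthorized", "use_always", "":
		return nil
	default:
		return fmt.Errorf("unsupported value supplied for proxy_protocol_behavior: %q", b)
	}
}

func main() {
	fmt.Println(validBehavior("use_always")) // <nil>
	fmt.Println(validBehavior("juan"))       // unsupported value supplied ...
}
```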
+func (l *Listener) parseProxySettings() error { + var err error + + if l.ProxyProtocolAuthorizedAddrsRaw != nil { + l.ProxyProtocolAuthorizedAddrs, err = parseutil.ParseAddrs(l.ProxyProtocolAuthorizedAddrsRaw) + if err != nil { + return fmt.Errorf("error parsing proxy_protocol_authorized_addrs: %w", err) } + } + + // Validation/sanity check on allowed settings for behavior. + switch l.ProxyProtocolBehavior { + case "allow_authorized", "deny_unauthorized", "use_always", "": + // Ignore these cases, they're all valid values. + // In the case of 'allow_authorized' and 'deny_unauthorized', we don't need + // to check how many addresses we have in ProxyProtocolAuthorizedAddrs + // as parseutil.ParseAddrs returns "one or more addresses" (or an error) + // so we'd have returned earlier. + default: + return fmt.Errorf("unsupported value supplied for proxy_protocol_behavior: %q", l.ProxyProtocolBehavior) + } - // InFlight Request logging - { - if l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw != nil { - if l.InFlightRequestLogging.UnauthenticatedInFlightAccess, err = parseutil.ParseBool(l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access: %w", err), fmt.Sprintf("listeners.%d", i)) - } + // Clear raw values after successful parsing. + l.ProxyProtocolAuthorizedAddrsRaw = nil + + return nil +} - l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw = "" - } +// parseForwardedForSettings attempts to parse the raw listener x-forwarded-for settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseForwardedForSettings() error { + var err error + + if l.XForwardedForAuthorizedAddrsRaw != nil { + if l.XForwardedForAuthorizedAddrs, err = parseutil.ParseAddrs(l.XForwardedForAuthorizedAddrsRaw); err != nil { + return fmt.Errorf("error parsing x_forwarded_for_authorized_addrs: %w", err) } + } - // CORS - { - if l.CorsEnabledRaw != nil { - if l.CorsEnabled, err = parseutil.ParseBool(l.CorsEnabledRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for cors_enabled: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.CorsEnabledRaw = nil - } - - if strutil.StrListContains(l.CorsAllowedOrigins, "*") && len(l.CorsAllowedOrigins) > 1 { - return multierror.Prefix(errors.New("cors_allowed_origins must only contain a wildcard or only non-wildcard values"), fmt.Sprintf("listeners.%d", i)) - } - - if len(l.CorsAllowedHeadersRaw) > 0 { - for _, header := range l.CorsAllowedHeadersRaw { - l.CorsAllowedHeaders = append(l.CorsAllowedHeaders, textproto.CanonicalMIMEHeaderKey(header)) - } - } + if l.XForwardedForHopSkipsRaw != nil { + if l.XForwardedForHopSkips, err = parseutil.ParseInt(l.XForwardedForHopSkipsRaw); err != nil { + return fmt.Errorf("error parsing x_forwarded_for_hop_skips: %w", err) } - // HTTP Headers - { - // if CustomResponseHeadersRaw is nil, we still need to set the default headers - customHeadersMap, err := ParseCustomResponseHeaders(l.CustomResponseHeadersRaw) - if err != nil { - return multierror.Prefix(fmt.Errorf("failed to parse custom_response_headers: %w", err), fmt.Sprintf("listeners.%d", i)) - } - l.CustomResponseHeaders = customHeadersMap - l.CustomResponseHeadersRaw = nil + if l.XForwardedForHopSkips < 0 { + return fmt.Errorf("x_forwarded_for_hop_skips cannot be negative but set to %d", l.XForwardedForHopSkips) } - result.Listeners = 
append(result.Listeners, &l) + l.XForwardedForHopSkipsRaw = nil + } + + if err := parseAndClearBool(&l.XForwardedForRejectNotAuthorizedRaw, &l.XForwardedForRejectNotAuthorized); err != nil { + return fmt.Errorf("invalid value for x_forwarded_for_reject_not_authorized: %w", err) + } + + if err := parseAndClearBool(&l.XForwardedForRejectNotPresentRaw, &l.XForwardedForRejectNotPresent); err != nil { + return fmt.Errorf("invalid value for x_forwarded_for_reject_not_present: %w", err) } + // Clear raw values after successful parsing. + l.XForwardedForAuthorizedAddrsRaw = nil + return nil } -// ParseSingleIPTemplate is used as a helper function to parse out a single IP -// address from a config parameter. -// If the input doesn't appear to contain the 'template' format, -// it will return the specified input unchanged. -func ParseSingleIPTemplate(ipTmpl string) (string, error) { - r := regexp.MustCompile("{{.*?}}") - if !r.MatchString(ipTmpl) { - return ipTmpl, nil +// parseTelemetrySettings attempts to parse the raw listener telemetry settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseTelemetrySettings() error { + if err := parseAndClearBool(&l.Telemetry.UnauthenticatedMetricsAccessRaw, &l.Telemetry.UnauthenticatedMetricsAccess); err != nil { + return fmt.Errorf("invalid value for telemetry.unauthenticated_metrics_access: %w", err) } - out, err := template.Parse(ipTmpl) - if err != nil { - return "", fmt.Errorf("unable to parse address template %q: %v", ipTmpl, err) + return nil +} + +// parseProfilingSettings attempts to parse the raw listener profiling settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseProfilingSettings() error { + if err := parseAndClearBool(&l.Profiling.UnauthenticatedPProfAccessRaw, &l.Profiling.UnauthenticatedPProfAccess); err != nil { + return fmt.Errorf("invalid value for profiling.unauthenticated_pprof_access: %w", err) } - ips := strings.Split(out, " ") - switch len(ips) { - case 0: - return "", errors.New("no addresses found, please configure one") - case 1: - return strings.TrimSpace(ips[0]), nil - default: - return "", fmt.Errorf("multiple addresses found (%q), please configure one", out) + return nil +} + +// parseInFlightRequestSettings attempts to parse the raw listener in-flight request logging settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseInFlightRequestSettings() error { + if err := parseAndClearBool(&l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw, &l.InFlightRequestLogging.UnauthenticatedInFlightAccess); err != nil { + return fmt.Errorf("invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access: %w", err) } + + return nil +} + +// parseCORSSettings attempts to parse the raw listener CORS settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. 
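Two rules enforced by the CORS parsing, shown in isolation as a minimal sketch: a wildcard origin must be the only origin, and allowed headers are canonicalized with net/textproto before being stored:

```go
package main

import (
	"fmt"
	"net/textproto"
)

// A wildcard origin must stand alone, and header names are normalized to
// their canonical MIME form, mirroring parseCORSSettings below.
func main() {
	origins := []string{"*", "https://hashicorp.com"}
	hasWildcard := false
	for _, o := range origins {
		if o == "*" {
			hasWildcard = true
			break
		}
	}
	if hasWildcard && len(origins) > 1 {
		fmt.Println("error: cors_allowed_origins must only contain a wildcard or only non-wildcard values")
	}

	fmt.Println(textproto.CanonicalMIMEHeaderKey("x-custom-header")) // X-Custom-Header
}
```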
+func (l *Listener) parseCORSSettings() error { + if err := parseAndClearBool(&l.CorsEnabledRaw, &l.CorsEnabled); err != nil { + return fmt.Errorf("invalid value for cors_enabled: %w", err) + } + + if strutil.StrListContains(l.CorsAllowedOrigins, "*") && len(l.CorsAllowedOrigins) > 1 { + return errors.New("cors_allowed_origins must only contain a wildcard or only non-wildcard values") + } + + if len(l.CorsAllowedHeadersRaw) > 0 { + for _, header := range l.CorsAllowedHeadersRaw { + l.CorsAllowedHeaders = append(l.CorsAllowedHeaders, textproto.CanonicalMIMEHeaderKey(header)) + } + } + + l.CorsAllowedHeadersRaw = nil + + return nil +} + +// parseRedactionSettings attempts to parse the raw listener redaction settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseRedactionSettings() error { + // Redaction is only supported on TCP listeners. + // Clear raw data and return early if it was something else. + if l.Type != TCP { + l.RedactAddressesRaw = nil + l.RedactClusterNameRaw = nil + l.RedactVersionRaw = nil + + return nil + } + + var err error + + if l.RedactAddressesRaw != nil { + if l.RedactAddresses, err = parseutil.ParseBool(l.RedactAddressesRaw); err != nil { + return fmt.Errorf("invalid value for redact_addresses: %w", err) + } + } + if l.RedactClusterNameRaw != nil { + if l.RedactClusterName, err = parseutil.ParseBool(l.RedactClusterNameRaw); err != nil { + return fmt.Errorf("invalid value for redact_cluster_name: %w", err) + } + } + if l.RedactVersionRaw != nil { + if l.RedactVersion, err = parseutil.ParseBool(l.RedactVersionRaw); err != nil { + return fmt.Errorf("invalid value for redact_version: %w", err) + } + } + + l.RedactAddressesRaw = nil + l.RedactClusterNameRaw = nil + l.RedactVersionRaw = nil + + return nil } diff --git a/internalshared/configutil/listener_test.go b/internalshared/configutil/listener_test.go index da7d76596b6f..51d0c094ed3b 100644 --- a/internalshared/configutil/listener_test.go +++ b/internalshared/configutil/listener_test.go @@ -1,52 +1,1380 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil import ( - "fmt" + "crypto/tls" "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestParseSingleIPTemplate(t *testing.T) { - type args struct { - ipTmpl string +// TestListener_ParseSingleIPTemplate exercises the ParseSingleIPTemplate function to +// ensure that we only attempt to parse templates when the input contains a +// template placeholder (see: go-sockaddr/template). 
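For reference, this is what the "{{ ... }}" guard hands off to: go-sockaddr's template engine, the same template.Parse the listener code calls. A sketch, assuming GetPrivateIP (one of that package's built-in functions); the output depends on the host's network interfaces:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-sockaddr/template"
)

// Anything containing a "{{ ... }}" placeholder is handed to go-sockaddr's
// template engine; plain addresses bypass it entirely.
func main() {
	out, err := template.Parse("{{ GetPrivateIP }}")
	if err != nil {
		fmt.Println("template error:", err)
		return
	}
	fmt.Println("resolved address:", out)
}
```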
+func TestListener_ParseSingleIPTemplate(t *testing.T) { + tests := map[string]struct { + arg string + want string + isErrorExpected bool + errorMessage string + }{ + "test https addr": { + arg: "https://vaultproject.io:8200", + want: "https://vaultproject.io:8200", + isErrorExpected: false, + }, + "test invalid template func": { + arg: "{{ FooBar }}", + want: "", + isErrorExpected: true, + errorMessage: "unable to parse address template", + }, + "test partial template": { + arg: "{{FooBar", + want: "{{FooBar", + isErrorExpected: false, + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + got, err := ParseSingleIPTemplate(tc.arg) + + if tc.isErrorExpected { + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.want, got) + }) + } +} + +// TestListener_parseType exercises the listener receiver parseType. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseType(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + inputType string + inputFallback string + expectedValue string + isErrorExpected bool + errorMessage string + }{ + "empty-all": { + inputType: "", + inputFallback: "", + isErrorExpected: true, + errorMessage: "listener type must be specified", + }, + "bad-type": { + inputType: "foo", + isErrorExpected: true, + errorMessage: "unsupported listener type", + }, + "bad-fallback": { + inputType: "", + inputFallback: "foo", + isErrorExpected: true, + errorMessage: "unsupported listener type", + }, + "tcp-type-lower": { + inputType: "tcp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-type-upper": { + inputType: "TCP", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-type-mixed": { + inputType: "tCp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-fallback-lower": { + inputType: "", + inputFallback: "tcp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-fallback-upper": { + inputType: "", + inputFallback: "TCP", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-fallback-mixed": { + inputType: "", + inputFallback: "tCp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "unix-type-lower": { + inputType: "unix", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-type-upper": { + inputType: "UNIX", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-type-mixed": { + inputType: "uNiX", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-fallback-lower": { + inputType: "", + inputFallback: "unix", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-fallback-upper": { + inputType: "", + inputFallback: "UNIX", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-fallback-mixed": { + inputType: "", + inputFallback: "uNiX", + expectedValue: "unix", + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + l := &Listener{Type: ListenerType(tc.inputType)} + err := l.parseType(tc.inputFallback) + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.expectedValue, l.Type.String()) + } + }) + } +} + +// TestListener_parseRequestSettings exercises the listener receiver parseRequestSettings. 
+// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseRequestSettings(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + rawMaxRequestSize any + expectedMaxRequestSize int64 + rawMaxRequestDuration any + expectedDuration time.Duration + rawRequireRequestHeader any + expectedRequireRequestHeader bool + rawDisableRequestLimiter any + expectedDisableRequestLimiter bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "max-request-size-bad": { + rawMaxRequestSize: "juan", + isErrorExpected: true, + errorMessage: "error parsing max_request_size", + }, + "max-request-size-good": { + rawMaxRequestSize: "5", + expectedMaxRequestSize: 5, + isErrorExpected: false, + }, + "max-request-duration-bad": { + rawMaxRequestDuration: "juan", + isErrorExpected: true, + errorMessage: "error parsing max_request_duration", + }, + "max-request-duration-good": { + rawMaxRequestDuration: "30s", + expectedDuration: 30 * time.Second, + isErrorExpected: false, + }, + "require-request-header-bad": { + rawRequireRequestHeader: "juan", + expectedRequireRequestHeader: false, + isErrorExpected: true, + errorMessage: "invalid value for require_request_header", + }, + "require-request-header-good": { + rawRequireRequestHeader: "true", + expectedRequireRequestHeader: true, + isErrorExpected: false, + }, + "disable-request-limiter-bad": { + rawDisableRequestLimiter: "badvalue", + expectedDisableRequestLimiter: false, + isErrorExpected: true, + errorMessage: "invalid value for disable_request_limiter", + }, + "disable-request-limiter-good": { + rawDisableRequestLimiter: "true", + expectedDisableRequestLimiter: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + MaxRequestSizeRaw: tc.rawMaxRequestSize, + MaxRequestDurationRaw: tc.rawMaxRequestDuration, + RequireRequestHeaderRaw: tc.rawRequireRequestHeader, + DisableRequestLimiterRaw: tc.rawDisableRequestLimiter, + } + + err := l.parseRequestSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedMaxRequestSize, l.MaxRequestSize) + require.Equal(t, tc.expectedDuration, l.MaxRequestDuration) + require.Equal(t, tc.expectedRequireRequestHeader, l.RequireRequestHeader) + require.Equal(t, tc.expectedDisableRequestLimiter, l.DisableRequestLimiter) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.MaxRequestSizeRaw) + require.Nil(t, l.MaxRequestDurationRaw) + require.Nil(t, l.RequireRequestHeaderRaw) + require.Nil(t, l.DisableRequestLimiterRaw) + } + }) + } +} + +// TestListener_parseTLSSettings exercises the listener receiver parseTLSSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
+func TestListener_parseTLSSettings(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + rawTLSDisable any + expectedTLSDisable bool + rawTLSCipherSuites string + expectedTLSCipherSuites []uint16 + rawTLSRequireAndVerifyClientCert any + expectedTLSRequireAndVerifyClientCert bool + rawTLSDisableClientCerts any + expectedTLSDisableClientCerts bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "tls-disable-bad": { + rawTLSDisable: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_disable", + }, + "tls-disable-good": { + rawTLSDisable: "true", + expectedTLSDisable: true, + isErrorExpected: false, + }, + "tls-cipher-suites-bad": { + rawTLSCipherSuites: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_cipher_suites", + }, + "tls-cipher-suites-good": { + rawTLSCipherSuites: "TLS_RSA_WITH_RC4_128_SHA", + expectedTLSCipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA}, + isErrorExpected: false, + }, + "tls-require-and-verify-client-cert-bad": { + rawTLSRequireAndVerifyClientCert: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_require_and_verify_client_cert", + }, + "tls-require-and-verify-client-cert-good": { + rawTLSRequireAndVerifyClientCert: "true", + expectedTLSRequireAndVerifyClientCert: true, + isErrorExpected: false, + }, + "tls-disable-client-certs-bad": { + rawTLSDisableClientCerts: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_disable_client_certs", + }, + "tls-disable-client-certs-good": { + rawTLSDisableClientCerts: "true", + expectedTLSDisableClientCerts: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + TLSDisableRaw: tc.rawTLSDisable, + TLSCipherSuitesRaw: tc.rawTLSCipherSuites, + TLSRequireAndVerifyClientCertRaw: tc.rawTLSRequireAndVerifyClientCert, + TLSDisableClientCertsRaw: tc.rawTLSDisableClientCerts, + } + + err := l.parseTLSSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedTLSDisable, l.TLSDisable) + require.Equal(t, tc.expectedTLSCipherSuites, l.TLSCipherSuites) + require.Equal(t, tc.expectedTLSRequireAndVerifyClientCert, l.TLSRequireAndVerifyClientCert) + require.Equal(t, tc.expectedTLSDisableClientCerts, l.TLSDisableClientCerts) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.TLSDisableRaw) + require.Empty(t, l.TLSCipherSuitesRaw) + require.Nil(t, l.TLSRequireAndVerifyClientCertRaw) + require.Nil(t, l.TLSDisableClientCertsRaw) + } + }) + } +} + +// TestListener_parseHTTPTimeoutSettings exercises the listener receiver parseHTTPTimeoutSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
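The timeout fields exercised here all flow through parseutil.ParseDurationSecond, which accepts Go duration strings and, as the name hints, also treats bare numbers as seconds. A quick sketch of both forms:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

// Both a duration string and a bare number are usable as listener timeouts;
// the latter is interpreted as a count of seconds.
func main() {
	d1, _ := parseutil.ParseDurationSecond("30s")
	d2, _ := parseutil.ParseDurationSecond(30)
	fmt.Println(d1, d2) // 30s 30s

	_, err := parseutil.ParseDurationSecond("juan")
	fmt.Println(err != nil) // true: not a duration
}
```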
+func TestListener_parseHTTPTimeoutSettings(t *testing.T) { + tests := map[string]struct { + rawHTTPReadTimeout any + expectedHTTPReadTimeout time.Duration + rawHTTPReadHeaderTimeout any + expectedHTTPReadHeaderTimeout time.Duration + rawHTTPWriteTimeout any + expectedHTTPWriteTimeout time.Duration + rawHTTPIdleTimeout any + expectedHTTPIdleTimeout time.Duration + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "read-timeout-bad": { + rawHTTPReadTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_read_timeout", + }, + "read-timeout-good": { + rawHTTPReadTimeout: "30s", + expectedHTTPReadTimeout: 30 * time.Second, + isErrorExpected: false, + }, + "read-header-timeout-bad": { + rawHTTPReadHeaderTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_read_header_timeout", + }, + "read-header-timeout-good": { + rawHTTPReadHeaderTimeout: "30s", + expectedHTTPReadHeaderTimeout: 30 * time.Second, + isErrorExpected: false, + }, + "write-timeout-bad": { + rawHTTPWriteTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_write_timeout", + }, + "write-timeout-good": { + rawHTTPWriteTimeout: "30s", + expectedHTTPWriteTimeout: 30 * time.Second, + isErrorExpected: false, + }, + "idle-timeout-bad": { + rawHTTPIdleTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_idle_timeout", + }, + "idle-timeout-good": { + rawHTTPIdleTimeout: "30s", + expectedHTTPIdleTimeout: 30 * time.Second, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + HTTPReadTimeoutRaw: tc.rawHTTPReadTimeout, + HTTPReadHeaderTimeoutRaw: tc.rawHTTPReadHeaderTimeout, + HTTPWriteTimeoutRaw: tc.rawHTTPWriteTimeout, + HTTPIdleTimeoutRaw: tc.rawHTTPIdleTimeout, + } + + err := l.parseHTTPTimeoutSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedHTTPReadTimeout, l.HTTPReadTimeout) + require.Equal(t, tc.expectedHTTPReadHeaderTimeout, l.HTTPReadHeaderTimeout) + require.Equal(t, tc.expectedHTTPWriteTimeout, l.HTTPWriteTimeout) + require.Equal(t, tc.expectedHTTPIdleTimeout, l.HTTPIdleTimeout) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.HTTPReadTimeoutRaw) + require.Nil(t, l.HTTPReadHeaderTimeoutRaw) + require.Nil(t, l.HTTPWriteTimeoutRaw) + require.Nil(t, l.HTTPIdleTimeoutRaw) + } + }) + } +} + +// TestListener_parseProxySettings exercises the listener receiver parseProxySettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
+func TestListener_parseProxySettings(t *testing.T) { + tests := map[string]struct { + rawProxyProtocolAuthorizedAddrs any + expectedNumAddrs int + proxyBehavior string + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad-addrs": { + rawProxyProtocolAuthorizedAddrs: "juan", + isErrorExpected: true, + errorMessage: "error parsing proxy_protocol_authorized_addrs", + }, + "good-addrs": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "", + isErrorExpected: false, + }, + "behavior-bad": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + proxyBehavior: "juan", + isErrorExpected: true, + errorMessage: "unsupported value supplied for proxy_protocol_behavior", + }, + "behavior-use-always": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "use_always", + isErrorExpected: false, + }, + "behavior-empty": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "", + isErrorExpected: false, + }, + "behavior-allow": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "allow_authorized", + isErrorExpected: false, + }, + "behavior-deny": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "deny_unauthorized", + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + ProxyProtocolAuthorizedAddrsRaw: tc.rawProxyProtocolAuthorizedAddrs, + ProxyProtocolBehavior: tc.proxyBehavior, + } + + err := l.parseProxySettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Len(t, l.ProxyProtocolAuthorizedAddrs, tc.expectedNumAddrs) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.ProxyProtocolAuthorizedAddrsRaw) + } + }) + } +} + +// TestListener_parseForwardedForSettings exercises the listener receiver parseForwardedForSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
+func TestListener_parseForwardedForSettings(t *testing.T) { + tests := map[string]struct { + rawAuthorizedAddrs any + expectedNumAddrs int + rawHopSkips any + expectedHopSkips int64 + rawRejectNotAuthorized any + expectedRejectNotAuthorized bool + rawRejectNotPresent any + expectedRejectNotPresent bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "authorized-addrs-bad": { + rawAuthorizedAddrs: "juan", + isErrorExpected: true, + errorMessage: "error parsing x_forwarded_for_authorized_addrs", + }, + "authorized-addrs-good": { + rawAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + XForwardedForAuthorizedAddrsRaw: tc.rawAuthorizedAddrs, + XForwardedForHopSkipsRaw: tc.rawHopSkips, + XForwardedForRejectNotAuthorizedRaw: tc.rawRejectNotAuthorized, + XForwardedForRejectNotPresentRaw: tc.rawRejectNotPresent, + } + + err := l.parseForwardedForSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + + require.Len(t, l.XForwardedForAuthorizedAddrs, tc.expectedNumAddrs) + require.Equal(t, tc.expectedHopSkips, l.XForwardedForHopSkips) + require.Equal(t, tc.expectedRejectNotAuthorized, l.XForwardedForRejectNotAuthorized) + require.Equal(t, tc.expectedRejectNotPresent, l.XForwardedForRejectNotPresent) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.XForwardedForAuthorizedAddrsRaw) + require.Nil(t, l.XForwardedForHopSkipsRaw) + require.Nil(t, l.XForwardedForRejectNotAuthorizedRaw) + require.Nil(t, l.XForwardedForRejectNotPresentRaw) + } + }) + } +} + +// TestListener_parseTelemetrySettings exercises the listener receiver parseTelemetrySettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseTelemetrySettings(t *testing.T) { + tests := map[string]struct { + rawUnauthenticatedMetricsAccess any + expectedUnauthenticatedMetricsAccess bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "unauth-bad": { + rawUnauthenticatedMetricsAccess: "juan", + isErrorExpected: true, + errorMessage: "invalid value for telemetry.unauthenticated_metrics_access", + }, + "unauth-good": { + rawUnauthenticatedMetricsAccess: "true", + expectedUnauthenticatedMetricsAccess: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Telemetry: ListenerTelemetry{ + UnauthenticatedMetricsAccessRaw: tc.rawUnauthenticatedMetricsAccess, + }, + } + + err := l.parseTelemetrySettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedUnauthenticatedMetricsAccess, l.Telemetry.UnauthenticatedMetricsAccess) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.Telemetry.UnauthenticatedMetricsAccessRaw) + } + }) + } +} + +// TestListener_parseProfilingSettings exercises the listener receiver parseProfilingSettings. 
+// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseProfilingSettings(t *testing.T) { + tests := map[string]struct { + rawUnauthenticatedPProfAccess any + expectedUnauthenticatedPProfAccess bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad": { + rawUnauthenticatedPProfAccess: "juan", + isErrorExpected: true, + errorMessage: "invalid value for profiling.unauthenticated_pprof_access", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Profiling: ListenerProfiling{ + UnauthenticatedPProfAccessRaw: tc.rawUnauthenticatedPProfAccess, + }, + } + + err := l.parseProfilingSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedUnauthenticatedPProfAccess, l.Profiling.UnauthenticatedPProfAccess) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.Profiling.UnauthenticatedPProfAccessRaw) + } + }) + } +} + +// TestListener_parseInFlightRequestSettings exercises the listener receiver parseInFlightRequestSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseInFlightRequestSettings(t *testing.T) { + tests := map[string]struct { + rawUnauthenticatedInFlightAccess any + expectedUnauthenticatedInFlightAccess bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad": { + rawUnauthenticatedInFlightAccess: "juan", + isErrorExpected: true, + errorMessage: "invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access", + }, + "good": { + rawUnauthenticatedInFlightAccess: "true", + expectedUnauthenticatedInFlightAccess: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + InFlightRequestLogging: ListenerInFlightRequestLogging{ + UnauthenticatedInFlightAccessRaw: tc.rawUnauthenticatedInFlightAccess, + }, + } + + err := l.parseInFlightRequestSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedUnauthenticatedInFlightAccess, l.InFlightRequestLogging.UnauthenticatedInFlightAccess) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw) + } + }) + } +} + +// TestListener_parseCORSSettings exercises the listener receiver parseCORSSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
+func TestListener_parseCORSSettings(t *testing.T) { + tests := map[string]struct { + rawCorsEnabled any + rawCorsAllowedHeaders []string + corsAllowedOrigins []string + expectedCorsEnabled bool + expectedNumCorsAllowedHeaders int + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "cors-enabled-bad": { + rawCorsEnabled: "juan", + expectedCorsEnabled: false, + isErrorExpected: true, + errorMessage: "invalid value for cors_enabled", + }, + "cors-enabled-good": { + rawCorsEnabled: "true", + expectedCorsEnabled: true, + isErrorExpected: false, + }, + "cors-allowed-origins-single-wildcard": { + corsAllowedOrigins: []string{"*"}, + isErrorExpected: false, + }, + "cors-allowed-origins-multi-wildcard": { + corsAllowedOrigins: []string{"*", "hashicorp.com"}, + isErrorExpected: true, + errorMessage: "cors_allowed_origins must only contain a wildcard or only non-wildcard values", + }, + "cors-allowed-headers-anything": { + rawCorsAllowedHeaders: []string{"foo", "bar"}, + expectedNumCorsAllowedHeaders: 2, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + CorsEnabledRaw: tc.rawCorsEnabled, + CorsAllowedHeadersRaw: tc.rawCorsAllowedHeaders, + CorsAllowedOrigins: tc.corsAllowedOrigins, + } + + err := l.parseCORSSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedCorsEnabled, l.CorsEnabled) + require.Len(t, l.CorsAllowedHeaders, tc.expectedNumCorsAllowedHeaders) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.CorsEnabledRaw) + require.Nil(t, l.CorsAllowedHeadersRaw) + } + }) + } +} + +// TestListener_parseHTTPHeaderSettings exercises the listener receiver parseHTTPHeaderSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
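The two shapes involved in header parsing, restated side by side: the raw form HCL produces (as used in the cases below) and the parsed map[status-class]map[header]value form the listener stores. The concrete values here are illustrative, including the Strict-Transport-Security default that ParseCustomResponseHeaders injects (which is why the nil case below still expects one configured header class):

```go
package main

import "fmt"

func main() {
	// Raw form produced by HCL decoding (mirrors the "custom-headers-good"
	// case): a slice of single-key maps keyed by status-code class.
	raw := []map[string]any{
		{
			"2xx": []map[string]any{
				{"X-Custom-Header": []any{"Custom Header Value 1", "Custom Header Value 2"}},
			},
		},
	}

	// Parsed form stored on the listener. Entries are illustrative; the
	// "default" class carries the injected Strict-Transport-Security header.
	parsed := map[string]map[string]string{
		"2xx":     {"X-Custom-Header": "Custom Header Value 1"},
		"default": {"Strict-Transport-Security": "max-age=31536000; includeSubDomains"},
	}

	fmt.Println(len(raw), len(parsed)) // 1 raw block, 2 parsed classes
}
```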
+func TestListener_parseHTTPHeaderSettings(t *testing.T) { + tests := map[string]struct { + listenerType ListenerType + rawCustomResponseHeaders []map[string]any + expectedNumCustomResponseHeaders int + isErrorExpected bool + errorMessage string + }{ + "nil": { + listenerType: TCP, + isErrorExpected: false, + expectedNumCustomResponseHeaders: 1, // default: Strict-Transport-Security + }, + "custom-headers-bad": { + listenerType: TCP, + rawCustomResponseHeaders: []map[string]any{ + {"juan": false}, + }, + isErrorExpected: true, + errorMessage: "failed to parse custom_response_headers", + }, + "custom-headers-good": { + listenerType: TCP, + rawCustomResponseHeaders: []map[string]any{ + { + "2xx": []map[string]any{ + {"X-Custom-Header": []any{"Custom Header Value 1", "Custom Header Value 2"}}, + }, + }, + }, + expectedNumCustomResponseHeaders: 2, + isErrorExpected: false, + }, + "unix-no-headers": { + listenerType: Unix, + rawCustomResponseHeaders: []map[string]any{ + { + "2xx": []map[string]any{ + {"X-Custom-Header": []any{"Custom Header Value 1", "Custom Header Value 2"}}, + }, + }, + }, + expectedNumCustomResponseHeaders: 0, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Type: tc.listenerType, + CustomResponseHeadersRaw: tc.rawCustomResponseHeaders, + } + + err := l.parseHTTPHeaderSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Len(t, l.CustomResponseHeaders, tc.expectedNumCustomResponseHeaders) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.CustomResponseHeadersRaw) + } + }) + } +} + +// TestListener_parseChrootNamespaceSettings exercises the listener receiver parseChrootNamespaceSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseChrootNamespaceSettings(t *testing.T) { + tests := map[string]struct { + rawChrootNamespace any + expectedChrootNamespace string + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad": { + rawChrootNamespace: &Listener{}, // Unsure how we'd ever see this really. + isErrorExpected: true, + errorMessage: "invalid value for chroot_namespace", + }, + "good": { + rawChrootNamespace: "juan", + expectedChrootNamespace: "juan/", + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + ChrootNamespaceRaw: tc.rawChrootNamespace, + } + + err := l.parseChrootNamespaceSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedChrootNamespace, l.ChrootNamespace) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.ChrootNamespaceRaw) + } + }) } - tests := []struct { - name string - arg string - want string - wantErr assert.ErrorAssertionFunc +} + +// TestListener_parseRedactionSettings exercises the listener receiver parseRedactionSettings. 
+// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseRedactionSettings(t *testing.T) { + tests := map[string]struct { + listenerType ListenerType + rawRedactAddresses any + expectedRedactAddresses bool + rawRedactClusterName any + expectedRedactClusterName bool + rawRedactVersion any + expectedRedactVersion bool + isErrorExpected bool + errorMessage string }{ + "missing": { + listenerType: TCP, + isErrorExpected: false, + expectedRedactAddresses: false, + expectedRedactClusterName: false, + expectedRedactVersion: false, + }, + "redact-addresses-bad": { + listenerType: TCP, + rawRedactAddresses: "juan", + isErrorExpected: true, + errorMessage: "invalid value for redact_addresses", + }, + "redact-addresses-good": { + listenerType: TCP, + rawRedactAddresses: "true", + expectedRedactAddresses: true, + isErrorExpected: false, + }, + "redact-cluster-name-bad": { + listenerType: TCP, + rawRedactClusterName: "juan", + isErrorExpected: true, + errorMessage: "invalid value for redact_cluster_name", + }, + "redact-cluster-name-good": { + listenerType: TCP, + rawRedactClusterName: "true", + expectedRedactClusterName: true, + isErrorExpected: false, + }, + "redact-version-bad": { + listenerType: TCP, + rawRedactVersion: "juan", + isErrorExpected: true, + errorMessage: "invalid value for redact_version", + }, + "redact-version-good": { + listenerType: TCP, + rawRedactVersion: "true", + expectedRedactVersion: true, + isErrorExpected: false, + }, + "redact-unix-na": { + listenerType: Unix, + rawRedactAddresses: "true", + expectedRedactAddresses: false, + rawRedactClusterName: "true", + expectedRedactClusterName: false, + rawRedactVersion: "true", + expectedRedactVersion: false, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Type: tc.listenerType, + RedactAddressesRaw: tc.rawRedactAddresses, + RedactClusterNameRaw: tc.rawRedactClusterName, + RedactVersionRaw: tc.rawRedactVersion, + } + + err := l.parseRedactionSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedRedactAddresses, l.RedactAddresses) + require.Equal(t, tc.expectedRedactClusterName, l.RedactClusterName) + require.Equal(t, tc.expectedRedactVersion, l.RedactVersion) + + // Ensure the state was modified for the raw values. 
+ require.Nil(t, l.RedactAddressesRaw) + require.Nil(t, l.RedactClusterNameRaw) + require.Nil(t, l.RedactVersionRaw) + } + }) + } +} + +func TestParseAndClearBool(t *testing.T) { + testcases := []struct { + name string + raw interface{} + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed bool + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-true-as-string", + raw: "true", + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, + { + name: "valid-false-as-string", + raw: "false", + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, + }, + { + name: "valid-true-as-bool", + raw: true, + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, + { + name: "valid-false-as-bool", + raw: false, + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, + }, + { + name: "valid-true-as-string-mix-case", + raw: "True", + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, + { + name: "valid-false-as-integer", + raw: 0, + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, + }, + { + name: "valid-true-as-integer", + raw: 2, + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, { - name: "test https addr", - arg: "https://vaultproject.io:8200", - want: "https://vaultproject.io:8200", - wantErr: assert.NoError, + name: "valid-true-as-float", + raw: 3.14, + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, }, { - name: "test invalid template func", - arg: "{{FooBar}}", - want: "", - wantErr: assert.Error, + name: "valid-false-as-float", + raw: 0.0, + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, }, { - name: "test partial template", - arg: "{{FooBar", - want: "{{FooBar", - wantErr: assert.NoError, + name: "invalid-as-string", + raw: "0.0.0.0:8200", + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "invalid-as-struct", + raw: struct{}{}, + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + errorAssertion: assert.NoError, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ParseSingleIPTemplate(tt.arg) - if !tt.wantErr(t, err, fmt.Sprintf("ParseSingleIPTemplate(%v)", tt.arg)) { - return - } - assert.Equalf(t, tt.want, got, "ParseSingleIPTemplate(%v)", tt.arg) - }) + for _, testcase := range testcases { + var parsed bool + err := parseAndClearBool(&testcase.raw, &parsed) + + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed, testcase.name) + testcase.rawAssertion(t, testcase.raw, testcase.name) + } +} + +func TestParseAndClearString(t *testing.T) { + testcases := []struct { + name string + raw any + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed string + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-empty-string", + raw: "", + rawAssertion: assert.Nil, + expectedParsed: "", + errorAssertion: assert.NoError, + }, + { + name: "valid-some-string", + raw: "blah blah", + rawAssertion: assert.Nil, + expectedParsed: "blah blah", + errorAssertion: assert.NoError, + }, + { + name: "valid-as-integer", + raw: 8, + rawAssertion: assert.Nil, + expectedParsed: "8", + errorAssertion: assert.NoError, + }, + { + name: 
"valid-as-bool", + raw: true, + rawAssertion: assert.Nil, + expectedParsed: "1", + errorAssertion: assert.NoError, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + expectedParsed: "", + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-struct", + raw: struct{}{}, + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + } + for _, testcase := range testcases { + var parsed string + err := parseAndClearString(&testcase.raw, &parsed) + + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed, testcase.name) + testcase.rawAssertion(t, testcase.raw, testcase.name) + } +} + +func TestParseAndClearInt(t *testing.T) { + testcases := []struct { + name string + raw any + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed int64 + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-as-int", + raw: 200, + rawAssertion: assert.Nil, + expectedParsed: int64(200), + errorAssertion: assert.NoError, + }, + { + name: "valid-as-string", + raw: "53", + rawAssertion: assert.Nil, + expectedParsed: int64(53), + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-hex-string", + raw: "0xa", + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + errorAssertion: assert.NoError, + }, + } + + for _, testcase := range testcases { + var parsed int64 + err := parseAndClearInt(&testcase.raw, &parsed) + + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed, testcase.name) + testcase.rawAssertion(t, testcase.raw, testcase.name) + } +} + +func TestParseAndClearDurationSecond(t *testing.T) { + testcases := []struct { + name string + raw any + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed time.Duration + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-as-string", + raw: "30s", + rawAssertion: assert.Nil, + expectedParsed: time.Duration(30 * time.Second), + errorAssertion: assert.NoError, + }, + { + name: "valid-as-string-more-complex", + raw: "29h24m49s", + rawAssertion: assert.Nil, + expectedParsed: time.Duration((29 * time.Hour) + (24 * time.Minute) + (49 * time.Second)), + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-string-using-days", + raw: "1d3s", + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "valid-as-integer", + raw: 87, + rawAssertion: assert.Nil, + expectedParsed: time.Duration(87 * time.Second), + errorAssertion: assert.NoError, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-struct", + raw: struct{}{}, + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + } + + for _, testcase := range testcases { + var parsed time.Duration + + err := parseAndClearDurationSecond(&testcase.raw, &parsed) + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed) + testcase.rawAssertion(t, testcase.raw, testcase.name) } } diff --git a/internalshared/configutil/merge.go b/internalshared/configutil/merge.go index 940e8bfcfb2c..5068be556c24 100644 --- a/internalshared/configutil/merge.go +++ b/internalshared/configutil/merge.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/configutil/telemetry.go b/internalshared/configutil/telemetry.go index 270eb493d5ee..7c49fce00917 100644 --- a/internalshared/configutil/telemetry.go +++ b/internalshared/configutil/telemetry.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil @@ -9,8 +9,6 @@ import ( "fmt" "time" - "github.com/hashicorp/go-secure-stdlib/parseutil" - monitoring "cloud.google.com/go/monitoring/apiv3" "github.com/armon/go-metrics" "github.com/armon/go-metrics/circonus" @@ -18,11 +16,13 @@ import ( "github.com/armon/go-metrics/prometheus" stackdriver "github.com/google/go-metrics-stackdriver" stackdrivervault "github.com/google/go-metrics-stackdriver/vault" + "github.com/hashicorp/cli" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/vault/helper/metricsutil" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/sdk/helper/metricregistry" "google.golang.org/api/option" ) @@ -162,6 +162,10 @@ type Telemetry struct { // PrefixFilter is a list of filter rules to apply for allowing // or blocking metrics by prefix. PrefixFilter []string `hcl:"prefix_filter"` + + // Whether or not telemetry should include the mount point in the rollback + // metrics + RollbackMetricsIncludeMountPoint bool `hcl:"add_mount_point_rollback_metrics"` } func (t *Telemetry) Validate(source string) []ConfigError { @@ -286,6 +290,10 @@ func SetupTelemetry(opts *SetupTelemetryOpts) (*metrics.InmemSink, *metricsutil. Expiration: opts.Config.PrometheusRetentionTime, } + // Merge in explicit metric definitions so Prometheus always reports those + // metrics. + metricregistry.MergeDefinitions(&prometheusOpts) + sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts) if err != nil { return nil, nil, false, err @@ -402,6 +410,7 @@ func SetupTelemetry(opts *SetupTelemetryOpts) (*metrics.InmemSink, *metricsutil. wrapper.TelemetryConsts.LeaseMetricsEpsilon = opts.Config.LeaseMetricsEpsilon wrapper.TelemetryConsts.LeaseMetricsNameSpaceLabels = opts.Config.LeaseMetricsNameSpaceLabels wrapper.TelemetryConsts.NumLeaseMetricsTimeBuckets = opts.Config.NumLeaseMetricsTimeBuckets + wrapper.TelemetryConsts.RollbackMetricsIncludeMountPoint = opts.Config.RollbackMetricsIncludeMountPoint // Parse the metric filters telemetryAllowedPrefixes, telemetryBlockedPrefixes, err := parsePrefixFilter(opts.Config.PrefixFilter) diff --git a/internalshared/configutil/telemetry_test.go b/internalshared/configutil/telemetry_test.go index aaeb808171ab..285278eeaeba 100644 --- a/internalshared/configutil/telemetry_test.go +++ b/internalshared/configutil/telemetry_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/configutil/userlockout.go b/internalshared/configutil/userlockout.go index df76308ddb5a..68e6bc4a4856 100644 --- a/internalshared/configutil/userlockout.go +++ b/internalshared/configutil/userlockout.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil @@ -117,7 +117,7 @@ func ParseUserLockouts(result *SharedConfig, list *ast.ObjectList) error { // we set values for these fields with defaults // The issue with not being able to use non-raw entries is because of fields lockout threshold // and disable lockout. We cannot differentiate using non-raw entries if the user configured these fields - // with values (0 and false) or if the the user did not configure these values in config file at all. + // with values (0 and false) or if the user did not configure these values in config file at all. // The raw fields are set to nil after setting missing values in setNilValuesForRawUserLockoutFields function userLockoutsMap = setMissingUserLockoutValuesInMap(userLockoutsMap) for _, userLockoutValues := range userLockoutsMap { @@ -147,7 +147,7 @@ func setUserLockoutValueAllInMap(userLockoutAll *UserLockout) *UserLockout { return setNilValuesForRawUserLockoutFields(userLockoutAll) } -// setDefaultUserLockoutValuesInMap sets missing user lockout fields for auth methods +// setMissingUserLockoutValuesInMap sets missing user lockout fields for auth methods // with default values (from key "all") that are not configured using config file func setMissingUserLockoutValuesInMap(userLockoutsMap map[string]*UserLockout) map[string]*UserLockout { // set values for "all" key with default values for "all" user lockout fields that are not configured diff --git a/internalshared/configutil/userlockout_test.go b/internalshared/configutil/userlockout_test.go index db05441c684d..0bc5f0dce10e 100644 --- a/internalshared/configutil/userlockout_test.go +++ b/internalshared/configutil/userlockout_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package configutil diff --git a/internalshared/listenerutil/bufconn.go b/internalshared/listenerutil/bufconn.go index 54af0a783e20..d471ee118fce 100644 --- a/internalshared/listenerutil/bufconn.go +++ b/internalshared/listenerutil/bufconn.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package listenerutil diff --git a/internalshared/listenerutil/listener.go b/internalshared/listenerutil/listener.go index 9a4edb45dcdc..1f8afe717650 100644 --- a/internalshared/listenerutil/listener.go +++ b/internalshared/listenerutil/listener.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package listenerutil @@ -13,12 +13,12 @@ import ( osuser "os/user" "strconv" + "github.com/hashicorp/cli" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/go-secure-stdlib/tlsutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/jefferai/isbadcipher" - "github.com/mitchellh/cli" ) type Listener struct { diff --git a/internalshared/listenerutil/listener_test.go b/internalshared/listenerutil/listener_test.go index 6219727e1907..c315fd24326a 100644 --- a/internalshared/listenerutil/listener_test.go +++ b/internalshared/listenerutil/listener_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package listenerutil diff --git a/limits/http_limiter.go b/limits/http_limiter.go new file mode 100644 index 000000000000..19b94e4acb05 --- /dev/null +++ b/limits/http_limiter.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package limits + +import ( + "context" + "errors" + "net/http" +) + +//lint:ignore ST1005 Vault is the product name +var ErrCapacity = errors.New("Vault server temporarily overloaded") + +const ( + WriteLimiter = "write" + SpecialPathLimiter = "special-path" +) + +// HTTPLimiter is a convenience struct that we use to wrap some logical request +// context and prevent dependence on Core. +type HTTPLimiter struct { + Method string + PathLimited bool + LookupFunc func(key string) *RequestLimiter +} + +// CtxKeyDisableRequestLimiter holds the HTTP Listener's disable config if set. +type CtxKeyDisableRequestLimiter struct{} + +func (c CtxKeyDisableRequestLimiter) String() string { + return "disable_request_limiter" +} + +// Acquire checks the HTTPLimiter metadata to determine if an HTTP request +// should be limited, or simply passed through as a no-op. +func (h *HTTPLimiter) Acquire(ctx context.Context) (*RequestListener, bool) { + // If the limiter is disabled, return an empty wrapper so the limiter is a + // no-op and indicate that the request can proceed. + if disable := ctx.Value(CtxKeyDisableRequestLimiter{}); disable != nil && disable.(bool) { + return &RequestListener{}, true + } + + lim := &RequestLimiter{} + if h.PathLimited { + lim = h.LookupFunc(SpecialPathLimiter) + } else { + switch h.Method { + case http.MethodGet, http.MethodHead, http.MethodTrace, http.MethodOptions: + // We're only interested in the inverse, so do nothing here. + default: + lim = h.LookupFunc(WriteLimiter) + } + } + return lim.Acquire(ctx) +} diff --git a/limits/limiter.go b/limits/limiter.go new file mode 100644 index 000000000000..09c8bd452e1f --- /dev/null +++ b/limits/limiter.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package limits + +import ( + "context" +) + +type RequestLimiter struct{} + +// Acquire is a no-op on CE +func (l *RequestLimiter) Acquire(_ctx context.Context) (*RequestListener, bool) { + return &RequestListener{}, true +} + +// EstimatedLimit is effectively 0, since we're not limiting requests on CE. +func (l *RequestLimiter) EstimatedLimit() int { return 0 } diff --git a/limits/listener.go b/limits/listener.go new file mode 100644 index 000000000000..f3bffee8026b --- /dev/null +++ b/limits/listener.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package limits + +type RequestListener struct{} + +func (l *RequestListener) OnSuccess() {} + +func (l *RequestListener) OnDropped() {} + +func (l *RequestListener) OnIgnore() {} diff --git a/limits/registry.go b/limits/registry.go new file mode 100644 index 000000000000..a9deee29046a --- /dev/null +++ b/limits/registry.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package limits + +// LimiterRegistry holds the map of RequestLimiters mapped to keys. +type LimiterRegistry struct{} diff --git a/main.go b/main.go index 0417bd98773b..35d2f584ee96 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main // import "github.com/hashicorp/vault" @@ -7,14 +7,8 @@ import ( "os" "github.com/hashicorp/vault/command" - "github.com/hashicorp/vault/internal" ) -func init() { - // this is a good place to patch SHA-1 support back into x509 - internal.PatchSha1() -} - func main() { os.Exit(command.Run(os.Args[1:])) } diff --git a/main_test.go b/main_test.go index 36398339dfcf..78b0e69a6cb0 100644 --- a/main_test.go +++ b/main_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main // import "github.com/hashicorp/vault" diff --git a/physical/aerospike/aerospike.go b/physical/aerospike/aerospike.go index 81aab224ac24..4e8aeb782afb 100644 --- a/physical/aerospike/aerospike.go +++ b/physical/aerospike/aerospike.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aerospike diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go index 2adec8293a69..e519b2da50c6 100644 --- a/physical/aerospike/aerospike_test.go +++ b/physical/aerospike/aerospike_test.go @@ -1,21 +1,27 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package aerospike import ( "context" + "math/bits" + "runtime" + "strings" "testing" "time" aero "github.com/aerospike/aerospike-client-go/v5" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) func TestAerospikeBackend(t *testing.T) { + if bits.UintSize == 32 { + t.Skip("Aerospike storage is only supported on 64-bit architectures") + } cleanup, config := prepareAerospikeContainer(t) defer cleanup() @@ -43,6 +49,11 @@ type aerospikeConfig struct { } func prepareAerospikeContainer(t *testing.T) (func(), *aerospikeConfig) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ ImageRepo: "docker.mirror.hashicorp.services/aerospike/aerospike-server", ContainerName: "aerospikedb", diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go index d32d14b3b00a..d82287e20268 100644 --- a/physical/alicloudoss/alicloudoss.go +++ b/physical/alicloudoss/alicloudoss.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package alicloudoss diff --git a/physical/alicloudoss/alicloudoss_test.go b/physical/alicloudoss/alicloudoss_test.go index 1b098bd34405..b7a94db3a818 100644 --- a/physical/alicloudoss/alicloudoss_test.go +++ b/physical/alicloudoss/alicloudoss_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package alicloudoss diff --git a/physical/azure/azure.go b/physical/azure/azure.go index fe884491dbf5..e941af1611b8 100644 --- a/physical/azure/azure.go +++ b/physical/azure/azure.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package azure @@ -7,9 +7,10 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net/url" "os" + "regexp" "sort" "strconv" "strings" @@ -56,6 +57,10 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen } } + if err := validateContainerName(name); err != nil { + return nil, fmt.Errorf("invalid container name %s: %w", name, err) + } + accountName := os.Getenv("AZURE_ACCOUNT_NAME") if accountName == "" { accountName = conf["accountName"] @@ -63,6 +68,9 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, fmt.Errorf("'accountName' must be set") } } + if err := validateAccountName(accountName); err != nil { + return nil, fmt.Errorf("invalid account name %s: %w", accountName, err) + } accountKey := os.Getenv("AZURE_ACCOUNT_KEY") if accountKey == "" { @@ -188,6 +196,35 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen return a, nil } +// validation rules for containers are defined here: +// https://learn.microsoft.com/en-us/rest/api/storageservices/Naming-and-Referencing-Containers--Blobs--and-Metadata#container-names +var containerNameRegex = regexp.MustCompile("^[a-z0-9]+(-[a-z0-9]+)*$") + +func validateContainerName(name string) error { + if len(name) < 3 || len(name) > 63 { + return errors.New("name must be between 3 and 63 characters long") + } + + if !containerNameRegex.MatchString(name) { + return errors.New("name is invalid") + } + return nil +} + +// validation rules are defined here: +// https://learn.microsoft.com/en-us/azure/azure-resource-manager/troubleshooting/error-storage-account-name?tabs=bicep#cause +var accountNameRegex = regexp.MustCompile("^[a-z0-9]+$") + +func validateAccountName(name string) error { + if len(name) < 3 || len(name) > 24 { + return errors.New("name must be between 3 and 24 characters long") + } + if !accountNameRegex.MatchString(name) { + return errors.New("name is invalid") + } + return nil +} + // Put is used to insert or update an entry func (a *AzureBackend) Put(ctx context.Context, entry *physical.Entry) error { defer metrics.MeasureSince([]string{"azure", "put"}, time.Now()) @@ -234,7 +271,7 @@ func (a *AzureBackend) Get(ctx context.Context, key string) (*physical.Entry, er reader := res.Body(azblob.RetryReaderOptions{}) defer reader.Close() - data, err := ioutil.ReadAll(reader) + data, err := io.ReadAll(reader) ent := &physical.Entry{ Key: key, @@ -307,7 +344,7 @@ func (a *AzureBackend) List(ctx context.Context, prefix string) ([]string, error // getAuthTokenFromIMDS uses the Azure Instance Metadata Service to retrieve a short-lived credential using OAuth // more info on this https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview func getAuthTokenFromIMDS(resource string) (*adal.ServicePrincipalToken, error) { - msiEndpoint, err := adal.GetMSIVMEndpoint() + msiEndpoint, err := adal.GetMSIEndpoint() if err != nil { return nil, err } diff --git a/physical/azure/azure_test.go b/physical/azure/azure_test.go index a004c8335763..2e491bd9b886 100644 --- a/physical/azure/azure_test.go +++ b/physical/azure/azure_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package azure @@ -9,15 +9,16 @@ import ( "net" "os" "strconv" + "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/azurite" - "github.com/Azure/azure-storage-blob-go/azblob" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/azurite" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" + "github.com/stretchr/testify/require" ) /// These tests run against an Azurite docker container, unless AZURE_ACCOUNT_NAME is given. @@ -34,7 +35,7 @@ func testFixture(t *testing.T) (*AzureBackend, func()) { t.Helper() ts := time.Now().UnixNano() - name := fmt.Sprintf("vault-test-%d", ts) + name := fmt.Sprintf("vlt-%d", ts) _ = os.Setenv("AZURE_BLOB_CONTAINER", name) cleanup := func() {} @@ -129,3 +130,116 @@ func isIMDSReachable(t *testing.T) bool { return true } + +// TestAzureBackend_validateContainerName validates that the given container +// names meet the Azure restrictions for container names +func TestAzureBackend_validateContainerName(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + containerName string + wantError bool + }{ + { + name: "success", + containerName: "abcd-1234-efgh", + wantError: false, + }, + { + name: "uppercase", + containerName: "Abcd-1234-efgh", + wantError: true, + }, + { + name: "hyphen start", + containerName: "-abcd-1234-efgh", + wantError: true, + }, + { + name: "hyphen end", + containerName: "abcd-1234-efgh-", + wantError: true, + }, + { + name: "double hyphen", + containerName: "abcd-1234--efgh", + wantError: true, + }, + { + name: "too short", + containerName: "ab", + wantError: true, + }, + { + name: "too long", + containerName: strings.Repeat("a", 64), + wantError: true, + }, + { + name: "other character", + containerName: "abcd-1234-e!gh", + wantError: true, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := validateContainerName(tc.containerName) + if tc.wantError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +// TestAzureBackend_validateAccountName validates that the given account names +// meet the Azure restrictions for account names +func TestAzureBackend_validateAccountName(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + accountName string + wantError bool + }{ + { + name: "success", + accountName: "abcd1234", + wantError: false, + }, + { + name: "uppercase", + accountName: "Abcd0123", + wantError: true, + }, + { + name: "hyphen", + accountName: "abcd-1234", + wantError: true, + }, + { + name: "too short", + accountName: "ab", + wantError: true, + }, + { + name: "too long", + accountName: strings.Repeat("a", 25), + wantError: true, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := validateAccountName(tc.accountName) + if tc.wantError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index fc9261a55027..9e650f5ff830 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra @@ -104,6 +104,7 @@ func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Ba cluster := gocql.NewCluster(hosts...) 
cluster.Port = port cluster.Keyspace = keyspace + cluster.Consistency = consistency if retryCountStr, ok := conf["simple_retry_policy_retries"]; ok { retryCount, err := strconv.Atoi(retryCountStr) @@ -149,6 +150,14 @@ func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Ba cluster.Timeout = time.Duration(connectionTimeout) * time.Second } + if disableInitialHostLookupStr, ok := conf["disable_host_initial_lookup"]; ok { + disableInitialHostLookup, err := strconv.ParseBool(disableInitialHostLookupStr) + if err != nil { + return nil, fmt.Errorf("'disable_host_initial_lookup' must be a bool") + } + cluster.DisableInitialHostLookup = disableInitialHostLookup + } + if err := setupCassandraTLS(conf, cluster); err != nil { return nil, err } diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go index 9466d0a70845..3370c3947960 100644 --- a/physical/cassandra/cassandra_test.go +++ b/physical/cassandra/cassandra_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go index 38f935cdebd5..17e202922f3b 100644 --- a/physical/cockroachdb/cockroachdb.go +++ b/physical/cockroachdb/cockroachdb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cockroachdb @@ -14,13 +14,11 @@ import ( "unicode" metrics "github.com/armon/go-metrics" - "github.com/cockroachdb/cockroach-go/crdb" + "github.com/cockroachdb/cockroach-go/v2/crdb" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/physical" - - // CockroachDB uses the Postgres SQL driver _ "github.com/jackc/pgx/v4/stdlib" ) diff --git a/physical/cockroachdb/cockroachdb_ha.go b/physical/cockroachdb/cockroachdb_ha.go index 39b617546a87..03728d63c236 100644 --- a/physical/cockroachdb/cockroachdb_ha.go +++ b/physical/cockroachdb/cockroachdb_ha.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cockroachdb diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go index 244330ad1a58..05d342519c33 100644 --- a/physical/cockroachdb/cockroachdb_test.go +++ b/physical/cockroachdb/cockroachdb_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cockroachdb @@ -9,10 +9,12 @@ import ( "fmt" "net/url" "os" + "runtime" + "strings" "testing" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) @@ -26,6 +28,11 @@ type Config struct { var _ docker.ServiceConfig = &Config{} func prepareCockroachDBTestContainer(t *testing.T) (func(), *Config) { + // Skipping, as this image can't run on arm architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as CockroachDB 1.0 is not supported on ARM architectures") + } + if retURL := os.Getenv("CR_URL"); retURL != "" { s, err := docker.NewServiceURLParse(retURL) if err != nil { diff --git a/physical/cockroachdb/keywords.go b/physical/cockroachdb/keywords.go index f44089f9f76f..c57c9eae1a59 100644 --- a/physical/cockroachdb/keywords.go +++ b/physical/cockroachdb/keywords.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cockroachdb diff --git a/physical/consul/consul.go b/physical/consul/consul.go index b17dbc4c1693..dec3717a0207 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -10,6 +10,7 @@ import ( "net/http" "strconv" "strings" + "sync" "sync/atomic" "time" @@ -40,10 +41,11 @@ const ( // Verify ConsulBackend satisfies the correct interfaces var ( - _ physical.Backend = (*ConsulBackend)(nil) - _ physical.HABackend = (*ConsulBackend)(nil) - _ physical.Lock = (*ConsulLock)(nil) - _ physical.Transactional = (*ConsulBackend)(nil) + _ physical.Backend = (*ConsulBackend)(nil) + _ physical.FencingHABackend = (*ConsulBackend)(nil) + _ physical.Lock = (*ConsulLock)(nil) + _ physical.Transactional = (*ConsulBackend)(nil) + _ physical.TransactionalLimits = (*ConsulBackend)(nil) GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in consul backend") ) @@ -53,6 +55,7 @@ var ( // it allows Vault to run on multiple machines in a highly-available manner. // failGetInTxn is only used in tests. type ConsulBackend struct { + logger log.Logger client *api.Client path string kv *api.KV @@ -62,6 +65,7 @@ type ConsulBackend struct { sessionTTL string lockWaitTime time.Duration failGetInTxn *uint32 + activeNodeLock atomic.Pointer[ConsulLock] } // NewConsulBackend constructs a Consul backend using the given API client @@ -152,6 +156,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe // Set up the backend c := &ConsulBackend{ + logger: logger, path: path, client: client, kv: client.KV(), @@ -262,12 +267,53 @@ func (c *ConsulBackend) ExpandedCapabilitiesAvailable(ctx context.Context) bool return available } +func (c *ConsulBackend) writeTxnOps(ctx context.Context, len int) ([]*api.TxnOp, string) { + if len < 1 { + len = 1 + } + ops := make([]*api.TxnOp, 0, len+1) + + // If we don't have a lock yet, return a transaction with no session check. We + // need to do this to allow writes during cluster initialization before there + // is an active node. 
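+	// (activeNodeLock is populated via RegisterActiveNodeLock once this node
+	// has obtained the active node lock; until then writes are intentionally
+	// unfenced.)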
+ lock := c.activeNodeLock.Load() + if lock == nil { + return ops, "" + } + + lockKey, lockSession := lock.Info() + if lockKey == "" || lockSession == "" { + return ops, "" + } + + // If the context used to write has been marked as a special case write that + // happens outside of a lock then don't add the session check. + if physical.IsUnfencedWrite(ctx) { + return ops, "" + } + + // Insert the session check operation at index 0. This will allow us later to + // work out easily if a write failure is because of the session check. + ops = append(ops, &api.TxnOp{ + KV: &api.KVTxnOp{ + Verb: api.KVCheckSession, + Key: lockKey, + Session: lockSession, + }, + }) + return ops, lockSession +} + // Transaction is used to run multiple entries via a transaction. func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + return c.txnInternal(ctx, txns, "transaction") +} + +func (c *ConsulBackend) txnInternal(ctx context.Context, txns []*physical.TxnEntry, apiOpName string) error { if len(txns) == 0 { return nil } - defer metrics.MeasureSince([]string{"consul", "transaction"}, time.Now()) + defer metrics.MeasureSince([]string{"consul", apiOpName}, time.Now()) failGetInTxn := atomic.LoadUint32(c.failGetInTxn) for _, t := range txns { @@ -276,7 +322,7 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt } } - ops := make([]*api.TxnOp, 0, len(txns)) + ops, sessionID := c.writeTxnOps(ctx, len(txns)) for _, t := range txns { o, err := c.makeApiTxn(t) if err != nil { @@ -302,14 +348,15 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt } return err } - if ok && len(resp.Errors) == 0 { - // Loop over results and cache them in a map. Note that we're only caching the first time we see a key, - // which _should_ correspond to a Get operation, since we expect those come first in our txns slice. + // Loop over results and cache them in a map. Note that we're only caching + // the first time we see a key, which _should_ correspond to a Get + // operation, since we expect those come first in our txns slice (though + // after check-session). for _, txnr := range resp.Results { if len(txnr.KV.Value) > 0 { - // We need to trim the Consul kv path (typically "vault/") from the key otherwise it won't - // match the transaction entries we have. + // We need to trim the Consul kv path (typically "vault/") from the key + // otherwise it won't match the transaction entries we have. key := strings.TrimPrefix(txnr.KV.Key, c.path) if _, found := kvMap[key]; !found { kvMap[key] = txnr.KV.Value @@ -321,6 +368,31 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt if len(resp.Errors) > 0 { for _, res := range resp.Errors { retErr = multierror.Append(retErr, errors.New(res.What)) + if res.OpIndex == 0 && sessionID != "" { + // We added a session check (sessionID not empty) so an error at OpIndex + // 0 means that we failed that session check. We don't attempt to string + // match because Consul can return at least three different errors here + // with no common string. In all cases though failing this check means + // we no longer hold the lock because it was released, modified or + // deleted. Rather than just continuing to try writing until the + // blocking query manages to notice we're no longer the lock holder + // (which can take 10s of seconds even in good network conditions in my + // testing) we can now Unlock directly here. 
Our ConsulLock now has a + // shortcut that will cause the lock to close the leaderCh immediately + // when we call without waiting for the blocking query to return (unlike + // Consul's current Lock implementation). But before we unlock, we + // should re-load the lock and ensure it's still the same instance we + // just tried to write with in case this goroutine is somehow really + // delayed and we actually acquired a whole new lock in the meantime! + lock := c.activeNodeLock.Load() + if lock != nil { + _, lockSessionID := lock.Info() + if sessionID == lockSessionID { + c.logger.Warn("session check failed on write, we lost active node lock, stepping down", "err", res.What) + lock.Unlock() + } + } + } } } @@ -359,29 +431,24 @@ func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) { return &api.TxnOp{KV: op}, nil } +func (c *ConsulBackend) TransactionLimits() (int, int) { + // Note that even for modern Consul versions that support 128 entries per txn, + // we have an effective limit of 64 write operations because the other 64 are + // used for undo log read operations. We also reserve 1 for a check-session + // operation to prevent split brain so the most we allow WAL to put in a batch + // is 63. + return 63, 128 * 1024 +} + // Put is used to insert or update an entry func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error { - defer metrics.MeasureSince([]string{"consul", "put"}, time.Now()) - - c.permitPool.Acquire() - defer c.permitPool.Release() - - pair := &api.KVPair{ - Key: c.path + entry.Key, - Value: entry.Value, - } - - writeOpts := &api.WriteOptions{} - writeOpts = writeOpts.WithContext(ctx) - - _, err := c.kv.Put(pair, writeOpts) - if err != nil { - if strings.Contains(err.Error(), "Value exceeds") { - return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err) - } - return err + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: entry, + }, } - return nil + return c.txnInternal(ctx, txns, "put") } // Get is used to fetch an entry @@ -414,16 +481,15 @@ func (c *ConsulBackend) Get(ctx context.Context, key string) (*physical.Entry, e // Delete is used to permanently delete an entry func (c *ConsulBackend) Delete(ctx context.Context, key string) error { - defer metrics.MeasureSince([]string{"consul", "delete"}, time.Now()) - - c.permitPool.Acquire() - defer c.permitPool.Release() - - writeOpts := &api.WriteOptions{} - writeOpts = writeOpts.WithContext(ctx) - - _, err := c.kv.Delete(c.path+key, writeOpts) - return err + txns := []*physical.TxnEntry{ + { + Operation: physical.DeleteOperation, + Entry: &physical.Entry{ + Key: key, + }, + }, + } + return c.txnInternal(ctx, txns, "delete") } // List is used to list all the keys under a given @@ -463,24 +529,14 @@ func (c *ConsulBackend) FailGetInTxn(fail bool) { // LockWith is used for mutual exclusion based on the given key. 
 func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) {
-	// Create the lock
-	opts := &api.LockOptions{
-		Key:            c.path + key,
-		Value:          []byte(value),
-		SessionName:    "Vault Lock",
-		MonitorRetries: 5,
-		SessionTTL:     c.sessionTTL,
-		LockWaitTime:   c.lockWaitTime,
-	}
-	lock, err := c.client.LockOpts(opts)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create lock: %w", err)
-	}
 	cl := &ConsulLock{
+		logger:          c.logger,
 		client:          c.client,
 		key:             c.path + key,
-		lock:            lock,
+		value:           value,
 		consistencyMode: c.consistencyMode,
+		sessionTTL:      c.sessionTTL,
+		lockWaitTime:    c.lockWaitTime,
 	}
 	return cl, nil
 }
@@ -505,20 +561,203 @@ func (c *ConsulBackend) DetectHostAddr() (string, error) {
 	return addr, nil
 }
 
-// ConsulLock is used to provide the Lock interface backed by Consul
+// RegisterActiveNodeLock is called after the active node lock is obtained to
+// allow us to fence future writes.
+func (c *ConsulBackend) RegisterActiveNodeLock(l physical.Lock) error {
+	cl, ok := l.(*ConsulLock)
+	if !ok {
+		return fmt.Errorf("invalid Lock type")
+	}
+	c.activeNodeLock.Store(cl)
+	key, sessionID := cl.Info()
+	c.logger.Info("registered active node lock", "key", key, "sessionID", sessionID)
+	return nil
+}
+
+// ConsulLock is used to provide the Lock interface backed by Consul. We work
+// around some limitations of Consul's api.Lock noted in
+// https://github.com/hashicorp/consul/issues/18271 by creating and managing the
+// session ourselves, while using Consul's Lock to do the heavy lifting.
type ConsulLock struct {
+	logger          log.Logger
 	client          *api.Client
 	key             string
-	lock            *api.Lock
+	value           string
 	consistencyMode string
+	sessionTTL      string
+	lockWaitTime    time.Duration
+
+	mu      sync.Mutex // protects session state
+	session *lockSession
+	// sessionID is a copy of the value from session.id. We use a separate field
+	// because `Info` needs to keep returning the same sessionID after Unlock has
+	// cleaned up the session state so that we continue to fence any writes still
+	// in flight after the lock is Unlocked. It's easier to reason about that as a
+	// separate field rather than keeping an already-terminated session object
+	// around. Once Lock is called again this will be replaced (while mu is
+	// locked) with the new session ID. Must hold mu to read or write this.
+	sessionID string
+}
+
+type lockSession struct {
+	// id is immutable after the session is created so does not need mu held
+	id string
+
+	// mu protects the lock and unlockCh to ensure they are only cleaned up once
+	mu       sync.Mutex
+	lock     *api.Lock
+	unlockCh chan struct{}
+}
+
+func (s *lockSession) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	lockHeld := false
+	defer func() {
+		if !lockHeld {
+			s.cleanupLocked()
+		}
+	}()
+
+	consulLeaderCh, err := s.lock.Lock(stopCh)
+	if err != nil {
+		return nil, err
+	}
+	if consulLeaderCh == nil {
+		// If both leaderCh and err are nil from Consul's Lock then it means we
+		// waited out lockWaitTime without acquiring the lock.
+		return nil, nil
+	}
+	// We got the Lock, monitor it!
+	lockHeld = true
+	leaderCh := make(chan struct{})
+	go s.monitorLock(leaderCh, s.unlockCh, consulLeaderCh)
+	return leaderCh, nil
+}
+
+// monitorLock waits for either unlockCh or consulLeaderCh to close and then
+// closes leaderCh. It's designed to be run in a separate goroutine. Note that
+// we pass unlockCh rather than accessing it via the member variable because it
+// is mutated under the lock during Unlock so reading it from s could be racy.
+// We just need the chan created at the call site here so we pass it instead of
+// locking and unlocking in here.
+func (s *lockSession) monitorLock(leaderCh chan struct{}, unlockCh, consulLeaderCh <-chan struct{}) {
+	select {
+	case <-unlockCh:
+	case <-consulLeaderCh:
+	}
+	// We lost the lock. Close the leaderCh
+	close(leaderCh)
+
+	// Whichever chan closed, clean up to unwind all the state. If we were
+	// triggered by a cleanup call this will be a no-op, but if not it ensures all
+	// state is cleaned up correctly.
+	s.cleanup()
+}
+
+func (s *lockSession) cleanup() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.cleanupLocked()
+}
+
+func (s *lockSession) cleanupLocked() {
+	if s.lock != nil {
+		s.lock.Unlock()
+		s.lock = nil
+	}
+	if s.unlockCh != nil {
+		close(s.unlockCh)
+		s.unlockCh = nil
+	}
+	// Don't bother destroying sessions as they will be destroyed after TTL
+	// anyway.
+}
+
+func (c *ConsulLock) createSession() (*lockSession, error) {
+	se := &api.SessionEntry{
+		Name: "Vault Lock",
+		TTL:  c.sessionTTL,
+		// We use Consul's default LockDelay of 15s by not specifying it
+	}
+	session, _, err := c.client.Session().Create(se, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	opts := &api.LockOptions{
+		Key:            c.key,
+		Value:          []byte(c.value),
+		Session:        session,
+		MonitorRetries: 5,
+		LockWaitTime:   c.lockWaitTime,
+		SessionTTL:     c.sessionTTL,
+	}
+	lock, err := c.client.LockOpts(opts)
+	if err != nil {
+		// Don't bother destroying sessions as they will be destroyed after TTL
+		// anyway.
+		return nil, fmt.Errorf("failed to create lock: %w", err)
+	}
+
+	unlockCh := make(chan struct{})
+
+	s := &lockSession{
+		id:       session,
+		lock:     lock,
+		unlockCh: unlockCh,
+	}
+
+	// Start renewals of the session
+	go func() {
+		// Note we capture unlockCh here rather than s.unlockCh because s.unlockCh
+		// is mutated on cleanup which is racy since we don't hold a lock here.
+		// unlockCh will never be mutated though.
+		err := c.client.Session().RenewPeriodic(c.sessionTTL, session, nil, unlockCh)
+		if err != nil {
+			c.logger.Error("failed to renew consul session for more than the TTL, lock lost", "err", err)
+		}
+		// Release the other resources for this session only (i.e. don't call
+		// c.Unlock, since the lock might now be held under a different session).
+		s.cleanup()
+	}()
+	return s, nil
+}
 
 func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
-	return c.lock.Lock(stopCh)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.session != nil {
+		return nil, fmt.Errorf("lock instance already locked")
+	}
+
+	session, err := c.createSession()
+	if err != nil {
+		return nil, err
+	}
+	leaderCh, err := session.Lock(stopCh)
+	if leaderCh != nil && err == nil {
+		// We hold the lock, store the session
+		c.session = session
+		c.sessionID = session.id
+	}
+	return leaderCh, err
 }
 
 func (c *ConsulLock) Unlock() error {
-	return c.lock.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.session != nil {
+		c.session.cleanup()
+		c.session = nil
+		// Don't clear c.sessionID since we rely on returning the same old ID after
+		// Unlock until the next Lock.
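+		// Any writes still in flight under the old session will keep failing the
+		// check-session operation added in writeTxnOps, so they remain fenced
+		// even though the lock itself has been released.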
+	}
+	return nil
 }
 
 func (c *ConsulLock) Value() (bool, string, error) {
@@ -538,7 +777,18 @@ func (c *ConsulLock) Value() (bool, string, error) {
 	if pair == nil {
 		return false, "", nil
 	}
+	// Note that held is expected to mean "does _any_ node hold the lock", not
+	// "does this current instance hold the lock", so although we know what our
+	// own session ID is, we don't check that it matches here, only that there is
+	// _some_ session in Consul holding the lock right now.
 	held := pair.Session != ""
 	value := string(pair.Value)
 	return held, value, nil
 }
+
+func (c *ConsulLock) Info() (key, sessionid string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	return c.key, c.sessionID
+}
diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go
index b0a16ce85a37..bf1d809afdde 100644
--- a/physical/consul/consul_test.go
+++ b/physical/consul/consul_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package consul
 
@@ -19,6 +19,7 @@ import (
 	"github.com/hashicorp/vault/helper/testhelpers/consul"
 	"github.com/hashicorp/vault/sdk/helper/logging"
 	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/stretchr/testify/require"
 )
 
 func TestConsul_newConsulBackend(t *testing.T) {
@@ -158,6 +159,10 @@ func TestConsul_newConsulBackend(t *testing.T) {
 		// if test.max_parallel != cap(c.permitPool) {
 		// 	t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool))
 		// }
+
+		maxEntries, maxBytes := be.(physical.TransactionalLimits).TransactionLimits()
+		require.Equal(t, 63, maxEntries)
+		require.Equal(t, 128*1024, maxBytes)
 	}
 }
 
@@ -442,7 +447,9 @@ func TestConsulHABackend(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}
 
-	randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
+	// We used to use a timestamp here but then if you run multiple instances in
+	// parallel with one Consul they end up conflicting.
+	randPath := fmt.Sprintf("vault-%d/", rand.Int())
 	defer func() {
 		client.KV().DeleteTree(randPath, nil)
 	}()
@@ -453,6 +460,10 @@
 		"token":        config.Token,
 		"path":         randPath,
 		"max_parallel": "-1",
+		// We have to wait this out as part of the test, so shortening it a little
+		// from the default 15 seconds helps with test run times, especially when
+		// running this in a loop to detect flakes!
+		"lock_wait_time": "3s",
 	}
 
 	b, err := NewConsulBackend(backendConfig, logger)
@@ -478,4 +489,44 @@ func TestConsulHABackend(t *testing.T) {
 	if host == "" {
 		t.Fatalf("bad addr: %v", host)
 	}
+
+	// Calling `Info` on a Lock that has been unlocked must still return the old
+	// sessionID (until it is locked again), otherwise we will fail to fence
+	// writes that are still in flight from before (e.g. queued WAL or Merkle
+	// flushes) as soon as the first one unlocks the session, allowing corruption
+	// again.
+	l, err := b.(physical.HABackend).LockWith("test-lock-session-info", "bar")
+	require.NoError(t, err)
+
+	expectKey := randPath + "test-lock-session-info"
+
+	cl := l.(*ConsulLock)
+
+	stopCh := make(chan struct{})
+	time.AfterFunc(5*time.Second, func() {
+		close(stopCh)
+	})
+	leaderCh, err := cl.Lock(stopCh)
+	require.NoError(t, err)
+	require.NotNil(t, leaderCh)
+
+	key, sid := cl.Info()
+	require.Equal(t, expectKey, key)
+	require.NotEmpty(t, sid)
+
+	// Now Unlock the lock. Info must keep returning the same old sessionID (it
+	// is only replaced on the next Lock) so that in-flight writes remain fenced.
+	err = cl.Unlock()
+	require.NoError(t, err)
+	key2, sid2 := cl.Info()
+	require.Equal(t, key, key2)
+	require.Equal(t, sid, sid2)
+
+	// Lock it again; this should cause a new session to be created, so the SID
+	// should change.
+	leaderCh, err = cl.Lock(stopCh)
+	require.NoError(t, err)
+	require.NotNil(t, leaderCh)
+
+	key3, sid3 := cl.Info()
+	require.Equal(t, key, key3)
+	require.NotEqual(t, sid, sid3)
 }
diff --git a/physical/consul/helpers.go b/physical/consul/helpers.go
index ce7c47fdcc9d..2f6ac574b0b7 100644
--- a/physical/consul/helpers.go
+++ b/physical/consul/helpers.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package consul
 
diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go
index 9c54b0494ae7..cbb32e56096f 100644
--- a/physical/couchdb/couchdb.go
+++ b/physical/couchdb/couchdb.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package couchdb
 
@@ -8,7 +8,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"os"
@@ -114,7 +114,7 @@ func (m *couchDBClient) get(key string) (*physical.Entry, error) {
 	} else if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("GET returned %q", resp.Status)
 	}
-	bs, err := ioutil.ReadAll(resp.Body)
+	bs, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, err
 	}
@@ -143,7 +143,7 @@ func (m *couchDBClient) list(prefix string) ([]couchDBListItem, error) {
 	}
 	defer resp.Body.Close()
 
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, err
 	}
diff --git a/physical/couchdb/couchdb_test.go b/physical/couchdb/couchdb_test.go
index feb132bb4728..0428d52d250f 100644
--- a/physical/couchdb/couchdb_test.go
+++ b/physical/couchdb/couchdb_test.go
@@ -1,21 +1,22 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package couchdb
 
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"os"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
 
 	log "github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/vault/helper/testhelpers/docker"
+	"github.com/hashicorp/vault/sdk/helper/docker"
 	"github.com/hashicorp/vault/sdk/helper/logging"
 	"github.com/hashicorp/vault/sdk/physical"
 )
@@ -78,6 +79,13 @@ func (c couchDB) URL() *url.URL {
 var _ docker.ServiceConfig = &couchDB{}
 
 func prepareCouchdbDBTestContainer(t *testing.T) (func(), *couchDB) {
+	// ARM64 is only supported on CouchDB 2 and above. If we update
+	// our image and support to 2 and above, we can unskip these:
+	// https://hub.docker.com/r/arm64v8/couchdb/
+	if strings.Contains(runtime.GOARCH, "arm") {
+		t.Skip("Skipping, as CouchDB 1.6 is not supported on ARM architectures")
+	}
+
 	// If environment variable is set, assume caller wants to target a real
 	// DynamoDB.
if os.Getenv("COUCHDB_ENDPOINT") != "" { @@ -137,7 +145,7 @@ func setupCouchDB(ctx context.Context, host string, port int) (docker.ServiceCon } defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { - bs, _ := ioutil.ReadAll(resp.Body) + bs, _ := io.ReadAll(resp.Body) return nil, fmt.Errorf("failed to create database: %s %s\n", resp.Status, string(bs)) } } @@ -156,7 +164,7 @@ func setupCouchDB(ctx context.Context, host string, port int) (docker.ServiceCon } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - bs, _ := ioutil.ReadAll(resp.Body) + bs, _ := io.ReadAll(resp.Body) return nil, fmt.Errorf("Failed to create admin user: %s %s\n", resp.Status, string(bs)) } } diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go index 591c65cf710a..c4484d20d446 100644 --- a/physical/dynamodb/dynamodb.go +++ b/physical/dynamodb/dynamodb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package dynamodb @@ -15,23 +15,22 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" - log "github.com/hashicorp/go-hclog" - metrics "github.com/armon/go-metrics" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + "github.com/cenkalti/backoff/v3" cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/awsutil" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/physical" - - "github.com/cenkalti/backoff/v3" ) const ( @@ -89,7 +88,7 @@ type DynamoDBBackend struct { client *dynamodb.DynamoDB logger log.Logger haEnabled bool - permitPool *physical.PermitPool + permitPool *PermitPoolWithMetrics } // DynamoDBRecord is the representation of a vault entry in @@ -122,6 +121,12 @@ type DynamoDBLockRecord struct { Expires int64 } +type PermitPoolWithMetrics struct { + physical.PermitPool + pendingPermits int32 + poolSize int +} + // NewDynamoDBBackend constructs a DynamoDB backend. If the // configured DynamoDB table does not exist, it creates it. func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { @@ -248,7 +253,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac return &DynamoDBBackend{ table: table, client: client, - permitPool: physical.NewPermitPool(maxParInt), + permitPool: NewPermitPoolWithMetrics(maxParInt), haEnabled: haEnabledBool, logger: logger, }, nil @@ -405,6 +410,11 @@ func (d *DynamoDBBackend) List(ctx context.Context, prefix string) ([]string, er }}, }, }, + ProjectionExpression: aws.String("#key, #path"), + ExpressionAttributeNames: map[string]*string{ + "#key": aws.String("Key"), + "#path": aws.String("Path"), + }, } d.permitPool.Acquire() @@ -448,6 +458,11 @@ func (d *DynamoDBBackend) hasChildren(prefix string, exclude []string) (bool, er }}, }, }, + ProjectionExpression: aws.String("#key, #path"), + ExpressionAttributeNames: map[string]*string{ + "#key": aws.String("Key"), + "#path": aws.String("Path"), + }, // Avoid fetching too many items from DynamoDB for performance reasons. // We want to know if there are any children we don't expect to see. 
	// Answering that question requires fetching a minimum of one more item
@@ -522,7 +537,6 @@ func (d *DynamoDBBackend) batchWriteRequests(requests []*dynamodb.WriteRequest)
 			output, err = d.client.BatchWriteItem(&dynamodb.BatchWriteItemInput{
 				RequestItems: batch,
 			})
-
 			if err != nil {
 				break
 			}
@@ -852,7 +866,7 @@ func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, wr
 
 // recordPathForVaultKey transforms a vault key into
 // a value suitable for the `DynamoDBRecord`'s `Path`
-// property. This path equals the the vault key without
+// property. This path equals the vault key without
 // its last component.
 func recordPathForVaultKey(key string) string {
 	if strings.Contains(key, "/") {
@@ -863,7 +877,7 @@ func recordPathForVaultKey(key string) string {
 
 // recordKeyForVaultKey transforms a vault key into
 // a value suitable for the `DynamoDBRecord`'s `Key`
-// property. This path equals the the vault key's
+// property. This path equals the vault key's
 // last component.
 func recordKeyForVaultKey(key string) string {
 	return pkgPath.Base(key)
@@ -909,3 +923,39 @@ func isConditionCheckFailed(err error) bool {
 
 	return false
 }
+
+// NewPermitPoolWithMetrics returns a new permit pool with the provided
+// number of permits that also emits usage metrics.
+func NewPermitPoolWithMetrics(permits int) *PermitPoolWithMetrics {
+	return &PermitPoolWithMetrics{
+		PermitPool:     *physical.NewPermitPool(permits),
+		pendingPermits: 0,
+		poolSize:       permits,
+	}
+}
+
+// Acquire returns when a permit has been acquired
+func (c *PermitPoolWithMetrics) Acquire() {
+	atomic.AddInt32(&c.pendingPermits, 1)
+	c.emitPermitMetrics()
+	c.PermitPool.Acquire()
+	atomic.AddInt32(&c.pendingPermits, -1)
+	c.emitPermitMetrics()
+}
+
+// Release returns a permit to the pool
+func (c *PermitPoolWithMetrics) Release() {
+	c.PermitPool.Release()
+	c.emitPermitMetrics()
+}
+
+// CurrentPermits returns the number of permits currently in use.
+func (c *PermitPoolWithMetrics) CurrentPermits() int {
+	return c.PermitPool.CurrentPermits()
+}
+
+func (c *PermitPoolWithMetrics) emitPermitMetrics() {
+	metrics.SetGauge([]string{"dynamodb", "permit_pool", "pending_permits"}, float32(c.pendingPermits))
+	metrics.SetGauge([]string{"dynamodb", "permit_pool", "active_permits"}, float32(c.PermitPool.CurrentPermits()))
+	metrics.SetGauge([]string{"dynamodb", "permit_pool", "pool_size"}, float32(c.poolSize))
+}
diff --git a/physical/dynamodb/dynamodb_test.go b/physical/dynamodb/dynamodb_test.go
index 1007e6640cb5..0d6bd5914fce 100644
--- a/physical/dynamodb/dynamodb_test.go
+++ b/physical/dynamodb/dynamodb_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
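A minimal sketch of the intended gauge behavior of the `PermitPoolWithMetrics` wrapper above; the helper function is an illustrative assumption, not part of the backend:

```go
package dynamodb

// doWithPermit is a hypothetical sketch of the metric lifecycle:
// pending_permits counts goroutines that have called Acquire but not yet
// obtained a permit, active_permits counts permits currently held, and
// pool_size is the fixed capacity.
func doWithPermit(p *PermitPoolWithMetrics, op func()) {
	p.Acquire() // pending_permits rises while this blocks, then falls again
	defer p.Release()

	op() // runs while holding one of the pool's permits
}
```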
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package dynamodb @@ -10,20 +10,21 @@ import ( "net/http" "net/url" "os" + "runtime" + "strings" "testing" "time" - "github.com/go-test/deep" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/physical" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + "github.com/go-test/deep" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" ) func TestDynamoDBBackend(t *testing.T) { @@ -373,6 +374,11 @@ type Config struct { var _ docker.ServiceConfig = &Config{} func prepareDynamoDBTestContainer(t *testing.T) (func(), *Config) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + // If environment variable is set, assume caller wants to target a real // DynamoDB. if endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT"); endpoint != "" { diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go index f17a552b54c9..1d332dc9c43b 100644 --- a/physical/etcd/etcd.go +++ b/physical/etcd/etcd.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package etcd diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index 57a838a69743..3182f2d83c96 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package etcd @@ -14,7 +14,7 @@ import ( "sync" "time" - metrics "github.com/armon/go-metrics" + "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" @@ -122,6 +122,15 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen cfg.MaxCallRecvMsgSize = int(val) } + if maxSend, ok := conf["max_send_size"]; ok { + // grpc converts this to uint32 internally, so parse as that to avoid passing invalid values + val, err := strconv.ParseUint(maxSend, 10, 32) + if err != nil { + return nil, fmt.Errorf("value of 'max_send_size' (%v) could not be understood: %w", maxSend, err) + } + cfg.MaxCallSendMsgSize = int(val) + } + etcd, err := clientv3.New(cfg) if err != nil { return nil, err @@ -238,7 +247,7 @@ func (c *EtcdBackend) List(ctx context.Context, prefix string) ([]string, error) ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout) defer cancel() prefix = path.Join(c.path, prefix) + "/" - resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix()) + resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithKeysOnly()) if err != nil { return nil, err } diff --git a/physical/etcd/etcd3_test.go b/physical/etcd/etcd3_test.go index a2de6314dd6f..7af1ecd7163e 100644 --- a/physical/etcd/etcd3_test.go +++ b/physical/etcd/etcd3_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
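The `max_send_size` option added to the etcd3 backend above is read from the backend's string config map. A minimal sketch of configuring it, with arbitrary example values (the endpoint is an assumption):

```go
package etcd

import (
	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/physical"
)

// exampleBackend is illustrative only: "max_send_size" is a decimal byte
// count and must fit in a uint32, because gRPC stores the limit as one.
func exampleBackend() (physical.Backend, error) {
	conf := map[string]string{
		"address":       "https://etcd.example.com:2379", // example endpoint
		"max_send_size": "33554432",                      // 32 MiB
	}
	return newEtcd3Backend(conf, log.Default())
}
```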
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package etcd @@ -26,7 +26,7 @@ func TestEtcd3Backend(t *testing.T) { "username": "root", "password": "insecure", - // Syncing adverticed client urls should be disabled since docker port mapping confuses the client. + // Syncing advertised client urls should be disabled since docker port mapping confuses the client. "sync": "false", } diff --git a/physical/foundationdb/fdb-go-install.sh b/physical/foundationdb/fdb-go-install.sh index 4b2c12522314..8b56b09b25c8 100755 --- a/physical/foundationdb/fdb-go-install.sh +++ b/physical/foundationdb/fdb-go-install.sh @@ -1,6 +1,6 @@ #!/bin/bash -eu # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # # fdb-go-install.sh diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go index 03f984f56039..de706d1fa947 100644 --- a/physical/foundationdb/foundationdb.go +++ b/physical/foundationdb/foundationdb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build foundationdb @@ -15,15 +15,13 @@ import ( "sync" "time" - log "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" - "github.com/apple/foundationdb/bindings/go/src/fdb" "github.com/apple/foundationdb/bindings/go/src/fdb/directory" "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" - metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/physical" ) @@ -451,7 +449,6 @@ func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error { return nil, nil }) - if err != nil { return fmt.Errorf("put failed for item %s: %w", entry.Key, err) } @@ -509,7 +506,6 @@ func (f *FDBBackend) Delete(ctx context.Context, key string) error { return nil, nil }) - if err != nil { return fmt.Errorf("delete failed for item %s: %w", key, err) } diff --git a/physical/foundationdb/foundationdb_test.go b/physical/foundationdb/foundationdb_test.go index ecd6aa8234e7..8bbcbb3ed16f 100644 --- a/physical/foundationdb/foundationdb_test.go +++ b/physical/foundationdb/foundationdb_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build foundationdb @@ -13,15 +13,12 @@ import ( "testing" "time" - log "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" - "github.com/apple/foundationdb/bindings/go/src/fdb" "github.com/apple/foundationdb/bindings/go/src/fdb/directory" - + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" - dockertest "gopkg.in/ory-am/dockertest.v3" ) diff --git a/physical/foundationdb/foundationdbstub.go b/physical/foundationdb/foundationdbstub.go index 283ca0969f0f..288e8f7acf9e 100644 --- a/physical/foundationdb/foundationdbstub.go +++ b/physical/foundationdb/foundationdbstub.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !foundationdb @@ -9,7 +9,6 @@ import ( "fmt" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" ) diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go index 4a3f5bdf4961..51b1f926ff44 100644 --- a/physical/gcs/gcs.go +++ b/physical/gcs/gcs.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package gcs @@ -8,7 +8,7 @@ import ( "crypto/md5" "errors" "fmt" - "io/ioutil" + "io" "os" "sort" "strconv" @@ -230,7 +230,7 @@ func (b *Backend) Get(ctx context.Context, key string) (retEntry *physical.Entry } }() - value, err := ioutil.ReadAll(r) + value, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("failed to read value into a string: %w", err) } diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go index 2e4e762a7469..279b79ab1f08 100644 --- a/physical/gcs/gcs_ha.go +++ b/physical/gcs/gcs_ha.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package gcs diff --git a/physical/gcs/gcs_ha_test.go b/physical/gcs/gcs_ha_test.go index cdd59e731da9..ab6ca888a698 100644 --- a/physical/gcs/gcs_ha_test.go +++ b/physical/gcs/gcs_ha_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package gcs diff --git a/physical/gcs/gcs_test.go b/physical/gcs/gcs_test.go index 332ba35d79ba..6ee9ab432c81 100644 --- a/physical/gcs/gcs_test.go +++ b/physical/gcs/gcs_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package gcs diff --git a/physical/manta/manta.go b/physical/manta/manta.go index cfb0770144ab..5ab1c4e05791 100644 --- a/physical/manta/manta.go +++ b/physical/manta/manta.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package manta diff --git a/physical/manta/manta_test.go b/physical/manta/manta_test.go index 67d50fe71b14..11b024dd918c 100644 --- a/physical/manta/manta_test.go +++ b/physical/manta/manta_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package manta diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go index 2859a65ef324..ef6d54e0f5a5 100644 --- a/physical/mssql/mssql.go +++ b/physical/mssql/mssql.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mssql @@ -21,8 +21,10 @@ import ( ) // Verify MSSQLBackend satisfies the correct interfaces -var _ physical.Backend = (*MSSQLBackend)(nil) -var identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) +var ( + _ physical.Backend = (*MSSQLBackend)(nil) + identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) +) type MSSQLBackend struct { dbTable string diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index 2324ff5c03f2..6b794f10bfb4 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
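For reference, the `identifierRegex` in the regrouped `var` block above is what constrains the configurable MSSQL identifiers. A small illustrative check, with arbitrary sample names:

```go
package mssql

// exampleIdentifierChecks is illustrative only: the first rune must be a
// letter or underscore; subsequent runes may also be digits or @, #, $.
// Anything else (spaces, quotes, semicolons) fails, which keeps configured
// database/table names safe to interpolate into SQL statements.
func exampleIdentifierChecks() []bool {
	return []bool{
		identifierRegex.MatchString("vault_data"),     // true
		identifierRegex.MatchString("Vault$Table1"),   // true
		identifierRegex.MatchString("1badname"),       // false: leading digit
		identifierRegex.MatchString("bad name; drop"), // false: space and semicolon
	}
}
```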
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mssql diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go index 225882f7575b..8c4b89e00c7b 100644 --- a/physical/mysql/mysql.go +++ b/physical/mysql/mysql.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysql @@ -20,11 +20,10 @@ import ( "time" "unicode" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - metrics "github.com/armon/go-metrics" mysql "github.com/go-sql-driver/mysql" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/physical" ) diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go index b13c7e4a57c4..419966df4944 100644 --- a/physical/mysql/mysql_test.go +++ b/physical/mysql/mysql_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysql @@ -10,14 +10,12 @@ import ( "testing" "time" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/physical" - _ "github.com/go-sql-driver/mysql" mysql "github.com/go-sql-driver/mysql" - + log "github.com/hashicorp/go-hclog" mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" ) func TestMySQLPlaintextCatch(t *testing.T) { diff --git a/physical/oci/oci.go b/physical/oci/oci.go index 3665813d0479..628f31049182 100644 --- a/physical/oci/oci.go +++ b/physical/oci/oci.go @@ -5,7 +5,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "net/http" "sort" "strconv" @@ -172,7 +172,7 @@ func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error { BucketName: &o.bucketName, ObjectName: &entry.Key, ContentLength: &size, - PutObjectBody: ioutil.NopCloser(bytes.NewReader(entry.Value)), + PutObjectBody: io.NopCloser(bytes.NewReader(entry.Value)), OpcMeta: nil, OpcClientRequestId: &opcClientRequestId, } @@ -230,7 +230,7 @@ func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) return nil, fmt.Errorf("failed to read Value: %w", err) } - body, err := ioutil.ReadAll(resp.Content) + body, err := io.ReadAll(resp.Content) if err != nil { metrics.IncrCounter(metricGetFailed, 1) return nil, fmt.Errorf("failed to decode Value into bytes: %w", err) diff --git a/physical/oci/oci_ha.go b/physical/oci/oci_ha.go index a4c6ad52ea6b..cc0e0edc62a5 100644 --- a/physical/oci/oci_ha.go +++ b/physical/oci/oci_ha.go @@ -7,7 +7,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "sync" "sync/atomic" @@ -398,7 +398,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) { defer response.RawResponse.Body.Close() - body, err := ioutil.ReadAll(response.Content) + body, err := io.ReadAll(response.Content) if err != nil { metrics.IncrCounter(metricGetFailed, 1) l.backend.logger.Error("Error reading content", "err", err) @@ -487,7 +487,7 @@ func (l *Lock) writeLock() (bool, error) { BucketName: &l.backend.lockBucketName, ObjectName: &l.key, ContentLength: &size, - PutObjectBody: ioutil.NopCloser(bytes.NewReader(newLockRecordJson)), + PutObjectBody: io.NopCloser(bytes.NewReader(newLockRecordJson)), OpcMeta: nil, OpcClientRequestId: &opcClientRequestId, } diff --git a/physical/postgresql/postgresql.go 
b/physical/postgresql/postgresql.go
index a70133066750..911dfa18f3e2 100644
--- a/physical/postgresql/postgresql.go
+++ b/physical/postgresql/postgresql.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package postgresql
diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go
index 5dec40aba5d9..0dc0ce948602 100644
--- a/physical/postgresql/postgresql_test.go
+++ b/physical/postgresql/postgresql_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package postgresql
 
@@ -22,7 +22,7 @@ func TestPostgreSQLBackend(t *testing.T) {
 	// Use docker as pg backend if no url is provided via environment variables
 	connURL := os.Getenv("PGURL")
 	if connURL == "" {
-		cleanup, u := postgresql.PrepareTestContainer(t, "11.1")
+		cleanup, u := postgresql.PrepareTestContainer(t)
 		defer cleanup()
 		connURL = u
 	}
@@ -159,7 +159,7 @@ func TestConnectionURL(t *testing.T) {
 	for name, tt := range cases {
 		t.Run(name, func(t *testing.T) {
 			// This is necessary to avoid always testing the branch where the env is set.
-			// As long the the env is set --- even if the value is "" --- `ok` returns true.
+			// As long as the env is set --- even if the value is "" --- `ok` returns true.
 			if tt.input.envar != "" {
 				os.Setenv("VAULT_PG_CONNECTION_URL", tt.input.envar)
 				defer os.Unsetenv("VAULT_PG_CONNECTION_URL")
diff --git a/physical/raft/bolt_32bit_test.go b/physical/raft/bolt_32bit_test.go
index 7694d82f5cab..4e6aaccacd3a 100644
--- a/physical/raft/bolt_32bit_test.go
+++ b/physical/raft/bolt_32bit_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 //go:build 386 || arm
diff --git a/physical/raft/bolt_64bit_test.go b/physical/raft/bolt_64bit_test.go
index c4b89b8cdc14..3305d450dd6d 100644
--- a/physical/raft/bolt_64bit_test.go
+++ b/physical/raft/bolt_64bit_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 //go:build !386 && !arm
 
@@ -43,3 +43,29 @@ func Test_BoltOptions(t *testing.T) {
 		})
 	}
 }
+
+// TestMmapFlags tests the getMmapFlags function, ensuring it returns the
+// appropriate integer representing the desired mmap flag.
+func TestMmapFlags(t *testing.T) {
+	testCases := []struct {
+		name               string
+		disableMapPopulate bool
+	}{
+		{"MAP_POPULATE is enabled", false},
+		{"MAP_POPULATE disabled by env var", true},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+
+		t.Run(tc.name, func(t *testing.T) {
+			if tc.disableMapPopulate {
+				t.Setenv("VAULT_RAFT_DISABLE_MAP_POPULATE", "true")
+			}
+
+			isEnabled := usingMapPopulate(getMmapFlags(""))
+			if tc.disableMapPopulate && isEnabled {
+				t.Error("expected MAP_POPULATE to be disabled but it was enabled")
+			}
+		})
+	}
+}
diff --git a/physical/raft/bolt_linux.go b/physical/raft/bolt_linux.go
index b7774c61eaa8..bec6f7e386c7 100644
--- a/physical/raft/bolt_linux.go
+++ b/physical/raft/bolt_linux.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package raft
 
@@ -13,27 +13,49 @@ import (
 
 func init() {
 	getMmapFlags = getMmapFlagsLinux
+	usingMapPopulate = usingMapPopulateLinux
 }
 
 func getMmapFlagsLinux(dbPath string) int {
+	if setMapPopulateFlag(dbPath) {
+		return unix.MAP_POPULATE
+	}
+
+	return 0
+}
+
+// setMapPopulateFlag determines whether we should set the MAP_POPULATE flag,
+// which prepopulates the page tables for the mapped region of virtual memory,
+// helping reduce slowness at runtime caused by page faults.
+// We only want to set this flag if we've determined there's enough memory on
+// the system available to do so.
+func setMapPopulateFlag(dbPath string) bool {
 	if os.Getenv("VAULT_RAFT_DISABLE_MAP_POPULATE") != "" {
-		return 0
+		return false
 	}
 	stat, err := os.Stat(dbPath)
 	if err != nil {
-		return 0
+		return false
 	}
 	size := stat.Size()
 
 	v, err := mem.VirtualMemoryWithContext(context.Background())
 	if err != nil {
-		return 0
+		return false
 	}
 
 	// We won't worry about swap, since we already tell people not to use it.
 	if v.Total > uint64(size) {
-		return unix.MAP_POPULATE
+		return true
 	}
 
-	return 0
+	return false
+}
+
+// The unix.MAP_POPULATE constant only exists on Linux, so references to it
+// can only live in a *_linux.go file.
+func usingMapPopulateLinux(mmapFlag int) bool {
+	if mmapFlag == unix.MAP_POPULATE {
+		return true
+	}
+	return false
 }
diff --git a/physical/raft/chunking_test.go b/physical/raft/chunking_test.go
index 64f83e6b8daa..0309cdacdfc0 100644
--- a/physical/raft/chunking_test.go
+++ b/physical/raft/chunking_test.go
@@ -1,5 +1,5 @@
 // Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
+// SPDX-License-Identifier: BUSL-1.1
 
 package raft
 
@@ -15,7 +15,7 @@ import (
 	raftchunkingtypes "github.com/hashicorp/go-raftchunking/types"
 	"github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/raft"
-	"github.com/hashicorp/raft-boltdb/v2"
+	raftboltdb "github.com/hashicorp/raft-boltdb/v2"
 	"github.com/hashicorp/vault/sdk/physical"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -186,8 +186,6 @@ func TestFSM_Chunking_TermChange(t *testing.T) {
 }
 
 func TestRaft_Chunking_AppliedIndex(t *testing.T) {
-	t.Parallel()
-
 	raft, dir := GetRaft(t, true, false)
 	defer os.RemoveAll(dir)
diff --git a/physical/raft/config.go b/physical/raft/config.go
new file mode 100644
index 000000000000..fbd0d5038d61
--- /dev/null
+++ b/physical/raft/config.go
@@ -0,0 +1,320 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	bolt "github.com/hashicorp-forge/bbolt"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-uuid"
+	goversion "github.com/hashicorp/go-version"
+	autopilot "github.com/hashicorp/raft-autopilot"
+	etcdbolt "go.etcd.io/bbolt"
+)
+
+type RaftBackendConfig struct {
+	Path                        string
+	NodeId                      string
+	ApplyDelay                  time.Duration
+	RaftWal                     bool
+	RaftLogVerifierEnabled      bool
+	RaftLogVerificationInterval time.Duration
+	SnapshotDelay               time.Duration
+	MaxEntrySize                uint64
+	MaxBatchEntries             int
+	MaxBatchSize                int
+	AutopilotReconcileInterval  time.Duration
+	AutopilotUpdateInterval     time.Duration
+	RetryJoin                   string
+
+	// Enterprise only
+	RaftNonVoter                       bool
+	MaxMountAndNamespaceTableEntrySize uint64
+	AutopilotUpgradeVersion            string
+	AutopilotRedundancyZone            string
+}
+
+func parseRaftBackendConfig(conf map[string]string, logger log.Logger) (*RaftBackendConfig, error) {
+	c := &RaftBackendConfig{}
+
+	c.Path = conf["path"]
+	envPath := os.Getenv(EnvVaultRaftPath)
+	if envPath != "" {
+		c.Path = envPath
+	}
+
+	if c.Path == "" {
+		return nil, fmt.Errorf("'path' must be set")
+	}
+
+	c.NodeId = conf["node_id"]
+	envNodeId := os.Getenv(EnvVaultRaftNodeID)
+	if envNodeId != "" {
+		c.NodeId = envNodeId
+	}
+
+	if c.NodeId == "" {
+		localIDRaw, err := os.ReadFile(filepath.Join(c.Path, "node-id"))
+		if err == nil && len(localIDRaw) > 0 {
+			c.NodeId = string(localIDRaw)
+		}
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			return nil, err
+		}
+	}
+
+	if c.NodeId == "" {
+		id, err := uuid.GenerateUUID()
+		if err != nil {
+			return nil, err
+		}
+
+		if err = os.WriteFile(filepath.Join(c.Path, "node-id"), []byte(id), 0o600); err != nil {
+			return nil, err
+		}
+
+		c.NodeId = id
+	}
+
+	if delayRaw, ok := conf["apply_delay"]; ok {
+		delay, err := parseutil.ParseDurationSecond(delayRaw)
+		if err != nil {
+			return nil, fmt.Errorf("apply_delay does not parse as a duration: %w", err)
+		}
+
+		c.ApplyDelay = delay
+	}
+
+	if walRaw, ok := conf["raft_wal"]; ok {
+		useRaftWal, err := strconv.ParseBool(walRaw)
+		if err != nil {
+			return nil, fmt.Errorf("raft_wal does not parse as a boolean: %w", err)
+		}
+
+		c.RaftWal = useRaftWal
+	}
+
+	if rlveRaw, ok := conf["raft_log_verifier_enabled"]; ok {
+		rlve, err := strconv.ParseBool(rlveRaw)
+		if err != nil {
+			return nil, fmt.Errorf("raft_log_verifier_enabled does not parse as a boolean: %w", err)
+		}
+		c.RaftLogVerifierEnabled = rlve
+
+		c.RaftLogVerificationInterval = defaultRaftLogVerificationInterval
+		if rlviRaw, ok := conf["raft_log_verification_interval"]; ok {
+			rlvi, err := parseutil.ParseDurationSecond(rlviRaw)
+			if err != nil {
+				return nil, fmt.Errorf("raft_log_verification_interval does not parse as a duration: %w", err)
+			}
+
+			// Make sure our interval is at least the minimum allowed, so e.g.
people don't use 0s or 1s
+			if rlvi >= minimumRaftLogVerificationInterval {
+				c.RaftLogVerificationInterval = rlvi
+			} else {
+				logger.Warn("raft_log_verification_interval is less than the minimum allowed, using default instead",
+					"given", rlviRaw,
+					"minimum", minimumRaftLogVerificationInterval,
+					"default", defaultRaftLogVerificationInterval)
+			}
+		}
+	}
+
+	if delayRaw, ok := conf["snapshot_delay"]; ok {
+		delay, err := parseutil.ParseDurationSecond(delayRaw)
+		if err != nil {
+			return nil, fmt.Errorf("snapshot_delay does not parse as a duration: %w", err)
+		}
+		c.SnapshotDelay = delay
+	}
+
+	c.MaxEntrySize = defaultMaxEntrySize
+	if maxEntrySizeCfg := conf["max_entry_size"]; len(maxEntrySizeCfg) != 0 {
+		i, err := strconv.Atoi(maxEntrySizeCfg)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse 'max_entry_size': %w", err)
+		}
+
+		c.MaxEntrySize = uint64(i)
+	}
+
+	c.MaxMountAndNamespaceTableEntrySize = c.MaxEntrySize
+	if maxMNTEntrySize := conf["max_mount_and_namespace_table_entry_size"]; len(maxMNTEntrySize) != 0 {
+		i, err := strconv.Atoi(maxMNTEntrySize)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse 'max_mount_and_namespace_table_entry_size': %w", err)
+		}
+		if i < 1024 {
+			return nil, fmt.Errorf("'max_mount_and_namespace_table_entry_size' must be at least 1024 bytes")
+		}
+		if i > 10_485_760 {
+			return nil, fmt.Errorf("'max_mount_and_namespace_table_entry_size' must be at most 10,485,760 bytes (10MiB)")
+		}
+
+		c.MaxMountAndNamespaceTableEntrySize = uint64(i)
+		emitEntWarning(logger, "max_mount_and_namespace_table_entry_size")
+	}
+
+	c.MaxBatchEntries, c.MaxBatchSize = batchLimitsFromEnv(logger)
+
+	if interval := conf["autopilot_reconcile_interval"]; interval != "" {
+		interval, err := parseutil.ParseDurationSecond(interval)
+		if err != nil {
+			return nil, fmt.Errorf("autopilot_reconcile_interval does not parse as a duration: %w", err)
+		}
+		c.AutopilotReconcileInterval = interval
+	}
+
+	if interval := conf["autopilot_update_interval"]; interval != "" {
+		interval, err := parseutil.ParseDurationSecond(interval)
+		if err != nil {
+			return nil, fmt.Errorf("autopilot_update_interval does not parse as a duration: %w", err)
+		}
+		c.AutopilotUpdateInterval = interval
+	}
+
+	effectiveReconcileInterval := autopilot.DefaultReconcileInterval
+	effectiveUpdateInterval := autopilot.DefaultUpdateInterval
+
+	if c.AutopilotReconcileInterval != 0 {
+		effectiveReconcileInterval = c.AutopilotReconcileInterval
+	}
+	if c.AutopilotUpdateInterval != 0 {
+		effectiveUpdateInterval = c.AutopilotUpdateInterval
+	}
+
+	if effectiveReconcileInterval < effectiveUpdateInterval {
+		return nil, fmt.Errorf("autopilot_reconcile_interval (%v) should be larger than autopilot_update_interval (%v)", effectiveReconcileInterval, effectiveUpdateInterval)
+	}
+
+	if uv, ok := conf["autopilot_upgrade_version"]; ok && uv != "" {
+		_, err := goversion.NewVersion(uv)
+		if err != nil {
+			return nil, fmt.Errorf("autopilot_upgrade_version does not parse as a semantic version: %w", err)
+		}
+
+		c.AutopilotUpgradeVersion = uv
+	}
+	if c.AutopilotUpgradeVersion != "" {
+		emitEntWarning(logger, "autopilot_upgrade_version")
+	}
+
+	// Note: historically we've never parsed retry_join here because we have to
+	// wait until we have leader TLS info before we can work out the final retry
+	// join parameters. That happens in JoinConfig. So right now nothing uses
+	// c.RetryJoin because it's not available at that point. But I think it's less
But I think it's less + // surprising that if the field is present in the returned struct, that it + // should actually be populated and makes tests of this function less confusing + // too. + c.RetryJoin = conf["retry_join"] + + c.RaftNonVoter = false + if v := os.Getenv(EnvVaultRaftNonVoter); v != "" { + // Consistent with handling of other raft boolean env vars + // VAULT_RAFT_AUTOPILOT_DISABLE and VAULT_RAFT_FREELIST_SYNC + c.RaftNonVoter = true + } else if v, ok := conf[raftNonVoterConfigKey]; ok { + nonVoter, err := strconv.ParseBool(v) + if err != nil { + return nil, fmt.Errorf("failed to parse %s config value %q as a boolean: %w", raftNonVoterConfigKey, v, err) + } + + c.RaftNonVoter = nonVoter + } + + if c.RaftNonVoter && c.RetryJoin == "" { + return nil, fmt.Errorf("setting %s to true is only valid if at least one retry_join stanza is specified", raftNonVoterConfigKey) + } + if c.RaftNonVoter { + emitEntWarning(logger, raftNonVoterConfigKey) + } + + c.AutopilotRedundancyZone = conf["autopilot_redundancy_zone"] + if c.AutopilotRedundancyZone != "" { + emitEntWarning(logger, "autopilot_redundancy_zone") + } + + return c, nil +} + +// boltOptions returns a bolt.Options struct, suitable for passing to +// bolt.Open(), pre-configured with all of our preferred defaults. +func boltOptions(path string) *bolt.Options { + o := &bolt.Options{ + Timeout: 1 * time.Second, + FreelistType: bolt.FreelistMapType, + NoFreelistSync: true, + MmapFlags: getMmapFlags(path), + } + + if os.Getenv("VAULT_RAFT_FREELIST_TYPE") == "array" { + o.FreelistType = bolt.FreelistArrayType + } + + if os.Getenv("VAULT_RAFT_FREELIST_SYNC") != "" { + o.NoFreelistSync = false + } + + // By default, we want to set InitialMmapSize to 100GB, but only on 64bit platforms. + // Otherwise, we set it to whatever the value of VAULT_RAFT_INITIAL_MMAP_SIZE + // is, assuming it can be parsed as an int. Bolt itself sets this to 0 by default, + // so if users are wanting to turn this off, they can also set it to 0. Setting it + // to a negative value is the same as not setting it at all. + if os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE") == "" { + o.InitialMmapSize = initialMmapSize + } else { + imms, err := strconv.Atoi(os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE")) + + // If there's an error here, it means they passed something that's not convertible to + // a number. Rather than fail startup, just ignore it. + if err == nil && imms > 0 { + o.InitialMmapSize = imms + } + } + + return o +} + +func etcdboltOptions(path string) *etcdbolt.Options { + o := &etcdbolt.Options{ + Timeout: 1 * time.Second, + FreelistType: etcdbolt.FreelistMapType, + NoFreelistSync: true, + MmapFlags: getMmapFlags(path), + } + + if os.Getenv("VAULT_RAFT_FREELIST_TYPE") == "array" { + o.FreelistType = etcdbolt.FreelistArrayType + } + + if os.Getenv("VAULT_RAFT_FREELIST_SYNC") != "" { + o.NoFreelistSync = false + } + + // By default, we want to set InitialMmapSize to 100GB, but only on 64bit platforms. + // Otherwise, we set it to whatever the value of VAULT_RAFT_INITIAL_MMAP_SIZE + // is, assuming it can be parsed as an int. Bolt itself sets this to 0 by default, + // so if users are wanting to turn this off, they can also set it to 0. Setting it + // to a negative value is the same as not setting it at all. 
+	if os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE") == "" {
+		o.InitialMmapSize = initialMmapSize
+	} else {
+		imms, err := strconv.Atoi(os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE"))
+
+		// If there's an error here, it means they passed something that's not convertible to
+		// a number. Rather than fail startup, just ignore it.
+		if err == nil && imms > 0 {
+			o.InitialMmapSize = imms
+		}
+	}
+
+	return o
+}
diff --git a/physical/raft/config_test.go b/physical/raft/config_test.go
new file mode 100644
index 000000000000..37bf84c7bf42
--- /dev/null
+++ b/physical/raft/config_test.go
@@ -0,0 +1,430 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package raft
+
+import (
+	"bytes"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/helper/constants"
+	"github.com/stretchr/testify/require"
+)
+
+func ceOnlyWarnings(warns ...string) []string {
+	if !constants.IsEnterprise {
+		return warns
+	}
+	return nil
+}
+
+func TestRaft_ParseConfig(t *testing.T) {
+	// Note some of these could be parallel tests, but since we need to set env
+	// vars in some, we can't make them all parallel, so it's done inside the
+	// loop. We assume if a case doesn't set anything on the Env it's safe to
+	// run in parallel.
+	tcs := []struct {
+		name         string
+		conf         map[string]string
+		env          map[string]string
+		wantMutation func(cfg *RaftBackendConfig)
+		wantErr      string
+		wantWarns    []string
+	}{
+		// RAFT WAL --------------------------------------------------------------
+		{
+			name: "WAL backend junk",
+			conf: map[string]string{
+				"raft_wal": "notabooleanlol",
+			},
+			wantErr: "does not parse as a boolean",
+		},
+		{
+			name: "WAL verifier junk",
+			conf: map[string]string{
+				"raft_wal":                  "true",
+				"raft_log_verifier_enabled": "notabooleanlol",
+			},
+			wantErr: "does not parse as a boolean",
+		},
+		{
+			name: "WAL verifier interval, zero",
+			conf: map[string]string{
+				"raft_log_verifier_enabled":      "true",
+				"raft_log_verification_interval": "0s",
+			},
+			wantMutation: func(cfg *RaftBackendConfig) {
+				cfg.RaftLogVerifierEnabled = true
+				cfg.RaftLogVerificationInterval = defaultRaftLogVerificationInterval
+			},
+			wantWarns: []string{"raft_log_verification_interval is less than the minimum allowed"},
+		},
+		{
+			name: "WAL verifier interval, one",
+			conf: map[string]string{
+				"raft_log_verifier_enabled":      "true",
+				"raft_log_verification_interval": "1s",
+			},
+			wantMutation: func(cfg *RaftBackendConfig) {
+				cfg.RaftLogVerifierEnabled = true
+
+				// Below min so should get default
+				cfg.RaftLogVerificationInterval = defaultRaftLogVerificationInterval
+			},
+			wantWarns: []string{"raft_log_verification_interval is less than the minimum allowed"},
+		},
+		{
+			name: "WAL verifier interval, nothing",
+			conf: map[string]string{
+				"raft_log_verifier_enabled":      "true",
+				"raft_log_verification_interval": "",
+			},
+			wantMutation: func(cfg *RaftBackendConfig) {
+				cfg.RaftLogVerifierEnabled = true
+				cfg.RaftLogVerificationInterval = defaultRaftLogVerificationInterval
+			},
+			wantWarns: []string{"raft_log_verification_interval is less than the minimum allowed"},
+		},
+		{
+			name: "WAL verifier interval, valid",
+			conf: map[string]string{
+				"raft_log_verifier_enabled":      "true",
+				"raft_log_verification_interval": "75s",
+			},
+			wantMutation: func(cfg *RaftBackendConfig) {
+				cfg.RaftLogVerifierEnabled = true
+				cfg.RaftLogVerificationInterval = 75 * time.Second
+			},
+		},
+		{
+			name: "WAL verifier interval, junk",
+			conf: map[string]string{
+				"raft_log_verifier_enabled":      "true",
+				"raft_log_verification_interval":
"notaduration", + }, + wantErr: "does not parse as a duration", + }, + + // AUTOPILOT Upgrades ---------------------------------------------------- + { + name: "Autopilot upgrade version, junk", + conf: map[string]string{ + "autopilot_upgrade_version": "hahano", + }, + wantErr: "does not parse", + }, + + // AUTOPILOT Redundancy Zone --------------------------------------------- + { + name: "Autopilot redundancy zone, ok", + conf: map[string]string{ + "autopilot_redundancy_zone": "us-east-1a", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.AutopilotRedundancyZone = "us-east-1a" + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: field=autopilot_redundancy_zone"), + }, + + // Non-voter config ------------------------------------------------------ + { + name: "non-voter, no retry-join, valid false", + conf: map[string]string{ + raftNonVoterConfigKey: "false", + }, + wantMutation: func(cfg *RaftBackendConfig) { + // Should be default + }, + }, + { + name: "non-voter, retry-join, valid false", + conf: map[string]string{ + "retry_join": "not-empty", + raftNonVoterConfigKey: "false", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + }, + }, + { + name: "non-voter, no retry-join, valid true", + conf: map[string]string{ + raftNonVoterConfigKey: "true", + }, + wantErr: "only valid if at least one retry_join stanza is specified", + }, + { + name: "non-voter, retry-join, valid true", + conf: map[string]string{ + "retry_join": "not-empty", + raftNonVoterConfigKey: "true", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + cfg.RaftNonVoter = true + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: field=retry_join_as_non_voter"), + }, + { + name: "non-voter, no retry-join, invalid empty", + conf: map[string]string{ + raftNonVoterConfigKey: "", + }, + wantErr: "failed to parse retry_join_as_non_voter", + }, + { + name: "non-voter, retry-join, invalid empty", + conf: map[string]string{ + "retry_join": "not-empty", + raftNonVoterConfigKey: "", + }, + wantErr: "failed to parse retry_join_as_non_voter", + }, + { + name: "non-voter, no retry-join, invalid truthy", + conf: map[string]string{ + raftNonVoterConfigKey: "no", + }, + wantErr: "failed to parse retry_join_as_non_voter", + }, + { + name: "non-voter, retry-join, invalid truthy", + conf: map[string]string{ + "retry_join": "not-empty", + raftNonVoterConfigKey: "no", + }, + wantErr: "failed to parse retry_join_as_non_voter", + }, + { + name: "non-voter, no retry-join, invalid", + conf: map[string]string{ + raftNonVoterConfigKey: "totallywrong", + }, + wantErr: "failed to parse retry_join_as_non_voter", + }, + { + name: "non-voter, retry-join, invalid", + conf: map[string]string{ + "retry_join": "not-empty", + raftNonVoterConfigKey: "totallywrong", + }, + wantErr: "failed to parse retry_join_as_non_voter", + }, + { + // Note for historical reasons we treat any non-empty value as true in ENV + // vars. 
+ name: "non-voter, no retry-join, valid env false", + env: map[string]string{ + EnvVaultRaftNonVoter: "false", + }, + wantErr: "only valid if at least one retry_join stanza is specified", + }, + { + name: "non-voter, retry-join, valid env false", + env: map[string]string{ + EnvVaultRaftNonVoter: "false", + }, + conf: map[string]string{ + "retry_join": "not-empty", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + cfg.RaftNonVoter = true // Any non-empty value is true + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: field=retry_join_as_non_voter"), + }, + { + name: "non-voter, no retry-join, valid env true", + env: map[string]string{ + EnvVaultRaftNonVoter: "true", + }, + wantErr: "only valid if at least one retry_join stanza is specified", + }, + { + name: "non-voter, retry-join, valid env true", + env: map[string]string{ + EnvVaultRaftNonVoter: "true", + }, + conf: map[string]string{ + "retry_join": "not-empty", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + cfg.RaftNonVoter = true + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: field=retry_join_as_non_voter"), + }, + { + name: "non-voter, no retry-join, valid env not-boolean", + env: map[string]string{ + EnvVaultRaftNonVoter: "anything", + }, + wantErr: "only valid if at least one retry_join stanza is specified", + }, + { + name: "non-voter, retry-join, valid env not-boolean", + env: map[string]string{ + EnvVaultRaftNonVoter: "anything", + }, + conf: map[string]string{ + "retry_join": "not-empty", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + cfg.RaftNonVoter = true + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: field=retry_join_as_non_voter"), + }, + { + name: "non-voter, no retry-join, valid env empty", + env: map[string]string{ + EnvVaultRaftNonVoter: "", + }, + wantMutation: func(cfg *RaftBackendConfig) { + // Default + }, + }, + { + name: "non-voter, retry-join, valid env empty", + env: map[string]string{ + EnvVaultRaftNonVoter: "", + }, + conf: map[string]string{ + "retry_join": "not-empty", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + }, + }, + { + name: "non-voter, no retry-join, both set env preferred", + env: map[string]string{ + EnvVaultRaftNonVoter: "true", + }, + conf: map[string]string{ + raftNonVoterConfigKey: "false", + }, + wantErr: "only valid if at least one retry_join stanza is specified", + }, + { + name: "non-voter, retry-join, both set env preferred", + env: map[string]string{ + EnvVaultRaftNonVoter: "true", + }, + conf: map[string]string{ + "retry_join": "not-empty", + raftNonVoterConfigKey: "false", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.RetryJoin = "not-empty" + cfg.RaftNonVoter = true // Env should win + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: field=retry_join_as_non_voter"), + }, + + // Entry Size Limits ----------------------------------------------------- + { + name: "entry size, happy path", + conf: map[string]string{ + "max_entry_size": "123456", + "max_mount_and_namespace_table_entry_size": "654321", + }, + wantMutation: func(cfg *RaftBackendConfig) { + cfg.MaxEntrySize = 123456 + cfg.MaxMountAndNamespaceTableEntrySize = 654321 + }, + wantWarns: ceOnlyWarnings("configuration for a Vault Enterprise feature has been ignored: 
field=max_mount_and_namespace_table_entry_size"),
+		},
+		{
+			name: "entry size, junk entry size",
+			conf: map[string]string{
+				"max_entry_size": "sadfsaf",
+				"max_mount_and_namespace_table_entry_size": "654321",
+			},
+			wantErr: "failed to parse 'max_entry_size'",
+		},
+		{
+			name: "entry size, junk mount entry size",
+			conf: map[string]string{
+				"max_entry_size": "123456",
+				"max_mount_and_namespace_table_entry_size": "1MiB",
+			},
+			wantErr: "failed to parse 'max_mount_and_namespace_table_entry_size'",
+		},
+		{
+			name: "entry size, way too small mount entry size",
+			conf: map[string]string{
+				"max_mount_and_namespace_table_entry_size": "1",
+			},
+			wantErr: "'max_mount_and_namespace_table_entry_size' must be at least 1024 bytes",
+		},
+		{
+			name: "entry size, way too big mount entry size",
+			conf: map[string]string{
+				"max_mount_and_namespace_table_entry_size": "20000000",
+			},
+			wantErr: "'max_mount_and_namespace_table_entry_size' must be at most 10,485,760 bytes (10MiB)",
+		},
+	}
+
+	// Set a nodeid and path to remove noise from all the test cases.
+	baseConf := map[string]string{
+		"node_id": "abc123",
+		"path":    "/dummy/path",
+	}
+
+	for _, tc := range tcs {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			if len(tc.env) == 0 {
+				// Only run in parallel if there are no env vars to set.
+				t.Parallel()
+			}
+
+			var logs bytes.Buffer
+			logger := hclog.New(&hclog.LoggerOptions{
+				Level:  hclog.Warn,
+				Output: &logs,
+			})
+
+			if tc.conf == nil {
+				tc.conf = make(map[string]string)
+			}
+
+			for k, v := range baseConf {
+				if _, ok := tc.conf[k]; !ok {
+					tc.conf[k] = v
+				}
+			}
+
+			// Make a default-valued config to compare against later. Note we do
+			// this before setting ENV as that could change behavior!
+			wantCfg, err := parseRaftBackendConfig(baseConf, hclog.NewNullLogger())
+			require.NoError(t, err)
+
+			for k, v := range tc.env {
+				t.Setenv(k, v)
+			}
+
+			cfg, err := parseRaftBackendConfig(tc.conf, logger)
+
+			if tc.wantErr != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), tc.wantErr)
+				return
+			}
+
+			tc.wantMutation(wantCfg)
+
+			require.Equal(t, wantCfg, cfg)
+
+			allLogs := logs.String()
+			for _, warn := range tc.wantWarns {
+				require.Contains(t, allLogs, warn)
+			}
+			if len(tc.wantWarns) == 0 {
+				require.NotContains(t, allLogs, "[WARN]", "no warnings expected")
+			}
+		})
+	}
+}
diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go
index a8882812665e..cfbe8374aaff 100644
--- a/physical/raft/fsm.go
+++ b/physical/raft/fsm.go
@@ -1,17 +1,19 @@
 // Copyright (c) HashiCorp, Inc.
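A minimal sketch of calling the parser exercised by the test above, under assumed example values; only "path" (or the EnvVaultRaftPath env var) is required, and "node_id" would otherwise be generated and persisted to `<path>/node-id`:

```go
package raft

import hclog "github.com/hashicorp/go-hclog"

// exampleParseConfig is illustrative only; the path and node ID are
// arbitrary example values, not defaults.
func exampleParseConfig() (*RaftBackendConfig, error) {
	conf := map[string]string{
		"path":    "/var/lib/vault/raft", // example path
		"node_id": "node-a",              // example ID
	}
	return parseRaftBackendConfig(conf, hclog.NewNullLogger())
}
```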
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft import ( "bytes" "context" + "encoding/binary" "encoding/hex" "errors" "fmt" "io" "os" "path/filepath" + "runtime" "strconv" "strings" "sync" @@ -20,15 +22,16 @@ import ( "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-raftchunking" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/raft" + "github.com/hashicorp/raft-wal/verifier" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/plugin/pb" - bolt "go.etcd.io/bbolt" ) const ( @@ -36,6 +39,7 @@ const ( putOp restoreCallbackOp getOp + verifierCheckpointOp chunkingPrefix = "raftchunking/" databaseFilename = "vault.db" @@ -58,6 +62,12 @@ var ( _ raft.BatchingFSM = (*FSM)(nil) ) +var logVerifierMagicBytes [8]byte + +func init() { + binary.LittleEndian.PutUint64(logVerifierMagicBytes[:], verifier.ExtensionMagicPrefix) +} + type restoreCallback func(context.Context) error type FSMEntry struct { @@ -76,6 +86,69 @@ type FSMApplyResponse struct { EntrySlice []*FSMEntry } +type logVerificationChunkingShim struct { + chunker *raftchunking.ChunkingBatchingFSM +} + +// Apply implements raft.BatchingFSM. +func (s *logVerificationChunkingShim) Apply(l *raft.Log) interface{} { + return s.ApplyBatch([]*raft.Log{l})[0] +} + +// ApplyBatch implements raft.BatchingFSM +func (s *logVerificationChunkingShim) ApplyBatch(logs []*raft.Log) []interface{} { + // This is a hack because raftchunking doesn't play nicely with lower-level + // usage of Extensions field like we need for LogStore verification. + + // When we write a verifier log, we write a single byte that consists of the verifierCheckpointOp, + // and then we encode the verifier.ExtensionMagicPrefix into the raft log + // Extensions field. Both of those together should ensure that verifier + // raft logs can never be mistaken for chunked protobufs. See the docs on + // verifier.ExtensionMagicPrefix for the reasoning behind the specific value + // that was chosen, and how it ensures this property. + + // So here, we need to check for the exact conditions that we encoded when we wrote the + // verifier log out. If they match, we're going to insert a dummy raft log. We do this because 1) we + // don't want the chunking FSM to blow up on our verifier op that it won't understand and + // 2) we need to preserve the length of the incoming slice of raft logs because raft expects + // the length of the return value to match 1:1 to the length of the input operations. + newBatch := make([]*raft.Log, 0, len(logs)) + + for _, l := range logs { + if s.isVerifierLog(l) { + // Replace checkpoint with an empty op, but keep the index and term so + // downstream FSMs don't get confused about having a 0 index suddenly. 
+			newBatch = append(newBatch, &raft.Log{
+				Index:      l.Index,
+				Term:       l.Term,
+				AppendedAt: l.AppendedAt,
+			})
+		} else {
+			newBatch = append(newBatch, l)
+		}
+	}
+
+	return s.chunker.ApplyBatch(newBatch)
+}
+
+// Snapshot implements raft.BatchingFSM
+func (s *logVerificationChunkingShim) Snapshot() (raft.FSMSnapshot, error) {
+	return s.chunker.Snapshot()
+}
+
+// Restore implements raft.BatchingFSM
+func (s *logVerificationChunkingShim) Restore(snapshot io.ReadCloser) error {
+	return s.chunker.Restore(snapshot)
+}
+
+func (s *logVerificationChunkingShim) RestoreState(state *raftchunking.State) error {
+	return s.chunker.RestoreState(state)
+}
+
+func (s *logVerificationChunkingShim) isVerifierLog(l *raft.Log) bool {
+	return isRaftLogVerifyCheckpoint(l)
+}
+
 // FSM is Vault's primary state storage. It writes updates to a bolt db file
 // that lives on local disk. FSM implements raft.FSM and physical.Backend
 // interfaces.
@@ -103,11 +176,10 @@ type FSM struct {
 	// restoreCb is called after we've restored a snapshot
 	restoreCb restoreCallback
 
-	chunker *raftchunking.ChunkingBatchingFSM
+	chunker *logVerificationChunkingShim
 
 	localID         string
 	desiredSuffrage string
-	unknownOpTypes  sync.Map
 }
 
 // NewFSM constructs a FSM using the given directory
@@ -134,10 +206,12 @@ func NewFSM(path string, localID string, logger log.Logger) (*FSM, error) {
 		localID: localID,
 	}
 
-	f.chunker = raftchunking.NewChunkingBatchingFSM(f, &FSMChunkStorage{
-		f:   f,
-		ctx: context.Background(),
-	})
+	f.chunker = &logVerificationChunkingShim{
+		chunker: raftchunking.NewChunkingBatchingFSM(f, &FSMChunkStorage{
+			f:   f,
+			ctx: context.Background(),
+		}),
+	}
 
 	dbPath := filepath.Join(path, databaseFilename)
 	f.l.Lock()
@@ -173,9 +247,11 @@ func (f *FSM) openDBFile(dbPath string) error {
 		return errors.New("can not open empty filename")
 	}
 
+	vaultDbExists := true
 	st, err := os.Stat(dbPath)
 	switch {
 	case err != nil && os.IsNotExist(err):
+		vaultDbExists = false
 	case err != nil:
 		return fmt.Errorf("error checking raft FSM db file %q: %v", dbPath, err)
 	default:
@@ -187,11 +263,16 @@ func (f *FSM) openDBFile(dbPath string) error {
 	}
 
 	opts := boltOptions(dbPath)
+	if runtime.GOOS == "linux" && vaultDbExists && !usingMapPopulate(opts.MmapFlags) {
+		f.logger.Warn("the MAP_POPULATE mmap flag has not been set before opening the FSM database. This may be due to the database file being larger than the available memory on the system, or due to the VAULT_RAFT_DISABLE_MAP_POPULATE environment variable being set.
As a result, Vault may be slower to start up.") + } + start := time.Now() boltDB, err := bolt.Open(dbPath, 0o600, opts) if err != nil { return err } + elapsed := time.Now().Sub(start) f.logger.Debug("time to open database", "elapsed", elapsed, "path", dbPath) metrics.MeasureSince([]string{"raft_storage", "fsm", "open_db_file"}, start) @@ -608,11 +689,16 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { switch l.Type { case raft.LogCommand: command := &LogData{} - err := proto.Unmarshal(l.Data, command) - if err != nil { - f.logger.Error("error proto unmarshaling log data", "error", err) - panic("error proto unmarshaling log data") + + // explicitly check for zero length Data, which will be the case for verifier no-ops + if len(l.Data) > 0 { + err := proto.Unmarshal(l.Data, command) + if err != nil { + f.logger.Error("error proto unmarshaling log data", "error", err, "data", l.Data) + panic("error proto unmarshaling log data") + } } + commands = append(commands, command) case raft.LogConfiguration: configuration := raft.DecodeConfiguration(l.Data) @@ -659,6 +745,7 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { entrySlice := make([]*FSMEntry, 0) switch command := commandRaw.(type) { case *LogData: + // empty logs will have a zero length slice of Operations, so this loop will be a no-op for _, op := range command.Operations { var err error switch op.OpType { @@ -683,10 +770,7 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { go f.restoreCb(context.Background()) } default: - if _, ok := f.unknownOpTypes.Load(op.OpType); !ok { - f.logger.Error("unsupported transaction operation", "op", op.OpType) - f.unknownOpTypes.Store(op.OpType, struct{}{}) - } + return fmt.Errorf("%q is not a supported transaction operation", op.OpType) } if err != nil { return err diff --git a/physical/raft/fsm_test.go b/physical/raft/fsm_test.go index ba0e382f0977..d1f84bbeb1cb 100644 --- a/physical/raft/fsm_test.go +++ b/physical/raft/fsm_test.go @@ -1,14 +1,12 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft import ( "context" "fmt" - "io/ioutil" "math/rand" - "os" "sort" "testing" @@ -17,13 +15,11 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/physical" + "github.com/stretchr/testify/require" ) -func getFSM(t testing.TB) (*FSM, string) { - raftDir, err := ioutil.TempDir("", "vault-raft-") - if err != nil { - t.Fatal(err) - } +func getFSM(t testing.TB) *FSM { + raftDir := t.TempDir() t.Logf("raft dir: %s", raftDir) logger := hclog.New(&hclog.LoggerOptions{ @@ -36,12 +32,11 @@ func getFSM(t testing.TB) (*FSM, string) { t.Fatal(err) } - return fsm, raftDir + return fsm } func TestFSM_Batching(t *testing.T) { - fsm, dir := getFSM(t) - defer func() { _ = os.RemoveAll(dir) }() + fsm := getFSM(t) var index uint64 var term uint64 = 1 @@ -133,8 +128,7 @@ func TestFSM_Batching(t *testing.T) { } func TestFSM_List(t *testing.T) { - fsm, dir := getFSM(t) - defer func() { _ = os.RemoveAll(dir) }() + fsm := getFSM(t) ctx := context.Background() count := 100 @@ -162,3 +156,41 @@ func TestFSM_List(t *testing.T) { t.Fatal(diff) } } + +// TestFSM_UnknownOperation calls ApplyBatch with a batch that has an unknown +// command operation type. 
The test verifies that the call panics +func TestFSM_UnknownOperation(t *testing.T) { + fsm := getFSM(t) + command := &LogData{ + Operations: make([]*LogOperation, 5), + } + + for i := range command.Operations { + op := putOp + if i == 4 { + // the last operation has an invalid op type + op = 0 + } + command.Operations[i] = &LogOperation{ + OpType: op, + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + } + } + commandBytes, err := proto.Marshal(command) + require.NoError(t, err) + + defer func() { + r := recover() + require.NotNil(t, r) + require.Contains(t, r, "failed to store data") + }() + fsm.ApplyBatch([]*raft.Log{{ + Index: 0, + Term: 1, + Type: raft.LogCommand, + Data: commandBytes, + }}) + + require.Fail(t, "failed to panic") +} diff --git a/physical/raft/io.go b/physical/raft/io.go index d3d3d4b4cb6b..98f96bc97012 100644 --- a/physical/raft/io.go +++ b/physical/raft/io.go @@ -45,6 +45,7 @@ type WriteCloser interface { type Reader interface { ReadMsg(msg proto.Message) error + GetLastReadSize() int } type ReadCloser interface { diff --git a/physical/raft/msgpack.go b/physical/raft/msgpack.go deleted file mode 100644 index 88ac74d59498..000000000000 --- a/physical/raft/msgpack.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package raft - -// If we downgrade msgpack from v1.1.5 to v0.5.5, everything will still -// work, but any pre-existing raft clusters will break on upgrade. -// This file exists so that the Vault project has an explicit dependency -// on the library, which allows us to pin the version in go.mod. - -import ( - _ "github.com/hashicorp/go-msgpack/codec" -) diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 1507aad830c1..6202b3734f81 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -1,18 +1,20 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft import ( + "bytes" "context" "crypto/tls" "errors" "fmt" "io" - "io/ioutil" "math/rand" + "net/url" "os" "path/filepath" + "runtime" "strconv" "sync" "sync/atomic" @@ -20,25 +22,26 @@ import ( "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" log "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" "github.com/hashicorp/go-raftchunking" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/tlsutil" - "github.com/hashicorp/go-uuid" - goversion "github.com/hashicorp/go-version" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" raftboltdb "github.com/hashicorp/raft-boltdb/v2" snapshot "github.com/hashicorp/raft-snapshot" + raftwal "github.com/hashicorp/raft-wal" + walmetrics "github.com/hashicorp/raft-wal/metrics" + "github.com/hashicorp/raft-wal/verifier" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault/cluster" - "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" - bolt "go.etcd.io/bbolt" + etcdbolt "go.etcd.io/bbolt" ) const ( @@ -51,16 +54,40 @@ const ( // EnvVaultRaftNonVoter is used to override the non_voter config option, telling Vault to join as a non-voter (i.e. read replica). 
EnvVaultRaftNonVoter = "VAULT_RAFT_RETRY_JOIN_AS_NON_VOTER"
raftNonVoterConfigKey = "retry_join_as_non_voter"
+
+ // EnvVaultRaftMaxBatchEntries is used to override the default maxBatchEntries
+ // limit.
+ EnvVaultRaftMaxBatchEntries = "VAULT_RAFT_MAX_BATCH_ENTRIES"
+
+ // EnvVaultRaftMaxBatchSizeBytes is used to override the default maxBatchSize
+ // limit.
+ EnvVaultRaftMaxBatchSizeBytes = "VAULT_RAFT_MAX_BATCH_SIZE_BYTES"
+
+ // defaultMaxBatchEntries is the default maxBatchEntries limit. This was
+ // derived from performance testing. It is effectively high enough never to be
+ // a real limit for realistic Vault operation sizes; the size limit provides
+ // the actual bound, since the amount of data stored is more relevant than the
+ // specific number of operations.
+ defaultMaxBatchEntries = 4096
+
+ // defaultMaxBatchSize is the default maxBatchSize limit. This was derived
+ // from performance testing.
+ defaultMaxBatchSize = 128 * 1024
)

-var getMmapFlags = func(string) int { return 0 }
+var (
+ getMmapFlags = func(string) int { return 0 }
+ usingMapPopulate = func(int) bool { return false }
+)

// Verify RaftBackend satisfies the correct interfaces
var (
- _ physical.Backend = (*RaftBackend)(nil)
- _ physical.Transactional = (*RaftBackend)(nil)
- _ physical.HABackend = (*RaftBackend)(nil)
- _ physical.Lock = (*RaftLock)(nil)
+ _ physical.Backend = (*RaftBackend)(nil)
+ _ physical.Transactional = (*RaftBackend)(nil)
+ _ physical.TransactionalLimits = (*RaftBackend)(nil)
+ _ physical.HABackend = (*RaftBackend)(nil)
+ _ physical.MountTableLimitingBackend = (*RaftBackend)(nil)
+ _ physical.Lock = (*RaftLock)(nil)
)

var (
@@ -68,10 +95,13 @@ var (
// This is used to reduce disk I/O for the recently committed entries.
raftLogCacheSize = 512

- raftState = "raft/"
- peersFileName = "peers.json"
- restoreOpDelayDuration = 5 * time.Second
- defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize)
+ raftState = "raft/"
+ raftWalDir = "wal/"
+ peersFileName = "peers.json"
+ restoreOpDelayDuration = 5 * time.Second
+ defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize)
+ defaultRaftLogVerificationInterval = 60 * time.Second
+ minimumRaftLogVerificationInterval = 10 * time.Second

GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in raft backend")
)

@@ -121,6 +151,18 @@ type RaftBackend struct {
// startup.
bootstrapConfig *raft.Configuration

+ // closers is a list of managed resources (such as the stores above or wrapper
+ // layers around them) that should have Close called on them when the backend
+ // is closed. We need to take care that each distinct object is closed only
+ // once, which might involve knowing how wrappers around stores work. For
+ // example, the raft-wal verifier wraps LogStore and is an io.Closer, but it
+ // also closes the underlying LogStore, so if we add it here we shouldn't also
+ // add the actual LogStore or StableStore if it's the same underlying instance.
+ // We could use a map[io.Closer]bool to prevent double registrations, but that
+ // doesn't solve the problem of "knowing" whether or not calling Close on some
+ // wrapper also calls "Close" on its underlying store.
+ closers []io.Closer
+
// dataDir is the location on the local filesystem that raft and FSM data
// will be stored.
dataDir string

@@ -140,6 +182,24 @@ type RaftBackend struct {
// performance.
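// (For scale: defaultMaxEntrySize above is twice the raft-chunking chunk
// size -- roughly 1 MiB, assuming the library's 512 KiB chunks -- and a
// single put whose encoded log entry exceeds the configured limit is
// rejected in applyLog before it ever reaches the raft log.)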
maxEntrySize uint64 + // maxMountAndNamespaceEntrySize imposes a size limit (in bytes) on a raft + // entry (put or transaction) for paths related to mount table and namespace + // metadata. The Raft storage doesn't "know" about these paths but Vault can + // call RegisterMountTablePath to inform it so that it can apply this + // alternate limit if one is configured. + maxMountAndNamespaceEntrySize uint64 + + // maxBatchEntries is the number of operation entries in each batch. It is set + // by default to a value we've tested to work well but may be overridden by + // Environment variable VAULT_RAFT_MAX_BATCH_ENTRIES. + maxBatchEntries int + + // maxBatchSize is the maximum combined key and value size of operation + // entries in each batch. It is set by default to a value we've tested to work + // well but may be overridden by Environment variable + // VAULT_RAFT_MAX_BATCH_SIZE_BYTES. + maxBatchSize int + // autopilot is the instance of raft-autopilot library implementation of the // autopilot features. This will be instantiated in both leader and followers. // However, only active node will have a "running" autopilot. @@ -186,6 +246,15 @@ type RaftBackend struct { effectiveSDKVersion string failGetInTxn *uint32 + + // raftLogVerifierEnabled and raftLogVerificationInterval control enabling the raft log verifier and how often + // it writes checkpoints. + raftLogVerifierEnabled bool + raftLogVerificationInterval time.Duration + + // specialPathLimits is a map of special paths to their configured entrySize + // limits. + specialPathLimits map[string]uint64 } // LeaderJoinInfo contains information required by a node to join itself as a @@ -218,7 +287,7 @@ type LeaderJoinInfo struct { // client authentication during TLS. LeaderClientKey string `json:"leader_client_key"` - // LeaderCACertFile is the path on disk to the the CA cert file of the + // LeaderCACertFile is the path on disk to the CA cert file of the // leader node. This should only be provided via Vault's configuration file. LeaderCACertFile string `json:"leader_ca_cert_file"` @@ -311,219 +380,245 @@ func EnsurePath(path string, dir bool) error { return os.MkdirAll(path, 0o700) } -// NewRaftBackend constructs a RaftBackend using the given directory -func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { - path := os.Getenv(EnvVaultRaftPath) - if path == "" { - pathFromConfig, ok := conf["path"] - if !ok { - return nil, fmt.Errorf("'path' must be set") - } - path = pathFromConfig +func NewClusterAddrBridge() *ClusterAddrBridge { + return &ClusterAddrBridge{ + clusterAddressByNodeID: make(map[string]string), } +} - var localID string - { - // Determine the local node ID from the environment. - if raftNodeID := os.Getenv(EnvVaultRaftNodeID); raftNodeID != "" { - localID = raftNodeID - } +type ClusterAddrBridge struct { + l sync.RWMutex + clusterAddressByNodeID map[string]string +} - // If not set in the environment check the configuration file. - if len(localID) == 0 { - localID = conf["node_id"] - } +func (c *ClusterAddrBridge) UpdateClusterAddr(nodeId string, clusterAddr string) { + c.l.Lock() + defer c.l.Unlock() + cu, _ := url.Parse(clusterAddr) + c.clusterAddressByNodeID[nodeId] = cu.Host +} - // If not set in the config check the "node-id" file. 
- if len(localID) == 0 { - localIDRaw, err := ioutil.ReadFile(filepath.Join(path, "node-id")) - switch { - case err == nil: - if len(localIDRaw) > 0 { - localID = string(localIDRaw) - } - case os.IsNotExist(err): - default: - return nil, err - } +func (c *ClusterAddrBridge) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) { + c.l.RLock() + defer c.l.RUnlock() + if addr, ok := c.clusterAddressByNodeID[string(id)]; ok { + return raft.ServerAddress(addr), nil + } + return "", fmt.Errorf("could not find cluster addr for id=%s", id) +} + +func batchLimitsFromEnv(logger log.Logger) (int, int) { + maxBatchEntries := defaultMaxBatchEntries + if envVal := os.Getenv(EnvVaultRaftMaxBatchEntries); envVal != "" { + if i, err := strconv.Atoi(envVal); err == nil && i > 0 { + maxBatchEntries = i + } else { + logger.Warn("failed to parse VAULT_RAFT_MAX_BATCH_ENTRIES as an integer > 0. Using default value.", + "env_val", envVal, "default_used", maxBatchEntries) } + } - // If all of the above fails generate a UUID and persist it to the - // "node-id" file. - if len(localID) == 0 { - id, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } + maxBatchSize := defaultMaxBatchSize + if envVal := os.Getenv(EnvVaultRaftMaxBatchSizeBytes); envVal != "" { + if i, err := strconv.Atoi(envVal); err == nil && i > 0 { + maxBatchSize = i + } else { + logger.Warn("failed to parse VAULT_RAFT_MAX_BATCH_SIZE_BYTES as an integer > 0. Using default value.", + "env_val", envVal, "default_used", maxBatchSize) + } + } - if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0o600); err != nil { - return nil, err - } + return maxBatchEntries, maxBatchSize +} - localID = id - } +// NewRaftBackend constructs a RaftBackend using the given directory +func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // parse the incoming map into a proper config struct + backendConfig, err := parseRaftBackendConfig(conf, logger) + if err != nil { + return nil, fmt.Errorf("error parsing config: %w", err) } // Create the FSM. - fsm, err := NewFSM(path, localID, logger.Named("fsm")) + fsm, err := NewFSM(backendConfig.Path, backendConfig.NodeId, logger.Named("fsm")) if err != nil { return nil, fmt.Errorf("failed to create fsm: %v", err) } - if delayRaw, ok := conf["apply_delay"]; ok { - delay, err := time.ParseDuration(delayRaw) - if err != nil { - return nil, fmt.Errorf("apply_delay does not parse as a duration: %w", err) - } + if backendConfig.ApplyDelay > 0 { fsm.applyCallback = func() { - time.Sleep(delay) + time.Sleep(backendConfig.ApplyDelay) } } + // Create the log store. // Build an all in-memory setup for dev mode, otherwise prepare a full // disk-based setup. - var log raft.LogStore - var stable raft.StableStore - var snap raft.SnapshotStore + var logStore raft.LogStore + var stableStore raft.StableStore + var snapStore raft.SnapshotStore + var closers []io.Closer var devMode bool if devMode { store := raft.NewInmemStore() - stable = store - log = store - snap = raft.NewInmemSnapshotStore() + stableStore = store + logStore = store + snapStore = raft.NewInmemSnapshotStore() } else { // Create the base raft path. - path := filepath.Join(path, raftState) - if err := EnsurePath(path, true); err != nil { + raftBasePath := filepath.Join(backendConfig.Path, raftState) + if err := EnsurePath(raftBasePath, true); err != nil { return nil, err } + dbPath := filepath.Join(raftBasePath, "raft.db") - // Create the backend raft store for logs and stable storage. 
- dbPath := filepath.Join(path, "raft.db")
- opts := boltOptions(dbPath)
- raftOptions := raftboltdb.Options{
- Path: dbPath,
- BoltOptions: opts,
- }
- store, err := raftboltdb.New(raftOptions)
+ // If a raft.db file exists from a previous use of BoltDB, warn about it and continue to use BoltDB
+ raftDbExists, err := fileExists(dbPath)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to check if raft.db already exists: %w", err)
}
- stable = store
- // Wrap the store in a LogCache to improve performance.
- cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
- if err != nil {
- return nil, err
+ if backendConfig.RaftWal && raftDbExists {
+ logger.Warn("raft is configured to use raft-wal for storage but existing raft.db detected. raft-wal config will be ignored.")
+ backendConfig.RaftWal = false
}
- log = cacheStore
- // Create the snapshot store.
- snapshots, err := NewBoltSnapshotStore(path, logger.Named("snapshot"), fsm)
- if err != nil {
- return nil, err
- }
- snap = snapshots
- }
+ if backendConfig.RaftWal {
+ raftWalPath := filepath.Join(raftBasePath, raftWalDir)
+ if err := EnsurePath(raftWalPath, true); err != nil {
+ return nil, err
+ }
- if delayRaw, ok := conf["snapshot_delay"]; ok {
- delay, err := time.ParseDuration(delayRaw)
- if err != nil {
- return nil, fmt.Errorf("snapshot_delay does not parse as a duration: %w", err)
- }
- snap = newSnapshotStoreDelay(snap, delay, logger)
- }
+ mc := walmetrics.NewGoMetricsCollector([]string{"raft", "wal"}, nil, nil)
+ wal, err := raftwal.Open(raftWalPath, raftwal.WithMetricsCollector(mc))
+ if err != nil {
+ return nil, fmt.Errorf("failed to open write-ahead-log: %w", err)
+ }
+ // We need to Close the store but don't register it in closers yet because
+ // if we are going to wrap it with a verifier we need to close through
+ // that instead.
- maxEntrySize := defaultMaxEntrySize
- if maxEntrySizeCfg := conf["max_entry_size"]; len(maxEntrySizeCfg) != 0 {
- i, err := strconv.Atoi(maxEntrySizeCfg)
- if err != nil {
- return nil, fmt.Errorf("failed to parse 'max_entry_size': %w", err)
- }
+ stableStore = wal
+ logStore = wal
+ } else {
+ // use the traditional BoltDB setup
+ opts := etcdboltOptions(dbPath)
+ raftOptions := raftboltdb.Options{
+ Path: dbPath,
+ BoltOptions: opts,
+ MsgpackUseNewTimeFormat: true,
+ }
- maxEntrySize = uint64(i)
- }
+ if runtime.GOOS == "linux" && raftDbExists && !usingMapPopulate(opts.MmapFlags) {
+ logger.Warn("the MAP_POPULATE mmap flag has not been set before opening the log store database. This may be due to the database file being larger than the available memory on the system, or due to the VAULT_RAFT_DISABLE_MAP_POPULATE environment variable being set. As a result, Vault may be slower to start up.")
+ }
- var reconcileInterval time.Duration
- if interval := conf["autopilot_reconcile_interval"]; interval != "" {
- interval, err := time.ParseDuration(interval)
- if err != nil {
- return nil, fmt.Errorf("autopilot_reconcile_interval does not parse as a duration: %w", err)
+ store, err := raftboltdb.New(raftOptions)
+ if err != nil {
+ return nil, err
+ }
+ // We need to Close the store but don't register it in closers yet because
+ // if we are going to wrap it with a verifier we need to close through
+ // that instead.
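+ // Sketch of the resulting layering when the verifier is enabled below:
+ //
+ //	bolt/wal store -> verifier (io.Closer) -> LogCache (not a Closer)
+ //
+ // Only the outermost io.Closer is registered; closing the verifier also
+ // closes the underlying store, so registering both would Close the same
+ // handle twice.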
+ + stableStore = store + logStore = store } - reconcileInterval = interval - } - var updateInterval time.Duration - if interval := conf["autopilot_update_interval"]; interval != "" { - interval, err := time.ParseDuration(interval) + // Create the snapshot store. + snapshots, err := NewBoltSnapshotStore(raftBasePath, logger.Named("snapshot"), fsm) if err != nil { - return nil, fmt.Errorf("autopilot_update_interval does not parse as a duration: %w", err) + return nil, err } - updateInterval = interval + snapStore = snapshots } - effectiveReconcileInterval := autopilot.DefaultReconcileInterval - effectiveUpdateInterval := autopilot.DefaultUpdateInterval - - if reconcileInterval != 0 { - effectiveReconcileInterval = reconcileInterval - } - if updateInterval != 0 { - effectiveUpdateInterval = updateInterval + // Hook up the verifier if it's enabled + if backendConfig.RaftLogVerifierEnabled { + mc := walmetrics.NewGoMetricsCollector([]string{"raft", "logstore", "verifier"}, nil, nil) + reportFn := makeLogVerifyReportFn(logger.Named("raft.logstore.verifier")) + v := verifier.NewLogStore(logStore, isLogVerifyCheckpoint, reportFn, mc) + logStore = v } - if effectiveReconcileInterval < effectiveUpdateInterval { - return nil, fmt.Errorf("autopilot_reconcile_interval (%v) should be larger than autopilot_update_interval (%v)", effectiveReconcileInterval, effectiveUpdateInterval) + // Register the logStore as a closer whether or not it's wrapped in a verifier + // (which is a closer). We do this before the LogCache since that is _not_ an + // io.Closer. + if closer, ok := logStore.(io.Closer); ok { + closers = append(closers, closer) } + // Note that we DON'T register the stableStore as a closer because right now + // we always use the same underlying object as the logStore and we don't want + // to call close on it twice. If we ever support different stable store and + // log store then this logic will get even more complex! We don't register + // snapStore because none of our snapshot stores are io.Closers. - var upgradeVersion string - if uv, ok := conf["autopilot_upgrade_version"]; ok && uv != "" { - upgradeVersion = uv - _, err := goversion.NewVersion(upgradeVersion) - if err != nil { - return nil, fmt.Errorf("autopilot_upgrade_version does not parse as a semantic version: %w", err) - } - } + // Close the FSM + closers = append(closers, fsm) - var nonVoter bool - if v := os.Getenv(EnvVaultRaftNonVoter); v != "" { - // Consistent with handling of other raft boolean env vars - // VAULT_RAFT_AUTOPILOT_DISABLE and VAULT_RAFT_FREELIST_SYNC - nonVoter = true - } else if v, ok := conf[raftNonVoterConfigKey]; ok { - nonVoter, err = strconv.ParseBool(v) - if err != nil { - return nil, fmt.Errorf("failed to parse %s config value %q as a boolean: %w", raftNonVoterConfigKey, v, err) - } + // Wrap the store in a LogCache to improve performance. 
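+ // (raftLogCacheSize is 512, so the most recently appended entries are served
+ // from memory during replication instead of being re-read from the bolt/wal
+ // store.)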
+ cacheStore, err := raft.NewLogCache(raftLogCacheSize, logStore) + if err != nil { + return nil, err } + logStore = cacheStore - if nonVoter && conf["retry_join"] == "" { - return nil, fmt.Errorf("setting %s to true is only valid if at least one retry_join stanza is specified", raftNonVoterConfigKey) + if backendConfig.SnapshotDelay > 0 { + snapStore = newSnapshotStoreDelay(snapStore, backendConfig.SnapshotDelay, logger) } return &RaftBackend{ - logger: logger, - fsm: fsm, - raftInitCh: make(chan struct{}), - conf: conf, - logStore: log, - stableStore: stable, - snapStore: snap, - dataDir: path, - localID: localID, - permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), - maxEntrySize: maxEntrySize, - followerHeartbeatTicker: time.NewTicker(time.Second), - autopilotReconcileInterval: reconcileInterval, - autopilotUpdateInterval: updateInterval, - redundancyZone: conf["autopilot_redundancy_zone"], - nonVoter: nonVoter, - upgradeVersion: upgradeVersion, - failGetInTxn: new(uint32), + logger: logger, + fsm: fsm, + raftInitCh: make(chan struct{}), + conf: conf, + logStore: logStore, + stableStore: stableStore, + snapStore: snapStore, + closers: closers, + dataDir: backendConfig.Path, + localID: backendConfig.NodeId, + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + maxEntrySize: backendConfig.MaxEntrySize, + maxMountAndNamespaceEntrySize: backendConfig.MaxMountAndNamespaceTableEntrySize, + maxBatchEntries: backendConfig.MaxBatchEntries, + maxBatchSize: backendConfig.MaxBatchSize, + followerHeartbeatTicker: time.NewTicker(time.Second), + autopilotReconcileInterval: backendConfig.AutopilotReconcileInterval, + autopilotUpdateInterval: backendConfig.AutopilotUpdateInterval, + redundancyZone: backendConfig.AutopilotRedundancyZone, + nonVoter: backendConfig.RaftNonVoter, + upgradeVersion: backendConfig.AutopilotUpgradeVersion, + failGetInTxn: new(uint32), + raftLogVerifierEnabled: backendConfig.RaftLogVerifierEnabled, + raftLogVerificationInterval: backendConfig.RaftLogVerificationInterval, + effectiveSDKVersion: version.GetVersion().Version, }, nil } +// RegisterMountTablePath informs the Backend that the given path represents +// part of the mount tables or related metadata. This allows the backend to +// apply different limits for this entry if configured to do so. +func (b *RaftBackend) RegisterMountTablePath(path string) { + // We don't need to lock here because this is only called during startup + + if b.maxMountAndNamespaceEntrySize > 0 { + // Set up the limit for this special path. + if b.specialPathLimits == nil { + b.specialPathLimits = make(map[string]uint64) + } + b.specialPathLimits[path] = b.maxMountAndNamespaceEntrySize + } +} + +// GetSpecialPathLimits returns any paths registered with special entry size +// limits. It's really only used to make integration testing of the plumbing for +// these paths simpler. 
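+// Hypothetical usage sketch (the path name is illustrative, not part of this
+// change): during mount setup, core could call
+//
+//	b.RegisterMountTablePath("core/mounts")
+//
+// after which writes to that registered path are checked against
+// maxMountAndNamespaceEntrySize rather than maxEntrySize (exact-path matching
+// is an assumption here; the lookup helper is not shown in this diff).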
+func (b *RaftBackend) GetSpecialPathLimits() map[string]uint64 { + return b.specialPathLimits +} + type snapshotStoreDelay struct { logger log.Logger wrapped raft.SnapshotStore @@ -561,14 +656,11 @@ func (b *RaftBackend) Close() error { b.l.Lock() defer b.l.Unlock() - if err := b.fsm.Close(); err != nil { - return err - } - - if err := b.stableStore.(*raftboltdb.BoltStore).Close(); err != nil { - return err + for _, cl := range b.closers { + if err := cl.Close(); err != nil { + return err + } } - return nil } @@ -580,12 +672,6 @@ func (b *RaftBackend) FailGetInTxn(fail bool) { atomic.StoreUint32(b.failGetInTxn, val) } -func (b *RaftBackend) SetEffectiveSDKVersion(sdkVersion string) { - b.l.Lock() - b.effectiveSDKVersion = sdkVersion - b.l.Unlock() -} - func (b *RaftBackend) RedundancyZone() string { b.l.RLock() defer b.l.RUnlock() @@ -600,7 +686,10 @@ func (b *RaftBackend) NonVoter() bool { return b.nonVoter } -func (b *RaftBackend) EffectiveVersion() string { +// UpgradeVersion returns the string that should be used by autopilot during automated upgrades. We return the +// specified upgradeVersion if it's present. If it's not, we fall back to effectiveSDKVersion, which is +// Vault's binary version (though that can be overridden for tests). +func (b *RaftBackend) UpgradeVersion() string { b.l.RLock() defer b.l.RUnlock() @@ -608,7 +697,27 @@ func (b *RaftBackend) EffectiveVersion() string { return b.upgradeVersion } - return version.GetVersion().Version + return b.effectiveSDKVersion +} + +func (b *RaftBackend) SDKVersion() string { + b.l.RLock() + defer b.l.RUnlock() + return b.effectiveSDKVersion +} + +func (b *RaftBackend) verificationInterval() time.Duration { + b.l.RLock() + defer b.l.RUnlock() + + return b.raftLogVerificationInterval +} + +func (b *RaftBackend) verifierEnabled() bool { + b.l.RLock() + defer b.l.RUnlock() + + return b.raftLogVerifierEnabled } // DisableUpgradeMigration returns the state of the DisableUpgradeMigration config flag and whether it was set or not @@ -623,13 +732,132 @@ func (b *RaftBackend) DisableUpgradeMigration() (bool, bool) { return b.autopilotConfig.DisableUpgradeMigration, true } +// StartRaftWalVerifier runs a raft log store verifier in the background, if configured to do so. +// This periodically writes out special raft logs to verify that the log store is not corrupting data. +// This is only safe to run on the raft leader. +func (b *RaftBackend) StartRaftWalVerifier(ctx context.Context) { + if !b.verifierEnabled() { + return + } + + go func() { + ticker := time.NewTicker(b.verificationInterval()) + defer ticker.Stop() + + logger := b.logger.Named("raft-wal-verifier") + + for { + select { + case <-ticker.C: + err := b.applyVerifierCheckpoint() + if err != nil { + logger.Error("error applying verification checkpoint", "error", err) + } + logger.Debug("sent verification checkpoint") + case <-ctx.Done(): + return + } + } + }() +} + +func (b *RaftBackend) applyVerifierCheckpoint() error { + data := make([]byte, 1) + data[0] = byte(verifierCheckpointOp) + + b.permitPool.Acquire() + b.l.RLock() + + var err error + applyFuture := b.raft.Apply(data, 0) + if e := applyFuture.Error(); e != nil { + err = e + } + + b.l.RUnlock() + b.permitPool.Release() + + return err +} + +// isLogVerifyCheckpoint is the verifier.IsCheckpointFn that can decode our raft logs for +// their type. 
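+// A checkpoint is a log whose Data is the single byte verifierCheckpointOp;
+// on the leader it carries no Extensions yet, while replicated copies carry
+// the verifier's magic-prefixed metadata (see isRaftLogVerifyCheckpoint later
+// in this file).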
+func isLogVerifyCheckpoint(l *raft.Log) (bool, error) { + return isRaftLogVerifyCheckpoint(l), nil +} + +func makeLogVerifyReportFn(logger log.Logger) verifier.ReportFn { + return func(r verifier.VerificationReport) { + if r.SkippedRange != nil { + logger.Warn("verification skipped range, consider decreasing validation interval if this is frequent", + "rangeStart", int64(r.SkippedRange.Start), + "rangeEnd", int64(r.SkippedRange.End), + ) + } + + l2 := logger.With( + "rangeStart", int64(r.Range.Start), + "rangeEnd", int64(r.Range.End), + "leaderChecksum", fmt.Sprintf("%08x", r.ExpectedSum), + "elapsed", r.Elapsed, + ) + + if r.Err == nil { + l2.Info("verification checksum OK", + "readChecksum", fmt.Sprintf("%08x", r.ReadSum), + ) + return + } + + if errors.Is(r.Err, verifier.ErrRangeMismatch) { + l2.Warn("verification checksum skipped as we don't have all logs in range") + return + } + + var csErr verifier.ErrChecksumMismatch + if errors.As(r.Err, &csErr) { + if r.WrittenSum > 0 && r.WrittenSum != r.ExpectedSum { + // The failure occurred before the follower wrote to the log so it + // must be corrupted in flight from the leader! + l2.Error("verification checksum FAILED: in-flight corruption", + "followerWriteChecksum", fmt.Sprintf("%08x", r.WrittenSum), + "readChecksum", fmt.Sprintf("%08x", r.ReadSum), + ) + } else { + l2.Error("verification checksum FAILED: storage corruption", + "followerWriteChecksum", fmt.Sprintf("%08x", r.WrittenSum), + "readChecksum", fmt.Sprintf("%08x", r.ReadSum), + ) + } + return + } + + // Some other unknown error occurred + l2.Error(r.Err.Error()) + } +} + func (b *RaftBackend) CollectMetrics(sink *metricsutil.ClusterMetricSink) { + var stats map[string]string + var logStoreStats *etcdbolt.Stats + b.l.RLock() - logstoreStats := b.stableStore.(*raftboltdb.BoltStore).Stats() + if boltStore, ok := b.stableStore.(*raftboltdb.BoltStore); ok { + bss := boltStore.Stats() + logStoreStats = &bss + } + + if b.raft != nil { + stats = b.raft.Stats() + } + fsmStats := b.fsm.Stats() - stats := b.raft.Stats() b.l.RUnlock() - b.collectMetricsWithStats(logstoreStats, sink, "logstore") + + if logStoreStats != nil { + b.collectEtcdBoltMetricsWithStats(*logStoreStats, sink, "logstore") + } + b.collectMetricsWithStats(fsmStats, sink, "fsm") labels := []metrics.Label{ { @@ -637,10 +865,13 @@ func (b *RaftBackend) CollectMetrics(sink *metricsutil.ClusterMetricSink) { Value: b.localID, }, } - for _, key := range []string{"term", "commit_index", "applied_index", "fsm_pending"} { - n, err := strconv.ParseUint(stats[key], 10, 64) - if err == nil { - sink.SetGaugeWithLabels([]string{"raft_storage", "stats", key}, float32(n), labels) + + if stats != nil { + for _, key := range []string{"term", "commit_index", "applied_index", "fsm_pending"} { + n, err := strconv.ParseUint(stats[key], 10, 64) + if err == nil { + sink.SetGaugeWithLabels([]string{"raft_storage", "stats", key}, float32(n), labels) + } } } } @@ -654,18 +885,41 @@ func (b *RaftBackend) collectMetricsWithStats(stats bolt.Stats, sink *metricsuti sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "used_bytes"}, float32(stats.FreelistInuse), labels) sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "started_read_transactions"}, float32(stats.TxN), labels) sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "currently_open_read_transactions"}, float32(stats.OpenTxN), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, 
float32(txstats.PageCount), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.PageAlloc), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.CursorCount), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.NodeCount), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.NodeDeref), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.Rebalance), labels) - sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.RebalanceTime.Milliseconds()), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.Split), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.Spill), labels) - sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.SpillTime.Milliseconds()), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.Write), labels) - sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.WriteTime.Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, float32(txstats.GetPageCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.GetPageAlloc()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.GetCursorCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.GetNodeCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.GetNodeDeref()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.GetRebalance()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.GetRebalanceTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.GetSplit()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.GetSpill()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.GetSpillTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.GetWrite()), labels) + sink.IncrCounterWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.GetWriteTime().Milliseconds()), labels) +} + +func (b *RaftBackend) collectEtcdBoltMetricsWithStats(stats etcdbolt.Stats, sink *metricsutil.ClusterMetricSink, database string) { + txstats := stats.TxStats + labels := []metricsutil.Label{{"database", database}} + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "free_pages"}, float32(stats.FreePageN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "pending_pages"}, float32(stats.PendingPageN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "allocated_bytes"}, float32(stats.FreeAlloc), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "used_bytes"}, float32(stats.FreelistInuse), 
labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "started_read_transactions"}, float32(stats.TxN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "currently_open_read_transactions"}, float32(stats.OpenTxN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, float32(txstats.GetPageCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.GetPageAlloc()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.GetCursorCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.GetNodeCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.GetNodeDeref()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.GetRebalance()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.GetRebalanceTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.GetSplit()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.GetSpill()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.GetSpillTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.GetWrite()), labels) + sink.IncrCounterWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.GetWriteTime().Milliseconds()), labels) } // RaftServer has information about a server in the Raft configuration @@ -812,7 +1066,7 @@ func (b *RaftBackend) applyConfigSettings(config *raft.Config) error { snapshotIntervalRaw, ok := b.conf["snapshot_interval"] if ok { var err error - snapshotInterval, err := time.ParseDuration(snapshotIntervalRaw) + snapshotInterval, err := parseutil.ParseDurationSecond(snapshotIntervalRaw) if err != nil { return err } @@ -847,6 +1101,11 @@ type SetupOpts struct { // RecoveryModeConfig is the configuration for the raft cluster in recovery // mode. RecoveryModeConfig *raft.Configuration + + // EffectiveSDKVersion is typically the version string baked into the binary. + // We pass it in though because it can be overridden in tests or via ENV in + // core. 
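+	// For example, a test could pass a fabricated version string here (say
+	// "1.99.0", purely illustrative) to exercise autopilot upgrade logic
+	// without rebuilding the binary.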
+ EffectiveSDKVersion string } func (b *RaftBackend) StartRecoveryCluster(ctx context.Context, peer Peer) error { @@ -890,6 +1149,11 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { return errors.New("no local node id configured") } + if opts.EffectiveSDKVersion != "" { + // Override the SDK version + b.effectiveSDKVersion = opts.EffectiveSDKVersion + } + // Setup the raft config raftConfig := raft.DefaultConfig() if err := b.applyConfigSettings(raftConfig); err != nil { @@ -941,11 +1205,12 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { return err } transConfig := &raft.NetworkTransportConfig{ - Stream: streamLayer, - MaxPool: 3, - Timeout: 10 * time.Second, - ServerAddressProvider: b.serverAddressProvider, - Logger: b.logger.Named("raft-net"), + Stream: streamLayer, + MaxPool: 3, + Timeout: 10 * time.Second, + ServerAddressProvider: b.serverAddressProvider, + Logger: b.logger.Named("raft-net"), + MsgpackUseNewTimeFormat: true, } transport := raft.NewNetworkTransportWithConfig(transConfig) @@ -1316,7 +1581,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e if b.raft == nil { return errors.New("raft storage is not initialized") } - b.logger.Trace("adding server to raft", "id", peerID) + b.logger.Trace("adding server to raft", "id", peerID, "addr", clusterAddr) future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0) return future.Error() } @@ -1325,7 +1590,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e return errors.New("raft storage autopilot is not initialized") } - b.logger.Trace("adding server to raft via autopilot", "id", peerID) + b.logger.Trace("adding server to raft via autopilot", "id", peerID, "addr", clusterAddr) return b.autopilot.AddServer(&autopilot.Server{ ID: raft.ServerID(peerID), Name: peerID, @@ -1367,17 +1632,17 @@ func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) { // SnapshotHTTP is a wrapper for Snapshot that sends the snapshot as an HTTP // response. -func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, access *seal.Access) error { +func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, sealer snapshot.Sealer) error { out.Header().Add("Content-Disposition", "attachment") out.Header().Add("Content-Type", "application/gzip") - return b.Snapshot(out, access) + return b.Snapshot(out, sealer) } // Snapshot takes a raft snapshot, packages it into a archive file and writes it // to the provided writer. Seal access is used to encrypt the SHASUM file so we // can validate the snapshot was taken using the same root keys or not. 
-func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { +func (b *RaftBackend) Snapshot(out io.Writer, sealer snapshot.Sealer) error { b.l.RLock() defer b.l.RUnlock() @@ -1385,15 +1650,7 @@ func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { return errors.New("raft storage is sealed") } - // If we have access to the seal create a sealer object - var s snapshot.Sealer - if access != nil { - s = &sealer{ - access: access, - } - } - - return snapshot.Write(b.logger.Named("snapshot"), b.raft, s, out) + return snapshot.Write(b.logger.Named("snapshot"), b.raft, sealer, out) } // WriteSnapshotToTemp reads a snapshot archive off the provided reader, @@ -1401,7 +1658,7 @@ func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { // access is used to decrypt the SHASUM file in the archive to ensure this // snapshot has the same root key as the running instance. If the provided // access is nil then it will skip that validation. -func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access *seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { +func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, sealer snapshot.Sealer) (*os.File, func(), raft.SnapshotMeta, error) { b.l.RLock() defer b.l.RUnlock() @@ -1410,15 +1667,7 @@ func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access *seal.Access) return nil, nil, metadata, errors.New("raft storage is sealed") } - // If we have access to the seal create a sealer object - var s snapshot.Sealer - if access != nil { - s = &sealer{ - access: access, - } - } - - snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, s) + snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, sealer) return snap, cleanup, metadata, err } @@ -1506,9 +1755,10 @@ func (b *RaftBackend) Get(ctx context.Context, path string) (*physical.Entry, er entry, err := b.fsm.Get(ctx, path) if entry != nil { valueLen := len(entry.Value) - if uint64(valueLen) > b.maxEntrySize { - b.logger.Warn("retrieved entry value is too large, has raft's max_entry_size been reduced?", - "size", valueLen, "max_entry_size", b.maxEntrySize) + maxSize := b.entrySizeLimitForPath(path) + if uint64(valueLen) > maxSize { + b.logger.Warn("retrieved entry value is too large, has raft's max_entry_size or max_mount_and_namespace_table_entry_size been reduced?", + "size", valueLen, "max_size", maxSize) } } @@ -1632,6 +1882,35 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry return err } +func (b *RaftBackend) TransactionLimits() (int, int) { + return b.maxBatchEntries, b.maxBatchSize +} + +// validateCommandEntrySizes is a helper to check the size of each transaction +// value isn't larger than is allowed. It must take into account the path in +// case any special limits have been set for mount table paths. Finally it +// returns the largest limit it needed to use so that calling code can size the +// overall log entry check correctly. In other words if max_entry_size is 1MB +// and max_mount_and_namespace_table_entry_size is 2MB, we check each key +// against the right limit and then return 1MB unless there is at least one +// mount table key being written in which case we allow the larger limit of 2MB. 
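+// The returned limit feeds directly into applyLog below, which compares the
+// size of the entire proto-marshaled LogData against it before handing the
+// entry to raft.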
+func (b *RaftBackend) validateCommandEntrySizes(command *LogData) (uint64, error) { + largestEntryLimit := b.maxEntrySize + + for _, op := range command.Operations { + if op.OpType == putOp { + entrySize := b.entrySizeLimitForPath(op.Key) + if len(op.Value) > int(entrySize) { + return 0, fmt.Errorf("%s, max value size for key %s is %d, got %d", physical.ErrValueTooLarge, op.Key, entrySize, len(op.Value)) + } + if entrySize > largestEntryLimit { + largestEntryLimit = entrySize + } + } + } + return largestEntryLimit, nil +} + // applyLog will take a given log command and apply it to the raft log. applyLog // doesn't return until the log has been applied to a quorum of servers and is // persisted to the local FSM. Caller should hold the backend's read lock. @@ -1643,14 +1922,19 @@ func (b *RaftBackend) applyLog(ctx context.Context, command *LogData) error { return err } + totalLogSizeLimit, err := b.validateCommandEntrySizes(command) + if err != nil { + return err + } + commandBytes, err := proto.Marshal(command) if err != nil { return err } cmdSize := len(commandBytes) - if uint64(cmdSize) > b.maxEntrySize { - return fmt.Errorf("%s; got %d bytes, max: %d bytes", physical.ErrValueTooLarge, cmdSize, b.maxEntrySize) + if uint64(cmdSize) > totalLogSizeLimit { + return fmt.Errorf("%s; got %d bytes, max: %d bytes", physical.ErrValueTooLarge, cmdSize, totalLogSizeLimit) } defer metrics.AddSample([]string{"raft-storage", "entry_size"}, float32(cmdSize)) @@ -1891,74 +2175,39 @@ func (l *RaftLock) Value() (bool, string, error) { return true, value, nil } -// sealer implements the snapshot.Sealer interface and is used in the snapshot -// process for encrypting/decrypting the SHASUM file in snapshot archives. -type sealer struct { - access *seal.Access -} - -// Seal encrypts the data with using the seal access object. -func (s sealer) Seal(ctx context.Context, pt []byte) ([]byte, error) { - if s.access == nil { - return nil, errors.New("no seal access available") - } - eblob, err := s.access.Encrypt(ctx, pt, nil) - if err != nil { - return nil, err - } - - return proto.Marshal(eblob) -} - -// Open decrypts the data using the seal access object. -func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) { - if s.access == nil { - return nil, errors.New("no seal access available") +func fileExists(name string) (bool, error) { + _, err := os.Stat(name) + if err == nil { + // File exists! + return true, nil } - - var eblob wrapping.BlobInfo - err := proto.Unmarshal(ct, &eblob) - if err != nil { - return nil, err + if errors.Is(err, os.ErrNotExist) { + return false, nil } - - return s.access.Decrypt(ctx, &eblob, nil) + // We hit some other error trying to stat the file which leaves us in an + // unknown state so we can't proceed. + return false, err } -// boltOptions returns a bolt.Options struct, suitable for passing to -// bolt.Open(), pre-configured with all of our preferred defaults. -func boltOptions(path string) *bolt.Options { - o := &bolt.Options{ - Timeout: 1 * time.Second, - FreelistType: bolt.FreelistMapType, - NoFreelistSync: true, - MmapFlags: getMmapFlags(path), - } - - if os.Getenv("VAULT_RAFT_FREELIST_TYPE") == "array" { - o.FreelistType = bolt.FreelistArrayType +func isRaftLogVerifyCheckpoint(l *raft.Log) bool { + if !bytes.Equal(l.Data, []byte{byte(verifierCheckpointOp)}) { + return false } - if os.Getenv("VAULT_RAFT_FREELIST_SYNC") != "" { - o.NoFreelistSync = false + // Single byte log with that byte value can only be a checkpoint or + // the last byte of a chunked message. 
If it's chunked it will have + // chunking metadata. + if len(l.Extensions) == 0 { + // No metadata, must be a checkpoint on the leader with no + // verifier metadata yet. + return true } - // By default, we want to set InitialMmapSize to 100GB, but only on 64bit platforms. - // Otherwise, we set it to whatever the value of VAULT_RAFT_INITIAL_MMAP_SIZE - // is, assuming it can be parsed as an int. Bolt itself sets this to 0 by default, - // so if users are wanting to turn this off, they can also set it to 0. Setting it - // to a negative value is the same as not setting it at all. - if os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE") == "" { - o.InitialMmapSize = initialMmapSize - } else { - imms, err := strconv.Atoi(os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE")) - - // If there's an error here, it means they passed something that's not convertible to - // a number. Rather than fail startup, just ignore it. - if err == nil && imms > 0 { - o.InitialMmapSize = imms - } + if bytes.HasPrefix(l.Extensions, logVerifierMagicBytes[:]) { + // Has verifier metadata so must be a replicated checkpoint on a follower + return true } - return o + // Must be the last chunk of a chunked object that has chunking meta + return false } diff --git a/physical/raft/raft_autopilot.go b/physical/raft/raft_autopilot.go index 2e62838b4a90..fb17283e663e 100644 --- a/physical/raft/raft_autopilot.go +++ b/physical/raft/raft_autopilot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft @@ -89,6 +89,27 @@ type AutopilotConfig struct { UpgradeVersionTag string `mapstructure:"upgrade_version_tag"` } +func (ac *AutopilotConfig) String() string { + s := "CleanupDeadServers:%t " + + "LastContactThreshold:%s " + + "DeadServerLastContactThreshold:%s " + + "MaxTrailingLogs:%d " + + "MinQuorum:%d " + + "ServerStabilizationTime:%s " + + "DisableUpgradeMigration:%t " + + "RedundancyZoneTag:%s " + + "UpgradeVersionTag:%s" + return fmt.Sprintf(s, ac.CleanupDeadServers, + ac.LastContactThreshold, + ac.DeadServerLastContactThreshold, + ac.MaxTrailingLogs, + ac.MinQuorum, + ac.ServerStabilizationTime, + ac.DisableUpgradeMigration, + ac.RedundancyZoneTag, + ac.UpgradeVersionTag) +} + // Merge combines the supplied config with the receiver. Supplied ones take // priority. func (to *AutopilotConfig) Merge(from *AutopilotConfig) { @@ -190,6 +211,86 @@ type FollowerState struct { RedundancyZone string } +// partialCopy returns a partial copy of the follower state. +// This copy uses the same pointer to the IsDead +// atomic field. We need to do this to ensure that +// an update of the IsDead boolean will still be +// accessible in a copied state. +func (f *FollowerState) partialCopy() *FollowerState { + return &FollowerState{ + AppliedIndex: f.AppliedIndex, + LastHeartbeat: f.LastHeartbeat, + LastTerm: f.LastTerm, + IsDead: f.IsDead, + DesiredSuffrage: f.DesiredSuffrage, + Version: f.Version, + UpgradeVersion: f.UpgradeVersion, + RedundancyZone: f.RedundancyZone, + } +} + +// PersistedFollowerState holds the information that gets persisted to storage +type PersistedFollowerState struct { + Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version"` +} + +type PersistedFollowerStates struct { + l sync.RWMutex + States map[string]PersistedFollowerState +} + +// shouldUpdate checks if the persisted state contains the same servers as the +// current autopilot state. 
If grabLock is true, a read lock is acquired before +// accessing the map +func (p *PersistedFollowerStates) shouldUpdate(state *autopilot.State, grabLock bool) bool { + if grabLock { + p.l.RLock() + defer p.l.RUnlock() + } + if len(state.Servers) != len(p.States) { + return true + } + for id, server := range state.Servers { + persistedServer, found := p.States[string(id)] + if !found { + return true + } + if server.Server.Version != persistedServer.Version || + server.Server.Meta[AutopilotUpgradeVersionTag] != persistedServer.UpgradeVersion { + return true + } + } + return false +} + +// updatePersistedState checks if the persisted state matches the current +// autopilot state. If not, the state is replaced and persisted +func (d *Delegate) updatePersistedState(state *autopilot.State) error { + if !d.persistedState.shouldUpdate(state, true) { + return nil + } + newStates := make(map[string]PersistedFollowerState) + for id, server := range state.Servers { + newStates[string(id)] = PersistedFollowerState{ + Version: server.Server.Version, + UpgradeVersion: server.Server.Meta[AutopilotUpgradeVersionTag], + } + } + d.persistedState.l.Lock() + defer d.persistedState.l.Unlock() + if !d.persistedState.shouldUpdate(state, false) { + return nil + } + d.logger.Debug("updating autopilot persisted state") + err := d.saveStateFn(newStates) + if err != nil { + return err + } + d.persistedState.States = newStates + return nil +} + // EchoRequestUpdate is here to avoid 1) the list of arguments to Update() getting huge 2) an import cycle on the vault package type EchoRequestUpdate struct { NodeID string @@ -215,13 +316,15 @@ func NewFollowerStates() *FollowerStates { } } -// Update the peer information in the follower states. Note that this function runs on the active node. -func (s *FollowerStates) Update(req *EchoRequestUpdate) { +// Update the peer information in the follower states. Note that this function +// runs on the active node. Returns true if a new entry was added, as opposed +// to modifying one already present. +func (s *FollowerStates) Update(req *EchoRequestUpdate) bool { s.l.Lock() defer s.l.Unlock() - state, ok := s.followers[req.NodeID] - if !ok { + state, present := s.followers[req.NodeID] + if !present { state = &FollowerState{ IsDead: atomic.NewBool(false), } @@ -236,6 +339,8 @@ func (s *FollowerStates) Update(req *EchoRequestUpdate) { state.Version = req.SDKVersion state.UpgradeVersion = req.UpgradeVersion state.RedundancyZone = req.RedundancyZone + + return !present } // Clear wipes all the information regarding peers in the follower states. 
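The shouldUpdate/updatePersistedState pair above implements a double-checked update: a cheap comparison under the read lock first, then a re-check under the write lock so that concurrent NotifyState calls don't persist the same change twice. A minimal standalone sketch of the same pattern (names are illustrative, not Vault's):

```
package persist

import "sync"

type store struct {
	mu     sync.RWMutex
	states map[string]string
}

// update persists and adopts next only if it differs from the current state.
func (s *store) update(next map[string]string, save func(map[string]string) error) error {
	// Fast path: compare under the read lock only.
	s.mu.RLock()
	same := equal(s.states, next)
	s.mu.RUnlock()
	if same {
		return nil
	}

	// Slow path: re-check under the write lock, since another goroutine may
	// have persisted the same update between the two lock acquisitions.
	s.mu.Lock()
	defer s.mu.Unlock()
	if equal(s.states, next) {
		return nil
	}
	if err := save(next); err != nil {
		return err // persist first; adopt the new state only on success
	}
	s.states = next
	return nil
}

func equal(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if w, ok := b[k]; !ok || w != v {
			return false
		}
	}
	return true
}
```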
@@ -290,13 +395,17 @@ type Delegate struct { dl sync.RWMutex inflightRemovals map[raft.ServerID]bool emptyVersionLogs map[raft.ServerID]struct{} + persistedState *PersistedFollowerStates + saveStateFn func(p map[string]PersistedFollowerState) error } -func newDelegate(b *RaftBackend) *Delegate { +func NewDelegate(b *RaftBackend, persistedStates map[string]PersistedFollowerState, savePersistedStates func(p map[string]PersistedFollowerState) error) *Delegate { return &Delegate{ RaftBackend: b, inflightRemovals: make(map[raft.ServerID]bool), emptyVersionLogs: make(map[raft.ServerID]struct{}), + persistedState: &PersistedFollowerStates{States: persistedStates}, + saveStateFn: savePersistedStates, } } @@ -340,6 +449,13 @@ func (d *Delegate) NotifyState(state *autopilot.State) { metrics.SetGaugeWithLabels([]string{"autopilot", "node", "healthy"}, 0, labels) } } + + // if there is a change in versions or membership, we should update + // our persisted state + err := d.updatePersistedState(state) + if err != nil { + d.logger.Error("failed to persist autopilot state", "error", err) + } } } @@ -385,6 +501,7 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { return nil } + apServerStates := d.autopilot.GetState().Servers servers := future.Configuration().Servers serverIDs := make([]string, 0, len(servers)) for _, server := range servers { @@ -394,6 +511,9 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { d.followerStates.l.RLock() defer d.followerStates.l.RUnlock() + d.persistedState.l.RLock() + defer d.persistedState.l.RUnlock() + ret := make(map[raft.ServerID]*autopilot.Server) for id, state := range d.followerStates.followers { // If the server is not in raft configuration, even if we received a follower @@ -402,22 +522,14 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { continue } - // If version isn't found in the state, fake it using the version from the leader so that autopilot - // doesn't demote the node to a non-voter, just because of a missed heartbeat. currentServerID := raft.ServerID(id) - followerVersion := state.Version - leaderVersion := d.effectiveSDKVersion - d.dl.Lock() - if followerVersion == "" { - if _, ok := d.emptyVersionLogs[currentServerID]; !ok { - d.logger.Trace("received empty Vault version in heartbeat state. faking it with the leader version for now", "id", id, "leader version", leaderVersion) - d.emptyVersionLogs[currentServerID] = struct{}{} - } - followerVersion = leaderVersion - } else { - delete(d.emptyVersionLogs, currentServerID) + followerVersion, upgradeVersion := d.determineFollowerVersions(id, state) + if state.UpgradeVersion != upgradeVersion { + // we only have a read lock on state, so we can't modify it + // safely. Instead, copy it to override the upgrade version + state = state.partialCopy() + state.UpgradeVersion = upgradeVersion } - d.dl.Unlock() server := &autopilot.Server{ ID: currentServerID, @@ -428,6 +540,19 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { Ext: d.autopilotServerExt(state), } + // As KnownServers is a delegate called by autopilot let's check if we already + // had this data in the correct format and use it. If we don't (which sounds a + // bit sad, unless this ISN'T a voter) then as a fail-safe, let's try what we've + // done elsewhere in code to check the desired suffrage and manually set NodeType + // based on whether that's a voter or not. 
If we don't do either of these
+ // things, NodeType isn't set, which means technically it's not a voter. A
+ // server shouldn't normally end up in this state while still being a voter.
+ if apServerState, found := apServerStates[raft.ServerID(id)]; found && apServerState.Server.NodeType != "" {
+ server.NodeType = apServerState.Server.NodeType
+ } else if state.DesiredSuffrage == "voter" {
+ server.NodeType = autopilot.NodeVoter
+ }
+
switch state.IsDead.Load() {
case true:
d.logger.Debug("informing autopilot that the node left", "id", id)
@@ -445,8 +570,9 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server {
Name: d.localID,
RaftVersion: raft.ProtocolVersionMax,
NodeStatus: autopilot.NodeAlive,
+ NodeType: autopilot.NodeVoter, // The leader must be a voter
Meta: d.meta(&FollowerState{
- UpgradeVersion: d.EffectiveVersion(),
+ UpgradeVersion: d.UpgradeVersion(),
RedundancyZone: d.RedundancyZone(),
}),
Version: d.effectiveSDKVersion,
@@ -457,6 +583,54 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server {
return ret
}
+
+// determineFollowerVersions uses the following logic:
+// - if the version and upgrade version are present in the follower state,
+// return those.
+// - if the persisted states map is empty, it means that persisted states
+// don't exist. This happens on an upgrade to 1.18. Use the leader node's
+// versions.
+// - otherwise, use the versions in the persisted states map.
+//
+// This function must be called with a lock on d.followerStates
+// and d.persistedState.
+func (d *Delegate) determineFollowerVersions(id string, state *FollowerState) (version string, upgradeVersion string) {
+ // if we have both versions in follower states, use those
+ if state.Version != "" && state.UpgradeVersion != "" {
+ return state.Version, state.UpgradeVersion
+ }
+
+ version = state.Version
+ upgradeVersion = state.UpgradeVersion
+
+ // the persistedState map should only be empty on upgrades
+ // to 1.18.x. This is the only case where we'll stub with
+ // the leader's versions
+ if len(d.persistedState.States) == 0 {
+ if version == "" {
+ version = d.effectiveSDKVersion
+ d.logger.Debug("no persisted state, using leader version", "id", id, "version", version)
+ }
+ if upgradeVersion == "" {
+ upgradeVersion = d.upgradeVersion
+ d.logger.Debug("no persisted state, using leader upgrade version", "id", id, "upgrade_version", upgradeVersion)
+ }
+ return version, upgradeVersion
+ }
+
+ // Use the persistedStates map to fill in the sdk
+ // and upgrade versions
+ pState := d.persistedState.States[id]
+ if version == "" {
+ version = pState.Version
+ d.logger.Debug("using follower version from persisted states", "id", id, "version", version)
+ }
+ if upgradeVersion == "" {
+ upgradeVersion = pState.UpgradeVersion
+ d.logger.Debug("using upgrade version from persisted states", "id", id, "upgrade_version", upgradeVersion)
+ }
+ return version, upgradeVersion
+}
+
// RemoveFailedServer is called by the autopilot library when it desires a node
// to be removed from the raft configuration. This function removes the node
// from the raft cluster and stops tracking its information in follower states.
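Distilled, determineFollowerVersions resolves each version with a three-step precedence: the live heartbeat value, then the leader's own version when nothing has ever been persisted (the fresh-upgrade case), then the persisted value. A standalone sketch of that decision, with illustrative names:

```
package persist

// resolveVersion mirrors the precedence used above: prefer the follower's
// heartbeat value, fall back to the leader only when no state has ever been
// persisted, and otherwise use the persisted value.
func resolveVersion(heartbeat, leader string, persisted map[string]string, id string) string {
	if heartbeat != "" {
		return heartbeat
	}
	if len(persisted) == 0 {
		// No persisted state at all: first run after an upgrade, so stub in
		// the leader's version rather than leaving it empty.
		return leader
	}
	// May still be empty if this follower was never recorded.
	return persisted[id]
}
```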
@@ -592,7 +766,8 @@ func (b *RaftBackend) StopAutopilot() { if b.autopilot == nil { return } - b.autopilot.Stop() + stopCh := b.autopilot.Stop() + <-stopCh b.autopilot = nil b.followerHeartbeatTicker.Stop() } @@ -687,7 +862,7 @@ func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) { str := string(raw) if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' { // quoted string - dur, err = time.ParseDuration(str[1 : len(str)-1]) + dur, err = parseutil.ParseDurationSecond(str[1 : len(str)-1]) if err != nil { return err } @@ -789,11 +964,19 @@ func (b *RaftBackend) DisableAutopilot() { b.l.Unlock() } +type AutopilotSetupOptions struct { + StorageConfig *AutopilotConfig + FollowerStates *FollowerStates + Disable bool + PersistedStates map[string]PersistedFollowerState + SavePersistedStates func(p map[string]PersistedFollowerState) error +} + // SetupAutopilot gathers information required to configure autopilot and starts // it. If autopilot is disabled, this function does nothing. -func (b *RaftBackend) SetupAutopilot(ctx context.Context, storageConfig *AutopilotConfig, followerStates *FollowerStates, disable bool) { +func (b *RaftBackend) SetupAutopilot(ctx context.Context, opts *AutopilotSetupOptions) { b.l.Lock() - if disable || os.Getenv("VAULT_RAFT_AUTOPILOT_DISABLE") != "" { + if opts.Disable || os.Getenv("VAULT_RAFT_AUTOPILOT_DISABLE") != "" { b.disableAutopilot = true } @@ -807,7 +990,9 @@ func (b *RaftBackend) SetupAutopilot(ctx context.Context, storageConfig *Autopil b.autopilotConfig = b.defaultAutopilotConfig() // Merge the setting provided over the API - b.autopilotConfig.Merge(storageConfig) + b.autopilotConfig.Merge(opts.StorageConfig) + + infoArgs := []interface{}{"config", b.autopilotConfig} // Create the autopilot instance options := []autopilot.Option{ @@ -816,17 +1001,19 @@ func (b *RaftBackend) SetupAutopilot(ctx context.Context, storageConfig *Autopil } if b.autopilotReconcileInterval != 0 { options = append(options, autopilot.WithReconcileInterval(b.autopilotReconcileInterval)) + infoArgs = append(infoArgs, []interface{}{"reconcile_interval", b.autopilotReconcileInterval}...) } if b.autopilotUpdateInterval != 0 { options = append(options, autopilot.WithUpdateInterval(b.autopilotUpdateInterval)) + infoArgs = append(infoArgs, []interface{}{"update_interval", b.autopilotUpdateInterval}...) } - b.autopilot = autopilot.New(b.raft, newDelegate(b), options...) - b.followerStates = followerStates + delegate := NewDelegate(b, opts.PersistedStates, opts.SavePersistedStates) + b.autopilot = autopilot.New(b.raft, delegate, options...) + b.followerStates = opts.FollowerStates b.followerHeartbeatTicker = time.NewTicker(1 * time.Second) - b.l.Unlock() - b.logger.Info("starting autopilot", "config", b.autopilotConfig, "reconcile_interval", b.autopilotReconcileInterval) + b.logger.Info("starting autopilot", infoArgs...) b.autopilot.Start(ctx) go b.startFollowerHeartbeatTracker() diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go index 73d0ce32c543..112764aa1974 100644 --- a/physical/raft/raft_test.go +++ b/physical/raft/raft_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft @@ -11,7 +11,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -21,15 +20,63 @@ import ( "github.com/go-test/deep" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/base62" "github.com/hashicorp/go-uuid" "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" - bolt "go.etcd.io/bbolt" + "github.com/stretchr/testify/require" ) +func testBothRaftBackends(t *testing.T, f func(t *testing.T, raftWALValue string)) { + t.Helper() + + testCases := []struct { + name string + useWAL string + }{ + { + name: "use wal", + useWAL: "true", + }, + { + name: "use boltdb", + useWAL: "false", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // we can't use t.Parallel() here because some raft tests manipulate package level variables + f(t, tc.useWAL) + }) + } +} + +func testBothRaftBackendsBenchmark(b *testing.B, f func(raftWALValue string)) { + testCases := []struct { + name string + useWAL string + }{ + { + name: "use wal", + useWAL: "true", + }, + { + name: "use boltdb", + useWAL: "false", + }, + } + + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + f(tc.useWAL) + }) + } +} + func connectPeers(nodes ...*RaftBackend) { for _, node := range nodes { for _, peer := range nodes { @@ -143,7 +190,6 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { return nil }) - if err != nil { t.Fatal(err) } @@ -156,605 +202,789 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { } func TestRaft_Backend(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - physical.ExerciseBackend(t, b) + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseBackend(t, b) + }) } -func TestRaft_ParseAutopilotUpgradeVersion(t *testing.T) { - raftDir, err := ioutil.TempDir("", "vault-raft-") +// TestRaft_SwitchFromBoltDBToRaftWal is testing that we don't use raft-wal, even if configured to do so, +// if there is an existing raft.db file on disk (meaning BoltDB was previously in use). +func TestRaft_SwitchFromBoltDBToRaftWal(t *testing.T) { + tmpDir := t.TempDir() + + // configured to use raft-wal + conf := map[string]string{ + "path": tmpDir, + "trailing_logs": "100", + "raft_wal": "true", + } + + // raftBaseDir will end up looking like $tmpDir/raft + raftBaseDir := filepath.Join(tmpDir, raftState) + err := os.MkdirAll(raftBaseDir, 0o777) if err != nil { t.Fatal(err) } - defer os.RemoveAll(raftDir) - conf := map[string]string{ - "path": raftDir, - "node_id": "abc123", - "autopilot_upgrade_version": "hahano", + // create a bogus $tmpDir/raft/raft.db file + db, err := bolt.Open(filepath.Join(raftBaseDir, "raft.db"), 0o777, nil) + if err != nil { + t.Fatal(err) + } + err = db.Close() + if err != nil { + t.Fatal(err) } _, err = NewRaftBackend(conf, hclog.NewNullLogger()) - if err == nil { - t.Fatal("expected an error but got none") + if err != nil { + t.Fatal(err) } - if !strings.Contains(err.Error(), "does not parse") { - t.Fatal("expected an error about unparseable versions but got none") + // Check to see if $tmpDir/raft/raft-wal exists. 
It should not, because we only create that if raft-wal is in use. + // And since raft.db already existed, we should've skipped all the raft-wal setup code. + raftWalExists, err := fileExists(filepath.Join(raftBaseDir, raftWalDir)) + if err != nil { + t.Fatal(err) } -} -func TestRaft_ParseNonVoter(t *testing.T) { - p := func(s string) *string { - return &s + if raftWalExists { + t.Fatal("expected raft-wal dir to not exist, but it does") } +} - for _, retryJoinConf := range []string{"", "not-empty"} { - t.Run(retryJoinConf, func(t *testing.T) { - for name, tc := range map[string]struct { - envValue *string - configValue *string - expectNonVoter bool - invalidNonVoterValue bool - }{ - "valid false": {nil, p("false"), false, false}, - "valid true": {nil, p("true"), true, false}, - "invalid empty": {nil, p(""), false, true}, - "invalid truthy": {nil, p("no"), false, true}, - "invalid": {nil, p("totallywrong"), false, true}, - "valid env false": {p("false"), nil, true, false}, - "valid env true": {p("true"), nil, true, false}, - "valid env not boolean": {p("anything"), nil, true, false}, - "valid env empty": {p(""), nil, false, false}, - "neither set, default false": {nil, nil, false, false}, - "both set, env preferred": {p("true"), p("false"), true, false}, - } { - t.Run(name, func(t *testing.T) { - if tc.envValue != nil { - t.Setenv(EnvVaultRaftNonVoter, *tc.envValue) - } - raftDir, err := ioutil.TempDir("", "vault-raft-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(raftDir) - - conf := map[string]string{ - "path": raftDir, - "node_id": "abc123", - "retry_join": retryJoinConf, - } - if tc.configValue != nil { - conf[raftNonVoterConfigKey] = *tc.configValue - } - - backend, err := NewRaftBackend(conf, hclog.NewNullLogger()) - switch { - case tc.invalidNonVoterValue || (retryJoinConf == "" && tc.expectNonVoter): - if err == nil { - t.Fatal("expected an error but got none") - } - default: - if err != nil { - t.Fatalf("expected no error but got: %s", err) - } - - raftBackend := backend.(*RaftBackend) - if tc.expectNonVoter != raftBackend.NonVoter() { - t.Fatalf("expected %s %v but got %v", raftNonVoterConfigKey, tc.expectNonVoter, raftBackend.NonVoter()) - } - } - }) - } - }) - } +// TestRaft_VerifierEnabled is not checking to ensure that the verifier works correctly - the verifier has +// its own unit tests for that. What we're checking for here is that we've plumbed everything through correctly, +// i.e. we can stand up a raft cluster with the verifier enabled, do a bunch of raft things, let the verifier +// do its thing, and nothing blows up. 
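Before the verifier test below, one note on the BoltDB-precedence behavior verified just above: the rule is simply that an existing `raft.db` wins over `raft_wal = "true"`. Here is a hedged sketch of that decision; the helper is illustrative only, since the real check lives inside `NewRaftBackend` and may differ in detail:

```go
package raft

import (
	"os"
	"path/filepath"
)

// shouldUseRaftWAL is an illustrative helper, not the backend's actual code.
// It captures the rule TestRaft_SwitchFromBoltDBToRaftWal verifies: a
// pre-existing raft.db (BoltDB) always overrides raft_wal=true, so an
// existing cluster never silently switches storage engines.
func shouldUseRaftWAL(raftBaseDir string, confWantsWAL bool) (bool, error) {
	_, err := os.Stat(filepath.Join(raftBaseDir, "raft.db"))
	switch {
	case err == nil:
		return false, nil // BoltDB file present: keep using BoltDB
	case os.IsNotExist(err):
		return confWantsWAL, nil // fresh node: honor the configuration
	default:
		return false, err
	}
}
```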
+func TestRaft_VerifierEnabled(t *testing.T) { + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + "raft_log_verifier_enabled": "true", + } + + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseBackend(t, b) + + err := b.applyVerifierCheckpoint() + if err != nil { + t.Fatal(err) + } + physical.ExerciseBackend(t, b) + }) } func TestRaft_Backend_LargeKey(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() - key, err := base62.Random(bolt.MaxKeySize + 1) - if err != nil { - t.Fatal(err) - } - entry := &physical.Entry{Key: key, Value: []byte(key)} + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - err = b.Put(context.Background(), entry) - if err == nil { - t.Fatal("expected error for put entry") - } + b, _ := GetRaftWithConfig(t, true, true, conf) + key, err := base62.Random(bolt.MaxKeySize + 1) + if err != nil { + t.Fatal(err) + } + entry := &physical.Entry{Key: key, Value: []byte(key)} - if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) - } + err = b.Put(context.Background(), entry) + if err == nil { + t.Fatal("expected error for put entry") + } - out, err := b.Get(context.Background(), entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) + } + + out, err := b.Get(context.Background(), entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } func TestRaft_Backend_LargeValue(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() - value := make([]byte, defaultMaxEntrySize+1) - rand.Read(value) - entry := &physical.Entry{Key: "foo", Value: value} + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - err := b.Put(context.Background(), entry) - if err == nil { - t.Fatal("expected error for put entry") - } + b, _ := GetRaftWithConfig(t, true, true, conf) + value := make([]byte, defaultMaxEntrySize+1) + rand.Read(value) + entry := &physical.Entry{Key: "foo", Value: value} - if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) - } + err := b.Put(context.Background(), entry) + if err == nil { + t.Fatal("expected error for put entry") + } - out, err := b.Get(context.Background(), entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) + } + + out, err := b.Get(context.Background(), entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } // TestRaft_TransactionalBackend_GetTransactions tests that passing a slice of 
transactions to the // raft backend will populate values for any transactions that are Get operations. func TestRaft_TransactionalBackend_GetTransactions(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } + + b, _ := GetRaftWithConfig(t, true, true, conf) + ctx := context.Background() + txns := make([]*physical.TxnEntry, 0) - ctx := context.Background() - txns := make([]*physical.TxnEntry, 0) + // Add some seed values to our FSM, and prepare our slice of transactions at the same time + for i := 0; i < 5; i++ { + key := fmt.Sprintf("foo/%d", i) + err := b.fsm.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) + if err != nil { + t.Fatal(err) + } - // Add some seed values to our FSM, and prepare our slice of transactions at the same time - for i := 0; i < 5; i++ { - key := fmt.Sprintf("foo/%d", i) - err := b.fsm.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) - if err != nil { - t.Fatal(err) + txns = append(txns, &physical.TxnEntry{ + Operation: physical.GetOperation, + Entry: &physical.Entry{ + Key: key, + }, + }) } - txns = append(txns, &physical.TxnEntry{ - Operation: physical.GetOperation, - Entry: &physical.Entry{ - Key: key, - }, - }) - } + // Add some additional transactions, so we have a mix of operations + for i := 0; i < 10; i++ { + txnEntry := &physical.TxnEntry{ + Entry: &physical.Entry{ + Key: fmt.Sprintf("lol-%d", i), + }, + } - // Add some additional transactions, so we have a mix of operations - for i := 0; i < 10; i++ { - txnEntry := &physical.TxnEntry{ - Entry: &physical.Entry{ - Key: fmt.Sprintf("lol-%d", i), - }, - } + if i%2 == 0 { + txnEntry.Operation = physical.PutOperation + txnEntry.Entry.Value = []byte("lol") + } else { + txnEntry.Operation = physical.DeleteOperation + } - if i%2 == 0 { - txnEntry.Operation = physical.PutOperation - txnEntry.Entry.Value = []byte("lol") - } else { - txnEntry.Operation = physical.DeleteOperation + txns = append(txns, txnEntry) } - txns = append(txns, txnEntry) - } - - err := b.Transaction(ctx, txns) - if err != nil { - t.Fatal(err) - } + err := b.Transaction(ctx, txns) + if err != nil { + t.Fatal(err) + } - // Check that our Get operations were populated with their values - for i, txn := range txns { - if txn.Operation == physical.GetOperation { - val := []byte(fmt.Sprintf("value-%d", i)) - if !bytes.Equal(val, txn.Entry.Value) { - t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) + // Check that our Get operations were populated with their values + for i, txn := range txns { + if txn.Operation == physical.GetOperation { + val := []byte(fmt.Sprintf("value-%d", i)) + if !bytes.Equal(val, txn.Entry.Value) { + t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) + } } } - } + }) } func TestRaft_TransactionalBackend_LargeKey(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - value := make([]byte, defaultMaxEntrySize+1) - rand.Read(value) + b, _ := GetRaftWithConfig(t, true, true, conf) + value := make([]byte, defaultMaxEntrySize+1) + rand.Read(value) - key, err := 
base62.Random(bolt.MaxKeySize + 1) - if err != nil { - t.Fatal(err) - } - txns := []*physical.TxnEntry{ - { - Operation: physical.PutOperation, - Entry: &physical.Entry{ - Key: key, - Value: []byte(key), + key, err := base62.Random(bolt.MaxKeySize + 1) + if err != nil { + t.Fatal(err) + } + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: key, + Value: []byte(key), + }, }, - }, - } + } - err = b.Transaction(context.Background(), txns) - if err == nil { - t.Fatal("expected error for transactions") - } + err = b.Transaction(context.Background(), txns) + if err == nil { + t.Fatal("expected error for transactions") + } - if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) - } + if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) + } - out, err := b.Get(context.Background(), txns[0].Entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + out, err := b.Get(context.Background(), txns[0].Entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } func TestRaft_TransactionalBackend_LargeValue(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) - - value := make([]byte, defaultMaxEntrySize+1) - rand.Read(value) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - txns := []*physical.TxnEntry{ - { - Operation: physical.PutOperation, - Entry: &physical.Entry{ - Key: "foo", - Value: value, + b, _ := GetRaftWithConfig(t, true, true, conf) + value := make([]byte, defaultMaxEntrySize+1) + rand.Read(value) + + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: "foo", + Value: value, + }, }, - }, - } + } - err := b.Transaction(context.Background(), txns) - if err == nil { - t.Fatal("expected error for transactions") - } + err := b.Transaction(context.Background(), txns) + if err == nil { + t.Fatal("expected error for transactions") + } - if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) - } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) + } - out, err := b.Get(context.Background(), txns[0].Entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + out, err := b.Get(context.Background(), txns[0].Entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } func TestRaft_Backend_ListPrefix(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - physical.ExerciseBackend_ListPrefix(t, b) + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseBackend_ListPrefix(t, b) + }) } func 
TestRaft_TransactionalBackend(t *testing.T) { - b, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - physical.ExerciseTransactionalBackend(t, b) + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseTransactionalBackend(t, b) + }) } func TestRaft_HABackend(t *testing.T) { t.Skip() - raft, dir := GetRaft(t, true, true) - defer os.RemoveAll(dir) - raft2, dir2 := GetRaft(t, false, true) - defer os.RemoveAll(dir2) + raft1, _ := GetRaft(t, true, true) + raft2, _ := GetRaft(t, false, true) // Add raft2 to the cluster - addPeer(t, raft, raft2) - - physical.ExerciseHABackend(t, raft, raft2) + addPeer(t, raft1, raft2) + physical.ExerciseHABackend(t, raft1, raft2) } func TestRaft_Backend_ThreeNode(t *testing.T) { - raft1, dir := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) - defer os.RemoveAll(dir) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add raft2 to the cluster - addPeer(t, raft1, raft2) + raft1, _ := GetRaftWithConfig(t, true, true, conf) + raft2, _ := GetRaftWithConfig(t, false, true, conf) + raft3, _ := GetRaftWithConfig(t, false, true, conf) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) - // Add raft3 to the cluster - addPeer(t, raft1, raft3) + // Add raft3 to the cluster + addPeer(t, raft1, raft3) - physical.ExerciseBackend(t, raft1) + physical.ExerciseBackend(t, raft1) - time.Sleep(10 * time.Second) - // Make sure all stores are the same - compareFSMs(t, raft1.fsm, raft2.fsm) - compareFSMs(t, raft1.fsm, raft3.fsm) + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) + }) } func TestRaft_GetOfflineConfig(t *testing.T) { - // Create 3 raft nodes - raft1, dir1 := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) - defer os.RemoveAll(dir1) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) - - // Add them all to the cluster - addPeer(t, raft1, raft2) - addPeer(t, raft1, raft3) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + config := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add some data into the FSM - physical.ExerciseBackend(t, raft1) + // Create 3 raft nodes + raft1, _ := GetRaftWithConfig(t, true, true, config) + raft2, _ := GetRaftWithConfig(t, false, true, config) + raft3, _ := GetRaftWithConfig(t, false, true, config) - time.Sleep(10 * time.Second) + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) - // Spin down the raft cluster and check that GetConfigurationOffline - // returns 3 voters - raft3.TeardownCluster(nil) - raft2.TeardownCluster(nil) - raft1.TeardownCluster(nil) + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) - conf, err := raft1.GetConfigurationOffline() - if err != nil { - t.Fatal(err) - } - if len(conf.Servers) != 3 { - t.Fatalf("three raft nodes existed but we only see %d", len(conf.Servers)) - } - for _, s := range conf.Servers { - if s.Voter != true { - t.Fatalf("one of the nodes is not a voter") + time.Sleep(10 * time.Second) + + // Spin down the raft 
cluster and check that GetConfigurationOffline + // returns 3 voters + err := raft3.TeardownCluster(nil) + if err != nil { + t.Fatal(err) } - } + err = raft2.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft1.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + + conf, err := raft1.GetConfigurationOffline() + if err != nil { + t.Fatal(err) + } + if len(conf.Servers) != 3 { + t.Fatalf("three raft nodes existed but we only see %d", len(conf.Servers)) + } + for _, s := range conf.Servers { + if s.Voter != true { + t.Fatalf("one of the nodes is not a voter") + } + } + }) } func TestRaft_Recovery(t *testing.T) { - // Create 4 raft nodes - raft1, dir1 := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) - raft4, dir4 := GetRaft(t, false, true) - defer os.RemoveAll(dir1) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) - defer os.RemoveAll(dir4) - - // Add them all to the cluster - addPeer(t, raft1, raft2) - addPeer(t, raft1, raft3) - addPeer(t, raft1, raft4) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add some data into the FSM - physical.ExerciseBackend(t, raft1) + // Create 4 raft nodes + raft1, dir1 := GetRaftWithConfig(t, true, true, conf) + raft2, dir2 := GetRaftWithConfig(t, false, true, conf) + raft3, _ := GetRaftWithConfig(t, false, true, conf) + raft4, dir4 := GetRaftWithConfig(t, false, true, conf) - time.Sleep(10 * time.Second) + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) + addPeer(t, raft1, raft4) - // Bring down all nodes - raft1.TeardownCluster(nil) - raft2.TeardownCluster(nil) - raft3.TeardownCluster(nil) - raft4.TeardownCluster(nil) + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) - // Prepare peers.json - type RecoveryPeer struct { - ID string `json:"id"` - Address string `json:"address"` - NonVoter bool `json:"non_voter"` - } + time.Sleep(10 * time.Second) - // Leave out node 1 during recovery - peersList := make([]*RecoveryPeer, 0, 3) - peersList = append(peersList, &RecoveryPeer{ - ID: raft1.NodeID(), - Address: raft1.NodeID(), - NonVoter: false, - }) - peersList = append(peersList, &RecoveryPeer{ - ID: raft2.NodeID(), - Address: raft2.NodeID(), - NonVoter: false, - }) - peersList = append(peersList, &RecoveryPeer{ - ID: raft4.NodeID(), - Address: raft4.NodeID(), - NonVoter: false, - }) + // Bring down all nodes + err := raft1.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft2.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft3.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft4.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } - peersJSONBytes, err := jsonutil.EncodeJSON(peersList) - if err != nil { - t.Fatal(err) - } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0o644) - if err != nil { - t.Fatal(err) - } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0o644) - if err != nil { - t.Fatal(err) - } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0o644) - if err != nil { - t.Fatal(err) - } + // Prepare peers.json + type RecoveryPeer struct { + ID string `json:"id"` + Address string `json:"address"` + NonVoter bool `json:"non_voter"` + } + + // Leave out node 1 during recovery + 
peersList := make([]*RecoveryPeer, 0, 3) + peersList = append(peersList, &RecoveryPeer{ + ID: raft1.NodeID(), + Address: raft1.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft2.NodeID(), + Address: raft2.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft4.NodeID(), + Address: raft4.NodeID(), + NonVoter: false, + }) - // Bring up the nodes again - raft1.SetupCluster(context.Background(), SetupOpts{}) - raft2.SetupCluster(context.Background(), SetupOpts{}) - raft4.SetupCluster(context.Background(), SetupOpts{}) + peersJSONBytes, err := jsonutil.EncodeJSON(peersList) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } - peers, err := raft1.Peers(context.Background()) - if err != nil { - t.Fatal(err) - } - if len(peers) != 3 { - t.Fatalf("failed to recover the cluster") - } + // Bring up the nodes again + err = raft1.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + err = raft2.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + err = raft4.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } - time.Sleep(10 * time.Second) + peers, err := raft1.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 3 { + t.Fatalf("failed to recover the cluster") + } - compareFSMs(t, raft1.fsm, raft2.fsm) - compareFSMs(t, raft1.fsm, raft4.fsm) + time.Sleep(10 * time.Second) + + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft4.fsm) + }) } func TestRaft_TransactionalBackend_ThreeNode(t *testing.T) { - raft1, dir := GetRaft(t, true, true) - raft2, dir2 := GetRaft(t, false, true) - raft3, dir3 := GetRaft(t, false, true) - defer os.RemoveAll(dir) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add raft2 to the cluster - addPeer(t, raft1, raft2) + raft1, _ := GetRaftWithConfig(t, true, true, conf) + raft2, _ := GetRaftWithConfig(t, false, true, conf) + raft3, _ := GetRaftWithConfig(t, false, true, conf) - // Add raft3 to the cluster - addPeer(t, raft1, raft3) + // Add raft2 to the cluster + addPeer(t, raft1, raft2) - physical.ExerciseTransactionalBackend(t, raft1) + // Add raft3 to the cluster + addPeer(t, raft1, raft3) - time.Sleep(10 * time.Second) - // Make sure all stores are the same - compareFSMs(t, raft1.fsm, raft2.fsm) - compareFSMs(t, raft1.fsm, raft3.fsm) + physical.ExerciseTransactionalBackend(t, raft1) + + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) + }) } -func TestRaft_Backend_Performance(t *testing.T) { - b, dir := GetRaft(t, true, false) - defer os.RemoveAll(dir) +// TestRaft_TransactionalLimitsEnvOverride ensures the ENV var overrides for +// transaction size limits are plumbed through as expected. 
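The test below pins down a parse-or-fall-back behavior that can be summarized in a few lines. Here is a sketch under the assumption, consistent with the `wantLog` strings in the test, that a batch-limit environment variable must parse as an integer greater than zero, and otherwise the default is kept with a warning; the helper name is illustrative, not the backend's actual function:

```go
package raft

import "strconv"

// batchLimitFromEnv is an illustrative reduction of the behavior asserted by
// TestRaft_TransactionalLimitsEnvOverride: an unset variable keeps the default
// silently, while an unparseable or non-positive value keeps the default and
// logs a warning.
func batchLimitFromEnv(raw string, def int, warn func(msg string)) int {
	if raw == "" {
		return def // unset: use the default, no warning
	}
	n, err := strconv.Atoi(raw)
	if err != nil || n <= 0 {
		warn("failed to parse value as an integer > 0; using default")
		return def
	}
	return n
}
```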
+func TestRaft_TransactionalLimitsEnvOverride(t *testing.T) { + tc := []struct { + name string + envEntries string + envSize string + wantEntries int + wantSize int + wantLog string + }{ + { + name: "defaults", + wantEntries: defaultMaxBatchEntries, + wantSize: defaultMaxBatchSize, + }, + { + name: "valid env", + envEntries: "123", + envSize: "456", + wantEntries: 123, + wantSize: 456, + }, + { + name: "invalid entries", + envEntries: "not-a-number", + envSize: "100", + wantEntries: defaultMaxBatchEntries, + wantSize: 100, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_ENTRIES", + }, + { + name: "invalid size", + envEntries: "100", + envSize: "asdasdsasd", + wantEntries: 100, + wantSize: defaultMaxBatchSize, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_SIZE_BYTES", + }, + { + name: "zero entries", + envEntries: "0", + envSize: "100", + wantEntries: defaultMaxBatchEntries, + wantSize: 100, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_ENTRIES as an integer > 0", + }, + { + name: "zero size", + envEntries: "100", + envSize: "0", + wantEntries: 100, + wantSize: defaultMaxBatchSize, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_SIZE_BYTES as an integer > 0", + }, + } - defaultConfig := raft.DefaultConfig() + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + // Set the env vars within this test + if tt.envEntries != "" { + t.Setenv(EnvVaultRaftMaxBatchEntries, tt.envEntries) + } + if tt.envSize != "" { + t.Setenv(EnvVaultRaftMaxBatchSizeBytes, tt.envSize) + } - localConfig := raft.DefaultConfig() - b.applyConfigSettings(localConfig) + var logBuf bytes.Buffer + raft1, dir := GetRaftWithLogOutput(t, false, true, &logBuf) + defer os.RemoveAll(dir) - if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } + e, s := raft1.TransactionLimits() - b.conf = map[string]string{ - "path": dir, - "performance_multiplier": "5", + require.Equal(t, tt.wantEntries, e) + require.Equal(t, tt.wantSize, s) + if tt.wantLog != "" { + require.Contains(t, logBuf.String(), tt.wantLog) + } + }) } +} - localConfig = raft.DefaultConfig() - b.applyConfigSettings(localConfig) +func TestRaft_Backend_Performance(t *testing.T) { + t.Parallel() + testBothRaftBackends(t, func(t *testing.T, useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } + b, dir := GetRaftWithConfig(t, true, true, conf) - b.conf = map[string]string{ - "path": dir, - "performance_multiplier": "1", - } + defaultConfig := raft.DefaultConfig() + localConfig := raft.DefaultConfig() + err := b.applyConfigSettings(localConfig) + if err != nil { + t.Fatal(err) + } - localConfig = raft.DefaultConfig() - b.applyConfigSettings(localConfig) + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: 
%v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } - if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { - t.Fatalf("bad config: %v", localConfig) - } -} + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "5", + } -func BenchmarkDB_Puts(b *testing.B) { - raft, dir := GetRaft(b, true, false) - defer os.RemoveAll(dir) - raft2, dir2 := GetRaft(b, true, false) - defer os.RemoveAll(dir2) + localConfig = raft.DefaultConfig() + err = b.applyConfigSettings(localConfig) + if err != nil { + t.Fatal(err) + } - bench := func(b *testing.B, s physical.Backend, dataSize int) { - data, err := uuid.GenerateRandomBytes(dataSize) + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "1", + } + + localConfig = raft.DefaultConfig() + err = b.applyConfigSettings(localConfig) if err != nil { - b.Fatal(err) + t.Fatal(err) } - ctx := context.Background() - pe := &physical.Entry{ - Value: data, + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout { + t.Fatalf("bad config: %v", localConfig) } - testName := b.Name() + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { + t.Fatalf("bad config: %v", localConfig) + } + }) +} - b.ResetTimer() - for i := 0; i < b.N; i++ { - pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) - err := s.Put(ctx, pe) +func BenchmarkDB_Puts(b *testing.B) { + testBothRaftBackendsBenchmark(b, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } + + raft1, _ := GetRaftWithConfig(b, true, false, conf) + raft2, _ := GetRaftWithConfig(b, true, false, conf) + + bench := func(b *testing.B, s physical.Backend, dataSize int) { + data, err := uuid.GenerateRandomBytes(dataSize) if err != nil { b.Fatal(err) } + + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + err := s.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } } - } - b.Run("256b", func(b *testing.B) { bench(b, raft, 256) }) - b.Run("256kb", func(b *testing.B) { bench(b, raft2, 256*1024) }) + b.Run("256b", func(b *testing.B) { bench(b, raft1, 256) }) + b.Run("256kb", func(b *testing.B) { bench(b, raft2, 256*1024) }) + }) } func BenchmarkDB_Snapshot(b *testing.B) { - raft, dir := GetRaft(b, true, false) - defer os.RemoveAll(dir) - - data, err := uuid.GenerateRandomBytes(256 * 1024) - if err != nil { - b.Fatal(err) - } - - ctx := context.Background() - pe := &physical.Entry{ - Value: data, - } - testName := b.Name() + testBothRaftBackendsBenchmark(b, func(useRaftWal string) { + conf := map[string]string{ + 
"trailing_logs": "100", + "raft_wal": useRaftWal, + } - for i := 0; i < 100; i++ { - pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) - err = raft.Put(ctx, pe) + raft1, _ := GetRaftWithConfig(b, true, false, conf) + data, err := uuid.GenerateRandomBytes(256 * 1024) if err != nil { b.Fatal(err) } - } - bench := func(b *testing.B, s *FSM) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + for i := 0; i < 100; i++ { pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) - s.writeTo(ctx, discardCloser{Writer: ioutil.Discard}, discardCloser{Writer: ioutil.Discard}) + err = raft1.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } } - } - b.Run("256kb", func(b *testing.B) { bench(b, raft.fsm) }) + bench := func(b *testing.B, s *FSM) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + s.writeTo(ctx, discardCloser{Writer: io.Discard}, discardCloser{Writer: io.Discard}) + } + } + + b.Run("256kb", func(b *testing.B) { bench(b, raft1.fsm) }) + }) } type discardCloser struct { diff --git a/physical/raft/raft_util.go b/physical/raft/raft_util.go index bd496dfac64b..722f8e2fc8d0 100644 --- a/physical/raft/raft_util.go +++ b/physical/raft/raft_util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !enterprise diff --git a/physical/raft/raft_util_stubs_oss.go b/physical/raft/raft_util_stubs_oss.go new file mode 100644 index 000000000000..488bc0ba305d --- /dev/null +++ b/physical/raft/raft_util_stubs_oss.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package raft + +import "github.com/hashicorp/go-hclog" + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func (b *RaftBackend) entrySizeLimitForPath(path string) uint64 { + return b.maxEntrySize +} + +func emitEntWarning(logger hclog.Logger, field string) { + logger.Warn("configuration for a Vault Enterprise feature has been ignored", "field", field) +} diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go index 68d9c953f819..e44769bf6d2a 100644 --- a/physical/raft/snapshot.go +++ b/physical/raft/snapshot.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft @@ -18,13 +18,12 @@ import ( "time" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/plugin/pb" "github.com/rboyer/safeio" - bolt "go.etcd.io/bbolt" "go.uber.org/atomic" - - "github.com/hashicorp/raft" ) const ( @@ -150,7 +149,7 @@ func (f *BoltSnapshotStore) List() ([]*raft.SnapshotMeta, error) { return []*raft.SnapshotMeta{meta}, nil } -// getBoltSnapshotMeta returns the fsm's latest state and configuration. +// getMetaFromFSM returns the fsm's latest state and configuration. 
func (f *BoltSnapshotStore) getMetaFromFSM() (*raft.SnapshotMeta, error) { latestIndex, latestConfig := f.fsm.LatestState() meta := &raft.SnapshotMeta{ @@ -268,7 +267,7 @@ func (f *BoltSnapshotStore) openFromFile(id string) (*raft.SnapshotMeta, io.Read filename := filepath.Join(f.path, id, databaseFilename) installer := &boltSnapshotInstaller{ meta: meta, - ReadCloser: ioutil.NopCloser(strings.NewReader(filename)), + ReadCloser: io.NopCloser(strings.NewReader(filename)), filename: filename, } diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go index 3472c8d53981..fe7fafd3fc01 100644 --- a/physical/raft/snapshot_test.go +++ b/physical/raft/snapshot_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft @@ -495,7 +495,7 @@ func TestRaft_Snapshot_Take_Restore(t *testing.T) { } } - snapFile, cleanup, metadata, err := raft1.WriteSnapshotToTemp(ioutil.NopCloser(recorder.Body), nil) + snapFile, cleanup, metadata, err := raft1.WriteSnapshotToTemp(io.NopCloser(recorder.Body), nil) if err != nil { t.Fatal(err) } diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go index 90d8e495cbaf..861f4f60488c 100644 --- a/physical/raft/streamlayer.go +++ b/physical/raft/streamlayer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft diff --git a/physical/raft/streamlayer_test.go b/physical/raft/streamlayer_test.go index d826eaadca75..bc35eb66ffe4 100644 --- a/physical/raft/streamlayer_test.go +++ b/physical/raft/streamlayer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft diff --git a/physical/raft/testing.go b/physical/raft/testing.go index ea6847911f2b..0a72e3f13cc6 100644 --- a/physical/raft/testing.go +++ b/physical/raft/testing.go @@ -1,12 +1,12 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package raft import ( "context" "fmt" - "io/ioutil" + "io" "testing" "github.com/hashicorp/go-hclog" @@ -14,42 +14,72 @@ import ( ) func GetRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { - raftDir, err := ioutil.TempDir("", "vault-raft-") - if err != nil { - t.Fatal(err) - } - t.Logf("raft dir: %s", raftDir) + return getRaftInternal(t, bootstrap, defaultRaftConfig(t, bootstrap, noStoreState), nil, nil, nil) +} - return getRaftWithDir(t, bootstrap, noStoreState, raftDir) +func GetRaftWithConfig(t testing.TB, bootstrap bool, noStoreState bool, conf map[string]string) (*RaftBackend, string) { + defaultConf := defaultRaftConfig(t, bootstrap, noStoreState) + conf["path"] = defaultConf["path"] + conf["doNotStoreLatestState"] = defaultConf["doNotStoreLatestState"] + return getRaftInternal(t, bootstrap, conf, nil, nil, nil) } -func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir string) (*RaftBackend, string) { - id, err := uuid.GenerateUUID() - if err != nil { - t.Fatal(err) - } +func GetRaftWithConfigAndSetupOpts(t testing.TB, bootstrap bool, noStoreState bool, conf map[string]string, setupOpts *SetupOpts) (*RaftBackend, string) { + defaultConf := defaultRaftConfig(t, bootstrap, noStoreState) + conf["path"] = defaultConf["path"] + conf["doNotStoreLatestState"] = defaultConf["doNotStoreLatestState"] + return getRaftInternal(t, bootstrap, conf, setupOpts, nil, nil) +} - logger := hclog.New(&hclog.LoggerOptions{ - Name: fmt.Sprintf("raft-%s", id), - Level: hclog.Trace, - }) - logger.Info("raft dir", "dir", raftDir) +func GetRaftWithConfigAndInitFn(t testing.TB, bootstrap bool, noStoreState bool, conf map[string]string, initFn func(b *RaftBackend)) (*RaftBackend, string) { + defaultConf := defaultRaftConfig(t, bootstrap, noStoreState) + conf["path"] = defaultConf["path"] + conf["doNotStoreLatestState"] = defaultConf["doNotStoreLatestState"] + return getRaftInternal(t, bootstrap, conf, nil, nil, initFn) +} + +func GetRaftWithLogOutput(t testing.TB, bootstrap bool, noStoreState bool, logOutput io.Writer) (*RaftBackend, string) { + return getRaftInternal(t, bootstrap, defaultRaftConfig(t, bootstrap, noStoreState), nil, logOutput, nil) +} + +func defaultRaftConfig(t testing.TB, bootstrap bool, noStoreState bool) map[string]string { + raftDir := t.TempDir() + t.Logf("raft dir: %s", raftDir) conf := map[string]string{ "path": raftDir, "trailing_logs": "100", - "node_id": id, } if noStoreState { conf["doNotStoreLatestState"] = "" } + return conf +} + +func getRaftInternal(t testing.TB, bootstrap bool, conf map[string]string, setupOpts *SetupOpts, logOutput io.Writer, initFn func(b *RaftBackend)) (*RaftBackend, string) { + id, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: fmt.Sprintf("raft-%s", id), + Level: hclog.Trace, + Output: logOutput, + }) + + conf["node_id"] = id + backendRaw, err := NewRaftBackend(conf, logger) if err != nil { t.Fatal(err) } backend := backendRaw.(*RaftBackend) + if initFn != nil { + initFn(backend) + } if bootstrap { err = backend.Bootstrap([]Peer{ @@ -62,7 +92,12 @@ func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir str t.Fatal(err) } - err = backend.SetupCluster(context.Background(), SetupOpts{}) + so := SetupOpts{} + if setupOpts != nil { + so = *setupOpts + } + + err = backend.SetupCluster(context.Background(), so) if err != nil { t.Fatal(err) } @@ 
-76,6 +111,5 @@ func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir str } backend.DisableAutopilot() - - return backend, raftDir + return backend, conf["path"] } diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go index 2835e1f17d96..9790739d7c88 100644 --- a/physical/raft/types.pb.go +++ b/physical/raft/types.pb.go @@ -1,10 +1,10 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: physical/raft/types.proto package raft @@ -417,7 +417,7 @@ func file_physical_raft_types_proto_rawDescGZIP() []byte { } var file_physical_raft_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_physical_raft_types_proto_goTypes = []interface{}{ +var file_physical_raft_types_proto_goTypes = []any{ (*LogOperation)(nil), // 0: raft.LogOperation (*LogData)(nil), // 1: raft.LogData (*IndexValue)(nil), // 2: raft.IndexValue @@ -441,7 +441,7 @@ func file_physical_raft_types_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_physical_raft_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_physical_raft_types_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*LogOperation); i { case 0: return &v.state @@ -453,7 +453,7 @@ func file_physical_raft_types_proto_init() { return nil } } - file_physical_raft_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_physical_raft_types_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*LogData); i { case 0: return &v.state @@ -465,7 +465,7 @@ func file_physical_raft_types_proto_init() { return nil } } - file_physical_raft_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_physical_raft_types_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*IndexValue); i { case 0: return &v.state @@ -477,7 +477,7 @@ func file_physical_raft_types_proto_init() { return nil } } - file_physical_raft_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_physical_raft_types_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Server); i { case 0: return &v.state @@ -489,7 +489,7 @@ func file_physical_raft_types_proto_init() { return nil } } - file_physical_raft_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_physical_raft_types_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ConfigurationValue); i { case 0: return &v.state @@ -501,7 +501,7 @@ func file_physical_raft_types_proto_init() { return nil } } - file_physical_raft_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_physical_raft_types_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*LocalNodeConfigValue); i { case 0: return &v.state diff --git a/physical/raft/types.proto b/physical/raft/types.proto index bb3d136e10eb..6e87157051aa 100644 --- a/physical/raft/types.proto +++ b/physical/raft/types.proto @@ -1,46 +1,46 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/physical/raft"; - package raft; +option go_package = "github.com/hashicorp/vault/physical/raft"; + message LogOperation { - // OpType is the Operation type - uint32 op_type = 1; + // OpType is the Operation type + uint32 op_type = 1; - // Flags is an opaque value, currently unused. Reserved. - uint64 flags = 2; + // Flags is an opaque value, currently unused. Reserved. + uint64 flags = 2; - // Key that is being affected - string key = 3; + // Key that is being affected + string key = 3; - // Value is optional, corresponds to the key - bytes value = 4; + // Value is optional, corresponds to the key + bytes value = 4; } message LogData { - repeated LogOperation operations = 1; + repeated LogOperation operations = 1; } message IndexValue { - uint64 term = 1; - uint64 index = 2; + uint64 term = 1; + uint64 index = 2; } message Server { - int32 suffrage = 1; - string id = 2; - string address = 3; + int32 suffrage = 1; + string id = 2; + string address = 3; } message ConfigurationValue { - uint64 index = 1; - repeated Server servers = 2; + uint64 index = 1; + repeated Server servers = 2; } -message LocalNodeConfigValue{ +message LocalNodeConfigValue { string desired_suffrage = 1; } diff --git a/physical/raft/varint.go b/physical/raft/varint.go index b3b9bfaaebd0..87f59eaa700d 100644 --- a/physical/raft/varint.go +++ b/physical/raft/varint.go @@ -79,14 +79,19 @@ func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { if c, ok := r.(io.Closer); ok { closer = c } - return &varintReader{bufio.NewReader(r), nil, maxSize, closer} + return &varintReader{bufio.NewReader(r), nil, maxSize, closer, 0} } type varintReader struct { - r *bufio.Reader - buf []byte - maxSize int - closer io.Closer + r *bufio.Reader + buf []byte + maxSize int + closer io.Closer + lastReadSize int +} + +func (this *varintReader) GetLastReadSize() int { + return this.lastReadSize } func (this *varintReader) ReadMsg(msg proto.Message) error { @@ -102,9 +107,11 @@ func (this *varintReader) ReadMsg(msg proto.Message) error { this.buf = make([]byte, length) } buf := this.buf[:length] - if _, err := io.ReadFull(this.r, buf); err != nil { + size, err := io.ReadFull(this.r, buf) + if err != nil { return err } + this.lastReadSize = size return proto.Unmarshal(buf, msg) } diff --git a/physical/raft/vars_32bit.go b/physical/raft/vars_32bit.go index 6e5c51fe9352..1b43384726d6 100644 --- a/physical/raft/vars_32bit.go +++ b/physical/raft/vars_32bit.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build 386 || arm || windows diff --git a/physical/raft/vars_64bit.go b/physical/raft/vars_64bit.go index a1eea0febc08..4c728e23630d 100644 --- a/physical/raft/vars_64bit.go +++ b/physical/raft/vars_64bit.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build !386 && !arm && !windows diff --git a/physical/s3/s3.go b/physical/s3/s3.go index 0cb8e0af3552..da82acccd3ca 100644 --- a/physical/s3/s3.go +++ b/physical/s3/s3.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package s3 diff --git a/physical/s3/s3_test.go b/physical/s3/s3_test.go index 139e41dfad98..68b23f129841 100644 --- a/physical/s3/s3_test.go +++ b/physical/s3/s3_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package s3 diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index b84e0d4637b4..4151d93ba1b8 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package spanner diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go index d116be0ba34e..377704d571be 100644 --- a/physical/spanner/spanner_ha.go +++ b/physical/spanner/spanner_ha.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package spanner diff --git a/physical/spanner/spanner_ha_test.go b/physical/spanner/spanner_ha_test.go index dad39ad4c95b..c6afbd001bc5 100644 --- a/physical/spanner/spanner_ha_test.go +++ b/physical/spanner/spanner_ha_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package spanner diff --git a/physical/spanner/spanner_test.go b/physical/spanner/spanner_test.go index 4b7c1c46b114..fc761f1e3235 100644 --- a/physical/spanner/spanner_test.go +++ b/physical/spanner/spanner_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package spanner diff --git a/physical/swift/swift.go b/physical/swift/swift.go index d616bfe35b32..8e53bd2fbe17 100644 --- a/physical/swift/swift.go +++ b/physical/swift/swift.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package swift @@ -12,10 +12,9 @@ import ( "strings" "time" - log "github.com/hashicorp/go-hclog" - metrics "github.com/armon/go-metrics" cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/sdk/physical" "github.com/ncw/swift" diff --git a/physical/swift/swift_test.go b/physical/swift/swift_test.go index 8f8af160fefd..3f192cd3f4a5 100644 --- a/physical/swift/swift_test.go +++ b/physical/swift/swift_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package swift @@ -9,9 +9,8 @@ import ( "testing" "time" - log "github.com/hashicorp/go-hclog" - cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" "github.com/ncw/swift" diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go index e52ac9b63033..32a560472b25 100644 --- a/physical/zookeeper/zookeeper.go +++ b/physical/zookeeper/zookeeper.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package zookeeper diff --git a/physical/zookeeper/zookeeper_test.go b/physical/zookeeper/zookeeper_test.go index e4448bf73ab7..7c9fe70180fa 100644 --- a/physical/zookeeper/zookeeper_test.go +++ b/physical/zookeeper/zookeeper_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package zookeeper @@ -29,7 +29,6 @@ func TestZooKeeperBackend(t *testing.T) { randPath := fmt.Sprintf("/vault-%d", time.Now().Unix()) acl := zk.WorldACL(zk.PermAll) _, err = client.Create(randPath, []byte("hi"), int32(0), acl) - if err != nil { t.Fatalf("err: %v", err) } @@ -73,7 +72,6 @@ func TestZooKeeperHABackend(t *testing.T) { randPath := fmt.Sprintf("/vault-ha-%d", time.Now().Unix()) acl := zk.WorldACL(zk.PermAll) _, err = client.Create(randPath, []byte("hi"), int32(0), acl) - if err != nil { t.Fatalf("err: %v", err) } diff --git a/plugins/database/cassandra/cassandra-database-plugin/main.go b/plugins/database/cassandra/cassandra-database-plugin/main.go index 8a91d1b50c7f..6f9f7af954f4 100644 --- a/plugins/database/cassandra/cassandra-database-plugin/main.go +++ b/plugins/database/cassandra/cassandra-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/cassandra/cassandra.go b/plugins/database/cassandra/cassandra.go index 8118fa06171b..7118e2b55b6d 100644 --- a/plugins/database/cassandra/cassandra.go +++ b/plugins/database/cassandra/cassandra.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra @@ -8,13 +8,12 @@ import ( "fmt" "strings" - "github.com/hashicorp/vault/sdk/helper/template" - "github.com/gocql/gocql" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/template" ) const ( diff --git a/plugins/database/cassandra/cassandra_test.go b/plugins/database/cassandra/cassandra_test.go index 7a3260935b7c..6382f89c7bcd 100644 --- a/plugins/database/cassandra/cassandra_test.go +++ b/plugins/database/cassandra/cassandra_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra @@ -9,13 +9,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - backoff "github.com/cenkalti/backoff/v3" "github.com/gocql/gocql" "github.com/hashicorp/vault/helper/testhelpers/cassandra" dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/stretchr/testify/require" ) func getCassandra(t *testing.T, protocolVersion interface{}) (*Cassandra, func()) { diff --git a/plugins/database/cassandra/connection_producer.go b/plugins/database/cassandra/connection_producer.go index a63ed27d5eb3..78f8311fbeee 100644 --- a/plugins/database/cassandra/connection_producer.go +++ b/plugins/database/cassandra/connection_producer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra diff --git a/plugins/database/cassandra/connection_producer_test.go b/plugins/database/cassandra/connection_producer_test.go index e2f4ba0fc59f..306b444f4568 100644 --- a/plugins/database/cassandra/connection_producer_test.go +++ b/plugins/database/cassandra/connection_producer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra diff --git a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml index 481996968866..a55afc69309d 100644 --- a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml +++ b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # Cassandra storage config YAML diff --git a/plugins/database/cassandra/tls.go b/plugins/database/cassandra/tls.go index 17e148d7496d..e8ad907235c0 100644 --- a/plugins/database/cassandra/tls.go +++ b/plugins/database/cassandra/tls.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package cassandra diff --git a/plugins/database/hana/hana-database-plugin/main.go b/plugins/database/hana/hana-database-plugin/main.go index 9ec568b66db4..8e4311eeb327 100644 --- a/plugins/database/hana/hana-database-plugin/main.go +++ b/plugins/database/hana/hana-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go index 987cc1af2f81..314505c15f1e 100644 --- a/plugins/database/hana/hana.go +++ b/plugins/database/hana/hana.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package hana diff --git a/plugins/database/hana/hana_test.go b/plugins/database/hana/hana_test.go index 6a3c1dbe07d4..894d3a4da164 100644 --- a/plugins/database/hana/hana_test.go +++ b/plugins/database/hana/hana_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package hana diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go index b9f18c543386..24d03951e01e 100644 --- a/plugins/database/influxdb/connection_producer.go +++ b/plugins/database/influxdb/connection_producer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package influxdb diff --git a/plugins/database/influxdb/influxdb-database-plugin/main.go b/plugins/database/influxdb/influxdb-database-plugin/main.go index bfc94f75fd31..41ed199556cc 100644 --- a/plugins/database/influxdb/influxdb-database-plugin/main.go +++ b/plugins/database/influxdb/influxdb-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/influxdb/influxdb.go b/plugins/database/influxdb/influxdb.go index f216319a6c13..4df308997639 100644 --- a/plugins/database/influxdb/influxdb.go +++ b/plugins/database/influxdb/influxdb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package influxdb diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go index f250a6e77a77..37401c4bc1bb 100644 --- a/plugins/database/influxdb/influxdb_test.go +++ b/plugins/database/influxdb/influxdb_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package influxdb @@ -9,14 +9,15 @@ import ( "net/url" "os" "reflect" + "runtime" "strconv" "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" - dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/helper/docker" influx "github.com/influxdata/influxdb1-client/v2" "github.com/stretchr/testify/require" ) @@ -51,6 +52,11 @@ func (c *Config) connectionParams() map[string]interface{} { } func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + c := &Config{ Username: "influx-root", Password: "influx-root", @@ -61,8 +67,9 @@ func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "influxdb", - ImageTag: "1.8-alpine", + ImageRepo: "docker.mirror.hashicorp.services/influxdb", + ContainerName: "influxdb", + ImageTag: "1.8-alpine", Env: []string{ "INFLUXDB_DB=vault", "INFLUXDB_ADMIN_USER=" + c.Username, diff --git a/plugins/database/mongodb/cert_helpers_test.go b/plugins/database/mongodb/cert_helpers_test.go index 9f9388b1cf2b..3a8f3afcb84f 100644 --- a/plugins/database/mongodb/cert_helpers_test.go +++ b/plugins/database/mongodb/cert_helpers_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go index 4686c3b13f11..ca6244aa494a 100644 --- a/plugins/database/mongodb/connection_producer.go +++ b/plugins/database/mongodb/connection_producer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go index 2ce3872c597f..e29332b6092e 100644 --- a/plugins/database/mongodb/connection_producer_test.go +++ b/plugins/database/mongodb/connection_producer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb diff --git a/plugins/database/mongodb/mongodb-database-plugin/main.go b/plugins/database/mongodb/mongodb-database-plugin/main.go index fe68659eca63..ab0ff80c3bc9 100644 --- a/plugins/database/mongodb/mongodb-database-plugin/main.go +++ b/plugins/database/mongodb/mongodb-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go index 4026fbc693da..7d285b24c5d4 100644 --- a/plugins/database/mongodb/mongodb.go +++ b/plugins/database/mongodb/mongodb.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
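The influxdb test hunk above pins the image through the HashiCorp Docker mirror and hard-skips on ARM, since the pinned 1.8-alpine image publishes no ARM build. A minimal sketch of that guard factored into a reusable helper (helper name and package are hypothetical, not part of this patch):

package testhelpers

import (
	"runtime"
	"strings"
	"testing"
)

// skipIfARM skips container-based tests on hosts whose architecture the
// image under test does not publish, mirroring the guard added above.
func skipIfARM(t *testing.T) {
	t.Helper()
	if strings.Contains(runtime.GOARCH, "arm") {
		t.Skip("Skipping, as this image is not supported on ARM architectures")
	}
}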
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb @@ -176,7 +176,7 @@ func (m *MongoDB) changeUserPassword(ctx context.Context, username, password str } database := cs.Database - if username == m.Username || database == "" { + if database == "" { database = "admin" } diff --git a/plugins/database/mongodb/mongodb_test.go b/plugins/database/mongodb/mongodb_test.go index da051069d3dd..43a3e8d3c242 100644 --- a/plugins/database/mongodb/mongodb_test.go +++ b/plugins/database/mongodb/mongodb_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb @@ -8,6 +8,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "net/http" "reflect" "strings" "sync" @@ -26,7 +27,10 @@ import ( "go.mongodb.org/mongo-driver/mongo/readpref" ) -const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` +const ( + mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` + mongoTestDBAdminRole = `{ "db": "test", "roles": [ { "role": "readWrite" } ] }` +) func TestMongoDB_Initialize(t *testing.T) { cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") @@ -118,6 +122,23 @@ func TestNewUser_usernameTemplate(t *testing.T) { expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$", }, + "admin in test database username template": { + usernameTemplate: "", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "testrolenamewithmanycharacters", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoTestDBAdminRole}, + }, + Password: "98yq3thgnakjsfhjkl", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, } for name, test := range tests { @@ -125,6 +146,10 @@ func TestNewUser_usernameTemplate(t *testing.T) { cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") defer cleanup() + if name == "admin in test database username template" { + connURL = connURL + "/test?authSource=test" + } + db := new() defer dbtesting.AssertClose(t, db) @@ -289,6 +314,39 @@ func TestMongoDB_UpdateUser_Password(t *testing.T) { assertCredsExist(t, dbUser, newPassword, connURL) } +func TestMongoDB_RotateRoot_NonAdminDB(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + connURL = connURL + "/test?authSource=test" + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + dbUser := "testmongouser" + startingPassword := "password" + createDBUser(t, connURL, "test", dbUser, startingPassword) + + newPassword := "myreallysecurecredentials" + + updateReq := dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: newPassword, + }, + } + dbtesting.AssertUpdateUser(t, db, updateReq) + + assertCredsExist(t, dbUser, newPassword, connURL) +} + func TestGetTLSAuth(t *testing.T) { ca := certhelpers.NewCert(t, certhelpers.CommonName("certificate authority"), @@ -385,6 +443,8 @@ func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.Cer } var cmpClientOptionsOpts = cmp.Options{ + cmpopts.IgnoreTypes(http.Transport{}), + cmp.AllowUnexported(options.ClientOptions{}), cmp.AllowUnexported(tls.Config{}), diff 
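The changeUserPassword fix above stops special-casing the configured root user, so the authentication database now comes strictly from the connection string, with "admin" only as a fallback; the new RotateRoot_NonAdminDB test drives this through a "/test?authSource=test" URL. A rough sketch of that resolution, assuming the same mongo-driver connstring parser the plugin already uses (URL value hypothetical):

import "go.mongodb.org/mongo-driver/x/mongo/driver/connstring"

// "test" is both the path database and the authSource, matching the new tests.
cs, err := connstring.Parse("mongodb://root:pw@localhost:27017/test?authSource=test")
if err != nil {
	return err
}
database := cs.Database // "test"
if database == "" {
	database = "admin" // fall back only when the URL names no database
}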
--git a/plugins/database/mongodb/util.go b/plugins/database/mongodb/util.go index be5842136bb6..ebeebb3f433d 100644 --- a/plugins/database/mongodb/util.go +++ b/plugins/database/mongodb/util.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mongodb diff --git a/plugins/database/mssql/mssql-database-plugin/main.go b/plugins/database/mssql/mssql-database-plugin/main.go index 2a57b5746ec6..1ed6f5d0e3ce 100644 --- a/plugins/database/mssql/mssql-database-plugin/main.go +++ b/plugins/database/mssql/mssql-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go index 7c7a4c27b3b7..488d7f39226d 100644 --- a/plugins/database/mssql/mssql.go +++ b/plugins/database/mssql/mssql.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mssql @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" @@ -286,7 +285,7 @@ func (m *MSSQL) revokeUserDefault(ctx context.Context, username string) error { rows, err := stmt.QueryContext(ctx, username) if err != nil { - return err + return fmt.Errorf("failed to query users: %w", err) } defer rows.Close() diff --git a/plugins/database/mssql/mssql_test.go b/plugins/database/mssql/mssql_test.go index 385c5f0b69ff..d549d52cd178 100644 --- a/plugins/database/mssql/mssql_test.go +++ b/plugins/database/mssql/mssql_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
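In the mssql hunk above, revokeUserDefault now wraps the query error instead of returning it bare. Wrapping with %w adds context without hiding the cause; a tiny illustration (surrounding names as in the hunk):

rows, err := stmt.QueryContext(ctx, username)
if err != nil {
	return fmt.Errorf("failed to query users: %w", err)
}
// Callers can still inspect the underlying condition through the wrap:
// if errors.Is(err, sql.ErrNoRows) { ... }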
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mssql @@ -192,13 +192,13 @@ func TestUpdateUser_password(t *testing.T) { expectedPassword string } - dbUser := "vaultuser" + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() initPassword := "p4$sw0rd" tests := map[string]testCase{ "missing password": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "", Statements: dbplugin.Statements{}, @@ -209,7 +209,6 @@ func TestUpdateUser_password(t *testing.T) { }, "empty rotation statements": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "N90gkKLy8$angf", Statements: dbplugin.Statements{}, @@ -220,7 +219,6 @@ func TestUpdateUser_password(t *testing.T) { }, "username rotation": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "N90gkKLy8$angf", Statements: dbplugin.Statements{ @@ -235,7 +233,6 @@ func TestUpdateUser_password(t *testing.T) { }, "bad statements": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "N90gkKLy8$angf", Statements: dbplugin.Statements{ @@ -250,11 +247,9 @@ func TestUpdateUser_password(t *testing.T) { }, } + i := 0 for name, test := range tests { t.Run(name, func(t *testing.T) { - cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) - defer cleanup() - initReq := dbplugin.InitializeRequest{ Config: map[string]interface{}{ "connection_url": connURL, @@ -266,6 +261,9 @@ func TestUpdateUser_password(t *testing.T) { dbtesting.AssertInitializeCircleCiTest(t, db, initReq) defer dbtesting.AssertClose(t, db) + dbUser := fmt.Sprintf("vaultuser%d", i) + test.req.Username = dbUser + i++ err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLLogin) if err != nil { t.Fatalf("Failed to create user: %s", err) @@ -296,7 +294,7 @@ func TestUpdateUser_password(t *testing.T) { Username: dbUser, } - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel = context.WithTimeout(context.Background(), 20*time.Second) defer cancel() deleteResp, err := db.DeleteUser(ctx, deleteReq) if err != nil { @@ -344,7 +342,7 @@ func TestDeleteUser(t *testing.T) { Username: dbUser, } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() deleteResp, err := db.DeleteUser(ctx, deleteReq) if err != nil { diff --git a/plugins/database/mysql/connection_producer.go b/plugins/database/mysql/connection_producer.go index 5c5979213166..f35bfaf522fb 100644 --- a/plugins/database/mysql/connection_producer.go +++ b/plugins/database/mysql/connection_producer.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
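The mssql_test refactor above starts one shared container for all subtests and derives a unique login per case from the "i" counter, which is safe while the subtests run sequentially. If these cases were ever marked t.Parallel(), a race-free variant might look like this (sketch only, not part of the patch):

var counter int64 // shared across subtests

// inside each subtest:
dbUser := fmt.Sprintf("vaultuser%d", atomic.AddInt64(&counter, 1))

The 5s-to-20s DeleteUser timeout bumps in the same file look like a related stability measure for slower CI workers.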
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysql @@ -13,6 +13,7 @@ import ( "sync" "time" + cloudmysql "cloud.google.com/go/cloudsqlconn/mysql/mysql" "github.com/go-sql-driver/mysql" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-uuid" @@ -21,6 +22,11 @@ import ( "github.com/mitchellh/mapstructure" ) +const ( + cloudSQLMySQL = "cloudsql-mysql" + driverMySQL = "mysql" +) + // mySQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases type mySQLConnectionProducer struct { ConnectionURL string `json:"connection_url" mapstructure:"connection_url" structs:"connection_url"` @@ -29,6 +35,8 @@ type mySQLConnectionProducer struct { MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` Username string `json:"username" mapstructure:"username" structs:"username"` Password string `json:"password" mapstructure:"password" structs:"password"` + AuthType string `json:"auth_type" mapstructure:"auth_type" structs:"auth_type"` + ServiceAccountJSON string `json:"service_account_json" mapstructure:"service_account_json" structs:"service_account_json"` TLSCertificateKeyData []byte `json:"tls_certificate_key" mapstructure:"tls_certificate_key" structs:"-"` TLSCAData []byte `json:"tls_ca" mapstructure:"tls_ca" structs:"-"` @@ -38,6 +46,10 @@ type mySQLConnectionProducer struct { // tlsConfigName is a globally unique name that references the TLS config for this instance in the mysql driver tlsConfigName string + // cloudDriverName is a globally unique name that references the cloud dialer config for this instance of the driver + cloudDriverName string + cloudDialerCleanup func() error + RawConfig map[string]interface{} maxConnectionLifetime time.Duration Initialized bool @@ -110,6 +122,29 @@ func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]inte mysql.RegisterTLSConfig(c.tlsConfigName, tlsConfig) } + // validate auth_type if provided + if ok := connutil.ValidateAuthType(c.AuthType); !ok { + return nil, fmt.Errorf("invalid auth_type: %s", c.AuthType) + } + + if c.AuthType == connutil.AuthTypeGCPIAM { + c.cloudDriverName, err = uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("unable to generate UUID for IAM configuration: %w", err) + } + + // for _most_ sql databases, the driver itself contains no state. In the case of google's cloudsql drivers, + // however, the driver might store a credentials file, in which case the state stored by the driver is in + // fact critical to the proper function of the connection. So it needs to be registered here inside the + // ConnectionProducer init. + dialerCleanup, err := registerDriverMySQL(c.cloudDriverName, c.ServiceAccountJSON) + if err != nil { + return nil, err + } + + c.cloudDialerCleanup = dialerCleanup + } + // Set initialized to true at this point since all fields are set, // and the connection can be established at a later time. 
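// Lifecycle note on the dialer registered above: the cleanup func stored
// in c.cloudDialerCleanup is invoked again from Connection() when a dead
// connection is torn down, and from Close(), so the per-instance cloudsql
// driver state does not leak across reconnects (see the hunks below).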
c.Initialized = true @@ -140,6 +175,20 @@ func (c *mySQLConnectionProducer) Connection(ctx context.Context) (interface{}, // If the ping was unsuccessful, close it and ignore errors as we'll be // reestablishing anyways c.db.Close() + + // if IAM authentication was enabled + // ensure open dialer is also closed + if c.AuthType == connutil.AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } + + } + + driverName := driverMySQL + if c.cloudDriverName != "" { + driverName = c.cloudDriverName } connURL, err := c.addTLStoDSN() @@ -147,7 +196,12 @@ func (c *mySQLConnectionProducer) Connection(ctx context.Context) (interface{}, return nil, err } - c.db, err = sql.Open("mysql", connURL) + cloudURL, err := c.rewriteProtocolForGCP(connURL) + if err != nil { + return nil, err + } + + c.db, err = sql.Open(driverName, cloudURL) if err != nil { return nil, err } @@ -174,6 +228,13 @@ func (c *mySQLConnectionProducer) Close() error { defer c.Unlock() if c.db != nil { + // if auth_type is IAM, ensure cleanup + // of cloudSQL resources + if c.AuthType == connutil.AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } c.db.Close() } @@ -230,3 +291,38 @@ func (c *mySQLConnectionProducer) addTLStoDSN() (connURL string, err error) { connURL = config.FormatDSN() return connURL, nil } + +// rewriteProtocolForGCP rewrites the protocol in the DSN to contain the protocol name associated +// with the dialer and therefore driver associated with the provided cloudsqlconn.DialerOpts. +// As a safety/sanity check, it will only do this for protocol "cloudsql-mysql", the name GCP uses in its documentation. +// +// For example, it will rewrite the dsn "user@cloudsql-mysql(zone:region:instance)/ to +// "user@the-uuid-generated(zone:region:instance)/ +func (c *mySQLConnectionProducer) rewriteProtocolForGCP(inDSN string) (string, error) { + if c.cloudDriverName == "" { + // unchanged if not cloud + return inDSN, nil + } + + config, err := mysql.ParseDSN(inDSN) + if err != nil { + return "", fmt.Errorf("unable to parse connectionURL: %s", err) + } + + if config.Net != cloudSQLMySQL { + return "", fmt.Errorf("didn't update net name because it wasn't what we expected as a placeholder: %s", config.Net) + } + + config.Net = c.cloudDriverName + + return config.FormatDSN(), nil +} + +func registerDriverMySQL(driverName, credentials string) (cleanup func() error, err error) { + opts, err := connutil.GetCloudSQLAuthOptions(credentials, false) + if err != nil { + return nil, err + } + + return cloudmysql.RegisterDriver(driverName, opts...) +} diff --git a/plugins/database/mysql/connection_producer_test.go b/plugins/database/mysql/connection_producer_test.go index a3f0bc7ef56e..ae6014906f02 100644 --- a/plugins/database/mysql/connection_producer_test.go +++ b/plugins/database/mysql/connection_producer_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysql diff --git a/plugins/database/mysql/mysql-database-plugin/main.go b/plugins/database/mysql/mysql-database-plugin/main.go index 56640b2f7b13..2735e082b866 100644 --- a/plugins/database/mysql/mysql-database-plugin/main.go +++ b/plugins/database/mysql/mysql-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
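Taken together, the producer changes above let a MySQL mount authenticate to Cloud SQL with GCP IAM rather than a static password. A minimal Init call exercising that path might look like the following sketch, where the producer value, instance path, and credJSON are hypothetical, and connutil.AuthTypeGCPIAM is assumed to be the "gcp_iam" value validated in Init:

conf := map[string]interface{}{
	// The "cloudsql-mysql" placeholder protocol is what
	// rewriteProtocolForGCP above swaps for the per-instance driver name.
	"connection_url":       "vault@cloudsql-mysql(my-project:us-central1:my-instance)/mysql",
	"auth_type":            connutil.AuthTypeGCPIAM,
	"service_account_json": string(credJSON),
}
if _, err := producer.Init(ctx, conf, true); err != nil {
	return err
}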
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/plugins/database/mysql/mysql-legacy-database-plugin/main.go index 8aeba0b36bdc..6818cccbf0ce 100644 --- a/plugins/database/mysql/mysql-legacy-database-plugin/main.go +++ b/plugins/database/mysql/mysql-legacy-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go index 0260ec20d216..c938d2be9a70 100644 --- a/plugins/database/mysql/mysql.go +++ b/plugins/database/mysql/mysql.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysql diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go index 07e0165ffe49..72c2a125a51e 100644 --- a/plugins/database/mysql/mysql_test.go +++ b/plugins/database/mysql/mysql_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package mysql @@ -7,6 +7,7 @@ import ( "context" "database/sql" "fmt" + "os" "strings" "testing" "time" @@ -14,8 +15,9 @@ import ( stdmysql "github.com/go-sql-driver/mysql" "github.com/hashicorp/go-secure-stdlib/strutil" mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" - dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/credsutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/stretchr/testify/require" @@ -44,6 +46,79 @@ func TestMySQL_Initialize(t *testing.T) { } } +// TestMySQL_Initialize_CloudGCP validates the proper initialization of a MySQL backend pointing +// to a GCP CloudSQL MySQL instance. 
This expects some external setup (exact TBD) +func TestMySQL_Initialize_CloudGCP(t *testing.T) { + envConnURL := "CONNECTION_URL" + connURL := os.Getenv(envConnURL) + if connURL == "" { + t.Skipf("env var %s not set, skipping test", envConnURL) + } + + credStr := dbtesting.GetGCPTestCredentials(t) + + tests := map[string]struct { + req dbplugin.InitializeRequest + wantErr bool + expectedError string + }{ + "empty auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "", + }, + }, + }, + "invalid auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "invalid", + }, + }, + wantErr: true, + expectedError: "invalid auth_type", + }, + "JSON credentials": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": connutil.AuthTypeGCPIAM, + "service_account_json": credStr, + }, + VerifyConnection: true, + }, + }, + } + + for n, tc := range tests { + t.Run(n, func(t *testing.T) { + db := newMySQL(DefaultUserNameTemplate) + defer dbtesting.AssertClose(t, db) + _, err := db.Initialize(context.Background(), tc.req) + + if tc.wantErr { + if err == nil { + t.Fatalf("expected error but received nil") + } + + if !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("expected error %s, got %s", tc.expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("expected no error, received %s", err) + } + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + } + }) + } +} + func testInitialize(t *testing.T, rootPassword string) { cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, rootPassword) defer cleanup() diff --git a/plugins/database/postgresql/passwordauthentication.go b/plugins/database/postgresql/passwordauthentication.go new file mode 100644 index 000000000000..a20214dae4d1 --- /dev/null +++ b/plugins/database/postgresql/passwordauthentication.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package postgresql + +import "fmt" + +// passwordAuthentication determines whether to send passwords in plaintext (password) or hashed (scram-sha-256). +type passwordAuthentication string + +var ( + // passwordAuthenticationPassword is the default. If set, passwords will be sent to PostgreSQL in plain text. + passwordAuthenticationPassword passwordAuthentication = "password" + passwordAuthenticationSCRAMSHA256 passwordAuthentication = "scram-sha-256" +) + +var passwordAuthentications = map[passwordAuthentication]struct{}{ + passwordAuthenticationSCRAMSHA256: {}, + passwordAuthenticationPassword: {}, +} + +func parsePasswordAuthentication(s string) (passwordAuthentication, error) { + if _, ok := passwordAuthentications[passwordAuthentication(s)]; !ok { + return "", fmt.Errorf("'%s' is not a valid password authentication type", s) + } + + return passwordAuthentication(s), nil +} diff --git a/plugins/database/postgresql/postgresql-database-plugin/main.go b/plugins/database/postgresql/postgresql-database-plugin/main.go index f543167d4a3b..3efc801e9ed4 100644 --- a/plugins/database/postgresql/postgresql-database-plugin/main.go +++ b/plugins/database/postgresql/postgresql-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
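A minimal usage sketch for the passwordAuthentication type introduced in passwordauthentication.go above; the caller shown here is hypothetical:

pa, err := parsePasswordAuthentication("scram-sha-256")
if err != nil {
	// err reads: "'<value>' is not a valid password authentication type"
	return err
}
if pa == passwordAuthenticationSCRAMSHA256 {
	// hash passwords before they are sent to PostgreSQL
}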
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index 44f4844a2230..ffe460f45c53 100644 --- a/plugins/database/postgresql/postgresql.go +++ b/plugins/database/postgresql/postgresql.go @@ -1,17 +1,22 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package postgresql import ( "context" + "crypto/tls" + "crypto/x509" "database/sql" + "encoding/pem" + "errors" "fmt" "regexp" "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/plugins/database/postgresql/scram" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" @@ -68,7 +73,8 @@ func new() *PostgreSQL { connProducer.Type = postgreSQLTypeName db := &PostgreSQL{ - SQLConnectionProducer: connProducer, + SQLConnectionProducer: connProducer, + passwordAuthentication: passwordAuthenticationPassword, } return db @@ -77,10 +83,65 @@ func new() *PostgreSQL { type PostgreSQL struct { *connutil.SQLConnectionProducer - usernameProducer template.StringTemplate + TLSCertificateData []byte `json:"tls_certificate" structs:"-" mapstructure:"tls_certificate"` + TLSPrivateKey []byte `json:"private_key" structs:"-" mapstructure:"private_key"` + TLSCAData []byte `json:"tls_ca" structs:"-" mapstructure:"tls_ca"` + + usernameProducer template.StringTemplate + passwordAuthentication passwordAuthentication } func (p *PostgreSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + sslcert, err := strutil.GetString(req.Config, "tls_certificate") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve tls_certificate: %w", err) + } + + sslkey, err := strutil.GetString(req.Config, "private_key") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve private_key: %w", err) + } + + sslrootcert, err := strutil.GetString(req.Config, "tls_ca") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve tls_ca: %w", err) + } + + useTLS := false + tlsConfig := &tls.Config{} + if sslrootcert != "" { + caCertPool := x509.NewCertPool() + if !caCertPool.AppendCertsFromPEM([]byte(sslrootcert)) { + return dbplugin.InitializeResponse{}, errors.New("unable to add CA to cert pool") + } + + tlsConfig.RootCAs = caCertPool + tlsConfig.ClientCAs = caCertPool + p.TLSConfig = tlsConfig + useTLS = true + } + + if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") { + return dbplugin.InitializeResponse{}, errors.New(`both "sslcert" and "sslkey" are required`) + } + + if sslcert != "" && sslkey != "" { + block, _ := pem.Decode([]byte(sslkey)) + + cert, err := tls.X509KeyPair([]byte(sslcert), pem.EncodeToMemory(block)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to load cert: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + p.TLSConfig = tlsConfig + useTLS = true + } + + if !useTLS { + // set to nil to flag that this connection does not use a custom TLS config + p.TLSConfig = nil + } + newConf, err := p.SQLConnectionProducer.Init(ctx, req.Config, req.VerifyConnection) if err != nil { return dbplugin.InitializeResponse{}, err @@ -105,6 +166,20 @@ func (p *PostgreSQL) Initialize(ctx context.Context, req 
dbplugin.InitializeRequ return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) } + passwordAuthenticationRaw, err := strutil.GetString(req.Config, "password_authentication") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve password_authentication: %w", err) + } + + if passwordAuthenticationRaw != "" { + pwAuthentication, err := parsePasswordAuthentication(passwordAuthenticationRaw) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + p.passwordAuthentication = pwAuthentication + } + resp := dbplugin.InitializeResponse{ Config: newConf, } @@ -124,6 +199,15 @@ func (p *PostgreSQL) getConnection(ctx context.Context) (*sql.DB, error) { return db.(*sql.DB), nil } +func (p *PostgreSQL) getStaticConnection(ctx context.Context, username, password string) (*sql.DB, error) { + db, err := p.StaticConnection(ctx, username, password) + if err != nil { + return nil, err + } + + return db, nil +} + func (p *PostgreSQL) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { if req.Username == "" { return dbplugin.UpdateUserResponse{}, fmt.Errorf("missing username") @@ -134,17 +218,17 @@ func (p *PostgreSQL) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequ merr := &multierror.Error{} if req.Password != nil { - err := p.changeUserPassword(ctx, req.Username, req.Password) + err := p.changeUserPassword(ctx, req.Username, req.Password, req.SelfManagedPassword) merr = multierror.Append(merr, err) } if req.Expiration != nil { - err := p.changeUserExpiration(ctx, req.Username, req.Expiration) + err := p.changeUserExpiration(ctx, req.Username, req.Expiration, req.SelfManagedPassword) merr = multierror.Append(merr, err) } return dbplugin.UpdateUserResponse{}, merr.ErrorOrNil() } -func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, changePass *dbplugin.ChangePassword) error { +func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, changePass *dbplugin.ChangePassword, selfManagedPass string) error { stmts := changePass.Statements.Commands if len(stmts) == 0 { stmts = []string{defaultChangePasswordStatement} @@ -158,9 +242,18 @@ func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, ch p.Lock() defer p.Unlock() - db, err := p.getConnection(ctx) - if err != nil { - return fmt.Errorf("unable to get connection: %w", err) + var db *sql.DB + var err error + if selfManagedPass == "" { + db, err = p.getConnection(ctx) + if err != nil { + return fmt.Errorf("unable to get connection: %w", err) + } + } else { + db, err = p.getStaticConnection(ctx, username, selfManagedPass) + if err != nil { + return fmt.Errorf("unable to get static connection from cache: %w", err) + } } // Check if the role exists @@ -188,6 +281,15 @@ func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, ch "username": username, "password": password, } + + if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { + hashedPassword, err := scram.Hash(password) + if err != nil { + return fmt.Errorf("unable to scram-sha256 password: %w", err) + } + m["password"] = hashedPassword + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { return fmt.Errorf("failed to execute query: %w", err) } @@ -201,7 +303,7 @@ func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, ch return nil } -func (p *PostgreSQL) changeUserExpiration(ctx context.Context, username string, changeExp 
*dbplugin.ChangeExpiration) error { +func (p *PostgreSQL) changeUserExpiration(ctx context.Context, username string, changeExp *dbplugin.ChangeExpiration, selfManagedPass string) error { p.Lock() defer p.Unlock() @@ -210,9 +312,18 @@ func (p *PostgreSQL) changeUserExpiration(ctx context.Context, username string, renewStmts = []string{defaultExpirationStatement} } - db, err := p.getConnection(ctx) - if err != nil { - return err + var db *sql.DB + var err error + if selfManagedPass == "" { + db, err = p.getConnection(ctx) + if err != nil { + return fmt.Errorf("unable to get connection: %w", err) + } + } else { + db, err = p.getStaticConnection(ctx, username, selfManagedPass) + if err != nil { + return fmt.Errorf("unable to get static connection from cache: %w", err) + } } tx, err := db.BeginTx(ctx, nil) @@ -272,15 +383,24 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( } defer tx.Rollback() + m := map[string]string{ + "name": username, + "username": username, + "password": req.Password, + "expiration": expirationStr, + } + + if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { + hashedPassword, err := scram.Hash(req.Password) + if err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("unable to scram-sha256 password: %w", err) + } + m["password"] = hashedPassword + } + for _, stmt := range req.Statements.Commands { if containsMultilineStatement(stmt) { // Execute it as-is. - m := map[string]string{ - "name": username, - "username": username, - "password": req.Password, - "expiration": expirationStr, - } if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) } @@ -293,12 +413,6 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( continue } - m := map[string]string{ - "name": username, - "username": username, - "password": req.Password, - "expiration": expirationStr, - } if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) } @@ -415,7 +529,7 @@ func (p *PostgreSQL) defaultDeleteUser(ctx context.Context, username string) err } revocationStmts = append(revocationStmts, fmt.Sprintf( `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`, - (schema), + dbutil.QuoteIdentifier(schema), dbutil.QuoteIdentifier(username))) revocationStmts = append(revocationStmts, fmt.Sprintf( diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go index a268f3712072..281c42990e18 100644 --- a/plugins/database/postgresql/postgresql_test.go +++ b/plugins/database/postgresql/postgresql_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
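When password_authentication is scram-sha-256, the postgresql.go hunks above swap a SCRAM verifier into the statement map in place of the plaintext password, so the raw secret is never sent to the server. A sketch of what the helper returns (salt and keys differ per call; the shape shown follows PostgreSQL's SCRAM-SHA-256 verifier encoding):

import "github.com/hashicorp/vault/plugins/database/postgresql/scram"

hashed, err := scram.Hash("somesecurepassword")
if err != nil {
	return fmt.Errorf("unable to scram-sha256 password: %w", err)
}
// hashed resembles: SCRAM-SHA-256$4096:<salt>$<stored key>:<server key>
m["password"] = hashed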
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package postgresql @@ -12,17 +12,21 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" "github.com/hashicorp/vault/helper/testhelpers/postgresql" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/helper/template" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func getPostgreSQL(t *testing.T, options map[string]interface{}) (*PostgreSQL, func()) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) connectionDetails := map[string]interface{}{ "connection_url": connURL, @@ -56,6 +60,463 @@ func TestPostgreSQL_Initialize(t *testing.T) { } } +// TestPostgreSQL_InitializeMultiHost tests the functionality of Postgres's +// multi-host connection strings. +func TestPostgreSQL_InitializeMultiHost(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainerMultiHost(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": 5, + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, req) + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + + if err := db.Close(); err != nil { + t.Fatalf("err: %s", err) + } +} + +// TestPostgreSQL_InitializeSSLInlineFeatureFlag tests that the VAULT_PLUGIN_USE_POSTGRES_SSLINLINE +// flag guards against unwanted usage of the deprecated SSL client authentication path. 
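The new InitializeMultiHost test above leans on libpq-style multi-host URLs, the same shape the long-running repmgr test removed at the bottom of this file built by hand. Illustrative value only:

// Both hosts are listed; with target_session_attrs=read-write the driver
// walks the list and settles on the writable primary.
connURL := "postgresql://{{username}}:{{password}}@host1:5432,host2:5432/postgres?target_session_attrs=read-write"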
+// TODO: remove this when we remove the underlying feature in a future SDK version +func TestPostgreSQL_InitializeSSLInlineFeatureFlag(t *testing.T) { + // set the flag to true so we can call PrepareTestContainerWithSSL + // which does a validation check on the connection + t.Setenv(pluginutil.PluginUsePostgresSSLInline, "true") + + // Create certificates for postgres authentication + caCert := certhelpers.NewCert(t, certhelpers.CommonName("ca"), certhelpers.IsCA(true), certhelpers.SelfSign()) + clientCert := certhelpers.NewCert(t, certhelpers.CommonName("postgres"), certhelpers.DNS("localhost"), certhelpers.Parent(caCert)) + cleanup, connURL := postgresql.PrepareTestContainerWithSSL(t, "verify-ca", caCert, clientCert, false) + t.Cleanup(cleanup) + + type testCase struct { + env string + wantErr bool + expectedError string + } + + tests := map[string]testCase{ + "feature flag is true": { + env: "true", + wantErr: false, + expectedError: "", + }, + "feature flag is unset or empty": { + env: "", + wantErr: true, + // this error is expected because the env var unset means we are + // using pgx's native connection string parsing which does not + // support inlining of the certificate material in the sslrootcert, + // sslcert, and sslkey fields + expectedError: "error verifying connection", + }, + "feature flag is false": { + env: "false", + wantErr: true, + expectedError: "failed to open postgres connection with deprecated funtion", + }, + "feature flag is invalid": { + env: "foo", + wantErr: true, + expectedError: "failed to open postgres connection with deprecated funtion", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // update the env var with the value we are testing + t.Setenv(pluginutil.PluginUsePostgresSSLInline, test.env) + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": 5, + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + _, err := dbtesting.VerifyInitialize(t, db, req) + if test.wantErr && err == nil { + t.Fatal("expected error, got nil") + } else if test.wantErr && !strings.Contains(err.Error(), test.expectedError) { + t.Fatalf("got: %s, want: %s", err.Error(), test.expectedError) + } + + if !test.wantErr && !db.Initialized { + t.Fatal("Database should be initialized") + } + + if err := db.Close(); err != nil { + t.Fatalf("err: %s", err) + } + // unset for the next test case + os.Unsetenv(pluginutil.PluginUsePostgresSSLInline) + }) + } +} + +// TestPostgreSQL_InitializeSSLInline tests that we can successfully authenticate +// with a postgres server via ssl with a URL connection string or DSN (key/value) +// for each ssl mode. 
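Several cases below flip useDSN, which converts the URL form into key/value DSN form via dbutil.ParseURL before initialization. Roughly (key order in the output may differ):

dsn, err := dbutil.ParseURL("postgres://vault:pw@localhost:5432/postgres?sslmode=verify-ca")
if err != nil {
	return err
}
// dsn resembles: dbname=postgres host=localhost password=pw port=5432 sslmode=verify-ca user=vault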
+// TODO: remove this when we remove the underlying feature in a future SDK version +func TestPostgreSQL_InitializeSSLInline(t *testing.T) { + // required to enable the sslinline custom parsing + t.Setenv(pluginutil.PluginUsePostgresSSLInline, "true") + + type testCase struct { + sslMode string + useDSN bool + useFallback bool + wantErr bool + expectedError string + } + + tests := map[string]testCase{ + "disable sslmode": { + sslMode: "disable", + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode": { + sslMode: "allow", + wantErr: false, + }, + "prefer sslmode": { + sslMode: "prefer", + wantErr: false, + }, + "require sslmode": { + sslMode: "require", + wantErr: false, + }, + "verify-ca sslmode": { + sslMode: "verify-ca", + wantErr: false, + }, + "disable sslmode with DSN": { + sslMode: "disable", + useDSN: true, + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode with DSN": { + sslMode: "allow", + useDSN: true, + wantErr: false, + }, + "prefer sslmode with DSN": { + sslMode: "prefer", + useDSN: true, + wantErr: false, + }, + "require sslmode with DSN": { + sslMode: "require", + useDSN: true, + wantErr: false, + }, + "verify-ca sslmode with DSN": { + sslMode: "verify-ca", + useDSN: true, + wantErr: false, + }, + "disable sslmode with fallback": { + sslMode: "disable", + useFallback: true, + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode with fallback": { + sslMode: "allow", + useFallback: true, + }, + "prefer sslmode with fallback": { + sslMode: "prefer", + useFallback: true, + }, + "require sslmode with fallback": { + sslMode: "require", + useFallback: true, + }, + "verify-ca sslmode with fallback": { + sslMode: "verify-ca", + useFallback: true, + }, + "disable sslmode with DSN with fallback": { + sslMode: "disable", + useDSN: true, + useFallback: true, + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode with DSN with fallback": { + sslMode: "allow", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "prefer sslmode with DSN with fallback": { + sslMode: "prefer", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "require sslmode with DSN with fallback": { + sslMode: "require", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "verify-ca sslmode with DSN with fallback": { + sslMode: "verify-ca", + useDSN: true, + useFallback: true, + wantErr: false, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Create certificates for postgres authentication + caCert := certhelpers.NewCert(t, certhelpers.CommonName("ca"), certhelpers.IsCA(true), certhelpers.SelfSign()) + clientCert := certhelpers.NewCert(t, certhelpers.CommonName("postgres"), certhelpers.DNS("localhost"), certhelpers.Parent(caCert)) + cleanup, connURL := postgresql.PrepareTestContainerWithSSL(t, test.sslMode, caCert, clientCert, test.useFallback) + t.Cleanup(cleanup) + + if test.useDSN { + var err error + connURL, err = dbutil.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + } + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": 5, + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + _, err := dbtesting.VerifyInitialize(t, db, req) + if test.wantErr && err == nil { + t.Fatal("expected error, got nil") + } else if test.wantErr && !strings.Contains(err.Error(), test.expectedError) { + 
t.Fatalf("got: %s, want: %s", err.Error(), test.expectedError) + } + + if !test.wantErr && !db.Initialized { + t.Fatal("Database should be initialized") + } + + if err := db.Close(); err != nil { + t.Fatalf("err: %s", err) + } + }) + } +} + +// TestPostgreSQL_InitializeSSL tests that we can successfully authenticate +// with a postgres server via ssl with a URL connection string or DSN (key/value) +// for each ssl mode. +func TestPostgreSQL_InitializeSSL(t *testing.T) { + type testCase struct { + sslMode string + useDSN bool + useFallback bool + wantErr bool + expectedError string + } + + tests := map[string]testCase{ + "disable sslmode": { + sslMode: "disable", + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode": { + sslMode: "allow", + wantErr: false, + }, + "prefer sslmode": { + sslMode: "prefer", + wantErr: false, + }, + "require sslmode": { + sslMode: "require", + wantErr: false, + }, + "verify-ca sslmode": { + sslMode: "verify-ca", + wantErr: false, + }, + "verify-full sslmode": { + sslMode: "verify-full", + wantErr: false, + }, + "disable sslmode with DSN": { + sslMode: "disable", + useDSN: true, + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode with DSN": { + sslMode: "allow", + useDSN: true, + wantErr: false, + }, + "prefer sslmode with DSN": { + sslMode: "prefer", + useDSN: true, + wantErr: false, + }, + "require sslmode with DSN": { + sslMode: "require", + useDSN: true, + wantErr: false, + }, + "verify-ca sslmode with DSN": { + sslMode: "verify-ca", + useDSN: true, + wantErr: false, + }, + "verify-full sslmode with DSN": { + sslMode: "verify-full", + useDSN: true, + wantErr: false, + }, + "disable sslmode with fallback": { + sslMode: "disable", + useFallback: true, + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode with fallback": { + sslMode: "allow", + useFallback: true, + }, + "prefer sslmode with fallback": { + sslMode: "prefer", + useFallback: true, + }, + "require sslmode with fallback": { + sslMode: "require", + useFallback: true, + }, + "verify-ca sslmode with fallback": { + sslMode: "verify-ca", + useFallback: true, + }, + "verify-full sslmode with fallback": { + sslMode: "verify-full", + useFallback: true, + }, + "disable sslmode with DSN with fallback": { + sslMode: "disable", + useDSN: true, + useFallback: true, + wantErr: true, + expectedError: "error verifying connection", + }, + "allow sslmode with DSN with fallback": { + sslMode: "allow", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "prefer sslmode with DSN with fallback": { + sslMode: "prefer", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "require sslmode with DSN with fallback": { + sslMode: "require", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "verify-ca sslmode with DSN with fallback": { + sslMode: "verify-ca", + useDSN: true, + useFallback: true, + wantErr: false, + }, + "verify-full sslmode with DSN with fallback": { + sslMode: "verify-full", + useDSN: true, + useFallback: true, + wantErr: false, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Create certificates for postgres authentication + caCert := certhelpers.NewCert(t, certhelpers.CommonName("ca"), certhelpers.IsCA(true), certhelpers.SelfSign()) + clientCert := certhelpers.NewCert(t, certhelpers.CommonName("postgres"), certhelpers.DNS("localhost"), certhelpers.Parent(caCert)) + cleanup, connURL := postgresql.PrepareTestContainerWithSSL(t, 
test.sslMode, caCert, clientCert, test.useFallback) + t.Cleanup(cleanup) + + if test.useDSN { + var err error + connURL, err = dbutil.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + } + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": 5, + "tls_certificate": string(clientCert.CombinedPEM()), + "private_key": string(clientCert.PrivateKeyPEM()), + "tls_ca": string(caCert.CombinedPEM()), + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + _, err := dbtesting.VerifyInitialize(t, db, req) + if test.wantErr && err == nil { + t.Fatal("expected error, got nil") + } else if test.wantErr && !strings.Contains(err.Error(), test.expectedError) { + t.Fatalf("got: %s, want: %s", err.Error(), test.expectedError) + } + + if !test.wantErr && !db.Initialized { + t.Fatal("Database should be initialized") + } + + if err := db.Close(); err != nil { + t.Fatalf("err: %s", err) + } + }) + } +} + func TestPostgreSQL_InitializeWithStringVals(t *testing.T) { db, cleanup := getPostgreSQL(t, map[string]interface{}{ "max_open_connections": "5", @@ -68,7 +529,7 @@ func TestPostgreSQL_InitializeWithStringVals(t *testing.T) { } func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) defer cleanup() dsnConnURL, err := dbutil.ParseURL(connURL) @@ -93,6 +554,276 @@ func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) { } } +// Ensures we can successfully initialize and connect to a CloudSQL database +// Requires the following: +// - GOOGLE_APPLICATION_CREDENTIALS either JSON or path to file +// - CONNECTION_URL to a valid Postgres instance on Google CloudSQL +func TestPostgreSQL_Initialize_CloudGCP(t *testing.T) { + envConnURL := "CONNECTION_URL" + connURL := os.Getenv(envConnURL) + if connURL == "" { + t.Skipf("env var %s not set, skipping test", envConnURL) + } + + credStr := dbtesting.GetGCPTestCredentials(t) + + type testCase struct { + req dbplugin.InitializeRequest + wantErr bool + expectedError string + } + + tests := map[string]testCase{ + "empty auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "", + }, + }, + }, + "invalid auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "invalid", + }, + }, + wantErr: true, + expectedError: "invalid auth_type", + }, + "default credentials": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": connutil.AuthTypeGCPIAM, + }, + VerifyConnection: true, + }, + }, + "JSON credentials": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": connutil.AuthTypeGCPIAM, + "service_account_json": credStr, + }, + VerifyConnection: true, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db := new() + defer dbtesting.AssertClose(t, db) + + _, err := dbtesting.VerifyInitialize(t, db, test.req) + + if test.wantErr { + if err == nil { + t.Fatalf("expected error but received nil") + } + + if !strings.Contains(err.Error(), test.expectedError) { + t.Fatalf("expected error %s, got %s", test.expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("expected no error, received %s", err) + } + + if 
!db.Initialized { + t.Fatal("Database should be initialized") + } + } + }) + } +} + +// TestPostgreSQL_Initialize_SelfManaged_OSS tests the initialization of +// the self-managed flow and ensures an error is returned on OSS. +func TestPostgreSQL_Initialize_SelfManaged_OSS(t *testing.T) { + if constants.IsEnterprise { + t.Skip("this test is only valid on OSS") + } + + cleanup, url := postgresql.PrepareTestContainerSelfManaged(t) + defer cleanup() + + connURL := fmt.Sprintf("postgresql://{{username}}:{{password}}@%s/postgres?sslmode=disable", url.Host) + + testCases := []struct { + name string + connectionDetails map[string]interface{} + wantErr bool + errContains string + }{ + { + name: "no parameters set", + connectionDetails: map[string]interface{}{ + "connection_url": connURL, + "self_managed": false, + "username": "", + "password": "", + }, + wantErr: true, + errContains: "must either provide username/password or set self-managed to 'true'", + }, + { + name: "both sets of parameters set", + connectionDetails: map[string]interface{}{ + "connection_url": connURL, + "self_managed": true, + "username": "test", + "password": "test", + }, + wantErr: true, + errContains: "cannot use both self-managed and vault-managed workflows", + }, + { + name: "either username/password with self-managed", + connectionDetails: map[string]interface{}{ + "connection_url": connURL, + "self_managed": true, + "username": "test", + "password": "", + }, + wantErr: true, + errContains: "cannot use both self-managed and vault-managed workflows", + }, + { + name: "cache not implemented", + connectionDetails: map[string]interface{}{ + "connection_url": connURL, + "self_managed": true, + "username": "", + "password": "", + }, + wantErr: true, + errContains: "self-managed static roles only available in Vault Enterprise", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := dbplugin.InitializeRequest{ + Config: tc.connectionDetails, + VerifyConnection: true, + } + + db := new() + _, err := dbtesting.VerifyInitialize(t, db, req) + if err == nil && tc.wantErr { + t.Fatalf("got: %s, wantErr: %t", err, tc.wantErr) + } + + if err != nil && !strings.Contains(err.Error(), tc.errContains) { + t.Fatalf("expected error: %s, received error: %s", tc.errContains, err) + } + + if !tc.wantErr && !db.Initialized { + t.Fatal("Database should be initialized") + } + + if err := db.Close(); err != nil { + t.Fatalf("err closing DB: %s", err) + } + }) + } +} + +// TestPostgreSQL_PasswordAuthentication tests that the default "password_authentication" is "password", and that +// an error is returned if an invalid "password_authentication" is provided.
+func TestPostgreSQL_PasswordAuthentication(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t) + defer cleanup() + + dsnConnURL, err := dbutil.ParseURL(connURL) + assert.NoError(t, err) + db := new() + + ctx := context.Background() + + t.Run("invalid-password-authentication", func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + "password_authentication": "invalid-password-authentication", + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + _, err := db.Initialize(ctx, req) + assert.EqualError(t, err, "'invalid-password-authentication' is not a valid password authentication type") + }) + + t.Run("default-is-none", func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + _ = dbtesting.AssertInitialize(t, db, req) + assert.Equal(t, passwordAuthenticationPassword, db.passwordAuthentication) + }) +} + +// TestPostgreSQL_PasswordAuthentication_SCRAMSHA256 tests that password_authentication works when set to scram-sha-256. +// When sending an encrypted password, the raw password should still successfully authenticate the user. +func TestPostgreSQL_PasswordAuthentication_SCRAMSHA256(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t) + defer cleanup() + + dsnConnURL, err := dbutil.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + "password_authentication": string(passwordAuthenticationSCRAMSHA256), + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + resp := dbtesting.AssertInitialize(t, db, req) + assert.Equal(t, string(passwordAuthenticationSCRAMSHA256), resp.Config["password_authentication"]) + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + + ctx := context.Background() + newUserRequest := dbplugin.NewUserRequest{ + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + } + newUserResponse, err := db.NewUser(ctx, newUserRequest) + + assertCredsExist(t, db.ConnectionURL, newUserResponse.Username, newUserRequest.Password) +} + func TestPostgreSQL_NewUser(t *testing.T) { type testCase struct { req dbplugin.NewUserRequest @@ -407,6 +1138,41 @@ func TestUpdateUser_Password(t *testing.T) { }) } +// TestUpdateUser_SelfManaged_OSS checks basic validation +// for self-managed fields and confirms an error is returned on OSS +func TestUpdateUser_SelfManaged_OSS(t *testing.T) { + if constants.IsEnterprise { + t.Skip("this test is only valid on OSS") + } + + // Shared test container for speed - there should not be any overlap between the tests + db, cleanup := getPostgreSQL(t, nil) + defer cleanup() + + updateReq := dbplugin.UpdateUserRequest{ + Username: "static", + Password: &dbplugin.ChangePassword{ + NewPassword: "somenewpassword", + Statements: dbplugin.Statements{ + Commands: nil, + }, + }, + SelfManagedPassword: "test", + } + + expectedErr := "self-managed static roles only available in Vault Enterprise" + + ctx, cancel := context.WithTimeout(context.Background(), 
5*time.Second) + defer cancel() + _, err := db.UpdateUser(ctx, updateReq) + if err == nil { + t.Fatalf("err expected, got nil") + } + if !strings.Contains(err.Error(), expectedErr) { + t.Fatalf("err expected: %s, got: %s", expectedErr, err) + } +} + func TestUpdateUser_Expiration(t *testing.T) { type testCase struct { initialExpiration time.Time @@ -912,7 +1678,7 @@ func TestUsernameGeneration(t *testing.T) { } func TestNewUser_CustomUsername(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) defer cleanup() type testCase struct { @@ -1008,137 +1774,84 @@ func TestNewUser_CustomUsername(t *testing.T) { } } -// This is a long-running integration test which tests the functionality of Postgres's multi-host -// connection strings. It uses two Postgres containers preconfigured with Replication Manager -// provided by Bitnami. This test currently does not run in CI and must be run manually. This is -// due to the test length, as it requires multiple sleep calls to ensure cluster setup and -// primary node failover occurs before the test steps continue. -// -// To run the test, set the environment variable POSTGRES_MULTIHOST_NET to the value of -// a docker network you've preconfigured, e.g. -// 'docker network create -d bridge postgres-repmgr' -// 'export POSTGRES_MULTIHOST_NET=postgres-repmgr' -func TestPostgreSQL_Repmgr(t *testing.T) { - _, exists := os.LookupEnv("POSTGRES_MULTIHOST_NET") - if !exists { - t.Skipf("POSTGRES_MULTIHOST_NET not set, skipping test") - } - - // Run two postgres-repmgr containers in a replication cluster - db0, runner0, url0, container0 := testPostgreSQL_Repmgr_Container(t, "psql-repl-node-0") - _, _, url1, _ := testPostgreSQL_Repmgr_Container(t, "psql-repl-node-1") - - ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) - defer cancel() - - time.Sleep(10 * time.Second) - - // Write a read role to the cluster - _, err := db0.NewUser(ctx, dbplugin.NewUserRequest{ - Statements: dbplugin.Statements{ - Commands: []string{ - `CREATE ROLE "ro" NOINHERIT; - GRANT SELECT ON ALL TABLES IN SCHEMA public TO "ro";`, - }, - }, - }) - if err != nil { - t.Fatalf("no error expected, got: %s", err) - } - - // Open a connection to both databases using the multihost connection string - connectionDetails := map[string]interface{}{ - "connection_url": fmt.Sprintf("postgresql://{{username}}:{{password}}@%s,%s/postgres?target_session_attrs=read-write", getHost(url0), getHost(url1)), - "username": "postgres", - "password": "secret", - } - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - - db := new() - dbtesting.AssertInitialize(t, db, req) - if !db.Initialized { - t.Fatal("Database should be initialized") +func TestNewUser_CloudGCP(t *testing.T) { + envConnURL := "CONNECTION_URL" + connURL := os.Getenv(envConnURL) + if connURL == "" { + t.Skipf("env var %s not set, skipping test", envConnURL) } - defer db.Close() - // Add a user to the cluster, then stop the primary container - if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { - t.Fatalf("no error expected, got: %s", err) - } - postgresql.StopContainer(t, ctx, runner0, container0) + credStr := dbtesting.GetGCPTestCredentials(t) - // Try adding a new user immediately - expect failure as the database - // cluster is still switching primaries - err = testPostgreSQL_Repmgr_AddUser(ctx, db) - if !strings.HasSuffix(err.Error(), "ValidateConnect failed (read only 
connection)") { - t.Fatalf("expected error was not received, got: %s", err) + type testCase struct { + usernameTemplate string + newUserData dbplugin.UsernameMetadata + expectedRegex string } - time.Sleep(20 * time.Second) - - // Try adding a new user again which should succeed after the sleep - // as the primary failover should have finished. Then, restart - // the first container which should become a secondary DB. - if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { - t.Fatalf("no error expected, got: %s", err) + tests := map[string]testCase{ + "default template": { + usernameTemplate: "", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^v-displayn-longrole-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "unique template": { + usernameTemplate: "foo-bar", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^foo-bar$", + }, } - postgresql.RestartContainer(t, ctx, runner0, container0) - time.Sleep(10 * time.Second) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + "auth_type": connutil.AuthTypeGCPIAM, + "service_account_json": credStr, + }, + VerifyConnection: true, + } - // A final new user to add, which should succeed after the secondary joins. - if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { - t.Fatalf("no error expected, got: %s", err) - } + db := new() - if err := db.Close(); err != nil { - t.Fatalf("err: %s", err) - } -} + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() -func testPostgreSQL_Repmgr_Container(t *testing.T, name string) (*PostgreSQL, *docker.Runner, string, string) { - envVars := []string{ - "REPMGR_NODE_NAME=" + name, - "REPMGR_NODE_NETWORK_NAME=" + name, - } + _, err := db.Initialize(ctx, initReq) + require.NoError(t, err) - runner, cleanup, connURL, containerID := postgresql.PrepareTestContainerRepmgr(t, name, "13.4.0", envVars) - t.Cleanup(cleanup) + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: test.newUserData, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "myReally-S3curePassword", + Expiration: time.Now().Add(1 * time.Hour), + } + ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() - connectionDetails := map[string]interface{}{ - "connection_url": connURL, - } - req := dbplugin.InitializeRequest{ - Config: connectionDetails, - VerifyConnection: true, - } - db := new() - dbtesting.AssertInitialize(t, db, req) - if !db.Initialized { - t.Fatal("Database should be initialized") - } + newUserResp, err := db.NewUser(ctx, newUserReq) + require.NoError(t, err) - if err := db.Close(); err != nil { - t.Fatalf("err: %s", err) + require.Regexp(t, test.expectedRegex, newUserResp.Username) + }) } - - return db, runner, connURL, containerID -} - -func testPostgreSQL_Repmgr_AddUser(ctx context.Context, db *PostgreSQL) error { - _, err := db.NewUser(ctx, dbplugin.NewUserRequest{ - Statements: dbplugin.Statements{ - Commands: []string{ - `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}' INHERIT; - GRANT ro TO "{{name}}";`, - }, - }, - }) 
- - return err } func getHost(url string) string { diff --git a/plugins/database/postgresql/scram/LICENSE b/plugins/database/postgresql/scram/LICENSE new file mode 100644 index 000000000000..cc36995f299f --- /dev/null +++ b/plugins/database/postgresql/scram/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Taishi Kasuga + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/plugins/database/postgresql/scram/scram.go b/plugins/database/postgresql/scram/scram.go new file mode 100644 index 000000000000..f5c6923cef66 --- /dev/null +++ b/plugins/database/postgresql/scram/scram.go @@ -0,0 +1,86 @@ +package scram + +// +// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/interfaces/libpq/fe-auth.c#L1167-L1285 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/interfaces/libpq/fe-auth-scram.c#L868-L905 +// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/port/pg_strong_random.c#L66-L96 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L160-L274 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L27-L85 + +// Implementation from https://github.com/supercaracal/scram-sha-256/blob/d3c05cd927770a11c6e12de3e3a99c3446a1f78d/main.go +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "io" + + "golang.org/x/crypto/pbkdf2" +) + +const ( + // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L36-L41 + saltSize = 16 + + // @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/include/common/sha2.h#L22 + digestLen = 32 + + // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L43-L47 + iterationCnt = 4096 +) + +var ( + clientRawKey = []byte("Client Key") + serverRawKey = []byte("Server Key") +) + +func genSalt(size int) ([]byte, error) { + salt := make([]byte, size) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + return nil, err + } + return salt, nil +} + +func encodeB64(src []byte) (dst []byte) { + dst = make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(dst, src) + return +} + +func getHMACSum(key, msg []byte) []byte { + h := hmac.New(sha256.New, key) + _, _ = 
h.Write(msg) + return h.Sum(nil) +} + +func getSHA256Sum(key []byte) []byte { + h := sha256.New() + _, _ = h.Write(key) + return h.Sum(nil) +} + +func hashPassword(rawPassword, salt []byte, iter, keyLen int) string { + digestKey := pbkdf2.Key(rawPassword, salt, iter, keyLen, sha256.New) + clientKey := getHMACSum(digestKey, clientRawKey) + storedKey := getSHA256Sum(clientKey) + serverKey := getHMACSum(digestKey, serverRawKey) + + return fmt.Sprintf("SCRAM-SHA-256$%d:%s$%s:%s", + iter, + string(encodeB64(salt)), + string(encodeB64(storedKey)), + string(encodeB64(serverKey)), + ) +} + +func Hash(password string) (string, error) { + salt, err := genSalt(saltSize) + if err != nil { + return "", err + } + + hashedPassword := hashPassword([]byte(password), salt, iterationCnt, digestLen) + return hashedPassword, nil +} diff --git a/plugins/database/postgresql/scram/scram_test.go b/plugins/database/postgresql/scram/scram_test.go new file mode 100644 index 000000000000..d2933ebbca40 --- /dev/null +++ b/plugins/database/postgresql/scram/scram_test.go @@ -0,0 +1,27 @@ +package scram + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestScram tests the Hash method. The hashed password string should have a SCRAM-SHA-256 prefix. +func TestScram(t *testing.T) { + tcs := map[string]struct { + Password string + }{ + "empty-password": {Password: ""}, + "simple-password": {Password: "password"}, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + got, err := Hash(tc.Password) + assert.NoError(t, err) + assert.True(t, strings.HasPrefix(got, "SCRAM-SHA-256$4096:")) + assert.Len(t, got, 133) + }) + } +} diff --git a/plugins/database/redshift/redshift-database-plugin/main.go b/plugins/database/redshift/redshift-database-plugin/main.go index 7fcd9b0b6487..010ddb036276 100644 --- a/plugins/database/redshift/redshift-database-plugin/main.go +++ b/plugins/database/redshift/redshift-database-plugin/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go index 11ce30a73f33..1e658edbcf58 100644 --- a/plugins/database/redshift/redshift.go +++ b/plugins/database/redshift/redshift.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package redshift diff --git a/plugins/database/redshift/redshift_test.go b/plugins/database/redshift/redshift_test.go index af264587b6cd..d68ae4565f78 100644 --- a/plugins/database/redshift/redshift_test.go +++ b/plugins/database/redshift/redshift_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package redshift @@ -161,7 +161,7 @@ func TestRedshift_NewUser(t *testing.T) { } usernameRegex := regexp.MustCompile("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$") - if !usernameRegex.Match([]byte(username)) { + if !usernameRegex.MatchString(username) { t.Fatalf("Expected username %q to match regex %q", username, usernameRegex.String()) } } diff --git a/scan.hcl b/scan.hcl index 7553139d17f3..918a450cde6e 100644 --- a/scan.hcl +++ b/scan.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 repository { go_modules = true @@ -15,7 +15,13 @@ repository { plugin "semgrep" { use_git_ignore = true exclude = ["vendor"] - config = ["tools/semgrep/ci", "p/r2c-security-audit"] + config = [ + "tools/semgrep/ci", + "p/r2c-security-audit", + "r/trailofbits.go.hanging-goroutine.hanging-goroutine", + "r/trailofbits.go.racy-append-to-slice.racy-append-to-slice", + "r/trailofbits.go.racy-write-to-map.racy-write-to-map", + ] exclude_rule = ["generic.html-templates.security.unquoted-attribute-var.unquoted-attribute-var"] } diff --git a/scripts/assetcheck.sh b/scripts/assetcheck.sh index d846dd5f9f4c..158f5bc4aa7f 100755 --- a/scripts/assetcheck.sh +++ b/scripts/assetcheck.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 if [[ ! -e http/web_ui/index.html ]] diff --git a/scripts/build.sh b/scripts/build.sh index 6b6df7a910b9..ae247e636bf8 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # # This script builds the application from source for multiple platforms. @@ -50,11 +50,13 @@ ${GO_CMD} build \ # Move all the compiled things to the $GOPATH/bin OLDIFS=$IFS -IFS=: MAIN_GOPATH=($GOPATH) +IFS=: FIRST=($GOPATH) BIN_PATH=${GOBIN:-${FIRST}/bin} IFS=$OLDIFS -rm -f ${MAIN_GOPATH}/bin/vault -cp bin/vault ${MAIN_GOPATH}/bin/ +# Ensure the go bin folder exists +mkdir -p ${BIN_PATH} +rm -f ${BIN_PATH}/vault +cp bin/vault ${BIN_PATH} # Done! echo diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh index 6c0eb62f5450..856a4391e3d3 100755 --- a/scripts/ci-helper.sh +++ b/scripts/ci-helper.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # The ci-helper is used to determine build metadata, build Vault binaries, @@ -11,86 +11,6 @@ set -euo pipefail # We don't want to get stuck in some kind of interactive pager export GIT_PAGER=cat -# Get the full version information -function version() { - local version - local prerelease - local metadata - - version=$(version_base) - prerelease=$(version_pre) - metadata=$(version_metadata) - - if [ -n "$metadata" ] && [ -n "$prerelease" ]; then - echo "$version-$prerelease+$metadata" - elif [ -n "$metadata" ]; then - echo "$version+$metadata" - elif [ -n "$prerelease" ]; then - echo "$version-$prerelease" - else - echo "$version" - fi -} - -# Get the base version -function version_base() { - : "${VAULT_VERSION:=""}" - - if [ -n "$VAULT_VERSION" ]; then - echo "$VAULT_VERSION" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Get the version major -function version_major() { - version_base | cut -d '.' -f 1 -} - -# Get the version minor -function version_minor() { - version_base | cut -d '.' -f 2 -} - -# Get the version patch -function version_patch() { - version_base | cut -d '.' 
-f 3 -} - -# Get the version pre-release -function version_pre() { - : "${VAULT_PRERELEASE:=""}" - - if [ -n "$VAULT_PRERELEASE" ]; then - echo "$VAULT_PRERELEASE" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Get the version metadata, which is commonly the edition -function version_metadata() { - : "${VAULT_METADATA:=""}" - - if [[ (-n "$VAULT_METADATA") && ("$VAULT_METADATA" != "oss") ]]; then - echo "$VAULT_METADATA" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Get the version formatted for Debian and RHEL packages -function version_package() { - version | awk '{ gsub("-","~",$1); print $1 }' -} - # Get the build date from the latest commit since it can be used across all # builds function build_date() { @@ -109,18 +29,38 @@ function repo() { basename -s .git "$(git config --get remote.origin.url)" } -# Determine the root directory of the repository -function repo_root() { - git rev-parse --show-toplevel -} - # Determine the artifact basename based on metadata function artifact_basename() { : "${PKG_NAME:="vault"}" : "${GOOS:=$(go env GOOS)}" : "${GOARCH:=$(go env GOARCH)}" + : "${VERSION_METADATA:="ce"}" + + : "${VERSION:=""}" + if [ -z "$VERSION" ]; then + echo "You must specify the VERSION variable for this command" >&2 + exit 1 + fi + + local version + version="$VERSION" + if [ "$VERSION_METADATA" != "ce" ]; then + version="${VERSION}+${VERSION_METADATA}" + fi + + echo "${PKG_NAME}_${version}_${GOOS}_${GOARCH}" +} + +# Bundle the dist directory into a zip +function bundle() { + : "${BUNDLE_PATH:=$(repo_root)/vault.zip}" + echo "--> Bundling dist/* to $BUNDLE_PATH..." + zip -r -j "$BUNDLE_PATH" dist/ +} - echo "${PKG_NAME}_$(version)_${GOOS}_${GOARCH}" +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel } # Build the UI @@ -132,50 +72,43 @@ function build_ui() { mkdir -p http/web_ui popd pushd "$repo_root/ui" - yarn install --ignore-optional + yarn install npm rebuild node-sass - yarn --verbose run build + yarn run build popd } # Build Vault function build() { - local version local revision - local prerelease local build_date local ldflags local msg # Get or set our basic build metadata - version=$(version_base) revision=$(build_revision) - metadata=$(version_metadata) - prerelease=$(version_pre) - build_date=$(build_date) + build_date=$(build_date) + : "${BIN_PATH:="dist/"}" # If not run by actions-go-build (enos local), set this explicitly : "${GO_TAGS:=""}" - : "${KEEP_SYMBOLS:=""}" + : "${REMOVE_SYMBOLS:=""}" + + (unset GOOS; unset GOARCH; go generate ./...) # Build our ldflags - msg="--> Building Vault v$version, revision $revision, built $build_date" + msg="--> Building Vault revision $revision, built $build_date..."
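+ + # For illustration only (hypothetical revision and date), the ldflags + # assembled below expand to something like: + #   -X github.com/hashicorp/vault/version.GitCommit=abc1234 + #   -X github.com/hashicorp/vault/version.BuildDate=2024-06-01T00:00:00Z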
- # Strip the symbol and dwarf information by default - if [ -n "$KEEP_SYMBOLS" ]; then - ldflags="" - else + # Keep the symbol and dwarf information by default + if [ -n "$REMOVE_SYMBOLS" ]; then ldflags="-s -w " + else + ldflags="" fi - ldflags="${ldflags}-X github.com/hashicorp/vault/version.Version=$version -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date" + ldflags="${ldflags} -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date" - if [ -n "$prerelease" ]; then - msg="${msg}, prerelease ${prerelease}" - ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionPrerelease=$prerelease" - fi - - if [ -n "$metadata" ]; then - msg="${msg}, metadata ${metadata}" - ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$metadata" + if [[ ${VERSION_METADATA+x} ]]; then + msg="${msg}, metadata ${VERSION_METADATA}" + ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$VERSION_METADATA" fi # Build vault @@ -184,20 +117,14 @@ function build() { mkdir -p dist mkdir -p out set -x + go env go build -v -tags "$GO_TAGS" -ldflags "$ldflags" -o dist/ set +x popd } -# Bundle the dist directory into a zip -function bundle() { - : "${BUNDLE_PATH:=$(repo_root)/vault.zip}" - echo "--> Bundling dist/* to $BUNDLE_PATH" - zip -r -j "$BUNDLE_PATH" dist/ -} - -# Prepare legal requirements for packaging -function prepare_legal() { +# ENT: Prepare legal requirements for packaging +function prepare_ent_legal() { : "${PKG_NAME:="vault"}" pushd "$(repo_root)" @@ -210,47 +137,25 @@ function prepare_legal() { popd } -# Determine the matrix group number that we'll select for execution. If the -# MATRIX_TEST_GROUP environment variable has set then it will always return -# that value. If has not been set, we will randomly select a number between 1 -# and the value of MATRIX_MAX_TEST_GROUPS. -function matrix_group_id() { - : "${MATRIX_TEST_GROUP:=""}" - if [ -n "$MATRIX_TEST_GROUP" ]; then - echo "$MATRIX_TEST_GROUP" - return - fi +# CE: Prepare legal requirements for packaging +function prepare_ce_legal() { + : "${PKG_NAME:="vault"}" - : "${MATRIX_MAX_TEST_GROUPS:=1}" - awk -v min=1 -v max=$MATRIX_MAX_TEST_GROUPS 'BEGIN{srand(); print int(min+rand()*(max-min+1))}' -} + pushd "$(repo_root)" -# Filter matrix file reads in the contents of MATRIX_FILE and filters out -# scenarios that are not in the current test group and/or those that have not -# met minimux or maximum version requirements. -function matrix_filter_file() { - : "${MATRIX_FILE:=""}" - if [ -z "$MATRIX_FILE" ]; then - echo "You must specify the MATRIX_FILE variable for this command" >&2 - exit 1 - fi + mkdir -p dist + cp LICENSE dist/LICENSE.txt + + mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME" + cp LICENSE ".release/linux/package/usr/share/doc/$PKG_NAME/LICENSE.txt" - : "${MATRIX_TEST_GROUP:=$(matrix_group_id)}" - - local path - local matrix - path=$(readlink -f $MATRIX_FILE) - matrix=$(cat "$path" | jq ".include | - map(. | - select( - ((.min_minor_version == null) or (.min_minor_version <= $(version_minor))) and - ((.max_minor_version == null) or (.max_minor_version >= $(version_minor))) and - ((.test_group == null) or (.test_group == $MATRIX_TEST_GROUP)) - ) - )" - ) - - echo "{\"include\":$matrix}" | jq -c . + popd +} + +# Package version converts a vault version string into a compatible representation for system +# packages. 
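+# For example, a VAULT_VERSION of "1.15.0-rc1" becomes "1.15.0~rc1", since "~" +# sorts before the final release in both Debian and RHEL package version ordering.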
+function version_package() { + awk '{ gsub("-","~",$1); print $1 }' <<< "$VAULT_VERSION" } # Run the CI Helper @@ -271,42 +176,18 @@ function main() { date) build_date ;; - prepare-legal) - prepare_legal - ;; - matrix-filter-file) - matrix_filter_file + prepare-ent-legal) + prepare_ent_legal ;; - matrix-group-id) - matrix_group_id + prepare-ce-legal) + prepare_ce_legal ;; revision) build_revision ;; - version) - version - ;; - version-base) - version_base - ;; - version-pre) - version_pre - ;; - version-major) - version_major - ;; - version-meta) - version_metadata - ;; - version-minor) - version_minor - ;; version-package) version_package ;; - version-patch) - version_patch - ;; *) echo "unknown sub-command" >&2 exit 1 diff --git a/scripts/copywrite-exceptions.sh b/scripts/copywrite-exceptions.sh new file mode 100755 index 000000000000..0e55acb400d1 --- /dev/null +++ b/scripts/copywrite-exceptions.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +# Used as a stopgap for the copywrite bot in MPL-licensed subdirs: detects BUSL-licensed +# headers and deletes them, then runs the copywrite bot so the local subdir config +# injects the correct headers. + +find . -type f -name '*.go' | while read -r line; do + if grep -q "SPDX-License-Identifier: BUSL-1.1" "$line"; then + sed -i '/SPDX-License-Identifier: BUSL-1.1/d' "$line" + sed -i '/Copyright (c) HashiCorp, Inc./d' "$line" + fi +done + +copywrite headers --plan diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 7f5d49e534de..90ade0e2d32c 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -1,6 +1,6 @@ #!/bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # Generate test coverage statistics for Go packages. # diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index d631383a0193..02b18961dd63 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 FROM debian:buster @@ -15,7 +15,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ libltdl-dev \ libltdl7 -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_20.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/scripts/deprecations-checker.sh b/scripts/deprecations-checker.sh new file mode 100755 index 000000000000..b63ab905d776 --- /dev/null +++ b/scripts/deprecations-checker.sh @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This script is sourced into the shell running in a Github Actions workflow. + +# Usage: +# To check deprecations locally using the script, follow these steps: +# From the repository root or within a package folder, execute deprecations-checker.sh +# Optionally: to only show deprecations in changed files between the current branch and +# a specific branch, pass the other branch name as an argument to the script. +# +# For example: +# ./scripts/deprecations-checker.sh (or) make deprecations +# ./scripts/deprecations-checker.sh main (or) make ci-deprecations +# +# If no branch name is specified, the command will show all usage of deprecations in the code. +# +# GitHub Actions runs this against the PR's base ref branch.
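+# +# Hypothetical example of a local run (file and symbol names invented; output +# shape assumed from staticcheck's usual "file:line:col: message (check)" format): +#   $ ./scripts/deprecations-checker.sh main +#   ==> Performing deprecations check: running staticcheck... +#   foo/bar.go:52:10: x.Old is deprecated: use x.New instead. (SA1019)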
+ +# Staticcheck uses static analysis to find bugs and performance issues, offers simplifications, +# and enforces style rules. +# Here, it is used to check whether a deprecated function, variable, constant, or field is used. + +# Run staticcheck +set -e +echo "==> Performing deprecations check: running staticcheck..." + + +# If no compare branch name is specified, output all deprecations +# Else only output the deprecations from the changes added +if [ -z "$1" ] + then + staticcheck -checks="SA1019" -tags="$BUILD_TAGS" + else + # GitHub Actions will use this to find only changes with respect to the PR's base ref branch + # The revgrep CLI tool returns an exit status of 1 if any issues match, else it returns 0 + staticcheck -checks="SA1019" -tags="$BUILD_TAGS" 2>&1 | revgrep origin/"$1" fi diff --git a/scripts/deps_upgrade.py b/scripts/deps_upgrade.py deleted file mode 100644 index edd1b52f1039..000000000000 --- a/scripts/deps_upgrade.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -import os -import sys - -filename = sys.argv[1] -with open(filename) as f: - content = f.readlines() - for l in content: - name = l.split()[0] - print(name) - os.system("go get " + name + "@latest") \ No newline at end of file diff --git a/scripts/dist.sh b/scripts/dist.sh index fc605d4fdd99..0431bcc61286 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index ceb6ec6ee73d..c975e445b497 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # Multi-stage builder to avoid polluting users environment with wrong # architecture binaries. diff --git a/scripts/docker/Dockerfile.ui b/scripts/docker/Dockerfile.ui index b13f0fe1fbd4..20f6cd1b1750 100644 --- a/scripts/docker/Dockerfile.ui +++ b/scripts/docker/Dockerfile.ui @@ -19,7 +19,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ libltdl-dev \ libltdl7 -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_20.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/scripts/docker/docker-entrypoint.sh b/scripts/docker/docker-entrypoint.sh index 2b9b8f35a160..a3b581697c35 100755 --- a/scripts/docker/docker-entrypoint.sh +++ b/scripts/docker/docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/usr/bin/dumb-init /bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/scripts/gen_openapi.sh b/scripts/gen_openapi.sh index 2316ae4502d2..b0c3bba1ee77 100755 --- a/scripts/gen_openapi.sh +++ b/scripts/gen_openapi.sh @@ -1,6 +1,6 @@ #!/bin/bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e @@ -24,12 +24,16 @@ then fi vault server -dev -dev-root-token-id=root & -sleep 5 VAULT_PID=$! + +# Allow time for Vault to start its HTTP listener +sleep 1 + defer_stop_vault() { echo "Stopping Vault..."
kill $VAULT_PID + # Allow time for Vault to print final logging and exit + # before this script ends and the shell prints its next prompt sleep 1 } @@ -37,14 +41,19 @@ trap defer_stop_vault INT TERM EXIT export VAULT_ADDR=http://127.0.0.1:8200 -echo "Mounting all builtin plugins..." +echo "Unmounting the default kv-v2 secrets engine ..." + +# Unmount the default kv-v2 engine so that we can remount it at 'kv-v2/' later. +# The mount path will be reflected in the resultant OpenAPI document. +vault secrets disable "secret/" + +echo "Mounting all builtin plugins ..." # Enable auth plugins vault auth enable "alicloud" vault auth enable "approle" vault auth enable "aws" vault auth enable "azure" -vault auth enable "centrify" vault auth enable "cert" vault auth enable "cf" vault auth enable "gcp" @@ -54,14 +63,11 @@ vault auth enable "kerberos" vault auth enable "kubernetes" vault auth enable "ldap" vault auth enable "oci" -vault auth enable "oidc" vault auth enable "okta" -vault auth enable "pcf" vault auth enable "radius" vault auth enable "userpass" # Enable secrets plugins -vault secrets enable "ad" vault secrets enable "alicloud" vault secrets enable "aws" vault secrets enable "azure" @@ -70,7 +76,8 @@ vault secrets enable "database" vault secrets enable "gcp" vault secrets enable "gcpkms" vault secrets enable "kubernetes" -vault secrets enable "kv" +vault secrets enable -path="kv-v1/" -version=1 "kv" +vault secrets enable -path="kv-v2/" -version=2 "kv" vault secrets enable "ldap" vault secrets enable "mongodbatlas" vault secrets enable "nomad" @@ -83,11 +90,10 @@ vault secrets enable "transit" # Enable enterprise features if [[ -n "${VAULT_LICENSE:-}" ]]; then - vault write sys/license text="${VAULT_LICENSE}" - vault secrets enable "keymgmt" vault secrets enable "kmip" vault secrets enable "transform" + vault auth enable "saml" fi # Output OpenAPI, optionally formatted diff --git a/scripts/go-helper.sh b/scripts/go-helper.sh new file mode 100755 index 000000000000..27fc0151cb57 --- /dev/null +++ b/scripts/go-helper.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -euo pipefail + +# Perform Go formatting checks with gofumpt. +check_fmt() { + echo "==> Checking code formatting..." + + declare -a malformed=() + IFS=" " read -r -a files <<< "$(tr '\n' ' ' <<< "$@")" + if [ -n "${files+set}" ] && [[ "${files[0]}" != "" ]]; then + echo "--> Checking changed files..." + for file in "${files[@]}"; do + if [ ! -f "$file" ]; then + echo "--> $file no longer exists ⚠" + continue + fi + + if echo "$file" | grep -v pb.go | grep -v vendor > /dev/null; then + local output + if ! output=$(gofumpt -l "$file") || [ "$output" != "" ]; then + echo "--> ${file} ✖" + malformed+=("$file") + continue + fi + fi + + echo "--> ${file} ✔" + done + else + echo "--> Checking all files..." + IFS=" " read -r -a malformed <<< "$(find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -l)" + fi + + if [ "${#malformed[@]}" -ne 0 ] && [ -n "${malformed[0]}" ] ; then + echo "--> The following files need to be reformatted with gofumpt" + printf '%s\n' "${malformed[@]}" + echo "Run \`make fmt\` to reformat code." + for file in "${malformed[@]}"; do + gofumpt -w "$file" + git diff --no-color "$file" + done + exit 1 + fi +} + +# Check that the Go toolchain meets minimum version requirements. +check_version() { + GO_CMD=${GO_CMD:-go} + + GO_VERSION_MIN=$1 + echo "==> Checking that build is using go version >= $1..."
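+  # Example (illustrative values): with GO_VERSION_MIN=1.21.0 and a toolchain +  # reporting "go version go1.22.1 linux/amd64", the grep/tr pipeline below +  # yields GO_VERSION=1.22.1, which passes the component-wise check.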
+ + if $GO_CMD version | grep -q devel; then + GO_VERSION="devel" + else + GO_VERSION=$($GO_CMD version | grep -o 'go[0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?' | tr -d 'go') + + IFS="." read -r -a GO_VERSION_ARR <<< "$GO_VERSION" + IFS="." read -r -a GO_VERSION_REQ <<< "$GO_VERSION_MIN" + + if [[ ${GO_VERSION_ARR[0]} -lt ${GO_VERSION_REQ[0]} || + ( ${GO_VERSION_ARR[0]} -eq ${GO_VERSION_REQ[0]} && + ( ${GO_VERSION_ARR[1]} -lt ${GO_VERSION_REQ[1]} || + ( ${GO_VERSION_ARR[1]} -eq ${GO_VERSION_REQ[1]} && ${GO_VERSION_ARR[2]} -lt ${GO_VERSION_REQ[2]} ))) + ]]; then + echo "Vault requires go $GO_VERSION_MIN to build; found $GO_VERSION." + exit 1 + fi + fi + + echo "--> Using go version $GO_VERSION..." +} + +# Download all the modules for all go.mod's defined in the project. +mod_download() { + while IFS= read -r -d '' mod; do + echo "==> Downloading Go modules for $mod to $(go env GOMODCACHE)..." + pushd "$(dirname "$mod")" > /dev/null || (echo "failed to push into module dir" && exit 1) + GOOS=linux GOARCH=amd64 go mod download -x + popd > /dev/null || (echo "failed to pop out of module dir" && exit 1) + done < <(find . -type f -name go.mod -print0) +} + +# Tidy all the go.mod's defined in the project. +mod_tidy() { + while IFS= read -r -d '' mod; do + echo "==> Tidying $mod..." + pushd "$(dirname "$mod")" > /dev/null || (echo "failed to push into module dir" && exit 1) + GOOS=linux GOARCH=amd64 go mod tidy + popd > /dev/null || (echo "failed to pop out of module dir" && exit 1) + done < <(find . -type f -name go.mod -print0) +} + +main() { + case $1 in + mod-download) + mod_download + ;; + mod-tidy) + mod_tidy + ;; + check-fmt) + check_fmt "${@:2}" + ;; + check-version) + check_version "$2" + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh deleted file mode 100755 index 5c58f178558b..000000000000 --- a/scripts/gofmtcheck.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -echo "==> Checking that code complies with gofmt requirements..." - -gofmt_files=$(gofmt -l `find . -name '*.go' | grep -v vendor`) -if [[ -n ${gofmt_files} ]]; then - echo 'gofmt needs running on the following files:' - echo "${gofmt_files}" - echo "You can use the command: \`make fmt\` to reformat code." - exit 1 -fi diff --git a/scripts/goversioncheck.sh b/scripts/goversioncheck.sh deleted file mode 100755 index 7ee7422581e5..000000000000 --- a/scripts/goversioncheck.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -GO_CMD=${GO_CMD:-go} - -GO_VERSION_MIN=$1 -echo "==> Checking that build is using go version >= $1..." - -if $GO_CMD version | grep -q devel; -then - GO_VERSION="devel" -else - GO_VERSION=$($GO_CMD version | grep -o 'go[0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?' | tr -d 'go') - - IFS="." read -r -a GO_VERSION_ARR <<< "$GO_VERSION" - IFS="." read -r -a GO_VERSION_REQ <<< "$GO_VERSION_MIN" - - if [[ ${GO_VERSION_ARR[0]} -lt ${GO_VERSION_REQ[0]} || - ( ${GO_VERSION_ARR[0]} -eq ${GO_VERSION_REQ[0]} && - ( ${GO_VERSION_ARR[1]} -lt ${GO_VERSION_REQ[1]} || - ( ${GO_VERSION_ARR[1]} -eq ${GO_VERSION_REQ[1]} && ${GO_VERSION_ARR[2]} -lt ${GO_VERSION_REQ[2]} ))) - ]]; then - echo "Vault requires go $GO_VERSION_MIN to build; found $GO_VERSION." - exit 1 - fi -fi - -echo "==> Using go version $GO_VERSION..." 
diff --git a/scripts/protocversioncheck.sh b/scripts/protocversioncheck.sh deleted file mode 100755 index a2cbc6cc3f27..000000000000 --- a/scripts/protocversioncheck.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -euo pipefail - -PROTOC_CMD=${PROTOC_CMD:-protoc} -PROTOC_VERSION_EXACT="$1" -echo "==> Checking that protoc is at version $1..." - -PROTOC_VERSION=$($PROTOC_CMD --version | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+') - -if [ "$PROTOC_VERSION" == "$PROTOC_VERSION_EXACT" ]; then - echo "Using protoc version $PROTOC_VERSION" -else - echo "protoc should be at $PROTOC_VERSION_EXACT; found $PROTOC_VERSION." - echo "If your version is higher than the version this script is looking for, updating the Makefile with the newer version." - exit 1 -fi diff --git a/scripts/semgrep_plugin_repos.sh b/scripts/semgrep_plugin_repos.sh index 6dc7407320ca..1f70763feacb 100755 --- a/scripts/semgrep_plugin_repos.sh +++ b/scripts/semgrep_plugin_repos.sh @@ -1,6 +1,6 @@ #!/bin/sh # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/scripts/testciphers.sh b/scripts/testciphers.sh index f9684f570bba..89c1e9304334 100755 --- a/scripts/testciphers.sh +++ b/scripts/testciphers.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # Adapted from https://superuser.com/a/224263 diff --git a/scripts/update_deps.sh b/scripts/update_deps.sh deleted file mode 100755 index f491b7e69bc4..000000000000 --- a/scripts/update_deps.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - - -set -e - -TOOL=vault - -## Make a temp dir -tempdir=$(mktemp -d update-${TOOL}-deps.XXXXXX) - -## Set paths -export GOPATH="$(pwd)/${tempdir}" -export PATH="${GOPATH}/bin:${PATH}" -cd $tempdir - -## Get Vault -mkdir -p src/github.com/hashicorp -cd src/github.com/hashicorp -echo "Fetching ${TOOL}..." -git clone https://github.com/hashicorp/${TOOL} -cd ${TOOL} - -## Clean out earlier vendoring -rm -rf Godeps vendor - -## Get govendor -go get github.com/kardianos/govendor - -## Init -govendor init - -## Fetch deps -echo "Fetching deps, will take some time..." -govendor fetch -v +missing - -# Clean up after the logrus mess -govendor remove -v github.com/Sirupsen/logrus -cd vendor -find -type f | grep '.go' | xargs sed -i -e 's/Sirupsen/sirupsen/' - -# Need the v2 branch for Azure -govendor fetch -v github.com/coreos/go-oidc@v2 - -# Need the v3 branch for dockertest -govendor fetch -v github.com/ory/dockertest@v3 - -# Current influx master is alpha, pin to v1.7.3 -govendor fetch github.com/influxdata/influxdb/client/v2@v1.7.4 -govendor fetch github.com/influxdata/influxdb/models@v1.7.4 -govendor fetch github.com/influxdata/influxdb/pkg/escape@v1.7.4 - -# Current circonus needs v3 -grep circonus-gometrics vendor.json | cut -d '"' -f 4 | while read -r i; do govendor fetch $i@v2; done - -# API breakage -govendor fetch github.com/satori/go.uuid@f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 - -echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n" diff --git a/scripts/update_plugin_modules.sh b/scripts/update_plugin_modules.sh index 2a300f3bc846..7d88f04ade7d 100755 --- a/scripts/update_plugin_modules.sh +++ b/scripts/update_plugin_modules.sh @@ -1,6 +1,6 @@ #!/bin/sh # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 set -e diff --git a/sdk/.copywrite.hcl b/sdk/.copywrite.hcl new file mode 100644 index 000000000000..c4b09f33640c --- /dev/null +++ b/sdk/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/sdk/LICENSE b/sdk/LICENSE new file mode 100644 index 000000000000..f4f97ee5853a --- /dev/null +++ b/sdk/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go index 62964c7d15d6..825813ec9349 100644 --- a/sdk/database/dbplugin/database.pb.go +++ b/sdk/database/dbplugin/database.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/database/dbplugin/database.proto package dbplugin @@ -24,7 +24,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. type InitializeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -370,19 +370,19 @@ type Statements struct { // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. 
CreationStatements string `protobuf:"bytes,1,opt,name=creation_statements,json=creationStatements,proto3" json:"creation_statements,omitempty"` // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. RevocationStatements string `protobuf:"bytes,2,opt,name=revocation_statements,json=revocationStatements,proto3" json:"revocation_statements,omitempty"` // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. RollbackStatements string `protobuf:"bytes,3,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. RenewStatements string `protobuf:"bytes,4,opt,name=renew_statements,json=renewStatements,proto3" json:"renew_statements,omitempty"` Creation []string `protobuf:"bytes,5,rep,name=creation,proto3" json:"creation,omitempty"` Revocation []string `protobuf:"bytes,6,rep,name=revocation,proto3" json:"revocation,omitempty"` @@ -423,7 +423,7 @@ func (*Statements) Descriptor() ([]byte, []int) { return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{6} } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetCreationStatements() string { if x != nil { return x.CreationStatements @@ -431,7 +431,7 @@ func (x *Statements) GetCreationStatements() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetRevocationStatements() string { if x != nil { return x.RevocationStatements @@ -439,7 +439,7 @@ func (x *Statements) GetRevocationStatements() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetRollbackStatements() string { if x != nil { return x.RollbackStatements @@ -447,7 +447,7 @@ func (x *Statements) GetRollbackStatements() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. 
func (x *Statements) GetRenewStatements() string { if x != nil { return x.RenewStatements @@ -1180,7 +1180,7 @@ func file_sdk_database_dbplugin_database_proto_rawDescGZIP() []byte { } var file_sdk_database_dbplugin_database_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_sdk_database_dbplugin_database_proto_goTypes = []interface{}{ +var file_sdk_database_dbplugin_database_proto_goTypes = []any{ (*InitializeRequest)(nil), // 0: dbplugin.InitializeRequest (*InitRequest)(nil), // 1: dbplugin.InitRequest (*CreateUserRequest)(nil), // 2: dbplugin.CreateUserRequest @@ -1242,7 +1242,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_database_dbplugin_database_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*InitializeRequest); i { case 0: return &v.state @@ -1254,7 +1254,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*InitRequest); i { case 0: return &v.state @@ -1266,7 +1266,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CreateUserRequest); i { case 0: return &v.state @@ -1278,7 +1278,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*RenewUserRequest); i { case 0: return &v.state @@ -1290,7 +1290,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*RevokeUserRequest); i { case 0: return &v.state @@ -1302,7 +1302,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*RotateRootCredentialsRequest); i { case 0: return &v.state @@ -1314,7 +1314,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*Statements); i { case 0: return &v.state @@ -1326,7 +1326,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*UsernameConfig); i { case 0: return &v.state @@ -1338,7 +1338,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - 
file_sdk_database_dbplugin_database_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*InitResponse); i { case 0: return &v.state @@ -1350,7 +1350,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*CreateUserResponse); i { case 0: return &v.state @@ -1362,7 +1362,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*TypeResponse); i { case 0: return &v.state @@ -1374,7 +1374,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*RotateRootCredentialsResponse); i { case 0: return &v.state @@ -1386,7 +1386,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state @@ -1398,7 +1398,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GenerateCredentialsResponse); i { case 0: return &v.state @@ -1410,7 +1410,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*StaticUserConfig); i { case 0: return &v.state @@ -1422,7 +1422,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*SetCredentialsRequest); i { case 0: return &v.state @@ -1434,7 +1434,7 @@ func file_sdk_database_dbplugin_database_proto_init() { return nil } } - file_sdk_database_dbplugin_database_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_database_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*SetCredentialsResponse); i { case 0: return &v.state diff --git a/sdk/database/dbplugin/database.proto b/sdk/database/dbplugin/database.proto index ed2925278989..e32f34d221f4 100644 --- a/sdk/database/dbplugin/database.proto +++ b/sdk/database/dbplugin/database.proto @@ -3,117 +3,117 @@ syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin"; - package dbplugin; import "google/protobuf/timestamp.proto"; +option go_package = 
"github.com/hashicorp/vault/sdk/database/dbplugin"; + message InitializeRequest { - option deprecated = true; - bytes config = 1; - bool verify_connection = 2; + option deprecated = true; + bytes config = 1; + bool verify_connection = 2; } message InitRequest { - bytes config = 1; - bool verify_connection = 2; + bytes config = 1; + bool verify_connection = 2; } message CreateUserRequest { - Statements statements = 1; - UsernameConfig username_config = 2; - google.protobuf.Timestamp expiration = 3; + Statements statements = 1; + UsernameConfig username_config = 2; + google.protobuf.Timestamp expiration = 3; } message RenewUserRequest { - Statements statements = 1; - string username = 2; - google.protobuf.Timestamp expiration = 3; + Statements statements = 1; + string username = 2; + google.protobuf.Timestamp expiration = 3; } message RevokeUserRequest { - Statements statements = 1; - string username = 2; + Statements statements = 1; + string username = 2; } message RotateRootCredentialsRequest { - repeated string statements = 1; + repeated string statements = 1; } message Statements { - // DEPRECATED, will be removed in 0.12 - string creation_statements = 1 [deprecated=true]; - // DEPRECATED, will be removed in 0.12 - string revocation_statements = 2 [deprecated=true]; - // DEPRECATED, will be removed in 0.12 - string rollback_statements = 3 [deprecated=true]; - // DEPRECATED, will be removed in 0.12 - string renew_statements = 4 [deprecated=true]; - - repeated string creation = 5; - repeated string revocation = 6; - repeated string rollback = 7; - repeated string renewal = 8; - repeated string rotation = 9; + // DEPRECATED, will be removed in 0.12 + string creation_statements = 1 [deprecated = true]; + // DEPRECATED, will be removed in 0.12 + string revocation_statements = 2 [deprecated = true]; + // DEPRECATED, will be removed in 0.12 + string rollback_statements = 3 [deprecated = true]; + // DEPRECATED, will be removed in 0.12 + string renew_statements = 4 [deprecated = true]; + + repeated string creation = 5; + repeated string revocation = 6; + repeated string rollback = 7; + repeated string renewal = 8; + repeated string rotation = 9; } message UsernameConfig { - string DisplayName = 1; - string RoleName = 2; + string DisplayName = 1; + string RoleName = 2; } message InitResponse { - bytes config = 1; + bytes config = 1; } message CreateUserResponse { - string username = 1; - string password = 2; + string username = 1; + string password = 2; } message TypeResponse { - string type = 1; + string type = 1; } message RotateRootCredentialsResponse { - bytes config = 1; + bytes config = 1; } message Empty {} message GenerateCredentialsResponse { - string password = 1; + string password = 1; } -message StaticUserConfig{ - string username = 1; - string password = 2; - bool create = 3; +message StaticUserConfig { + string username = 1; + string password = 2; + bool create = 3; } message SetCredentialsRequest { - Statements statements = 1; - StaticUserConfig static_user_config = 2; + Statements statements = 1; + StaticUserConfig static_user_config = 2; } message SetCredentialsResponse { - string username = 1; - string password = 2; + string username = 1; + string password = 2; } service Database { - rpc Type(Empty) returns (TypeResponse); - rpc CreateUser(CreateUserRequest) returns (CreateUserResponse); - rpc RenewUser(RenewUserRequest) returns (Empty); - rpc RevokeUser(RevokeUserRequest) returns (Empty); - rpc RotateRootCredentials(RotateRootCredentialsRequest) returns 
(RotateRootCredentialsResponse); - rpc Init(InitRequest) returns (InitResponse); - rpc Close(Empty) returns (Empty); - rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse); - rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse); - - rpc Initialize(InitializeRequest) returns (Empty) { - option deprecated = true; - }; + rpc Type(Empty) returns (TypeResponse); + rpc CreateUser(CreateUserRequest) returns (CreateUserResponse); + rpc RenewUser(RenewUserRequest) returns (Empty); + rpc RevokeUser(RevokeUserRequest) returns (Empty); + rpc RotateRootCredentials(RotateRootCredentialsRequest) returns (RotateRootCredentialsResponse); + rpc Init(InitRequest) returns (InitResponse); + rpc Close(Empty) returns (Empty); + rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse); + rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse); + + rpc Initialize(InitializeRequest) returns (Empty) { + option deprecated = true; + } } diff --git a/sdk/database/dbplugin/database_grpc.pb.go b/sdk/database/dbplugin/database_grpc.pb.go index 0e34e00a3cc4..57a0c059adf6 100644 --- a/sdk/database/dbplugin/database_grpc.pb.go +++ b/sdk/database/dbplugin/database_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc (unknown) +// source: sdk/database/dbplugin/database.proto package dbplugin @@ -11,8 +18,21 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + Database_Type_FullMethodName = "/dbplugin.Database/Type" + Database_CreateUser_FullMethodName = "/dbplugin.Database/CreateUser" + Database_RenewUser_FullMethodName = "/dbplugin.Database/RenewUser" + Database_RevokeUser_FullMethodName = "/dbplugin.Database/RevokeUser" + Database_RotateRootCredentials_FullMethodName = "/dbplugin.Database/RotateRootCredentials" + Database_Init_FullMethodName = "/dbplugin.Database/Init" + Database_Close_FullMethodName = "/dbplugin.Database/Close" + Database_SetCredentials_FullMethodName = "/dbplugin.Database/SetCredentials" + Database_GenerateCredentials_FullMethodName = "/dbplugin.Database/GenerateCredentials" + Database_Initialize_FullMethodName = "/dbplugin.Database/Initialize" +) // DatabaseClient is the client API for Database service. // @@ -40,8 +60,9 @@ func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { } func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TypeResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Type", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Type_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -49,8 +70,9 @@ func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallO } func (c *databaseClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CreateUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/CreateUser", in, out, opts...) 
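Throughout these client methods, the hard-coded "/dbplugin.Database/..." literals give way to the generated Database_*_FullMethodName constants, and each call now prepends grpc.StaticMethod(), a CallOption available in gRPC-Go v1.62+ that marks the method name as a compile-time constant so stats handlers can rely on it. A minimal sketch of reusing one such constant in a unary client interceptor (the interceptor is illustrative, not part of this change):

```go
package main

import (
	"context"
	"log"

	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin"
	"google.golang.org/grpc"
)

// logCreateUser matches one RPC by comparing against the generated
// constant instead of re-typing the "/dbplugin.Database/CreateUser"
// string literal.
func logCreateUser(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
) error {
	if method == dbplugin.Database_CreateUser_FullMethodName {
		log.Println("dbplugin: CreateUser invoked")
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}
```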
+ err := c.cc.Invoke(ctx, Database_CreateUser_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -58,8 +80,9 @@ func (c *databaseClient) CreateUser(ctx context.Context, in *CreateUserRequest, } func (c *databaseClient) RenewUser(ctx context.Context, in *RenewUserRequest, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/RenewUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_RenewUser_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -67,8 +90,9 @@ func (c *databaseClient) RenewUser(ctx context.Context, in *RenewUserRequest, op } func (c *databaseClient) RevokeUser(ctx context.Context, in *RevokeUserRequest, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/RevokeUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_RevokeUser_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -76,8 +100,9 @@ func (c *databaseClient) RevokeUser(ctx context.Context, in *RevokeUserRequest, } func (c *databaseClient) RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RotateRootCredentialsResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/RotateRootCredentials", in, out, opts...) + err := c.cc.Invoke(ctx, Database_RotateRootCredentials_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -85,8 +110,9 @@ func (c *databaseClient) RotateRootCredentials(ctx context.Context, in *RotateRo } func (c *databaseClient) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InitResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Init", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Init_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -94,8 +120,9 @@ func (c *databaseClient) Init(ctx context.Context, in *InitRequest, opts ...grpc } func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Close", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Close_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -103,8 +130,9 @@ func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.Call } func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetCredentialsResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/SetCredentials", in, out, opts...) + err := c.cc.Invoke(ctx, Database_SetCredentials_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -112,8 +140,9 @@ func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsR } func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
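For callers that bypass the generated client entirely, the same pattern can be reproduced by hand; this sketch mirrors the new cOpts construction for a single RPC (the wrapper function is hypothetical):

```go
package main

import (
	"context"

	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin"
	"google.golang.org/grpc"
)

// callType invokes Database.Type directly on a connection, passing
// grpc.StaticMethod() exactly as the regenerated client code now does.
func callType(ctx context.Context, cc *grpc.ClientConn) (*dbplugin.TypeResponse, error) {
	out := new(dbplugin.TypeResponse)
	err := cc.Invoke(ctx, dbplugin.Database_Type_FullMethodName, &dbplugin.Empty{}, out,
		grpc.StaticMethod())
	return out, err
}
```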
out := new(GenerateCredentialsResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/GenerateCredentials", in, out, opts...) + err := c.cc.Invoke(ctx, Database_GenerateCredentials_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -122,8 +151,9 @@ func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opt // Deprecated: Do not use. func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Initialize_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -205,7 +235,7 @@ func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Type", + FullMethod: Database_Type_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Type(ctx, req.(*Empty)) @@ -223,7 +253,7 @@ func _Database_CreateUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/CreateUser", + FullMethod: Database_CreateUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).CreateUser(ctx, req.(*CreateUserRequest)) @@ -241,7 +271,7 @@ func _Database_RenewUser_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/RenewUser", + FullMethod: Database_RenewUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).RenewUser(ctx, req.(*RenewUserRequest)) @@ -259,7 +289,7 @@ func _Database_RevokeUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/RevokeUser", + FullMethod: Database_RevokeUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).RevokeUser(ctx, req.(*RevokeUserRequest)) @@ -277,7 +307,7 @@ func _Database_RotateRootCredentials_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/RotateRootCredentials", + FullMethod: Database_RotateRootCredentials_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).RotateRootCredentials(ctx, req.(*RotateRootCredentialsRequest)) @@ -295,7 +325,7 @@ func _Database_Init_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Init", + FullMethod: Database_Init_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Init(ctx, req.(*InitRequest)) @@ -313,7 +343,7 @@ func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Close", + FullMethod: Database_Close_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Close(ctx, req.(*Empty)) @@ -331,7 +361,7 @@ func _Database_SetCredentials_Handler(srv 
interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/SetCredentials", + FullMethod: Database_SetCredentials_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).SetCredentials(ctx, req.(*SetCredentialsRequest)) @@ -349,7 +379,7 @@ func _Database_GenerateCredentials_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/GenerateCredentials", + FullMethod: Database_GenerateCredentials_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).GenerateCredentials(ctx, req.(*Empty)) @@ -367,7 +397,7 @@ func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Initialize", + FullMethod: Database_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) diff --git a/sdk/database/dbplugin/grpc_transport.go b/sdk/database/dbplugin/grpc_transport.go index 3740ef59c3b8..456be812d2be 100644 --- a/sdk/database/dbplugin/grpc_transport.go +++ b/sdk/database/dbplugin/grpc_transport.go @@ -9,12 +9,11 @@ import ( "errors" "time" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "github.com/golang/protobuf/ptypes" - "github.com/hashicorp/vault/sdk/helper/pluginutil" ) var ( diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go index 0b01454123c8..5b9aaae0aec0 100644 --- a/sdk/database/dbplugin/plugin.go +++ b/sdk/database/dbplugin/plugin.go @@ -8,13 +8,12 @@ import ( "fmt" "time" - "google.golang.org/grpc" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/pluginutil" + "google.golang.org/grpc" ) // Database is the interface that all database objects must implement. diff --git a/sdk/database/dbplugin/v5/conversions_test.go b/sdk/database/dbplugin/v5/conversions_test.go index d6e9b3f67118..c6b805bb9ff7 100644 --- a/sdk/database/dbplugin/v5/conversions_test.go +++ b/sdk/database/dbplugin/v5/conversions_test.go @@ -65,6 +65,7 @@ func TestConversionsHaveAllFields(t *testing.T) { CredentialType: CredentialTypeRSAPrivateKey, PublicKey: []byte("-----BEGIN PUBLIC KEY-----"), Password: "password", + Subject: "subject", Expiration: time.Now(), } @@ -115,6 +116,7 @@ func TestConversionsHaveAllFields(t *testing.T) { }, }, }, + SelfManagedPassword: "test-password", } protoReq, err := updateUserReqToProto(req) @@ -193,6 +195,7 @@ func TestConversionsHaveAllFields(t *testing.T) { }, }, }, + SelfManagedPassword: "test-password", } protoReq, err := getUpdateUserRequest(req) diff --git a/sdk/database/dbplugin/v5/credentialtype_enumer.go b/sdk/database/dbplugin/v5/credentialtype_enumer.go new file mode 100644 index 000000000000..d61011b718ee --- /dev/null +++ b/sdk/database/dbplugin/v5/credentialtype_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=CredentialType -trimprefix=CredentialType -transform=snake"; DO NOT EDIT. 
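The new credentialtype_enumer.go, whose generated body continues below, replaces the hand-written CredentialType.String() switch that this diff deletes from v5's database.go, and adds a parser the old code never had. A round-trip sketch, assuming only the generated names shown in this file:

```go
package main

import (
	"fmt"
	"log"

	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)

func main() {
	// -transform=snake means constant names are snake_cased on output.
	fmt.Println(dbplugin.CredentialTypeClientCertificate.String()) // client_certificate

	// CredentialTypeString reverses the mapping; the old switch-based
	// String() offered no equivalent.
	ct, err := dbplugin.CredentialTypeString("rsa_private_key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ct == dbplugin.CredentialTypeRSAPrivateKey) // true
}
```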
+ +package dbplugin + +import ( + "fmt" +) + +const _CredentialTypeName = "passwordrsa_private_keyclient_certificate" + +var _CredentialTypeIndex = [...]uint8{0, 8, 23, 41} + +func (i CredentialType) String() string { + if i < 0 || i >= CredentialType(len(_CredentialTypeIndex)-1) { + return fmt.Sprintf("CredentialType(%d)", i) + } + return _CredentialTypeName[_CredentialTypeIndex[i]:_CredentialTypeIndex[i+1]] +} + +var _CredentialTypeValues = []CredentialType{0, 1, 2} + +var _CredentialTypeNameToValueMap = map[string]CredentialType{ + _CredentialTypeName[0:8]: 0, + _CredentialTypeName[8:23]: 1, + _CredentialTypeName[23:41]: 2, +} + +// CredentialTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func CredentialTypeString(s string) (CredentialType, error) { + if val, ok := _CredentialTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to CredentialType values", s) +} + +// CredentialTypeValues returns all values of the enum +func CredentialTypeValues() []CredentialType { + return _CredentialTypeValues +} + +// IsACredentialType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i CredentialType) IsACredentialType() bool { + for _, v := range _CredentialTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/database/dbplugin/v5/database.go b/sdk/database/dbplugin/v5/database.go index 24b0115c6339..2943451b7212 100644 --- a/sdk/database/dbplugin/v5/database.go +++ b/sdk/database/dbplugin/v5/database.go @@ -123,6 +123,10 @@ type NewUserRequest struct { // The value is set when the credential type is CredentialTypeRSAPrivateKey. PublicKey []byte + // Subject is the distinguished name for the client certificate credential. + // Value is set when the credential type is CredentialTypeClientCertificate. + Subject string + // Expiration of the user. Not all database plugins will support this. Expiration time.Time } @@ -140,25 +144,17 @@ type NewUserResponse struct { Username string } +//go:generate enumer -type=CredentialType -trimprefix=CredentialType -transform=snake + // CredentialType is a type of database credential. type CredentialType int const ( CredentialTypePassword CredentialType = iota CredentialTypeRSAPrivateKey + CredentialTypeClientCertificate ) -func (k CredentialType) String() string { - switch k { - case CredentialTypePassword: - return "password" - case CredentialTypeRSAPrivateKey: - return "rsa_private_key" - default: - return "unknown" - } -} - // /////////////////////////////////////////////////////// // UpdateUser() // /////////////////////////////////////////////////////// @@ -185,6 +181,13 @@ type UpdateUserRequest struct { // Expiration indicates the new expiration date to change to. // If nil, no change is requested. Expiration *ChangeExpiration + + // SelfManagedPassword is the password for an externally managed user in the DB. + // If this field is supplied, a DB connection is retrieved from the static + // account cache for the particular DB plugin and used to update the password of + // the self-managed static role. 
+ // *ENTERPRISE-ONLY* + SelfManagedPassword string } // ChangePublicKey of a given user diff --git a/sdk/database/dbplugin/v5/grpc_client.go b/sdk/database/dbplugin/v5/grpc_client.go index 68a63faaf7ca..4e1ef57492a9 100644 --- a/sdk/database/dbplugin/v5/grpc_client.go +++ b/sdk/database/dbplugin/v5/grpc_client.go @@ -104,6 +104,10 @@ func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { if len(req.PublicKey) == 0 { return nil, fmt.Errorf("missing public key credential") } + case CredentialTypeClientCertificate: + if req.Subject == "" { + return nil, fmt.Errorf("missing certificate subject") + } default: return nil, fmt.Errorf("unknown credential type") } @@ -121,6 +125,7 @@ func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { CredentialType: int32(req.CredentialType), Password: req.Password, PublicKey: req.PublicKey, + Subject: req.Subject, Expiration: expiration, Statements: &proto.Statements{ Commands: req.Statements.Commands, @@ -194,11 +199,12 @@ func updateUserReqToProto(req UpdateUserRequest) (*proto.UpdateUserRequest, erro } rpcReq := &proto.UpdateUserRequest{ - Username: req.Username, - CredentialType: int32(req.CredentialType), - Password: password, - PublicKey: publicKey, - Expiration: expiration, + Username: req.Username, + CredentialType: int32(req.CredentialType), + Password: password, + PublicKey: publicKey, + Expiration: expiration, + SelfManagedPassword: req.SelfManagedPassword, } return rpcReq, nil } diff --git a/sdk/database/dbplugin/v5/grpc_server.go b/sdk/database/dbplugin/v5/grpc_server.go index c98452d40b11..691de2d0a89e 100644 --- a/sdk/database/dbplugin/v5/grpc_server.go +++ b/sdk/database/dbplugin/v5/grpc_server.go @@ -152,6 +152,7 @@ func (g *gRPCServer) NewUser(ctx context.Context, req *proto.NewUserRequest) (*p CredentialType: CredentialType(req.GetCredentialType()), Password: req.GetPassword(), PublicKey: req.GetPublicKey(), + Subject: req.GetSubject(), Expiration: expiration, Statements: getStatementsFromProto(req.GetStatements()), RollbackStatements: getStatementsFromProto(req.GetRollbackStatements()), @@ -221,11 +222,12 @@ func getUpdateUserRequest(req *proto.UpdateUserRequest) (UpdateUserRequest, erro } dbReq := UpdateUserRequest{ - Username: req.GetUsername(), - CredentialType: CredentialType(req.GetCredentialType()), - Password: password, - PublicKey: publicKey, - Expiration: expiration, + Username: req.GetUsername(), + CredentialType: CredentialType(req.GetCredentialType()), + Password: password, + PublicKey: publicKey, + Expiration: expiration, + SelfManagedPassword: req.SelfManagedPassword, } if !hasChange(dbReq) { diff --git a/sdk/database/dbplugin/v5/grpc_server_test.go b/sdk/database/dbplugin/v5/grpc_server_test.go index 53d44c7c2a65..847a9e770df0 100644 --- a/sdk/database/dbplugin/v5/grpc_server_test.go +++ b/sdk/database/dbplugin/v5/grpc_server_test.go @@ -11,16 +11,15 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/logical" - "google.golang.org/protobuf/types/known/structpb" - "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/timestamp" "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" ) // Before minValidSeconds in ptypes package diff --git a/sdk/database/dbplugin/v5/plugin_client_test.go 
b/sdk/database/dbplugin/v5/plugin_client_test.go index 10f02b7bec25..fb6852d1a4b3 100644 --- a/sdk/database/dbplugin/v5/plugin_client_test.go +++ b/sdk/database/dbplugin/v5/plugin_client_test.go @@ -156,3 +156,7 @@ func (m *mockRunnerUtil) MlockEnabled() bool { args := m.Called() return args.Bool(0) } + +func (m *mockRunnerUtil) ClusterID(ctx context.Context) (string, error) { + return "clusterid", nil +} diff --git a/sdk/database/dbplugin/v5/proto/database.pb.go b/sdk/database/dbplugin/v5/proto/database.pb.go index 06af332cd2a8..376315758927 100644 --- a/sdk/database/dbplugin/v5/proto/database.pb.go +++ b/sdk/database/dbplugin/v5/proto/database.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/database/dbplugin/v5/proto/database.proto package proto @@ -142,6 +142,7 @@ type NewUserRequest struct { RollbackStatements *Statements `protobuf:"bytes,5,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` CredentialType int32 `protobuf:"varint,6,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` PublicKey []byte `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + Subject string `protobuf:"bytes,8,opt,name=subject,proto3" json:"subject,omitempty"` } func (x *NewUserRequest) Reset() { @@ -225,6 +226,13 @@ func (x *NewUserRequest) GetPublicKey() []byte { return nil } +func (x *NewUserRequest) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + type UsernameConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -335,11 +343,12 @@ type UpdateUserRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` - Password *ChangePassword `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Expiration *ChangeExpiration `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` - PublicKey *ChangePublicKey `protobuf:"bytes,4,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` - CredentialType int32 `protobuf:"varint,5,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password *ChangePassword `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Expiration *ChangeExpiration `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + PublicKey *ChangePublicKey `protobuf:"bytes,4,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + CredentialType int32 `protobuf:"varint,5,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` + SelfManagedPassword string `protobuf:"bytes,6,opt,name=self_managed_password,json=selfManagedPassword,proto3" json:"self_managed_password,omitempty"` } func (x *UpdateUserRequest) Reset() { @@ -409,6 +418,13 @@ func (x *UpdateUserRequest) GetCredentialType() int32 { return 0 } +func (x *UpdateUserRequest) GetSelfManagedPassword() string { + if x != nil { + return x.SelfManagedPassword + } + return "" +} + type ChangePassword struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -869,7 +885,7 @@ var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ 0x0b, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0xf9, 0x02, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, + 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x93, 0x03, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, @@ -893,100 +909,105 @@ var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x4b, 0x65, 0x79, 0x22, 0x50, 0x0a, 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8d, 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, - 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, - 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, - 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, - 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, - 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x54, 0x79, 0x70, 0x65, 0x22, 0x6c, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, - 0x77, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, + 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x50, 0x0a, + 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc1, + 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, + 0x0a, 0x15, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x5f, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, + 0x65, 0x6c, 0x66, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x22, 0x6c, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 
0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x70, 0x0a, 0x0f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x77, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x22, 0x70, 0x0a, 0x0f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, - 0x65, 0x77, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, - 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, - 0x65, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 
0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, - 0x28, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, - 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x32, 0xa5, 0x03, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, - 0x4d, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, - 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, - 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, - 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 
0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, - 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x65, 0x77, + 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x28, 0x0a, + 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x32, 0xa5, 0x03, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x4d, 0x0a, + 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, + 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 
0x65, 0x55, 0x73, 0x65, 0x72, + 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, + 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x35, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, 0x65, + 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x2f, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, 0x35, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1002,7 +1023,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP() []byte { } var file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_sdk_database_dbplugin_v5_proto_database_proto_goTypes = []interface{}{ +var file_sdk_database_dbplugin_v5_proto_database_proto_goTypes = []any{ (*InitializeRequest)(nil), // 0: dbplugin.v5.InitializeRequest (*InitializeResponse)(nil), // 1: dbplugin.v5.InitializeResponse (*NewUserRequest)(nil), // 2: dbplugin.v5.NewUserRequest @@ -1061,7 +1082,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*InitializeRequest); i { case 0: return &v.state @@ -1073,7 +1094,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*InitializeResponse); i { case 0: return &v.state @@ -1085,7 +1106,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + 
file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*NewUserRequest); i { case 0: return &v.state @@ -1097,7 +1118,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UsernameConfig); i { case 0: return &v.state @@ -1109,7 +1130,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*NewUserResponse); i { case 0: return &v.state @@ -1121,7 +1142,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UpdateUserRequest); i { case 0: return &v.state @@ -1133,7 +1154,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ChangePassword); i { case 0: return &v.state @@ -1145,7 +1166,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ChangePublicKey); i { case 0: return &v.state @@ -1157,7 +1178,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ChangeExpiration); i { case 0: return &v.state @@ -1169,7 +1190,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*UpdateUserResponse); i { case 0: return &v.state @@ -1181,7 +1202,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*DeleteUserRequest); i { case 0: return &v.state @@ -1193,7 +1214,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v 
:= v.(*DeleteUserResponse); i { case 0: return &v.state @@ -1205,7 +1226,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*TypeResponse); i { case 0: return &v.state @@ -1217,7 +1238,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*Statements); i { case 0: return &v.state @@ -1229,7 +1250,7 @@ func file_sdk_database_dbplugin_v5_proto_database_proto_init() { return nil } } - file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/sdk/database/dbplugin/v5/proto/database.proto b/sdk/database/dbplugin/v5/proto/database.proto index ee285dcf7c50..c30984ef758b 100644 --- a/sdk/database/dbplugin/v5/proto/database.proto +++ b/sdk/database/dbplugin/v5/proto/database.proto @@ -4,69 +4,71 @@ syntax = "proto3"; package dbplugin.v5; -option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"; - import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; +option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"; + ///////////////// // Initialize() ///////////////// message InitializeRequest { - google.protobuf.Struct config_data = 1; - bool verify_connection = 2; + google.protobuf.Struct config_data = 1; + bool verify_connection = 2; } message InitializeResponse { - google.protobuf.Struct config_data = 1; + google.protobuf.Struct config_data = 1; } ///////////////// // NewUser() ///////////////// message NewUserRequest { - UsernameConfig username_config = 1; - string password = 2; - google.protobuf.Timestamp expiration = 3; - Statements statements = 4; - Statements rollback_statements = 5; - int32 credential_type = 6; - bytes public_key = 7; + UsernameConfig username_config = 1; + string password = 2; + google.protobuf.Timestamp expiration = 3; + Statements statements = 4; + Statements rollback_statements = 5; + int32 credential_type = 6; + bytes public_key = 7; + string subject = 8; } message UsernameConfig { - string display_name = 1; - string role_name = 2; + string display_name = 1; + string role_name = 2; } message NewUserResponse { - string username = 1; + string username = 1; } ///////////////// // UpdateUser() ///////////////// message UpdateUserRequest { - string username = 1; - ChangePassword password = 2; - ChangeExpiration expiration = 3; - ChangePublicKey public_key = 4; - int32 credential_type = 5; + string username = 1; + ChangePassword password = 2; + ChangeExpiration expiration = 3; + ChangePublicKey public_key = 4; + int32 credential_type = 5; + string self_managed_password = 6; } message ChangePassword { - string new_password = 1; - Statements statements = 2; + string new_password = 1; + Statements statements = 2; } message ChangePublicKey { - bytes new_public_key = 1; - Statements statements = 2; + bytes new_public_key = 1; + Statements statements = 2; } message 
ChangeExpiration { - google.protobuf.Timestamp new_expiration = 1; - Statements statements = 2; + google.protobuf.Timestamp new_expiration = 1; + Statements statements = 2; } message UpdateUserResponse {} @@ -75,8 +77,8 @@ message UpdateUserResponse {} // DeleteUser() ///////////////// message DeleteUserRequest { - string username = 1; - Statements statements = 2; + string username = 1; + Statements statements = 2; } message DeleteUserResponse {} @@ -85,23 +87,23 @@ message DeleteUserResponse {} // Type() ///////////////// message TypeResponse { - string Type = 1; + string Type = 1; } ///////////////// // General purpose ///////////////// message Statements { - repeated string Commands = 1; + repeated string Commands = 1; } message Empty {} service Database { - rpc Initialize(InitializeRequest) returns (InitializeResponse); - rpc NewUser(NewUserRequest) returns (NewUserResponse); - rpc UpdateUser(UpdateUserRequest) returns (UpdateUserResponse); - rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse); - rpc Type(Empty) returns (TypeResponse); - rpc Close(Empty) returns (Empty); -} \ No newline at end of file + rpc Initialize(InitializeRequest) returns (InitializeResponse); + rpc NewUser(NewUserRequest) returns (NewUserResponse); + rpc UpdateUser(UpdateUserRequest) returns (UpdateUserResponse); + rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse); + rpc Type(Empty) returns (TypeResponse); + rpc Close(Empty) returns (Empty); +} diff --git a/sdk/database/dbplugin/v5/proto/database_grpc.pb.go b/sdk/database/dbplugin/v5/proto/database_grpc.pb.go index 8a549fef92f0..28c8d775238a 100644 --- a/sdk/database/dbplugin/v5/proto/database_grpc.pb.go +++ b/sdk/database/dbplugin/v5/proto/database_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc (unknown) +// source: sdk/database/dbplugin/v5/proto/database.proto package proto @@ -11,8 +18,17 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + Database_Initialize_FullMethodName = "/dbplugin.v5.Database/Initialize" + Database_NewUser_FullMethodName = "/dbplugin.v5.Database/NewUser" + Database_UpdateUser_FullMethodName = "/dbplugin.v5.Database/UpdateUser" + Database_DeleteUser_FullMethodName = "/dbplugin.v5.Database/DeleteUser" + Database_Type_FullMethodName = "/dbplugin.v5.Database/Type" + Database_Close_FullMethodName = "/dbplugin.v5.Database/Close" +) // DatabaseClient is the client API for Database service. // @@ -35,8 +51,9 @@ func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { } func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InitializeResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Initialize_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -44,8 +61,9 @@ func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, } func (c *databaseClient) NewUser(ctx context.Context, in *NewUserRequest, opts ...grpc.CallOption) (*NewUserResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(NewUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/NewUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_NewUser_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -53,8 +71,9 @@ func (c *databaseClient) NewUser(ctx context.Context, in *NewUserRequest, opts . } func (c *databaseClient) UpdateUser(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*UpdateUserResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(UpdateUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/UpdateUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_UpdateUser_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -62,8 +81,9 @@ func (c *databaseClient) UpdateUser(ctx context.Context, in *UpdateUserRequest, } func (c *databaseClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/DeleteUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_DeleteUser_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -71,8 +91,9 @@ func (c *databaseClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, } func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TypeResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Type", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Type_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -80,8 +101,9 @@ func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallO } func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Close", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Close_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -146,7 +168,7 @@ func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/Initialize", + FullMethod: Database_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) @@ -164,7 +186,7 @@ func _Database_NewUser_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/NewUser", + FullMethod: Database_NewUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).NewUser(ctx, req.(*NewUserRequest)) @@ -182,7 +204,7 @@ func _Database_UpdateUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/UpdateUser", + FullMethod: Database_UpdateUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).UpdateUser(ctx, req.(*UpdateUserRequest)) @@ -200,7 +222,7 @@ func _Database_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/DeleteUser", + FullMethod: Database_DeleteUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).DeleteUser(ctx, req.(*DeleteUserRequest)) @@ -218,7 +240,7 @@ func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/Type", + FullMethod: Database_Type_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Type(ctx, req.(*Empty)) @@ -236,7 +258,7 @@ func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/Close", + FullMethod: Database_Close_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Close(ctx, req.(*Empty)) diff --git a/sdk/database/dbplugin/v5/testing/test_helpers.go b/sdk/database/dbplugin/v5/testing/test_helpers.go index 83e4af3089ce..4ecebe5de08e 100644 --- a/sdk/database/dbplugin/v5/testing/test_helpers.go +++ b/sdk/database/dbplugin/v5/testing/test_helpers.go @@ -5,10 +5,12 @@ package dbtesting import ( "context" + "io/ioutil" "os" "testing" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) @@ -22,7 +24,7 @@ func getRequestTimeout(t *testing.T) time.Duration { return 10 * time.Second } - dur, err := time.ParseDuration(rawDur) + dur, err := parseutil.ParseDurationSecond(rawDur) if err != nil { t.Fatalf("Failed to parse custom request timeout %q: %s", rawDur, err) } @@ -38,7 +40,7 @@ func AssertInitializeCircleCiTest(t *testing.T, db dbplugin.Database, req dbplug var err error for i := 1; i <= maxAttempts; i++ { - resp, err = verifyInitialize(t, db, req) + resp, err = VerifyInitialize(t, db, req) if err != nil { t.Errorf("Failed AssertInitialize attempt: %d with error:\n%+v\n", i, err) time.Sleep(1 * time.Second) @@ -56,14 +58,14 @@ func AssertInitializeCircleCiTest(t *testing.T, db dbplugin.Database, req dbplug func 
AssertInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) dbplugin.InitializeResponse { t.Helper() - resp, err := verifyInitialize(t, db, req) + resp, err := VerifyInitialize(t, db, req) if err != nil { t.Fatalf("Failed to initialize: %s", err) } return resp } -func verifyInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { +func VerifyInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), getRequestTimeout(t)) defer cancel() @@ -118,3 +120,31 @@ func AssertClose(t *testing.T, db dbplugin.Database) { t.Fatalf("Failed to close database: %s", err) } } + +// GetGCPTestCredentials reads the credentials from the +// GOOGLE_APPLICATIONS_CREDENTIALS environment variable. +// The credentials are read from a file if the value is a valid file path; +// otherwise the value is returned as raw JSON. +func GetGCPTestCredentials(t *testing.T) string { + t.Helper() + envCredentials := "GOOGLE_APPLICATIONS_CREDENTIALS" + + var credsStr string + credsEnv := os.Getenv(envCredentials) + if credsEnv == "" { + t.Skipf("env var %s not set, skipping test", envCredentials) + } + + // Attempt to read as file path; if invalid, assume given JSON value directly + if _, err := os.Stat(credsEnv); err == nil { + credsBytes, err := ioutil.ReadFile(credsEnv) + if err != nil { + t.Fatalf("unable to read credentials file %s: %v", credsEnv, err) + } + credsStr = string(credsBytes) + } else { + credsStr = credsEnv + } + + return credsStr +} diff --git a/sdk/database/helper/cacheutil/cache_stubs_oss.go b/sdk/database/helper/cacheutil/cache_stubs_oss.go new file mode 100644 index 000000000000..18e141832fcc --- /dev/null +++ b/sdk/database/helper/cacheutil/cache_stubs_oss.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package cacheutil + +import "errors" + +type EvictionFunc func(key interface{}, value interface{}) + +type Cache struct{} + +func NewCache(_ int, _ EvictionFunc) (*Cache, error) { + return nil, errors.New("self-managed static roles only available in Vault Enterprise") +} diff --git a/sdk/database/helper/connutil/cloudsql.go b/sdk/database/helper/connutil/cloudsql.go new file mode 100644 index 000000000000..f6cbba1d2407 --- /dev/null +++ b/sdk/database/helper/connutil/cloudsql.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package connutil + +import ( + "fmt" + + "cloud.google.com/go/cloudsqlconn" + "cloud.google.com/go/cloudsqlconn/postgres/pgxv4" +) + +func (c *SQLConnectionProducer) getCloudSQLDriverType() (string, error) { + var driverType string + // using switch case for future extensibility + switch c.Type { + case dbTypePostgres: + driverType = cloudSQLPostgres + default: + return "", fmt.Errorf("unsupported DB type for cloud IAM: %s", c.Type) + } + + return driverType, nil +} + +func (c *SQLConnectionProducer) registerDrivers(driverName string, credentials string, usePrivateIP bool) (func() error, error) { + typ, err := c.getCloudSQLDriverType() + if err != nil { + return nil, err + } + + opts, err := GetCloudSQLAuthOptions(credentials, usePrivateIP) + if err != nil { + return nil, err + } + + // using switch case for future extensibility + switch typ { + case cloudSQLPostgres: + return pgxv4.RegisterDriver(driverName, opts...)
+ } + + return nil, fmt.Errorf("unrecognized cloudsql type encountered: %s", typ) +} + +// GetCloudSQLAuthOptions takes a credentials JSON string and returns +// a set of GCP CloudSQL options - always WithIAMAuthN, plus the credentials JSON and private IP options when provided. +func GetCloudSQLAuthOptions(credentials string, usePrivateIP bool) ([]cloudsqlconn.Option, error) { + opts := []cloudsqlconn.Option{cloudsqlconn.WithIAMAuthN()} + + if credentials != "" { + opts = append(opts, cloudsqlconn.WithCredentialsJSON([]byte(credentials))) + } + + if usePrivateIP { + opts = append(opts, cloudsqlconn.WithDefaultDialOptions(cloudsqlconn.WithPrivateIP())) + } + + return opts, nil +} diff --git a/sdk/database/helper/connutil/postgres.go b/sdk/database/helper/connutil/postgres.go new file mode 100644 index 000000000000..f8ad876c5a58 --- /dev/null +++ b/sdk/database/helper/connutil/postgres.go @@ -0,0 +1,466 @@ +// Copyright (c) 2019-2021 Jack Christensen + +// MIT License + +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: + +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Copied from https://github.com/jackc/pgconn/blob/1860f4e57204614f40d05a5c76a43e8d80fde9da/config.go + +package connutil + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "encoding/pem" + "errors" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" +) + +// openPostgres parses the connection string and opens a connection to the database. +// +// If sslinline is set, strips the connection string of all ssl settings and +// creates a TLS config based on the settings provided, then uses the +// RegisterConnConfig function to create a new connection. This is necessary +// because the pgx driver does not support the sslinline parameter and instead +// expects to source ssl material from the file system. +// +// Deprecated: openPostgres will be removed in a future version of the Vault SDK.
+func openPostgres(driverName, connString string) (*sql.DB, error) { + if ok, _ := strconv.ParseBool(os.Getenv(pluginutil.PluginUsePostgresSSLInline)); !ok { + return nil, fmt.Errorf("failed to open postgres connection with deprecated function, set feature flag to enable") + } + + var options pgconn.ParseConfigOptions + + settings := make(map[string]string) + if connString != "" { + var err error + // connString may be a database URL or a DSN + if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") { + settings, err = parsePostgresURLSettings(connString) + if err != nil { + return nil, fmt.Errorf("failed to parse as URL: %w", err) + } + } else { + settings, err = parsePostgresDSNSettings(connString) + if err != nil { + return nil, fmt.Errorf("failed to parse as DSN: %w", err) + } + } + } + + // get the inline flag + sslInline := settings["sslinline"] == "true" + + // if sslinline is not set, open a regular connection + if !sslInline { + return sql.Open(driverName, connString) + } + + // generate a new DSN without the ssl settings + newConnStr := []string{"sslmode=disable"} + for k, v := range settings { + switch k { + case "sslinline", "sslcert", "sslkey", "sslrootcert", "sslmode": + continue + } + + newConnStr = append(newConnStr, fmt.Sprintf("%s='%s'", k, v)) + } + + // parse the updated config + config, err := pgx.ParseConfig(strings.Join(newConnStr, " ")) + if err != nil { + return nil, err + } + + // create a TLS config + fallbacks := []*pgconn.FallbackConfig{} + + hosts := strings.Split(settings["host"], ",") + ports := strings.Split(settings["port"], ",") + + for i, host := range hosts { + var portStr string + if i < len(ports) { + portStr = ports[i] + } else { + portStr = ports[0] + } + + port, err := parsePort(portStr) + if err != nil { + return nil, fmt.Errorf("invalid port: %w", err) + } + + var tlsConfigs []*tls.Config + + // Ignore TLS settings for Unix domain sockets, matching libpq behavior + if network, _ := pgconn.NetworkAddress(host, port); network == "unix" { + tlsConfigs = append(tlsConfigs, nil) + } else { + var err error + tlsConfigs, err = configPostgresTLS(settings, host, options) + if err != nil { + return nil, fmt.Errorf("failed to configure TLS: %w", err) + } + } + + for _, tlsConfig := range tlsConfigs { + fallbacks = append(fallbacks, &pgconn.FallbackConfig{ + Host: host, + Port: port, + TLSConfig: tlsConfig, + }) + } + } + + config.Host = fallbacks[0].Host + config.Port = fallbacks[0].Port + config.TLSConfig = fallbacks[0].TLSConfig + config.Fallbacks = fallbacks[1:] + + return sql.Open(driverName, stdlib.RegisterConnConfig(config)) +} + +// configPostgresTLS uses libpq's TLS parameters to construct []*tls.Config. It is +// necessary to allow returning multiple TLS configs as sslmode "allow" and +// "prefer" allow fallback. +// +// Copied from https://github.com/jackc/pgconn/blob/1860f4e57204614f40d05a5c76a43e8d80fde9da/config.go +// and modified to read ssl material by value instead of file location.
+func configPostgresTLS(settings map[string]string, thisHost string, parseConfigOptions pgconn.ParseConfigOptions) ([]*tls.Config, error) { + host := thisHost + sslmode := settings["sslmode"] + sslrootcert := settings["sslrootcert"] + sslcert := settings["sslcert"] + sslkey := settings["sslkey"] + sslpassword := settings["sslpassword"] + sslsni := settings["sslsni"] + + // Match libpq default behavior + if sslmode == "" { + sslmode = "prefer" + } + if sslsni == "" { + sslsni = "1" + } + + tlsConfig := &tls.Config{} + + switch sslmode { + case "disable": + return []*tls.Config{nil}, nil + case "allow", "prefer": + tlsConfig.InsecureSkipVerify = true + case "require": + // According to PostgreSQL documentation, if a root CA file exists, + // the behavior of sslmode=require should be the same as that of verify-ca + // + // See https://www.postgresql.org/docs/12/libpq-ssl.html + if sslrootcert != "" { + goto nextCase + } + tlsConfig.InsecureSkipVerify = true + break + nextCase: + fallthrough + case "verify-ca": + // Don't perform the default certificate verification because it + // will verify the hostname. Instead, verify the server's + // certificate chain ourselves in VerifyPeerCertificate and + // ignore the server name. This emulates libpq's verify-ca + // behavior. + // + // See https://github.com/golang/go/issues/21971#issuecomment-332693931 + // and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate + // for more info. + tlsConfig.InsecureSkipVerify = true + tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error { + certs := make([]*x509.Certificate, len(certificates)) + for i, asn1Data := range certificates { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return errors.New("failed to parse certificate from server: " + err.Error()) + } + certs[i] = cert + } + + // Leave DNSName empty to skip hostname verification. + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + Intermediates: x509.NewCertPool(), + } + // Skip the first cert because it's the leaf. All others + // are intermediates. 
+ for _, cert := range certs[1:] { + opts.Intermediates.AddCert(cert) + } + _, err := certs[0].Verify(opts) + return err + } + case "verify-full": + tlsConfig.ServerName = host + default: + return nil, errors.New("sslmode is invalid") + } + + if sslrootcert != "" { + caCertPool := x509.NewCertPool() + if !caCertPool.AppendCertsFromPEM([]byte(sslrootcert)) { + return nil, errors.New("unable to add CA to cert pool") + } + + tlsConfig.RootCAs = caCertPool + tlsConfig.ClientCAs = caCertPool + } + + if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") { + return nil, errors.New(`both "sslcert" and "sslkey" are required`) + } + + if sslcert != "" && sslkey != "" { + block, _ := pem.Decode([]byte(sslkey)) + var pemKey []byte + var decryptedKey []byte + var decryptedError error + // If PEM is encrypted, attempt to decrypt using pass phrase + if x509.IsEncryptedPEMBlock(block) { + // Attempt decryption with pass phrase + // NOTE: only supports RSA (PKCS#1) + if sslpassword != "" { + decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword)) + } + // if sslpassword was not provided, or decryption with it failed, + // try to obtain sslpassword from the callback function + if sslpassword == "" || decryptedError != nil { + if parseConfigOptions.GetSSLPassword != nil { + sslpassword = parseConfigOptions.GetSSLPassword(context.Background()) + } + if sslpassword == "" { + return nil, fmt.Errorf("unable to find sslpassword") + } + } + decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword)) + // TODO: should we also warn that an RSA (PKCS#1) key is required? + if decryptedError != nil { + return nil, fmt.Errorf("unable to decrypt key: %w", decryptedError) + } + + pemBytes := pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: decryptedKey, + } + pemKey = pem.EncodeToMemory(&pemBytes) + } else { + pemKey = pem.EncodeToMemory(block) + } + + cert, err := tls.X509KeyPair([]byte(sslcert), pemKey) + if err != nil { + return nil, fmt.Errorf("unable to load cert: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + // Set Server Name Indication (SNI), if enabled by connection parameters. + // Per RFC 6066, do not set it if the host is a literal IP address (IPv4 + // or IPv6). + if sslsni == "1" && net.ParseIP(host) == nil { + tlsConfig.ServerName = host + } + + switch sslmode { + case "allow": + return []*tls.Config{nil, tlsConfig}, nil + case "prefer": + return []*tls.Config{tlsConfig, nil}, nil + case "require", "verify-ca", "verify-full": + return []*tls.Config{tlsConfig}, nil + default: + panic("BUG: bad sslmode should already have been caught") + } +} + +func parsePort(s string) (uint16, error) { + port, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return 0, err + } + if port < 1 || port > math.MaxUint16 { + return 0, errors.New("outside range") + } + return uint16(port), nil +} + +var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1} + +func parsePostgresURLSettings(connString string) (map[string]string, error) { + settings := make(map[string]string) + + url, err := url.Parse(connString) + if err != nil { + return nil, err + } + + if url.User != nil { + settings["user"] = url.User.Username() + if password, present := url.User.Password(); present { + settings["password"] = password + } + } + + // Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port.
+ var hosts []string + var ports []string + for _, host := range strings.Split(url.Host, ",") { + if host == "" { + continue + } + if isIPOnly(host) { + hosts = append(hosts, strings.Trim(host, "[]")) + continue + } + h, p, err := net.SplitHostPort(host) + if err != nil { + return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err) + } + if h != "" { + hosts = append(hosts, h) + } + if p != "" { + ports = append(ports, p) + } + } + if len(hosts) > 0 { + settings["host"] = strings.Join(hosts, ",") + } + if len(ports) > 0 { + settings["port"] = strings.Join(ports, ",") + } + + database := strings.TrimLeft(url.Path, "/") + if database != "" { + settings["database"] = database + } + + nameMap := map[string]string{ + "dbname": "database", + } + + for k, v := range url.Query() { + if k2, present := nameMap[k]; present { + k = k2 + } + + settings[k] = v[0] + } + + return settings, nil +} + +func parsePostgresDSNSettings(s string) (map[string]string, error) { + settings := make(map[string]string) + + nameMap := map[string]string{ + "dbname": "database", + } + + for len(s) > 0 { + var key, val string + eqIdx := strings.IndexRune(s, '=') + if eqIdx < 0 { + return nil, errors.New("invalid dsn") + } + + key = strings.Trim(s[:eqIdx], " \t\n\r\v\f") + s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f") + if len(s) == 0 { + } else if s[0] != '\'' { + end := 0 + for ; end < len(s); end++ { + if asciiSpace[s[end]] == 1 { + break + } + if s[end] == '\\' { + end++ + if end == len(s) { + return nil, errors.New("invalid backslash") + } + } + } + val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1) + if end == len(s) { + s = "" + } else { + s = s[end+1:] + } + } else { // quoted string + s = s[1:] + end := 0 + for ; end < len(s); end++ { + if s[end] == '\'' { + break + } + if s[end] == '\\' { + end++ + } + } + if end == len(s) { + return nil, errors.New("unterminated quoted string in connection info string") + } + val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1) + if end == len(s) { + s = "" + } else { + s = s[end+1:] + } + } + + if k, ok := nameMap[key]; ok { + key = k + } + + if key == "" { + return nil, errors.New("invalid dsn") + } + + settings[key] = val + } + + return settings, nil +} + +func isIPOnly(host string) bool { + return net.ParseIP(strings.Trim(host, "[]")) != nil || !strings.Contains(host, ":") +} diff --git a/sdk/database/helper/connutil/sql.go b/sdk/database/helper/connutil/sql.go index d1af4808cb44..489becf0c113 100644 --- a/sdk/database/helper/connutil/sql.go +++ b/sdk/database/helper/connutil/sql.go @@ -5,20 +5,45 @@ package connutil import ( "context" + "crypto/tls" "database/sql" "fmt" "net/url" + "os" "strings" "sync" "time" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/database/helper/cacheutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" "github.com/mitchellh/mapstructure" ) +const ( + AuthTypeGCPIAM = "gcp_iam" + AuthTypeCert = "cert" + AuthTypeUsernamePassword = "" +) + +const ( + dbTypePostgres = "pgx" + cloudSQLPostgres = "cloudsql-postgres" + + // controls the size of the static account cache + // as part of the self-managed workflow + defaultStaticCacheSize = 4 + 
defaultSelfManagedUsername = "self-managed-user" + defaultSelfManagedPassword = "self-managed-password" +) + var _ ConnectionProducer = &SQLConnectionProducer{} // SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases @@ -27,15 +52,30 @@ type SQLConnectionProducer struct { MaxOpenConnections int `json:"max_open_connections" mapstructure:"max_open_connections" structs:"max_open_connections"` MaxIdleConnections int `json:"max_idle_connections" mapstructure:"max_idle_connections" structs:"max_idle_connections"` MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` - Username string `json:"username" mapstructure:"username" structs:"username"` - Password string `json:"password" mapstructure:"password" structs:"password"` DisableEscaping bool `json:"disable_escaping" mapstructure:"disable_escaping" structs:"disable_escaping"` + usePrivateIP bool `json:"use_private_ip" mapstructure:"use_private_ip" structs:"use_private_ip"` + SelfManaged bool `json:"self_managed" mapstructure:"self_managed" structs:"self_managed"` + + // Username/Password is the default auth type when AuthType is not set + Username string `json:"username" mapstructure:"username" structs:"username"` + Password string `json:"password" mapstructure:"password" structs:"password"` + + // AuthType defines the type of client authentication used for this connection + AuthType string `json:"auth_type" mapstructure:"auth_type" structs:"auth_type"` + ServiceAccountJSON string `json:"service_account_json" mapstructure:"service_account_json" structs:"service_account_json"` + TLSConfig *tls.Config + + // cloudDriverName is globally unique, but only needs to be retained for the lifetime + // of driver registration, not across plugin restarts.
+ cloudDriverName string + cloudDialerCleanup func() error Type string RawConfig map[string]interface{} maxConnectionLifetime time.Duration Initialized bool db *sql.DB + staticAccountsCache *cacheutil.Cache sync.Mutex } @@ -59,6 +99,11 @@ func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interf return nil, fmt.Errorf("connection_url cannot be empty") } + isTemplatedURL := true + if !strings.Contains(c.ConnectionURL, "{{username}}") || !strings.Contains(c.ConnectionURL, "{{password}}") { + isTemplatedURL = false + } + // Do not allow the username or password template pattern to be used as // part of the user-supplied username or password if strings.Contains(c.Username, "{{username}}") || @@ -69,16 +114,40 @@ func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interf return nil, fmt.Errorf("username and/or password cannot contain the template variables") } - // Don't escape special characters for MySQL password - // Also don't escape special characters for the username and password if - // the disable_escaping parameter is set to true - username := c.Username - password := c.Password - if !c.DisableEscaping { - username = url.PathEscape(c.Username) + // validate that at least one of username/password / self_managed is set + if !c.SelfManaged && (c.Username == "" && c.Password == "") && isTemplatedURL { + return nil, fmt.Errorf("must either provide username/password or set self-managed to 'true'") } - if (c.Type != "mysql") && !c.DisableEscaping { - password = url.PathEscape(c.Password) + + // validate that self-managed and username/password are mutually exclusive + if c.SelfManaged { + if (c.Username != "" || c.Password != "") || !isTemplatedURL { + return nil, fmt.Errorf("cannot use both self-managed and vault-managed workflows") + } + } + + var username string + var password string + if !c.SelfManaged { + // Default behavior + username = c.Username + password = c.Password + + // Don't escape special characters for MySQL password + // Also don't escape special characters for the username and password if + // the disable_escaping parameter is set to true + if !c.DisableEscaping { + username = url.PathEscape(c.Username) + } + if (c.Type != "mysql") && !c.DisableEscaping { + password = url.PathEscape(c.Password) + } + + } else { + // this is added to make middleware happy + // these placeholders are replaced when we make the actual static connection + username = defaultSelfManagedUsername + password = defaultSelfManagedPassword } // QueryHelper doesn't do any SQL escaping, but if it starts to do so @@ -107,17 +176,63 @@ func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interf return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err) } + if ok := ValidateAuthType(c.AuthType); !ok { + return nil, fmt.Errorf("invalid auth_type: %s", c.AuthType) + } + + if c.AuthType == AuthTypeGCPIAM { + c.cloudDriverName, err = uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("unable to generate UUID for IAM configuration: %w", err) + } + + // for _most_ sql databases, the driver itself contains no state. In the case of google's cloudsql drivers, + // however, the driver might store a credentials file, in which case the state stored by the driver is in + // fact critical to the proper function of the connection. So it needs to be registered here inside the + // ConnectionProducer init. 
+ dialerCleanup, err := c.registerDrivers(c.cloudDriverName, c.ServiceAccountJSON, c.usePrivateIP) + if err != nil { + return nil, err + } + + c.cloudDialerCleanup = dialerCleanup + } + + if c.SelfManaged && c.staticAccountsCache == nil { + logger := log.New(&log.LoggerOptions{ + Level: log.Trace, + }) + + closer := func(key interface{}, value interface{}) { + logger.Trace(fmt.Sprintf("Evicting key %s from static LRU cache", key)) + conn, ok := value.(*sql.DB) + if !ok { + logger.Error(fmt.Sprintf("error retrieving connection %s from static LRU cache", key)) + return + } + + if err := conn.Close(); err != nil { + logger.Error(fmt.Sprintf("error closing connection for %s, err=%s", key, err)) + } + logger.Trace(fmt.Sprintf("closed DB connection for %s", key)) + } + c.staticAccountsCache, err = cacheutil.NewCache(defaultStaticCacheSize, closer) + if err != nil { + return nil, fmt.Errorf("error initializing static account cache: %s", err) + } + } + // Set initialized to true at this point since all fields are set, // and the connection can be established at a later time. c.Initialized = true - if verifyConnection { + // only verify if not self-managed + if verifyConnection && !c.SelfManaged { if _, err := c.Connection(ctx); err != nil { return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) } if err := c.db.PingContext(ctx); err != nil { - return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + return nil, errwrap.Wrapf("error verifying connection: ping failed: {{err}}", err) } } @@ -137,36 +252,66 @@ func (c *SQLConnectionProducer) Connection(ctx context.Context) (interface{}, er // If the ping was unsuccessful, close it and ignore errors as we'll be // reestablishing anyways c.db.Close() + + // if IAM authentication is enabled + // ensure open dialer is also closed + if c.AuthType == AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } } - // For mssql backend, switch to sqlserver instead - dbType := c.Type - if c.Type == "mssql" { - dbType = "sqlserver" + // default non-IAM behavior + driverName := c.Type + + if c.AuthType == AuthTypeGCPIAM { + driverName = c.cloudDriverName + } else if c.Type == "mssql" { + // For mssql backend, switch to sqlserver instead + driverName = "sqlserver" } // Otherwise, attempt to make connection - conn := c.ConnectionURL + // Apply PostgreSQL specific settings if needed + conn := applyPostgresSettings(c.ConnectionURL) - // PostgreSQL specific settings - if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { - // Ensure timezone is set to UTC for all the connections - if strings.Contains(conn, "?") { - conn += "&timezone=UTC" - } else { - conn += "?timezone=UTC" + if driverName == dbTypePostgres && c.TLSConfig != nil { + config, err := pgx.ParseConfig(conn) + if err != nil { + return nil, fmt.Errorf("failed to parse config: %w", err) + } + if config.TLSConfig == nil { + // handle sslmode=disable + config.TLSConfig = &tls.Config{} } - // Ensure a reasonable application_name is set - if !strings.Contains(conn, "application_name") { - conn += "&application_name=vault" + config.TLSConfig.RootCAs = c.TLSConfig.RootCAs + config.TLSConfig.ClientCAs = c.TLSConfig.ClientCAs + config.TLSConfig.Certificates = c.TLSConfig.Certificates + + // Ensure there are no stale fallbacks when manually setting TLSConfig + for _, fallback := range config.Fallbacks { + fallback.TLSConfig = config.TLSConfig } - } - var err error - c.db, err = sql.Open(dbType, conn) - if err != nil { -
return nil, err + // stdlib.OpenDB does not return an error; config errors were caught above + c.db = stdlib.OpenDB(*config) + } else if driverName == dbTypePostgres && os.Getenv(pluginutil.PluginUsePostgresSSLInline) != "" { + var err error + // TODO: remove this deprecated function call in a future SDK version + c.db, err = openPostgres(driverName, conn) + if err != nil { + return nil, fmt.Errorf("failed to open connection: %w", err) + } + } else { + var err error + c.db, err = sql.Open(driverName, conn) + if err != nil { + return nil, fmt.Errorf("failed to open connection: %w", err) + } } // Set some connection pool settings. We don't need much of this, @@ -192,6 +337,13 @@ func (c *SQLConnectionProducer) Close() error { if c.db != nil { c.db.Close() + + // cleanup IAM dialer if it exists + if c.AuthType == AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } } c.db = nil @@ -208,3 +360,32 @@ func (c *SQLConnectionProducer) Close() error { func (c *SQLConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) { return "", "", dbutil.Unimplemented() } + +func applyPostgresSettings(connURL string) string { + res := connURL + if strings.HasPrefix(res, "postgres://") || strings.HasPrefix(res, "postgresql://") { + // Ensure timezone is set to UTC for all the connections + if strings.Contains(res, "?") { + res += "&timezone=UTC" + } else { + res += "?timezone=UTC" + } + + // Ensure a reasonable application_name is set + if !strings.Contains(res, "application_name") { + res += "&application_name=vault" + } + } + + return res +} + +var configurableAuthTypes = map[string]bool{ + AuthTypeUsernamePassword: true, + AuthTypeCert: true, + AuthTypeGCPIAM: true, +} + +func ValidateAuthType(authType string) bool { + return configurableAuthTypes[authType] +} diff --git a/sdk/database/helper/connutil/sql_stubs_oss.go b/sdk/database/helper/connutil/sql_stubs_oss.go new file mode 100644 index 000000000000..b8ab85bec412 --- /dev/null +++ b/sdk/database/helper/connutil/sql_stubs_oss.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package connutil + +import ( + "context" + "database/sql" + "errors" +) + +func (c *SQLConnectionProducer) StaticConnection(_ context.Context, _, _ string) (*sql.DB, error) { + return nil, errors.New("self-managed static roles only available in Vault Enterprise") +} diff --git a/sdk/database/helper/credsutil/caseop_enumer.go b/sdk/database/helper/credsutil/caseop_enumer.go new file mode 100644 index 000000000000..3a96c63222e9 --- /dev/null +++ b/sdk/database/helper/credsutil/caseop_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=CaseOp -transform=snake"; DO NOT EDIT. + +package credsutil + +import ( + "fmt" +) + +const _CaseOpName = "keep_caseuppercaselowercase" + +var _CaseOpIndex = [...]uint8{0, 9, 18, 27} + +func (i CaseOp) String() string { + if i < 0 || i >= CaseOp(len(_CaseOpIndex)-1) { + return fmt.Sprintf("CaseOp(%d)", i) + } + return _CaseOpName[_CaseOpIndex[i]:_CaseOpIndex[i+1]] +} + +var _CaseOpValues = []CaseOp{0, 1, 2} + +var _CaseOpNameToValueMap = map[string]CaseOp{ + _CaseOpName[0:9]: 0, + _CaseOpName[9:18]: 1, + _CaseOpName[18:27]: 2, +} + +// CaseOpString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum.
+func CaseOpString(s string) (CaseOp, error) { + if val, ok := _CaseOpNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to CaseOp values", s) +} + +// CaseOpValues returns all values of the enum +func CaseOpValues() []CaseOp { + return _CaseOpValues +} + +// IsACaseOp returns "true" if the value is listed in the enum definition. "false" otherwise +func (i CaseOp) IsACaseOp() bool { + for _, v := range _CaseOpValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/database/helper/credsutil/usernames.go b/sdk/database/helper/credsutil/usernames.go index 962208ac9a66..4ea4491c4f1d 100644 --- a/sdk/database/helper/credsutil/usernames.go +++ b/sdk/database/helper/credsutil/usernames.go @@ -9,6 +9,7 @@ import ( "time" ) +//go:generate enumer -type=CaseOp -transform=snake type CaseOp int const ( diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go index c0527addf999..5ebf9deb25de 100644 --- a/sdk/framework/backend.go +++ b/sdk/framework/backend.go @@ -18,11 +18,10 @@ import ( "sync" "time" - "github.com/hashicorp/go-kms-wrapping/entropy/v2" - jsonpatch "github.com/evanphx/json-patch/v5" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -60,6 +59,11 @@ type Backend struct { // InitializeFunc is the callback, which if set, will be invoked via // Initialize() just after a plugin has been mounted. + // + // Note that storage writes should only occur on the active instance within a + // primary cluster or local mount on a performance secondary. If your InitializeFunc + // writes to storage, you can use the backend's WriteSafeReplicationState() method + // to prevent it from attempting to write on a Vault instance with read-only storage. InitializeFunc InitializeFunc // PeriodicFunc is the callback, which if set, will be invoked when the @@ -70,6 +74,11 @@ type Backend struct { // entries in backend's storage, while the backend is still being used. // (Note the difference between this action and `Clean`, which is // invoked just before the backend is unmounted). + // + // Note that storage writes should only occur on the active instance within a + // primary cluster or local mount on a performance secondary. If your PeriodicFunc + // writes to storage, you can use the backend's WriteSafeReplicationState() method + // to prevent it from attempting to write on a Vault instance with read-only storage. PeriodicFunc periodicFunc // WALRollback is called when a WAL entry (see wal.go) has to be rolled @@ -211,7 +220,7 @@ func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*log // If the path is empty and it is a help operation, handle that. if req.Path == "" && req.Operation == logical.HelpOperation { - return b.handleRootHelp(req) + return b.handleRootHelp(ctx, req) } // Find the matching route @@ -466,12 +475,38 @@ func (b *Backend) Secret(k string) *Secret { return nil } +// WriteSafeReplicationState returns true if this backend instance is capable of writing +// to storage without receiving an ErrReadOnly error. The active instance in a primary +// cluster or a local mount on a performance secondary is capable of writing to storage. 
+func (b *Backend) WriteSafeReplicationState() bool { + replicationState := b.System().ReplicationState() + return (b.System().LocalMount() || !replicationState.HasState(consts.ReplicationPerformanceSecondary)) && + !replicationState.HasState(consts.ReplicationDRSecondary) && + !replicationState.HasState(consts.ReplicationPerformanceStandby) +} + +// init runs as a sync.Once function from any plugin entry point which needs to route requests by paths. +// It may panic if a coding error in the plugin is detected. +// For builtin plugins, this is unit tested in helper/builtinplugins/builtinplugins_test.go. +// For other plugins, any unit test that attempts to perform any request to the plugin will exercise these checks. func (b *Backend) init() { b.pathsRe = make([]*regexp.Regexp, len(b.Paths)) for i, p := range b.Paths { + // Detect the coding error of failing to initialise Pattern if len(p.Pattern) == 0 { panic(fmt.Sprintf("Routing pattern cannot be blank")) } + + // Detect the coding error of attempting to define a CreateOperation without defining an ExistenceCheck + if p.ExistenceCheck == nil { + if _, ok := p.Operations[logical.CreateOperation]; ok { + panic(fmt.Sprintf("Pattern %v defines a CreateOperation but no ExistenceCheck", p.Pattern)) + } + if _, ok := p.Callbacks[logical.CreateOperation]; ok { + panic(fmt.Sprintf("Pattern %v defines a CreateOperation but no ExistenceCheck", p.Pattern)) + } + } + // Automatically anchor the pattern if p.Pattern[0] != '^' { p.Pattern = "^" + p.Pattern @@ -479,6 +514,8 @@ func (b *Backend) init() { if p.Pattern[len(p.Pattern)-1] != '$' { p.Pattern = p.Pattern + "$" } + + // Detect the coding error of an invalid Pattern b.pathsRe[i] = regexp.MustCompile(p.Pattern) } } @@ -511,7 +548,7 @@ func (b *Backend) route(path string) (*Path, map[string]string) { return nil, nil } -func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error) { +func (b *Backend) handleRootHelp(ctx context.Context, req *logical.Request) (*logical.Response, error) { // Build a mapping of the paths and get the paths alphabetized to // make the output prettier. pathsMap := make(map[string]*Path) @@ -559,6 +596,10 @@ func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error vaultVersion = env.VaultVersion } + redactVersion, _, _, _ := logical.CtxRedactionSettingsValue(ctx) + if redactVersion { + vaultVersion = "" + } doc := NewOASDocument(vaultVersion) if err := documentPaths(b, requestResponsePrefix, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) @@ -692,11 +733,13 @@ func (b *Backend) handleWALRollback(ctx context.Context, req *logical.Request) ( return logical.ErrorResponse(merr.Error()), nil } +// SendEvent is used to send events through the underlying EventSender. +// It returns ErrNoEvents if the events system has not been configured or enabled. func (b *Backend) SendEvent(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { if b.events == nil { return ErrNoEvents } - return b.events.Send(ctx, eventType, event) + return b.events.SendEvent(ctx, eventType, event) } // FieldSchema is a basic schema to describe the format of a path field. 
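To make the WriteSafeReplicationState guidance above concrete, here is a minimal sketch (not part of this diff) of a PeriodicFunc that guards its storage writes; the backend type, storage key, and package name are hypothetical:

package examplebackend

import (
	"context"
	"time"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// exampleBackend embeds *framework.Backend, as Vault plugin backends typically do.
type exampleBackend struct {
	*framework.Backend
}

// periodicFunc would be wired up via framework.Backend{PeriodicFunc: b.periodicFunc}.
func (b *exampleBackend) periodicFunc(ctx context.Context, req *logical.Request) error {
	// Skip the write unless this instance can write to storage: the active
	// node of a primary cluster, or a local mount on a performance secondary.
	if !b.WriteSafeReplicationState() {
		return nil
	}
	entry, err := logical.StorageEntryJSON("config/last-run", time.Now().UTC())
	if err != nil {
		return err
	}
	return req.Storage.Put(ctx, entry)
}
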
@@ -710,16 +753,35 @@ type FieldSchema struct { Required bool Deprecated bool - // Query indicates this field will be sent as a query parameter: + // Query indicates this field will be expected as a query parameter as part + // of ReadOperation, ListOperation or DeleteOperation requests: // // /v1/foo/bar?some_param=some_value // - // It doesn't affect handling of the value, but may be used for documentation. + // The field will still be expected as a request body parameter for + // CreateOperation or UpdateOperation requests! + // + // To put that another way, you should set Query for any non-path parameter + // you want to use in a read/list/delete operation. While setting the Query + // field to `true` is not required in such cases (Vault will expose the + // query parameters to you via req.Data regardless), it is highly + // recommended to do so in order to improve the quality of the generated + // OpenAPI documentation (as well as any code generation based on it), which + // will otherwise incorrectly omit the parameter. + // + // The reason for this design is historical: back at the start of 2018, + // query parameters were not mapped to fields at all, and it was implicit + // that all non-path fields were exclusively for the use of create/update + // operations. Since then, support for query parameters has gradually been + // extended to read, delete and list operations - and now this declarative + // metadata is needed, so that the OpenAPI generator can know which + // parameters are actually referred to, from within the code of + // read/delete/list operation handler functions. Query bool // AllowedValues is an optional list of permitted values for this field. // This constraint is not (yet) enforced by the framework, but the list is - // output as part of OpenAPI generation and may effect documentation and + // output as part of OpenAPI generation and may affect documentation and // dynamic UI generation. AllowedValues []interface{} diff --git a/sdk/framework/backend_test.go b/sdk/framework/backend_test.go index 0b7a2054373d..dba5b0c0a3da 100644 --- a/sdk/framework/backend_test.go +++ b/sdk/framework/backend_test.go @@ -14,7 +14,6 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/require" diff --git a/sdk/framework/field_type.go b/sdk/framework/field_type.go index d62ffe6359bb..ee07b6afe866 100644 --- a/sdk/framework/field_type.go +++ b/sdk/framework/field_type.go @@ -61,7 +61,7 @@ const ( // TypeFloat parses both float32 and float64 values TypeFloat - // TypeTime represents absolute time. It accepts an RFC3999-formatted + // TypeTime represents absolute time. It accepts an RFC3339-formatted // string (with or without fractional seconds), or an epoch timestamp // formatted as a string or a number. The resulting time.Time // is converted to UTC. 
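To illustrate the Query guidance above, here is a minimal sketch (not part of this diff) of a path definition that follows it, reusing the hypothetical exampleBackend type from the earlier sketch; the field names and handler are likewise hypothetical:

package examplebackend

import (
	"context"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

func (b *exampleBackend) pathCreds() *framework.Path {
	return &framework.Path{
		Pattern: "creds/" + framework.GenericNameRegex("name"),
		Fields: map[string]*framework.FieldSchema{
			// Path parameter: never needs Query.
			"name": {
				Type:        framework.TypeString,
				Description: "Name of the role.",
				Required:    true,
			},
			// Non-path parameter read by the ReadOperation handler via
			// req.Data: marking it Query ensures the generated OpenAPI
			// documents it as a query parameter. For POST-mapped operations
			// it would still be a body field.
			"ttl": {
				Type:        framework.TypeDurationSecond,
				Description: "Requested TTL for the credentials.",
				Query:       true,
			},
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.readCreds,
			},
		},
	}
}

// readCreds is a stub handler; TypeDurationSecond fields are returned as an
// int number of seconds by FieldData.Get.
func (b *exampleBackend) readCreds(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	ttl := d.Get("ttl").(int)
	return &logical.Response{Data: map[string]interface{}{"ttl": ttl}}, nil
}
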
diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index d69e0b83e69d..82e7f5fb6441 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -107,13 +107,12 @@ type OASLicense struct { } type OASPathItem struct { - Description string `json:"description,omitempty"` - Parameters []OASParameter `json:"parameters,omitempty"` - Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"` - Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"` - CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"` - DisplayNavigation bool `json:"x-vault-displayNavigation,omitempty" mapstructure:"x-vault-displayNavigation"` - DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"` + Description string `json:"description,omitempty"` + Parameters []OASParameter `json:"parameters,omitempty"` + Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"` + Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"` + CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"` + DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"` Get *OASOperation `json:"get,omitempty"` Post *OASOperation `json:"post,omitempty"` @@ -165,6 +164,8 @@ type OASSchema struct { Description string `json:"description,omitempty"` Properties map[string]*OASSchema `json:"properties,omitempty"` + AdditionalProperties interface{} `json:"additionalProperties,omitempty"` + // Required is a list of keys in Properties that are required to be present. This is a different // approach than OASParameter (unfortunately), but is how JSONSchema handles 'required'. Required []string `json:"required,omitempty"` @@ -196,6 +197,29 @@ var OASStdRespNoContent = &OASResponse{ Description: "empty body", } +var OASStdRespListOK = &OASResponse{ + Description: "OK", + Content: OASContent{ + "application/json": &OASMediaTypeObject{ + Schema: &OASSchema{ + Ref: "#/components/schemas/StandardListResponse", + }, + }, + }, +} + +var OASStdSchemaStandardListResponse = &OASSchema{ + Type: "object", + Properties: map[string]*OASSchema{ + "keys": { + Type: "array", + Items: &OASSchema{ + Type: "string", + }, + }, + }, +} + // Regex for handling fields in paths, and string cleanup. // Predefined here to avoid substantial recompilation. @@ -208,7 +232,7 @@ var ( // documentPaths parses all paths in a framework.Backend into OpenAPI paths. func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocument) error { for _, p := range backend.Paths { - if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, backend.BackendType, doc); err != nil { + if err := documentPath(p, backend, requestResponsePrefix, doc); err != nil { return err } } @@ -217,18 +241,18 @@ func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocum } // documentPath parses a framework.Path into one or more OpenAPI paths. 
-func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, backendType logical.BackendType, doc *OASDocument) error { +func documentPath(p *Path, backend *Backend, requestResponsePrefix string, doc *OASDocument) error { var sudoPaths []string var unauthPaths []string - if specialPaths != nil { - sudoPaths = specialPaths.Root - unauthPaths = specialPaths.Unauthenticated + if backend.PathsSpecial != nil { + sudoPaths = backend.PathsSpecial.Root + unauthPaths = backend.PathsSpecial.Unauthenticated } // Convert optional parameters into distinct patterns to be processed independently. forceUnpublished := false - paths, err := expandPattern(p.Pattern) + paths, captures, err := expandPattern(p.Pattern) if err != nil { if errors.Is(err, errUnsupportableRegexpOperationForOpenAPI) { // Pattern cannot be transformed into sensible OpenAPI paths. In this case, we override the later @@ -244,7 +268,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } } - for _, path := range paths { + for pathIndex, path := range paths { // Construct a top level PathItem which will be populated as the path is processed. pi := OASPathItem{ Description: cleanString(p.HelpSynopsis), @@ -252,7 +276,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st pi.Sudo = specialPathMatch(path, sudoPaths) pi.Unauthenticated = specialPathMatch(path, unauthPaths) - pi.DisplayAttrs = p.DisplayAttrs + pi.DisplayAttrs = withoutOperationHints(p.DisplayAttrs) // If the newer style Operations map isn't defined, create one from the legacy fields. operations := p.Operations @@ -269,34 +293,22 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Process path and header parameters, which are common to all operations. // Body fields will be added to individual operations. - pathFields, bodyFields := splitFields(p.Fields, path) + pathFields, queryFields, bodyFields := splitFields(p.Fields, path, captures) for name, field := range pathFields { - location := "path" - required := true - - if field == nil { - continue - } - - if field.Query { - location = "query" - required = false - } - t := convertType(field.Type) p := OASParameter{ Name: name, Description: cleanString(field.Description), - In: location, + In: "path", Schema: &OASSchema{ Type: t.baseType, Pattern: t.pattern, Enum: field.AllowedValues, Default: field.Default, - DisplayAttrs: field.DisplayAttrs, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), }, - Required: required, + Required: true, Deprecated: field.Deprecated, } pi.Parameters = append(pi.Parameters, p) @@ -304,11 +316,12 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Sort parameters for a stable output sort.Slice(pi.Parameters, func(i, j int) bool { - return strings.ToLower(pi.Parameters[i].Name) < strings.ToLower(pi.Parameters[j].Name) + return pi.Parameters[i].Name < pi.Parameters[j].Name }) // Process each supported operation by building up an Operation object // with descriptions, properties and examples from the framework.Path data. + var listOperation *OASOperation for opType, opHandler := range operations { props := opHandler.Properties() if props.Unpublished || forceUnpublished { @@ -324,19 +337,28 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } } - // If both List and Read are defined, only process Read. 
- if opType == logical.ListOperation && operations[logical.ReadOperation] != nil { - continue - } - op := NewOASOperation() + operationID := constructOperationID( + path, + pathIndex, + p.DisplayAttrs, + opType, + props.DisplayAttrs, + requestResponsePrefix, + ) + op.Summary = props.Summary op.Description = props.Description op.Deprecated = props.Deprecated + op.OperationID = operationID - // Add any fields not present in the path as body parameters for POST. - if opType == logical.CreateOperation || opType == logical.UpdateOperation { + switch opType { + // For the operation types which map to POST/PUT methods, and so allow for request body parameters, + // prepare the request body definition + case logical.CreateOperation: + fallthrough + case logical.UpdateOperation: s := &OASSchema{ Type: "object", Properties: make(map[string]*OASSchema), @@ -350,38 +372,41 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st continue } - openapiField := convertType(field.Type) - if field.Required { - s.Required = append(s.Required, name) - } + addFieldToOASSchema(s, name, field) + } - p := OASSchema{ - Type: openapiField.baseType, - Description: cleanString(field.Description), - Format: openapiField.format, - Pattern: openapiField.pattern, - Enum: field.AllowedValues, - Default: field.Default, - Deprecated: field.Deprecated, - DisplayAttrs: field.DisplayAttrs, - } - if openapiField.baseType == "array" { - p.Items = &OASSchema{ - Type: openapiField.items, - } - } - s.Properties[name] = &p + // Contrary to what one might guess, fields marked with "Query: true" are only query fields when the + // request method is one which does not allow for a request body - they are still body fields when + // dealing with a POST/PUT request. + for name, field := range queryFields { + addFieldToOASSchema(s, name, field) } + // Make the ordering deterministic, so that the generated OpenAPI spec document, observed over several + // versions, doesn't contain spurious non-semantic changes. + sort.Strings(s.Required) + // If examples were given, use the first one as the sample // of this schema. if len(props.Examples) > 0 { s.Example = props.Examples[0].Data } + // TakesArbitraryInput is a case like writing to: + // - sys/wrapping/wrap + // - kv-v1/{path} + // - cubbyhole/{path} + // where the entire request body is an arbitrary JSON object used directly as input. + if p.TakesArbitraryInput { + // Whilst the default value of additionalProperties is true according to the JSON Schema standard, + // making this explicit helps communicate this to humans, and also tools such as + // https://openapi-generator.tech/ which treat it as defaulting to false. + s.AdditionalProperties = true + } + // Set the final request body. Only JSON request data is supported. - if len(s.Properties) > 0 || s.Example != nil { - requestName := constructRequestResponseName(path, requestResponsePrefix, "Request") + if len(s.Properties) > 0 { + requestName := hyphenatedToTitleCase(operationID) + "Request" doc.Components.Schemas[requestName] = s op.RequestBody = &OASRequestBody{ Required: true, @@ -391,12 +416,24 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st }, }, } + } else if p.TakesArbitraryInput { + // When there are no properties, the schema is trivial enough that it makes more sense to write it + // inline, rather than as a named component. 
+ op.RequestBody = &OASRequestBody{ + Required: true, + Content: OASContent{ + "application/json": &OASMediaTypeObject{ + Schema: s, + }, + }, + } } - } - // LIST is represented as GET with a `list` query parameter. - if opType == logical.ListOperation { - // Only accepts List (due to the above skipping of ListOperations that also have ReadOperations) + // For the operation types which map to HTTP methods without a request body, populate query parameters + case logical.ListOperation: + // LIST is represented as GET with a `list` query parameter. Code later on in this function will assign + // list operations to a path with an extra trailing slash, ensuring they do not collide with read + // operations. op.Parameters = append(op.Parameters, OASParameter{ Name: "list", Description: "Must be set to `true`", @@ -404,19 +441,37 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st In: "query", Schema: &OASSchema{Type: "string", Enum: []interface{}{"true"}}, }) - } else if opType == logical.ReadOperation && operations[logical.ListOperation] != nil { - // Accepts both Read and List - op.Parameters = append(op.Parameters, OASParameter{ - Name: "list", - Description: "Return a list if `true`", - In: "query", - Schema: &OASSchema{Type: "string"}, + fallthrough + case logical.DeleteOperation: + fallthrough + case logical.ReadOperation: + for name, field := range queryFields { + t := convertType(field.Type) + p := OASParameter{ + Name: name, + Description: cleanString(field.Description), + In: "query", + Schema: &OASSchema{ + Type: t.baseType, + Pattern: t.pattern, + Enum: field.AllowedValues, + Default: field.Default, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + }, + Deprecated: field.Deprecated, + } + op.Parameters = append(op.Parameters, p) + } + + // Sort parameters for a stable output + sort.Slice(op.Parameters, func(i, j int) bool { + return op.Parameters[i].Name < op.Parameters[j].Name }) } // Add tags based on backend type var tags []string - switch backendType { + switch backend.BackendType { case logical.TypeLogical: tags = []string{"secrets"} case logical.TypeCredential: @@ -429,6 +484,9 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st if len(props.Responses) == 0 { if opType == logical.DeleteOperation { op.Responses[204] = OASStdRespNoContent + } else if opType == logical.ListOperation { + op.Responses[200] = OASStdRespListOK + doc.Components.Schemas["StandardListResponse"] = OASStdSchemaStandardListResponse } else { op.Responses[200] = OASStdRespOK } @@ -477,7 +535,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st Enum: field.AllowedValues, Default: field.Default, Deprecated: field.Deprecated, - DisplayAttrs: field.DisplayAttrs, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), } if openapiField.baseType == "array" { p.Items = &OASSchema{ @@ -488,7 +546,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } if len(resp.Fields) != 0 { - responseName := constructRequestResponseName(path, requestResponsePrefix, "Response") + responseName := hyphenatedToTitleCase(operationID) + "Response" doc.Components.Schemas[responseName] = responseSchema content = OASContent{ "application/json": &OASMediaTypeObject{ @@ -507,44 +565,107 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st switch opType { case logical.CreateOperation, logical.UpdateOperation: pi.Post = op - case logical.ReadOperation, 
logical.ListOperation: + case logical.ReadOperation: pi.Get = op case logical.DeleteOperation: pi.Delete = op + case logical.ListOperation: + listOperation = op } } - doc.Paths["/"+path] = &pi - } + // The conventions enforced by the Vault HTTP routing code make it impossible to match a path with a trailing + // slash to anything other than a ListOperation. Catch mistakes in path definition, to enforce that if both of + // the two following blocks of code (non-list, and list) write an OpenAPI path to the output document, then the + // first one will definitely not have a trailing slash. + originalPathHasTrailingSlash := strings.HasSuffix(path, "/") + if originalPathHasTrailingSlash && (pi.Get != nil || pi.Post != nil || pi.Delete != nil) { + backend.Logger().Warn( + "OpenAPI spec generation: discarding impossible-to-invoke non-list operations from path with "+ + "required trailing slash; this is a bug in the backend code", "path", path) + pi.Get = nil + pi.Post = nil + pi.Delete = nil + } - return nil -} + // Write the regular, non-list, OpenAPI path to the OpenAPI document, UNLESS we generated a ListOperation, and + // NO OTHER operation types. In that fairly common case (there are lots of list-only endpoints), we avoid + // writing a redundant OpenAPI path for (e.g.) "auth/token/accessors" with no operations, only to then write + // one for "auth/token/accessors/" immediately below. + // + // On the other hand, we do still write the OpenAPI path here if we generated ZERO operation types - this serves + // to provide documentation to a human that an endpoint exists, even if it has no invokable OpenAPI operations. + // Examples of this include kv-v2's ".*" endpoint (regex cannot be translated to OpenAPI parameters), and the + // auth/oci/login endpoint (implements ResolveRoleOperation only, only callable from inside Vault). + if listOperation == nil || pi.Get != nil || pi.Post != nil || pi.Delete != nil { + openAPIPath := "/" + path + if doc.Paths[openAPIPath] != nil { + backend.Logger().Warn( + "OpenAPI spec generation: multiple framework.Path instances generated the same path; "+ + "last processed wins", "path", openAPIPath) + } + doc.Paths[openAPIPath] = &pi + } -// constructRequestResponseName joins the given path with prefix & suffix into -// a CamelCase request or response name. -// -// For example, path=/config/lease/{name}, prefix="secret", suffix="request" -// will result in "SecretConfigLeaseRequest" -func constructRequestResponseName(path, prefix, suffix string) string { - var b strings.Builder + // If there is a ListOperation, write it to a separate OpenAPI path in the document. + if listOperation != nil { + // Append a slash here to disambiguate from the path written immediately above. + // However, if the path already contains a trailing slash, we want to avoid doubling it, and it is + // guaranteed (through the interaction of logic in the last two blocks) that the block immediately above + // will NOT have written a path to the OpenAPI document. + if !originalPathHasTrailingSlash { + path += "/" + } + + listPathItem := OASPathItem{ + Description: pi.Description, + Parameters: pi.Parameters, + DisplayAttrs: pi.DisplayAttrs, - title := cases.Title(language.English) + // Since the path may now have an extra slash on the end, we need to recalculate the special path + // matches, as the sudo or unauthenticated status may be changed as a result! 
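// Hypothetical illustration: with PathsSpecial.Root containing "foo/bar", the
// non-list path "foo/bar" matches as sudo while its list variant "foo/bar/" does
// not, so both flags must be recomputed here rather than copied from the
// OASPathItem assembled above.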
+ Sudo: specialPathMatch(path, sudoPaths), + Unauthenticated: specialPathMatch(path, unauthPaths), - b.WriteString(title.String(prefix)) + Get: listOperation, + } - // split the path by / _ - separators - for _, token := range strings.FieldsFunc(path, func(r rune) bool { - return r == '/' || r == '_' || r == '-' - }) { - // exclude request fields - if !strings.ContainsAny(token, "{}") { - b.WriteString(title.String(token)) + openAPIPath := "/" + path + if doc.Paths[openAPIPath] != nil { + backend.Logger().Warn( + "OpenAPI spec generation: multiple framework.Path instances generated the same path; "+ + "last processed wins", "path", openAPIPath) + } + doc.Paths[openAPIPath] = &listPathItem } } - b.WriteString(suffix) + return nil +} + +func addFieldToOASSchema(s *OASSchema, name string, field *FieldSchema) { + openapiField := convertType(field.Type) + if field.Required { + s.Required = append(s.Required, name) + } + + p := OASSchema{ + Type: openapiField.baseType, + Description: cleanString(field.Description), + Format: openapiField.format, + Pattern: openapiField.pattern, + Enum: field.AllowedValues, + Default: field.Default, + Deprecated: field.Deprecated, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + } + if openapiField.baseType == "array" { + p.Items = &OASSchema{ + Type: openapiField.items, + } + } - return b.String() + s.Properties[name] = &p } // specialPathMatch checks whether the given path matches one of the special @@ -599,9 +720,121 @@ func specialPathMatch(path string, specialPaths []string) bool { return false } +// constructOperationID joins the given inputs into a hyphen-separated +// lower-case operation id, which is also used as a prefix for request and +// response names. +// +// The OperationPrefix / -Verb / -Suffix found in display attributes will be +// used, if provided. Otherwise, the function falls back to using the path and +// the operation. +// +// Examples of generated operation identifiers: +// - kvv2-write +// - kvv2-read +// - google-cloud-login +// - google-cloud-write-role +func constructOperationID( + path string, + pathIndex int, + pathAttributes *DisplayAttributes, + operation logical.Operation, + operationAttributes *DisplayAttributes, + defaultPrefix string, +) string { + var ( + prefix string + verb string + suffix string + ) + + if operationAttributes != nil { + prefix = operationAttributes.OperationPrefix + verb = operationAttributes.OperationVerb + suffix = operationAttributes.OperationSuffix + } + + if pathAttributes != nil { + if prefix == "" { + prefix = pathAttributes.OperationPrefix + } + if verb == "" { + verb = pathAttributes.OperationVerb + } + if suffix == "" { + suffix = pathAttributes.OperationSuffix + } + } + + // A single suffix string can contain multiple pipe-delimited strings. To + // determine the actual suffix, we attempt to match it by the index of the + // paths returned from `expandPattern(...)`. For example: + // + // pki/ + // Pattern: "keys/generate/(internal|exported|kms)", + // DisplayAttrs: { + // ... 
+ // OperationSuffix: "internal-key|exported-key|kms-key", + // }, + // + // will expand into three paths and corresponding suffixes: + // + // path 0: "keys/generate/internal" suffix: internal-key + // path 1: "keys/generate/exported" suffix: exported-key + // path 2: "keys/generate/kms" suffix: kms-key + // + pathIndexOutOfRange := false + + if suffixes := strings.Split(suffix, "|"); len(suffixes) > 1 || pathIndex > 0 { + // if the index is out of bounds, fall back to the old logic + if pathIndex >= len(suffixes) { + suffix = "" + pathIndexOutOfRange = true + } else { + suffix = suffixes[pathIndex] + } + } + + // a helper that hyphenates & lower-cases the slice elements, skipping any empty ones + toLowerHyphenate := func(parts []string) string { + filtered := make([]string, 0, len(parts)) + for _, e := range parts { + if e != "" { + filtered = append(filtered, e) + } + } + return strings.ToLower(strings.Join(filtered, "-")) + } + + // fall back to using the path + operation to construct the operation id + var ( + needPrefix = prefix == "" && verb == "" + needVerb = verb == "" + needSuffix = suffix == "" && (verb == "" || pathIndexOutOfRange) + ) + + if needPrefix { + prefix = defaultPrefix + } + + if needVerb { + if operation == logical.UpdateOperation { + verb = "write" + } else { + verb = string(operation) + } + } + + if needSuffix { + suffix = toLowerHyphenate(nonWordRe.Split(path, -1)) + } + + return toLowerHyphenate([]string{prefix, verb, suffix}) +} + // expandPattern expands a regex pattern by generating permutations of any optional parameters -// and changing named parameters into their {openapi} equivalents. -func expandPattern(pattern string) ([]string, error) { +// and changing named parameters into their {openapi} equivalents. It also returns the names of all capturing groups +// observed in the pattern. +func expandPattern(pattern string) (paths []string, captures map[string]struct{}, err error) { // Happily, the Go regexp library exposes its underlying "parse to AST" functionality, so we can rely on that to do // the hard work of interpreting the regexp syntax. rx, err := syntax.Parse(pattern, syntax.Perl) @@ -611,12 +844,12 @@ func expandPattern(pattern string) ([]string, error) { panic(err) } - paths, err := collectPathsFromRegexpAST(rx) + paths, captures, err = collectPathsFromRegexpAST(rx) if err != nil { - return nil, err + return nil, nil, err } - return paths, nil + return paths, captures, nil } type pathCollector struct { @@ -637,23 +870,28 @@ type pathCollector struct { // // Each named capture group - i.e. (?P<name>something here) - is replaced with an OpenAPI parameter - i.e. {name} - and // the subtree of regexp AST inside the parameter is completely skipped.
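// As a sketch of the intended behaviour (an illustrative pattern, not one taken
// from the framework), parsing
//
//	keys/(import|generate)/(?P<name>\w+)
//
// would produce the paths ["keys/import/{name}", "keys/generate/{name}"] and a
// capture set containing "name".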
-func collectPathsFromRegexpAST(rx *syntax.Regexp) ([]string, error) { - pathCollectors, err := collectPathsFromRegexpASTInternal(rx, []*pathCollector{{}}) +func collectPathsFromRegexpAST(rx *syntax.Regexp) (paths []string, captures map[string]struct{}, err error) { + captures = make(map[string]struct{}) + pathCollectors, err := collectPathsFromRegexpASTInternal(rx, []*pathCollector{{}}, captures) if err != nil { - return nil, err + return nil, nil, err } - paths := make([]string, 0, len(pathCollectors)) + paths = make([]string, 0, len(pathCollectors)) for _, collector := range pathCollectors { if collector.conditionalSlashAppendedAtLength != collector.Len() { paths = append(paths, collector.String()) } } - return paths, nil + return paths, captures, nil } var errUnsupportableRegexpOperationForOpenAPI = errors.New("path regexp uses an operation that cannot be translated to an OpenAPI pattern") -func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCollector) ([]*pathCollector, error) { +func collectPathsFromRegexpASTInternal( + rx *syntax.Regexp, + appendingTo []*pathCollector, + captures map[string]struct{}, +) ([]*pathCollector, error) { var err error // Depending on the type of this regexp AST node (its Op, i.e. operation), figure out whether it contributes any @@ -680,7 +918,7 @@ func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCol // those pieces. case syntax.OpConcat: for _, child := range rx.Sub { - appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo) + appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo, captures) if err != nil { return nil, err } @@ -711,7 +949,7 @@ func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCol childAppendingTo = append(childAppendingTo, newCollector) } } - childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo) + childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo, captures) if err != nil { return nil, err } @@ -729,7 +967,7 @@ func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCol newCollector.conditionalSlashAppendedAtLength = collector.conditionalSlashAppendedAtLength childAppendingTo = append(childAppendingTo, newCollector) } - childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo) + childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo, captures) if err != nil { return nil, err } @@ -751,7 +989,7 @@ func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCol // In Vault, an unnamed capturing group is not actually used for capturing. // We treat it exactly the same as OpConcat. 
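// For example, a hypothetical fragment such as "(foo|bar)" arrives here as an
// unnamed OpCapture wrapping an alternation; recursing into its children just as
// OpConcat does lets each alternative contribute its own expanded path, without
// emitting any {name} parameter or capture entry.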
for _, child := range rx.Sub { - appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo) + appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo, captures) if err != nil { return nil, err } @@ -764,6 +1002,7 @@ func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCol builder.WriteString(rx.Name) builder.WriteRune('}') } + captures[rx.Name] = struct{}{} } // Any other kind of operation is a problem, and will trigger an error, resulting in the pattern being left out of @@ -824,8 +1063,8 @@ func convertType(t FieldType) schemaType { ret.baseType = "integer" ret.format = "int64" case TypeDurationSecond, TypeSignedDurationSecond: - ret.baseType = "integer" - ret.format = "seconds" + ret.baseType = "string" + ret.format = "duration" case TypeBool: ret.baseType = "boolean" case TypeMap: @@ -865,52 +1104,96 @@ func cleanString(s string) string { return s } -// splitFields partitions fields into path and body groups -// The input pattern is expected to have been run through expandPattern, -// with paths parameters denotes in {braces}. -func splitFields(allFields map[string]*FieldSchema, pattern string) (pathFields, bodyFields map[string]*FieldSchema) { +// splitFields partitions fields into path, query and body groups. It uses information on capturing groups previously +// collected by expandPattern, which is necessary to correctly match the treatment in (*Backend).HandleRequest: +// a field counts as a path field if it appears in any capture in the regex, and if that capture was inside an +// alternation or optional part of the regex which does not survive in the OpenAPI path pattern currently being +// processed, that field should NOT be rendered to the OpenAPI spec AT ALL. +func splitFields( + allFields map[string]*FieldSchema, + openAPIPathPattern string, + captures map[string]struct{}, +) (pathFields, queryFields, bodyFields map[string]*FieldSchema) { pathFields = make(map[string]*FieldSchema) + queryFields = make(map[string]*FieldSchema) bodyFields = make(map[string]*FieldSchema) - for _, match := range pathFieldsRe.FindAllStringSubmatch(pattern, -1) { + for _, match := range pathFieldsRe.FindAllStringSubmatch(openAPIPathPattern, -1) { name := match[1] pathFields[name] = allFields[name] } for name, field := range allFields { - if _, ok := pathFields[name]; !ok { + // Any field which relates to a regex capture was already processed above, if it needed to be. + if _, ok := captures[name]; !ok { if field.Query { - pathFields[name] = field + queryFields[name] = field } else { bodyFields[name] = field } } } - return pathFields, bodyFields + return pathFields, queryFields, bodyFields +} + +// withoutOperationHints returns a copy of the given DisplayAttributes without +// OperationPrefix / OperationVerb / OperationSuffix since we don't need these +// fields in the final output. 
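// Illustrative values (not taken from a real backend):
//
//	withoutOperationHints(&DisplayAttributes{Name: "Role", OperationVerb: "read"})
//	// returns &DisplayAttributes{Name: "Role"}
//
//	withoutOperationHints(&DisplayAttributes{OperationVerb: "read"})
//	// returns nil, since clearing the hints leaves the zero value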
+func withoutOperationHints(in *DisplayAttributes) *DisplayAttributes { + if in == nil { + return nil + } + + copy := *in + + copy.OperationPrefix = "" + copy.OperationVerb = "" + copy.OperationSuffix = "" + + // return nil if all fields are empty to avoid empty JSON objects + if copy == (DisplayAttributes{}) { + return nil + } + + return &copy +} + +func hyphenatedToTitleCase(in string) string { + var b strings.Builder + + title := cases.Title(language.English, cases.NoLower) + + for _, word := range strings.Split(in, "-") { + b.WriteString(title.String(word)) + } + + return b.String() +} // cleanedResponse is identical to logical.Response but with nulls // removed from JSON encoding type cleanedResponse struct { - Secret *logical.Secret `json:"secret,omitempty"` - Auth *logical.Auth `json:"auth,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - Redirect string `json:"redirect,omitempty"` - Warnings []string `json:"warnings,omitempty"` - WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` + Secret *logical.Secret `json:"secret,omitempty"` + Auth *logical.Auth `json:"auth,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Redirect string `json:"redirect,omitempty"` + Warnings []string `json:"warnings,omitempty"` + WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + MountType string `json:"mount_type,omitempty"` } func cleanResponse(resp *logical.Response) *cleanedResponse { return &cleanedResponse{ - Secret: resp.Secret, - Auth: resp.Auth, - Data: resp.Data, - Redirect: resp.Redirect, - Warnings: resp.Warnings, - WrapInfo: resp.WrapInfo, - Headers: resp.Headers, + Secret: resp.Secret, + Auth: resp.Auth, + Data: resp.Data, + Redirect: resp.Redirect, + Warnings: resp.Warnings, + WrapInfo: resp.WrapInfo, + Headers: resp.Headers, + MountType: resp.MountType, } } @@ -924,6 +1207,9 @@ func cleanResponse(resp *logical.Response) *cleanedResponse { // postSysToolsRandomUrlbytes_2 // // An optional user-provided suffix ("context") may also be appended. +// +// Deprecated: operationIDs are now populated using `constructOperationID`. +// This function is here for backwards compatibility with older plugins.
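// As a hypothetical example of the compatibility behaviour: an external plugin
// built against an older SDK never sets OperationID on its operations, so this
// function still assigns legacy identifiers such as "getFooId" for it, while any
// operation already stamped by constructOperationID is skipped via the
// OperationID != "" check below.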
func (d *OASDocument) CreateOperationIDs(context string) { opIDCount := make(map[string]int) var paths []string @@ -951,6 +1237,10 @@ func (d *OASDocument) CreateOperationIDs(context string) { continue } + if oasOperation.OperationID != "" { + continue + } + // Discard "_mount_path" from any {thing_mount_path} parameters path = strings.Replace(path, "_mount_path", "", 1) diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go index bb1338b4e0a3..4cb94342ffde 100644 --- a/sdk/framework/openapi_test.go +++ b/sdk/framework/openapi_test.go @@ -160,13 +160,13 @@ func TestOpenAPI_ExpandPattern(t *testing.T) { } for i, test := range tests { - out, err := expandPattern(test.inPattern) + paths, _, err := expandPattern(test.inPattern) if err != nil { t.Fatal(err) } - sort.Strings(out) - if !reflect.DeepEqual(out, test.outPathlets) { - t.Fatalf("Test %d: Expected %v got %v", i, test.outPathlets, out) + sort.Strings(paths) + if !reflect.DeepEqual(paths, test.outPathlets) { + t.Fatalf("Test %d: Expected %v got %v", i, test.outPathlets, paths) } } } @@ -188,7 +188,7 @@ func TestOpenAPI_ExpandPattern_ReturnsError(t *testing.T) { } for i, test := range tests { - _, err := expandPattern(test.inPattern) + _, _, err := expandPattern(test.inPattern) if err != test.outError { t.Fatalf("Test %d: Expected %q got %q", i, test.outError, err) } @@ -196,31 +196,50 @@ func TestOpenAPI_ExpandPattern_ReturnsError(t *testing.T) { } func TestOpenAPI_SplitFields(t *testing.T) { + paths, captures, err := expandPattern("some/" + GenericNameRegex("a") + "/path" + OptionalParamRegex("e")) + if err != nil { + t.Fatal(err) + } + fields := map[string]*FieldSchema{ "a": {Description: "path"}, "b": {Description: "body"}, "c": {Description: "body"}, "d": {Description: "body"}, "e": {Description: "path"}, + "f": {Description: "query", Query: true}, } - pathFields, bodyFields := splitFields(fields, "some/{a}/path/{e}") + for index, path := range paths { + pathFields, queryFields, bodyFields := splitFields(fields, path, captures) - lp := len(pathFields) - lb := len(bodyFields) - l := len(fields) - if lp+lb != l { - t.Fatalf("split length error: %d + %d != %d", lp, lb, l) - } + numPath := len(pathFields) + numQuery := len(queryFields) + numBody := len(bodyFields) + numExpectedDiscarded := 0 + // The first path generated is expected to be the one omitting the optional parameter field "e" + if index == 0 { + numExpectedDiscarded = 1 + } + l := len(fields) + if numPath+numQuery+numBody+numExpectedDiscarded != l { + t.Fatalf("split length error: %d + %d + %d + %d != %d", numPath, numQuery, numBody, numExpectedDiscarded, l) + } - for name, field := range pathFields { - if field.Description != "path" { - t.Fatalf("expected field %s to be in 'path', found in %s", name, field.Description) + for name, field := range pathFields { + if field.Description != "path" { + t.Fatalf("expected field %s to be in 'path', found in %s", name, field.Description) + } } - } - for name, field := range bodyFields { - if field.Description != "body" { - t.Fatalf("expected field %s to be in 'body', found in %s", name, field.Description) + for name, field := range queryFields { + if field.Description != "query" { + t.Fatalf("expected field %s to be in 'query', found in %s", name, field.Description) + } + } + for name, field := range bodyFields { + if field.Description != "body" { + t.Fatalf("expected field %s to be in 'body', found in %s", name, field.Description) + } } } } @@ -324,12 +343,15 @@ func TestOpenAPI_SpecialPaths(t *testing.T) { path := 
Path{ Pattern: test.pattern, } - specialPaths := &logical.Paths{ - Root: test.rootPaths, - Unauthenticated: test.unauthenticatedPaths, + backend := &Backend{ + PathsSpecial: &logical.Paths{ + Root: test.rootPaths, + Unauthenticated: test.unauthenticatedPaths, + }, + BackendType: logical.TypeLogical, } - if err := documentPath(&path, specialPaths, "kv", logical.TypeLogical, doc); err != nil { + if err := documentPath(&path, backend, "kv", doc); err != nil { t.Fatal(err) } @@ -564,66 +586,6 @@ func TestOpenAPI_Paths(t *testing.T) { }) } -func TestOpenAPI_OperationID(t *testing.T) { - path1 := &Path{ - Pattern: "foo/" + GenericNameRegex("id"), - Fields: map[string]*FieldSchema{ - "id": {Type: TypeString}, - }, - Operations: map[logical.Operation]OperationHandler{ - logical.ReadOperation: &PathOperation{}, - logical.UpdateOperation: &PathOperation{}, - logical.DeleteOperation: &PathOperation{}, - }, - } - - path2 := &Path{ - Pattern: "Foo/" + GenericNameRegex("id"), - Fields: map[string]*FieldSchema{ - "id": {Type: TypeString}, - }, - Operations: map[logical.Operation]OperationHandler{ - logical.ReadOperation: &PathOperation{}, - }, - } - - for _, context := range []string{"", "bar"} { - doc := NewOASDocument("version") - err := documentPath(path1, nil, "kv", logical.TypeLogical, doc) - if err != nil { - t.Fatal(err) - } - err = documentPath(path2, nil, "kv", logical.TypeLogical, doc) - if err != nil { - t.Fatal(err) - } - doc.CreateOperationIDs(context) - - tests := []struct { - path string - op string - opID string - }{ - {"/Foo/{id}", "get", "getFooId"}, - {"/foo/{id}", "get", "getFooId_2"}, - {"/foo/{id}", "post", "postFooId"}, - {"/foo/{id}", "delete", "deleteFooId"}, - } - - for _, test := range tests { - actual := getPathOp(doc.Paths[test.path], test.op).OperationID - expected := test.opID - if context != "" { - expected += "_" + context - } - - if actual != expected { - t.Fatalf("expected %v, got %v", expected, actual) - } - } - } -} - func TestOpenAPI_CustomDecoder(t *testing.T) { p := &Path{ Pattern: "foo", @@ -653,7 +615,7 @@ func TestOpenAPI_CustomDecoder(t *testing.T) { } docOrig := NewOASDocument("version") - err := documentPath(p, nil, "kv", logical.TypeLogical, docOrig) + err := documentPath(p, &Backend{BackendType: logical.TypeLogical}, "kv", docOrig) if err != nil { t.Fatal(err) } @@ -693,13 +655,14 @@ func TestOpenAPI_CleanResponse(t *testing.T) { // logical.Response. 
This will fail if logical.Response changes without a corresponding // change to cleanResponse() orig = &logical.Response{ - Secret: new(logical.Secret), - Auth: new(logical.Auth), - Data: map[string]interface{}{"foo": 42}, - Redirect: "foo", - Warnings: []string{"foo"}, - WrapInfo: &wrapping.ResponseWrapInfo{Token: "foo"}, - Headers: map[string][]string{"foo": {"bar"}}, + Secret: new(logical.Secret), + Auth: new(logical.Auth), + Data: map[string]interface{}{"foo": 42}, + Redirect: "foo", + Warnings: []string{"foo"}, + WrapInfo: &wrapping.ResponseWrapInfo{Token: "foo"}, + Headers: map[string][]string{"foo": {"bar"}}, + MountType: "mount", } origJSON := mustJSONMarshal(t, orig) @@ -712,11 +675,224 @@ func TestOpenAPI_CleanResponse(t *testing.T) { } } +func TestOpenAPI_constructOperationID(t *testing.T) { + tests := map[string]struct { + path string + pathIndex int + pathAttributes *DisplayAttributes + operation logical.Operation + operationAttributes *DisplayAttributes + defaultPrefix string + expected string + }{ + "empty": { + path: "", + pathIndex: 0, + pathAttributes: nil, + operation: logical.Operation(""), + operationAttributes: nil, + defaultPrefix: "", + expected: "", + }, + "simple-read": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.ReadOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "test-read-path-to-thing", + }, + "simple-write": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "test-write-path-to-thing", + }, + "operation-verb": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "do-something", + }, + "operation-verb-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationVerb: "do-something-else"}, + defaultPrefix: "test", + expected: "do-something-else", + }, + "operation-prefix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "my-prefix-write-path-to-thing", + }, + "operation-prefix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix"}, + defaultPrefix: "test", + expected: "better-prefix-write-path-to-thing", + }, + "operation-prefix-and-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "my-prefix-write-my-suffix", + }, + "operation-prefix-and-suffix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "better-prefix-write-better-suffix", + }, + 
"operation-prefix-verb-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "better-prefix-create-better-suffix", + }, + "operation-prefix-verb-suffix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix", OperationVerb: "Login"}, + defaultPrefix: "test", + expected: "better-prefix-login-better-suffix", + }, + "operation-prefix-verb": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationVerb: "Login"}, + defaultPrefix: "test", + expected: "better-prefix-login", + }, + "operation-verb-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationVerb: "Login", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "login-better-suffix", + }, + "pipe-delimited-suffix-0": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-suffix0", + }, + "pipe-delimited-suffix-1": { + path: "path/to/thing", + pathIndex: 1, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-suffix1", + }, + "pipe-delimited-suffix-2-fallback": { + path: "path/to/thing", + pathIndex: 2, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-path-to-thing", + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + actual := constructOperationID( + test.path, + test.pathIndex, + test.pathAttributes, + test.operation, + test.operationAttributes, + test.defaultPrefix, + ) + if actual != test.expected { + t.Fatalf("expected: %s; got: %s", test.expected, actual) + } + }) + } +} + +func TestOpenAPI_hyphenatedToTitleCase(t *testing.T) { + tests := map[string]struct { + in string + expected string + }{ + "simple": { + in: "test", + expected: "Test", + }, + "two-words": { + in: "two-words", + expected: "TwoWords", + }, + "three-words": { + in: "one-two-three", + expected: "OneTwoThree", + }, + "not-hyphenated": { + in: "something_like_this", + expected: "Something_like_this", + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + actual := hyphenatedToTitleCase(test.in) + if actual != test.expected { + t.Fatalf("expected: %s; got: %s", test.expected, actual) + } + }) + } +} + func testPath(t *testing.T, path *Path, sp 
*logical.Paths, expectedJSON string) { t.Helper() doc := NewOASDocument("dummyversion") - if err := documentPath(path, sp, "kv", logical.TypeLogical, doc); err != nil { + if err := documentPath(path, &Backend{ + PathsSpecial: sp, + BackendType: logical.TypeLogical, + }, "kv", doc); err != nil { t.Fatal(err) } doc.CreateOperationIDs("") @@ -725,7 +901,6 @@ func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) if err != nil { t.Fatal(err) } - // Compare json by first decoding, then comparing with a deep equality check. var expected, actual interface{} if err := jsonutil.DecodeJSON(docJSON, &actual); err != nil { diff --git a/sdk/framework/path.go b/sdk/framework/path.go index bf28f47322fa..067b005e0fe5 100644 --- a/sdk/framework/path.go +++ b/sdk/framework/path.go @@ -56,6 +56,17 @@ type Path struct { // This should be a valid regular expression. Named captures will be // exposed as fields that should map to a schema in Fields. If a named // capture is not a field in the Fields map, then it will be ignored. + // + // The pattern will automatically have a ^ prepended and a $ appended before + // use, if these are not already present, so these may be omitted for clarity. + // + // If a ListOperation is being defined, the pattern must end with /? to match + // a trailing slash optionally, as ListOperations are always processed with a + // trailing slash added to the path if not already present. The match must not + // require the presence of a trailing slash, as HelpOperations, even for a + // path which only implements ListOperation, are processed without a trailing + // slash - so failure to make the trailing slash optional will break the + // `vault path-help` command for the path. Pattern string // Fields is the mapping of data fields to a schema describing that @@ -207,6 +218,11 @@ type DisplayAttributes struct { // Name is the name of the field suitable as a label or documentation heading. Name string `json:"name,omitempty"` + // Description of the field that renders as tooltip help text beside the label (name) in the UI. + // This may be used to replace descriptions that reference comma separation but correspond + // to UI inputs where only arrays are valid. For example params with Type: framework.TypeCommaStringSlice + Description string `json:"description,omitempty"` + // Value is a sample value to display for this field. This may be used // to indicate a default value, but it is for display only and completely separate // from any Default member handling. @@ -227,6 +243,28 @@ type DisplayAttributes struct { // Action is the verb to use for the operation. Action string `json:"action,omitempty"` + // OperationPrefix is a hyphenated lower-case string used to construct + // OpenAPI OperationID (prefix + verb + suffix). OperationPrefix is + // typically a human-readable name of the plugin or a prefix shared by + // multiple related endpoints. + OperationPrefix string `json:"operationPrefix,omitempty"` + + // OperationVerb is a hyphenated lower-case string used to construct + // OpenAPI OperationID (prefix + verb + suffix). OperationVerb is typically + // an action to be performed (e.g. "generate", "sign", "login", etc.). If + // not specified, the verb defaults to `logical.Operation.String()` + // (e.g. "read", "list", "delete", "write" for Create/Update) + OperationVerb string `json:"operationVerb,omitempty"` + + // OperationSuffix is a hyphenated lower-case string used to construct + // OpenAPI OperationID (prefix + verb + suffix). 
It is typically the name + // of the resource on which the action is performed (e.g. "role", + // "credentials", etc.). A pipe (|) separator can be used to list different + // suffixes for various permutations of the `Path.Pattern` regular + // expression. If not specified, the suffix defaults to the `Path.Pattern` + // split by dashes. + OperationSuffix string `json:"operationSuffix,omitempty"` + // EditType is the optional type of form field needed for a property // This is only necessary for a "textarea" or "file" EditType string `json:"editType,omitempty"` @@ -244,7 +282,7 @@ type RequestExample struct { // Response describes and optionally demonstrates an operation response. type Response struct { - Description string // summary of the the response and should always be provided + Description string // summary of the response and should always be provided MediaType string // media type of the response, defaulting to "application/json" if empty Fields map[string]*FieldSchema // the fields present in this response, used to generate openapi response Example *logical.Response // example response data @@ -261,6 +299,7 @@ type PathOperation struct { Deprecated bool ForwardPerformanceSecondary bool ForwardPerformanceStandby bool + DisplayAttrs *DisplayAttributes } func (p *PathOperation) Handler() OperationFunc { @@ -277,6 +316,7 @@ func (p *PathOperation) Properties() OperationProperties { Deprecated: p.Deprecated, ForwardPerformanceSecondary: p.ForwardPerformanceSecondary, ForwardPerformanceStandby: p.ForwardPerformanceStandby, + DisplayAttrs: p.DisplayAttrs, } } @@ -344,8 +384,12 @@ func (p *Path) helpCallback(b *Backend) OperationFunc { vaultVersion = env.VaultVersion } } + redactVersion, _, _, _ := logical.CtxRedactionSettingsValue(ctx) + if redactVersion { + vaultVersion = "" + } doc := NewOASDocument(vaultVersion) - if err := documentPath(p, b.SpecialPaths(), requestResponsePrefix, b.BackendType, doc); err != nil { + if err := documentPath(p, b, requestResponsePrefix, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) } diff --git a/sdk/framework/testdata/legacy.json b/sdk/framework/testdata/legacy.json index f526f1e2aade..548151c6f9e6 100644 --- a/sdk/framework/testdata/legacy.json +++ b/sdk/framework/testdata/legacy.json @@ -24,9 +24,11 @@ } ], "get": { - "operationId": "getLookupId", + "operationId": "kv-read-lookup-id", "summary": "Synopsis", - "tags": ["secrets"], + "tags": [ + "secrets" + ], "responses": { "200": { "description": "OK" @@ -34,15 +36,17 @@ } }, "post": { - "operationId": "postLookupId", + "operationId": "kv-write-lookup-id", "summary": "Synopsis", - "tags": ["secrets"], + "tags": [ + "secrets" + ], "requestBody": { "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvLookupRequest" + "$ref": "#/components/schemas/KvWriteLookupIdRequest" } } } @@ -57,7 +61,7 @@ }, "components": { "schemas": { - "KvLookupRequest": { + "KvWriteLookupIdRequest": { "type": "object", "properties": { "token": { @@ -69,4 +73,3 @@ } } } - diff --git a/sdk/framework/testdata/operations.json b/sdk/framework/testdata/operations.json index e1db6744018d..8e9ec9b8d0e1 100644 --- a/sdk/framework/testdata/operations.json +++ b/sdk/framework/testdata/operations.json @@ -12,20 +12,7 @@ "paths": { "/foo/{id}": { "description": "Synopsis", - "x-vault-createSupported": true, - "x-vault-sudo": true, - "x-vault-displayAttrs": { - "navigation": true - }, "parameters": [ - { - "name": "format", - "description": "a query param", - "in":
"query", - "schema": { - "type": "string" - } - }, { "name": "id", "description": "id path parameter", @@ -36,38 +23,47 @@ "required": true } ], + "x-vault-sudo": true, + "x-vault-createSupported": true, + "x-vault-displayAttrs": { + "navigation": true + }, "get": { - "operationId": "getFooId", - "tags": ["secrets"], "summary": "My Summary", "description": "My Description", - "responses": { - "200": { - "description": "OK" - } - }, + "operationId": "kv-read-foo-id", + "tags": [ + "secrets" + ], "parameters": [ { - "name": "list", - "description": "Return a list if `true`", + "name": "format", + "description": "a query param", "in": "query", "schema": { "type": "string" } } - ] + ], + "responses": { + "200": { + "description": "OK" + } + } }, "post": { - "operationId": "postFooId", - "tags": ["secrets"], "summary": "Update Summary", "description": "Update Description", + "operationId": "kv-write-foo-id", + "tags": [ + "secrets" + ], "requestBody": { "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvFooRequest" + "$ref": "#/components/schemas/KvWriteFooIdRequest" } } } @@ -78,14 +74,88 @@ } } } + }, + "/foo/{id}/": { + "description": "Synopsis", + "parameters": [ + { + "name": "id", + "description": "id path parameter", + "in": "path", + "schema": { + "type": "string" + }, + "required": true + } + ], + "x-vault-sudo": true, + "x-vault-displayAttrs": { + "navigation": true + }, + "get": { + "summary": "List Summary", + "description": "List Description", + "operationId": "kv-list-foo-id", + "tags": [ + "secrets" + ], + "parameters": [ + { + "name": "format", + "description": "a query param", + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "list", + "description": "Must be set to `true`", + "in": "query", + "schema": { + "type": "string", + "enum": [ + "true" + ] + }, + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StandardListResponse" + } + } + } + } + } + } } }, "components": { "schemas": { - "KvFooRequest": { + "KvWriteFooIdRequest": { "type": "object", - "required": ["age"], "properties": { + "age": { + "type": "integer", + "description": "the age", + "enum": [ + 1, + 2, + 3 + ], + "x-vault-displayAttrs": { + "name": "Age", + "value": 7, + "sensitive": true, + "group": "Some Group" + } + }, "flavors": { "type": "array", "description": "the flavors", @@ -93,35 +163,46 @@ "type": "string" } }, - "age": { + "format": { + "type": "string", + "description": "a query param" + }, + "maximum": { "type": "integer", - "description": "the age", - "enum": [1, 2, 3], - "x-vault-displayAttrs": { - "name": "Age", - "sensitive": true, - "group": "Some Group", - "value": 7 - } + "description": "a maximum value", + "format": "int64" }, "name": { "type": "string", "description": "the name", - "default": "Larry", - "pattern": "\\w([\\w-.]*\\w)?" 
+ "pattern": "\\w([\\w-.]*\\w)?", + "default": "Larry" }, "x-abc-token": { "type": "string", "description": "a header value", - "enum": ["a", "b", "c"] - }, - "maximum" : { - "type": "integer", - "description": "a maximum value", - "format": "int64" + "enum": [ + "a", + "b", + "c" + ] + } + }, + "required": [ + "age" + ] + }, + "StandardListResponse": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } } } } } } -} +} \ No newline at end of file diff --git a/sdk/framework/testdata/operations_list.json b/sdk/framework/testdata/operations_list.json index e89622a3c40a..feb7b2ccba08 100644 --- a/sdk/framework/testdata/operations_list.json +++ b/sdk/framework/testdata/operations_list.json @@ -10,21 +10,9 @@ } }, "paths": { - "/foo/{id}": { + "/foo/{id}/": { "description": "Synopsis", - "x-vault-sudo": true, - "x-vault-displayAttrs": { - "navigation": true - }, "parameters": [ - { - "name": "format", - "description": "a query param", - "in": "query", - "schema": { - "type": "string" - } - }, { "name": "id", "description": "id path parameter", @@ -35,33 +23,67 @@ "required": true } ], + "x-vault-sudo": true, + "x-vault-displayAttrs": { + "navigation": true + }, "get": { - "operationId": "getFooId", - "tags": ["secrets"], "summary": "List Summary", "description": "List Description", - "responses": { - "200": { - "description": "OK" - } - }, + "operationId": "kv-list-foo-id", + "tags": [ + "secrets" + ], "parameters": [ + { + "name": "format", + "description": "a query param", + "in": "query", + "schema": { + "type": "string" + } + }, { "name": "list", "description": "Must be set to `true`", - "required": true, "in": "query", "schema": { "type": "string", - "enum": ["true"] + "enum": [ + "true" + ] + }, + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StandardListResponse" + } + } } } - ] + } } } }, "components": { "schemas": { + "StandardListResponse": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } + } + } + } } } -} +} \ No newline at end of file diff --git a/sdk/framework/testdata/responses.json b/sdk/framework/testdata/responses.json index 4e442cfb49ba..98d501ec5e89 100644 --- a/sdk/framework/testdata/responses.json +++ b/sdk/framework/testdata/responses.json @@ -14,8 +14,10 @@ "description": "Synopsis", "x-vault-unauthenticated": true, "delete": { - "operationId": "deleteFoo", - "tags": ["secrets"], + "operationId": "kv-delete-foo", + "tags": [ + "secrets" + ], "summary": "Delete stuff", "responses": { "204": { @@ -24,8 +26,10 @@ } }, "get": { - "operationId": "getFoo", - "tags": ["secrets"], + "operationId": "kv-read-foo", + "tags": [ + "secrets" + ], "summary": "My Summary", "description": "My Description", "responses": { @@ -34,7 +38,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvFooResponse" + "$ref": "#/components/schemas/KvReadFooResponse" } } } @@ -45,7 +49,7 @@ }, "components": { "schemas": { - "KvFooResponse": { + "KvReadFooResponse": { "type": "object", "properties": { "field_a": { @@ -61,4 +65,3 @@ } } } - diff --git a/sdk/go.mod b/sdk/go.mod index a51b3f919be3..55c541e50bb2 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -1,67 +1,136 @@ module github.com/hashicorp/vault/sdk -go 1.19 +go 1.22 require ( - github.com/armon/go-metrics v0.3.9 + cloud.google.com/go/cloudsqlconn v1.4.3 + github.com/armon/go-metrics v0.4.1 
github.com/armon/go-radix v1.0.0 - github.com/evanphx/json-patch/v5 v5.5.0 + github.com/cenkalti/backoff/v3 v3.2.2 + github.com/docker/docker v26.1.5+incompatible + github.com/docker/go-connections v0.4.0 + github.com/evanphx/json-patch/v5 v5.6.0 github.com/fatih/structs v1.1.0 - github.com/go-ldap/ldap/v3 v3.1.10 - github.com/go-test/deep v1.0.2 - github.com/golang/protobuf v1.5.2 + github.com/go-ldap/ldap/v3 v3.4.6 + github.com/go-test/deep v1.1.0 + github.com/golang/protobuf v1.5.4 github.com/golang/snappy v0.0.4 + github.com/hashicorp/cap/ldap v0.0.0-20240328153749-fcfe271d0227 github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/go-hclog v0.16.2 + github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.5 - github.com/hashicorp/go-retryablehttp v0.5.3 - github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 - github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 + github.com/hashicorp/go-plugin v1.6.1 + github.com/hashicorp/go-retryablehttp v0.7.7 + github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 github.com/hashicorp/go-secure-stdlib/password v0.1.1 + github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.0 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 - github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 - github.com/hashicorp/go-sockaddr v1.0.2 - github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/go-version v1.2.0 + github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 + github.com/hashicorp/go-sockaddr v1.0.6 + github.com/hashicorp/go-uuid v1.0.3 + github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/golang-lru v0.5.4 - github.com/hashicorp/hcl v1.0.0 - github.com/mitchellh/copystructure v1.0.0 - github.com/mitchellh/go-testing-interface v1.0.0 + github.com/hashicorp/hcl v1.0.1-vault-5 + github.com/hashicorp/vault/api v1.14.0 + github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/pierrec/lz4 v2.5.2+incompatible + github.com/pierrec/lz4 v2.6.1+incompatible github.com/ryanuber/go-glob v1.0.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.9.0 + github.com/tink-crypto/tink-go/v2 v2.2.0 go.uber.org/atomic v1.9.0 - golang.org/x/crypto v0.6.0 - golang.org/x/text v0.7.0 - google.golang.org/grpc v1.41.0 - google.golang.org/protobuf v1.27.1 + golang.org/x/crypto v0.26.0 + golang.org/x/net v0.28.0 + golang.org/x/text v0.17.0 + google.golang.org/grpc v1.65.0 + google.golang.org/protobuf v1.34.2 ) require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.7.0 // indirect - github.com/frankban/quicktest v1.10.0 // indirect - github.com/go-asn1-ber/asn1-ber v1.3.1 // indirect - github.com/hashicorp/go-cleanhttp v0.5.0 // indirect - github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect - github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect - github.com/oklog/run v1.0.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-jose/go-jose/v4 
v4.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + golang.org/x/sync v0.8.0 // indirect +) + +require ( + cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/frankban/quicktest v1.14.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.3 + github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect - github.com/stretchr/objx v0.1.1 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.8.1 // 
indirect + github.com/sasha-s/go-deadlock v0.2.0 + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/stretchr/objx v0.5.2 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/api v0.169.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/go.sum b/sdk/go.sum index b33acfc9cfe3..a15693362241 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -1,66 +1,182 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/cloudsqlconn v1.4.3 h1:/WYFbB1NtMtoMxCbqpzzTFPDkxxlLTPme390KEGaEPc= +cloud.google.com/go/cloudsqlconn v1.4.3/go.mod h1:QL3tuStVOO70txb3rs4G8j5uMfo5ztZii8K3oGD3VYA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub 
v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod 
h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod 
h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0VRxYUc= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= 
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10 h1:7WsKqasmPThNvdl0Q5GPpbTDD/ZD98CfuawrMIuh7qQ= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= +github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid 
v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -70,27 +186,61 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf 
v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/cap/ldap v0.0.0-20240328153749-fcfe271d0227 h1:R5CMNyBNZqODw2DcGaSa2X96AgtLotXsH7aOa07zTTI= +github.com/hashicorp/cap/ldap v0.0.0-20240328153749-fcfe271d0227/go.mod h1:Ofp5fMLl1ImcwjNGu9FtEwNOdxA0LYoWpcWQE2vltuI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -98,137 +248,368 @@ github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCT github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= -github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= +github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 
h1:6KMBnfEv0/kLAz0O76sliN5mXbCDcLfs2kP7ssP7+DQ= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.0 h1:7Yran48kl6X7jfUg3sfYDrFot1gD3LvzdC3oPu5l/qo= +github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.0/go.mod h1:9WJFu7L3d+Z4ViZmwUf+6/73/Uy7YMY1NXrB9wdElYE= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 h1:xbrxd0U9XQW8qL1BAz2XrAjAF/P2vcqUTAues9c24B8= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3/go.mod h1:LWq2Sy8UoKKuK4lFuCNWSjJj57MhNNf2zzBWMtkAIX4= +github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= +github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version 
v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= +github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock 
v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect 
v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jimlambrt/gldap v0.1.13 h1:jxmVQn0lfmFbM9jglueoau5LLF/IGRti0SKf0vB753M= +github.com/jimlambrt/gldap v0.1.13/go.mod h1:nlC30c7xVphjImg6etk7vg7ZewHCCvl1dfAhO3ZJzPg= +github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4= +github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg= +github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY= +github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d/go.mod h1:b+Q3v8Yrg5o15d71PSUraUzYb+jWl6wQMSBXSGS/hv0= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk= +github.com/microsoft/go-mssqldb v1.5.0/go.mod h1:lmWsjHD8XX/Txr0f8ZqgbEZSC+BZjmEQy/Ms+rLrvho= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface 
v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= +github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx 
v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tink-crypto/tink-go/v2 v2.2.0 h1:L2Da0F2Udh2agtKztdr69mV/KpnY3/lGTkMgLTVIXlA= +github.com/tink-crypto/tink-go/v2 v2.2.0/go.mod h1:JJ6PomeNPF3cJpfWC0lgyTES6zpJILkAX0cJNwlS3xU= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod 
h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -236,66 +617,276 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod 
h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 
h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 h1:V71AcdLZr2p8dC9dbOIMCpqi4EmRl8wUwnJzXXLmbmc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -304,21 +895,38 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/sdk/helper/backoff/backoff.go b/sdk/helper/backoff/backoff.go new file mode 100644 index 000000000000..ebf9aaa7e721 --- /dev/null +++ b/sdk/helper/backoff/backoff.go @@ -0,0 +1,107 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backoff + +import ( + "errors" + "math" + "math/rand" + "time" +) + +var ErrMaxRetry = errors.New("exceeded maximum number of retries") + +const maxJitter = 0.25 + +// Backoff is used to do capped exponential backoff with jitter, with a maximum number of retries. 
+// Generally, use this struct by calling Next() or NextSleep() after a failure. +// If configured for N max retries, Next() and NextSleep() will return an error on call N+1. +// The jitter is set to 25%, so each value returned is up to 25% less than twice the previous value (i.e., between 1.5x and 2x the previous value, before capping). +// The min value also includes jitter, so the first call will almost always return less than the requested minimum value. +// Backoff is not thread-safe. +type Backoff struct { + currentAttempt int + maxRetries int + min time.Duration + max time.Duration + current time.Duration +} + +// NewBackoff creates a new exponential backoff with the given number of maximum retries and min/max durations. +func NewBackoff(maxRetries int, min, max time.Duration) *Backoff { + b := &Backoff{ + maxRetries: maxRetries, + max: max, + min: min, + } + b.Reset() + return b +} + +// Current returns the next duration that will be returned by Next() (or slept in NextSleep()). +func (b *Backoff) Current() time.Duration { + return b.current +} + +// Next determines the next backoff duration that is roughly twice +// the current value, capped to a max value, with a measure of randomness. +// It returns an error if there are no more retries left. +func (b *Backoff) Next() (time.Duration, error) { + if b.currentAttempt >= b.maxRetries { + return time.Duration(-1), ErrMaxRetry + } + defer func() { + b.currentAttempt += 1 + }() + if b.currentAttempt == 0 { + return b.current, nil + } + next := 2 * b.current + if next > b.max { + next = b.max + } + next = jitter(next) + b.current = next + return next, nil +} + +// NextSleep will synchronously sleep the next backoff amount (see Next()). +// It returns an error if there are no more retries left. +func (b *Backoff) NextSleep() error { + next, err := b.Next() + if err != nil { + return err + } + time.Sleep(next) + return nil +} + +// Reset resets the state to the initial backoff amount and 0 retries. +func (b *Backoff) Reset() { + b.current = b.min + b.current = jitter(b.current) + b.currentAttempt = 0 +} + +func jitter(t time.Duration) time.Duration { + f := float64(t) * (1.0 - maxJitter*rand.Float64()) + return time.Duration(math.Floor(f)) +} + +// Retry calls the given function until it does not return an error, at least once and up to maxRetries + 1 times. +// If the number of retries is exceeded, Retry() will return the last error seen joined with ErrMaxRetry. +func (b *Backoff) Retry(f func() error) error { + for { + err := f() + if err == nil { + return nil + } + + maxRetryErr := b.NextSleep() + if maxRetryErr != nil { + return errors.Join(maxRetryErr, err) + } + } +}
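For illustration, here is a minimal sketch of how a caller might use this helper; it assumes the package is imported from the Vault SDK module path, and the polled URL is hypothetical rather than part of this change:

	package main

	import (
		"fmt"
		"net/http"
		"time"

		"github.com/hashicorp/vault/sdk/helper/backoff"
	)

	func main() {
		// Allow up to 3 retries, starting near 100ms and capping each wait at 2s.
		b := backoff.NewBackoff(3, 100*time.Millisecond, 2*time.Second)

		// Retry runs the function at least once, sleeping between failures; once
		// the retries are exhausted it returns the last error joined with ErrMaxRetry.
		err := b.Retry(func() error {
			resp, err := http.Get("https://example.com/health") // hypothetical endpoint
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("unexpected status: %d", resp.StatusCode)
			}
			return nil
		})
		if err != nil {
			fmt.Println("giving up:", err)
		}
	}

With these settings the first wait is roughly the 100ms minimum (less up to 25% jitter), and each later wait falls between 1.5x and 2x the previous one, never exceeding the 2s cap.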
diff --git a/sdk/helper/backoff/backoff_test.go b/sdk/helper/backoff/backoff_test.go new file mode 100644 index 000000000000..46b85257bad5 --- /dev/null +++ b/sdk/helper/backoff/backoff_test.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backoff + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// TestBackoff_Basic tests that basic exponential backoff works as expected up to a max of 3 times. +func TestBackoff_Basic(t *testing.T) { + for i := 0; i < 100; i++ { + b := NewBackoff(3, 1*time.Millisecond, 10*time.Millisecond) + x, err := b.Next() + assert.Nil(t, err) + assert.LessOrEqual(t, x, 1*time.Millisecond) + assert.GreaterOrEqual(t, x, 750*time.Microsecond) + + x2, err := b.Next() + assert.Nil(t, err) + assert.LessOrEqual(t, x2, x*2) + assert.GreaterOrEqual(t, x2, x*3/4) + + x3, err := b.Next() + assert.Nil(t, err) + assert.LessOrEqual(t, x3, x2*2) + assert.GreaterOrEqual(t, x3, x2*3/4) + + _, err = b.Next() + assert.NotNil(t, err) + } +} + +// TestBackoff_ZeroRetriesAlwaysFails checks that if retries is set to zero, then an error is returned immediately. +func TestBackoff_ZeroRetriesAlwaysFails(t *testing.T) { + b := NewBackoff(0, 1*time.Millisecond, 10*time.Millisecond) + _, err := b.Next() + assert.NotNil(t, err) +} + +// TestBackoff_MaxIsEnforced checks that the maximum backoff is enforced. +func TestBackoff_MaxIsEnforced(t *testing.T) { + b := NewBackoff(1001, 1*time.Millisecond, 2*time.Millisecond) + for i := 0; i < 1000; i++ { + x, err := b.Next() + assert.LessOrEqual(t, x, 2*time.Millisecond) + assert.Nil(t, err) + } +} diff --git a/sdk/helper/certutil/certutil_test.go b/sdk/helper/certutil/certutil_test.go index 4ee3d1c4e016..4ebab01827d2 100644 --- a/sdk/helper/certutil/certutil_test.go +++ b/sdk/helper/certutil/certutil_test.go @@ -13,6 +13,7 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" "encoding/json" "encoding/pem" "fmt" @@ -945,6 +946,161 @@ func TestSignatureAlgorithmRoundTripping(t *testing.T) { } } +// TestBasicConstraintExtension verifies that extension generation and parsing of x509 basic constraint extensions +// work as expected. +func TestBasicConstraintExtension(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + isCA bool + maxPathLen int + }{ + {"empty-seq", false, -1}, + {"just-ca-true", true, -1}, + {"just-ca-with-maxpathlen", true, 2}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ext, err := CreateBasicConstraintExtension(tt.isCA, tt.maxPathLen) + if err != nil { + t.Fatalf("failed generating basic extension: %v", err) + } + + gotIsCa, gotMaxPathLen, err := ParseBasicConstraintExtension(ext) + if err != nil { + t.Fatalf("failed parsing basic extension: %v", err) + } + + if tt.isCA != gotIsCa { + t.Fatalf("expected isCa (%v) got isCa (%v)", tt.isCA, gotIsCa) + } + + if tt.maxPathLen != gotMaxPathLen { + t.Fatalf("expected maxPathLen (%v) got maxPathLen (%v)", tt.maxPathLen, gotMaxPathLen) + } + }) + } + + t.Run("bad-extension-oid", func(t *testing.T) { + // Test invalid type errors out + _, _, err := ParseBasicConstraintExtension(pkix.Extension{}) + if err == nil { + t.Fatalf("should have failed parsing non-basic constraint extension") + } + }) + + t.Run("garbage-value", func(t *testing.T) { + extraBytes, err := asn1.Marshal("a string") + if err != nil { + t.Fatalf("failed encoding the struct: %v", err) + } + ext := pkix.Extension{ + Id: ExtensionBasicConstraintsOID, + Value: extraBytes, + } + _, _, err = ParseBasicConstraintExtension(ext) + if err == nil { + t.Fatalf("should have failed parsing basic constraint with extra information") + } + }) +} + +// TestIgnoreCSRSigning makes sure we validate the CSR signature by default and that we can override +// that behavior by disabling CSR signature checks. +func TestIgnoreCSRSigning(t *testing.T) { + t.Parallel() + + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("failed generating ca key: %v", err)
+ } + subjKeyID, err := GetSubjKeyID(caKey) + if err != nil { + t.Fatalf("failed generating ca subject key id: %v", err) + } + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "root.localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"root.localhost"}, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey) + if err != nil { + t.Fatalf("failed creating ca certificate: %v", err) + } + caCert, err := x509.ParseCertificate(caBytes) + if err != nil { + t.Fatalf("failed parsing ca certificate: %v", err) + } + + signingBundle := &CAInfoBundle{ + ParsedCertBundle: ParsedCertBundle{ + PrivateKeyType: ECPrivateKey, + PrivateKey: caKey, + CertificateBytes: caBytes, + Certificate: caCert, + CAChain: nil, + }, + URLs: &URLEntries{}, + } + + key := genEdDSA(t) + csr := &x509.CertificateRequest{ + PublicKeyAlgorithm: x509.ECDSA, + PublicKey: key.Public(), + Subject: pkix.Name{ + CommonName: "test.dadgarcorp.com", + }, + } + t.Run("ignore-csr-disabled", func(t *testing.T) { + params := &CreationParameters{ + URLs: &URLEntries{}, + } + data := &CreationBundle{ + Params: params, + SigningBundle: signingBundle, + CSR: csr, + } + + _, err := SignCertificate(data) + if err == nil { + t.Fatalf("should have failed signing csr with ignore csr signature disabled") + } + if !strings.Contains(err.Error(), "request signature invalid") { + t.Fatalf("expected error to contain 'request signature invalid': got: %v", err) + } + }) + + t.Run("ignore-csr-enabled", func(t *testing.T) { + params := &CreationParameters{ + IgnoreCSRSignature: true, + URLs: &URLEntries{}, + } + data := &CreationBundle{ + Params: params, + SigningBundle: signingBundle, + CSR: csr, + } + + cert, err := SignCertificate(data) + if err != nil { + t.Fatalf("failed to sign certificate: %v", err) + } + + if err := cert.Verify(); err != nil { + t.Fatalf("signature verification failed: %v", err) + } + }) +} + func genRsaKey(t *testing.T) *rsa.PrivateKey { key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { diff --git a/sdk/helper/certutil/cieps.go b/sdk/helper/certutil/cieps.go new file mode 100644 index 000000000000..9943cf3977a5 --- /dev/null +++ b/sdk/helper/certutil/cieps.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package certutil + +import ( + "crypto/x509" + "encoding/pem" + "fmt" +) + +// CIEPSIssuanceMode denotes the source of the issuance request: sign implies that the key material was +// generated by the user and submitted via a CSR request but only ACL level +// validation was applied; issue implies that Vault created the key material +// on behalf of the user with ACL level validation occurring; ACME implies +// that the user submitted a CSR and that additional ACME validation has +// occurred before sending the request to the external service for +// construction.
+type CIEPSIssuanceMode string + +const ( + SignCIEPSMode = "sign" + IssueCIEPSMode = "issue" + ACMECIEPSMode = "acme" + ICACIEPSMode = "ica" +) + +// CIEPSIssuanceConfig describes the configuration of the issuer and mount at the time of this request; +// it states the issuer's templated AIA information (falling back to the +// mount-global config if no per-issuer AIA info is set), the issuer's +// leaf_not_after_behavior (permit/truncate/err) for TTLs exceeding the +// issuer's validity period, and the mount's default and max TTL. +type CIEPSIssuanceConfig struct { + AIAValues *URLEntries `json:"aia_values"` + LeafNotAfterBehavior string `json:"leaf_not_after_behavior"` + MountDefaultTTL string `json:"mount_default_ttl"` + MountMaxTTL string `json:"mount_max_ttl"` +} + +// CIEPSVaultParams are the structured parameters sent by Vault, or explicitly validated by Vault +// prior to sending. +type CIEPSVaultParams struct { + PolicyName string `json:"policy_name,omitempty"` + Mount string `json:"mount"` + Namespace string `json:"ns"` + + // These indicate the type of the cluster node talking to the CIEPS + // service. When IsPerfStandby=true, setting StoreCert=true in the + // response will result in Vault forwarding the client's request + // up to the Performance Secondary's active node and re-trying the + // operation (including re-submitting the request to the CIEPS + // service). + // + // Any response returned by the CIEPS service in this case will be + // ignored and not signed by the CA's keys. + // + // IsPRSecondary is set to false when a local mount is used on a + // PR Secondary; in this scenario, PR Secondary nodes behave like + // PR Primary nodes. From a CIEPS service perspective, no behavior + // difference is expected between PR Primary and PR Secondary nodes; + // both will issue and store certificates on their active nodes. + // This information is included for audit tracking purposes. + IsPerfStandby bool `json:"vault_is_performance_standby"` + IsPRSecondary bool `json:"vault_is_performance_secondary"` + + IssuanceMode CIEPSIssuanceMode `json:"issuance_mode"` + + GeneratedKey bool `json:"vault_generated_private_key"` + + IssuerName string `json:"requested_issuer_name"` + IssuerID string `json:"requested_issuer_id"` + IssuerCert string `json:"requested_issuer_cert"` + + Config CIEPSIssuanceConfig `json:"requested_issuance_config"` +} + +// CIEPSRequest is the outer request object sent by Vault to the external CIEPS service. +// +// The top-level fields denote properties about the CIEPS request, +// with the various request fields containing untrusted and trusted input +// respectively. +type CIEPSRequest struct { + Version int `json:"request_version"` + UUID string `json:"request_uuid"` + Sync bool `json:"synchronous"` + + UserRequestKV map[string]interface{} `json:"user_request_key_values"` + IdentityRequestKV map[string]interface{} `json:"identity_request_key_values,omitempty"` + ACMERequestKV map[string]interface{} `json:"acme_request_key_values,omitempty"` + VaultRequestKV CIEPSVaultParams `json:"vault_request_values"` + + // Vault guarantees that UserRequestKV will contain a csr parameter + // for all request types; this field is useful for engine implementations + // to have in parsed format. We assume that this is sent in PEM format, + // aligning with other Vault requests.
+ ParsedCSR *x509.CertificateRequest `json:"-"` +} + +func (req *CIEPSRequest) ParseUserCSR() error { + csrValueRaw, present := req.UserRequestKV["csr"] + if !present { + return fmt.Errorf("missing expected 'csr' attribute on the request") + } + + csrValue, ok := csrValueRaw.(string) + if !ok { + return fmt.Errorf("unexpected type of 'csr' attribute: %T", csrValueRaw) + } + + if csrValue == "" { + return fmt.Errorf("unexpectedly empty 'csr' attribute on the request") + } + + block, rest := pem.Decode([]byte(csrValue)) + if len(rest) > 0 { + return fmt.Errorf("failed to decode 'csr': %v bytes of trailing data after PEM block", len(rest)) + } + if block == nil { + return fmt.Errorf("failed to decode 'csr' PEM block") + } + + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return fmt.Errorf("failed to parse certificate request: %w", err) + } + + req.ParsedCSR = csr + return nil +} + +// Expected response object from the external CIEPS service. +// +// When parsing, Vault will disallow unknown fields, failing the +// parse if unknown fields are sent. +type CIEPSResponse struct { + UUID string `json:"request_uuid"` + Error string `json:"error,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Certificate string `json:"certificate"` + ParsedCertificate *x509.Certificate `json:"-"` + IssuerRef string `json:"issuer_ref"` + StoreCert bool `json:"store_certificate"` + GenerateLease bool `json:"generate_lease"` +} + +func (c *CIEPSResponse) MarshalCertificate() error { + if c.ParsedCertificate == nil || len(c.ParsedCertificate.Raw) == 0 { + return fmt.Errorf("no certificate present") + } + + pem := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: c.ParsedCertificate.Raw, + }) + if len(pem) == 0 { + return fmt.Errorf("failed to generate PEM: no body") + } + c.Certificate = string(pem) + + return nil +} diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index ab1aecd5a0a4..1c673b058acd 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -13,14 +13,15 @@ import ( "crypto/rand" "crypto/rsa" "crypto/sha1" + "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" + "encoding/hex" "encoding/pem" "errors" "fmt" "io" - "io/ioutil" "math/big" "net" "net/url" @@ -81,6 +82,9 @@ var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ x509.PureEd25519: "Ed25519", } +// OIDs for X.509 SAN Extension +var OidExtensionSubjectAltName = asn1.ObjectIdentifier([]int{2, 5, 29, 17}) + // OID for RFC 5280 CRL Number extension. // // > id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } @@ -91,6 +95,16 @@ var CRLNumberOID = asn1.ObjectIdentifier([]int{2, 5, 29, 20}) // > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27}) +// OID for KeyUsage from RFC 2459 : https://www.rfc-editor.org/rfc/rfc2459.html#section-4.2.1.3 +// +// > id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 } +var KeyUsageOID = asn1.ObjectIdentifier([]int{2, 5, 29, 15}) + +// OID for Extended Key Usage from RFC 5280 : https://www.rfc-editor.org/rfc/rfc5280#section-4.2.1.12 +// +// id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } +var ExtendedKeyUsageOID = asn1.ObjectIdentifier([]int{2, 5, 29, 37}) + // GetHexFormatted returns the byte buffer formatted in hex with // the specified separator between bytes. 
func GetHexFormatted(buf []byte, sep string) string { @@ -126,7 +140,7 @@ func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { if privateKey == nil { return nil, errutil.InternalError{Err: "passed-in private key is nil"} } - return getSubjectKeyID(privateKey.Public()) + return GetSubjectKeyID(privateKey.Public()) } // Returns the explicit SKID when used for cross-signing, else computes a new @@ -136,10 +150,10 @@ func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) { return data.Params.SKID, nil } - return getSubjectKeyID(data.CSR.PublicKey) + return GetSubjectKeyID(data.CSR.PublicKey) } -func getSubjectKeyID(pub interface{}) ([]byte, error) { +func GetSubjectKeyID(pub interface{}) ([]byte, error) { var publicKeyBytes []byte switch pub := pub.(type) { case *rsa.PublicKey: @@ -304,6 +318,18 @@ func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { return parsedBundle, nil } +func (p *ParsedCertBundle) ToTLSCertificate() tls.Certificate { + var cert tls.Certificate + cert.Certificate = append(cert.Certificate, p.CertificateBytes) + cert.Leaf = p.Certificate + cert.PrivateKey = p.PrivateKey + for _, ca := range p.CAChain { + cert.Certificate = append(cert.Certificate, ca.Bytes) + } + + return cert +} + // GeneratePrivateKey generates a private key with the specified type and key bits. func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { return generatePrivateKey(keyType, keyBits, container, nil) @@ -1042,8 +1068,8 @@ func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x } var ( - oidExtensionBasicConstraints = []int{2, 5, 29, 19} - oidExtensionSubjectAltName = []int{2, 5, 29, 17} + ExtensionBasicConstraintsOID = []int{2, 5, 29, 19} + ExtensionSubjectAltNameOID = []int{2, 5, 29, 17} ) // CreateCSR creates a CSR with the default rand.Reader to @@ -1076,6 +1102,7 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea } // Like many root CAs, other information is ignored + + csrTemplate := &x509.CertificateRequest{ Subject: data.Params.Subject, DNSNames: data.Params.DNSNames, @@ -1084,6 +1111,14 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea URIs: data.Params.URIs, } + if data.Params.KeyUsage != 0 { + keyUsageExt, err := marshalKeyUsage(data.Params.KeyUsage) + if err != nil { + return nil, fmt.Errorf("failed marshalling existing key usage: %w", err) + } + csrTemplate.ExtraExtensions = []pkix.Extension{keyUsageExt} + } + if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil { return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} } @@ -1098,7 +1133,7 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} } ext := pkix.Extension{ - Id: oidExtensionBasicConstraints, + Id: ExtensionBasicConstraintsOID, Value: val, Critical: true, } @@ -1133,6 +1168,56 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea return result, nil } +// marshalKeyUsage is taken from the Go standard library: https://cs.opensource.google/go/go/+/master:src/crypto/x509/x509.go;drc=370a6959e3edd9d901446661ee9fef3f72d150d4;l=1339 +// It requires the two functions below (reverseBitsInAByte and asn1BitLength), taken from the same code base. +// The standard library uses that code only for certificates, but we need to marshal key usage on CSRs
(createCSR above) as well +func marshalKeyUsage(ku x509.KeyUsage) (pkix.Extension, error) { + ext := pkix.Extension{Id: KeyUsageOID, Critical: true} + + var a [2]byte + a[0] = reverseBitsInAByte(byte(ku)) + a[1] = reverseBitsInAByte(byte(ku >> 8)) + + l := 1 + if a[1] != 0 { + l = 2 + } + + bitString := a[:l] + var err error + ext.Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)}) + return ext, err +} + +// reverseBitsInAByte Taken from: https://cs.opensource.google/go/go/+/master:src/crypto/x509/x509.go;drc=370a6959e3edd9d901446661ee9fef3f72d150d4;l=1011 +// needed for marshalKeyUsage called above +func reverseBitsInAByte(in byte) byte { + b1 := in>>4 | in<<4 + b2 := b1>>2&0x33 | b1<<2&0xcc + b3 := b2>>1&0x55 | b2<<1&0xaa + return b3 +} + +// asn1BitLength returns the bit-length of bitString by considering the +// most-significant bit in a byte to be the "first" bit. This convention +// matches ASN.1, but differs from almost everything else. +func asn1BitLength(bitString []byte) int { + bitLen := len(bitString) * 8 + + for i := range bitString { + b := bitString[len(bitString)-i-1] + + for bit := uint(0); bit < 8; bit++ { + if (b>>bit)&1 == 1 { + return bitLen + } + bitLen-- + } + } + + return 0 +} + // SignCertificate performs the heavy lifting // of generating a certificate from a CSR. // Returns a ParsedCertBundle sans private keys. @@ -1159,9 +1244,10 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun return nil, errutil.UserError{Err: "nil csr given to signCertificate"} } - err := data.CSR.CheckSignature() - if err != nil { - return nil, errutil.UserError{Err: "request signature invalid"} + if !data.Params.IgnoreCSRSignature { + if err := data.CSR.CheckSignature(); err != nil { + return nil, errutil.UserError{Err: "request signature invalid"} + } } result := &ParsedCertBundle{} @@ -1219,7 +1305,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun certTemplate.URIs = data.CSR.URIs for _, name := range data.CSR.Extensions { - if !name.Id.Equal(oidExtensionBasicConstraints) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(oidExtensionSubjectAltName)) { + if !name.Id.Equal(ExtensionBasicConstraintsOID) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(ExtensionSubjectAltNameOID)) { certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) } } @@ -1271,7 +1357,6 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun } certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey) - if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} } @@ -1288,11 +1373,11 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun } func NewCertPool(reader io.Reader) (*x509.CertPool, error) { - pemBlock, err := ioutil.ReadAll(reader) + pemBlock, err := io.ReadAll(reader) if err != nil { return nil, err } - certs, err := parseCertsPEM(pemBlock) + certs, err := ParseCertsPEM(pemBlock) if err != nil { return nil, fmt.Errorf("error reading certs: %s", err) } @@ -1303,9 +1388,9 @@ func NewCertPool(reader io.Reader) (*x509.CertPool, error) { return pool, nil } -// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if 
the data does not contain any certificates -func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { +func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { ok := false certs := []*x509.Certificate{} for len(pemCerts) > 0 { @@ -1392,3 +1477,633 @@ func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) Value: bigNumValue, }, nil } + +// ParseBasicConstraintExtension parses a basic constraint pkix.Extension, useful when attempting to validate +// that CSRs are requesting CA privileges, as Go does not expose its implementation. The values returned are +// IsCA, MaxPathLen, or an error. If MaxPathLen was not set, a value of -1 will be returned. +func ParseBasicConstraintExtension(ext pkix.Extension) (bool, int, error) { + if !ext.Id.Equal(ExtensionBasicConstraintsOID) { + return false, -1, fmt.Errorf("passed in extension was not a basic constraint extension") + } + + // All elements are set to optional here, as it is possible that we receive a CSR with the extension + // containing an empty sequence by spec. + type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` + } + bc := &basicConstraints{} + leftOver, err := asn1.Unmarshal(ext.Value, bc) + if err != nil { + return false, -1, fmt.Errorf("failed unmarshalling extension value: %w", err) + } + + numLeftOver := len(bytes.TrimSpace(leftOver)) + if numLeftOver > 0 { + return false, -1, fmt.Errorf("%d extra bytes within basic constraints value extension", numLeftOver) + } + + return bc.IsCA, bc.MaxPathLen, nil +} + +// CreateBasicConstraintExtension creates a basic constraint extension based on its inputs: +// if isCa is false, an empty value sequence will be returned with maxPath being +// ignored. If isCa is true, maxPath can be set to -1 to leave the maxPath value unset. +func CreateBasicConstraintExtension(isCa bool, maxPath int) (pkix.Extension, error) { + var asn1Bytes []byte + var err error + + switch { + case isCa && maxPath >= 0: + CaAndMaxPathLen := struct { + IsCa bool `asn1:""` + MaxPathLen int `asn1:""` + }{ + IsCa: isCa, + MaxPathLen: maxPath, + } + asn1Bytes, err = asn1.Marshal(CaAndMaxPathLen) + case isCa && maxPath < 0: + justCa := struct { + IsCa bool `asn1:""` + }{IsCa: isCa} + asn1Bytes, err = asn1.Marshal(justCa) + default: + asn1Bytes, err = asn1.Marshal(struct{}{}) + } + + if err != nil { + return pkix.Extension{}, err + } + + return pkix.Extension{ + Id: ExtensionBasicConstraintsOID, + Critical: true, + Value: asn1Bytes, + }, nil +} + +// GetOtherSANsFromX509Extensions is used to find all the extensions which have the identifier (OID) of +// a SAN (Subject Alternative Name), and then look at each extension to find out if it is one of a set of +// well-known types (like IP SANs) or "other". Currently, the only OtherSANs Vault supports are of type UTF-8.
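+// For example, a hypothetical sketch, where csr is a parsed *x509.CertificateRequest and the Microsoft UPN OID is used purely for illustration: +// +//	names, err := GetOtherSANsFromX509Extensions(csr.Extensions) +//	if err == nil { +//		for _, name := range names { +//			fmt.Println(name.String()) // e.g. "1.3.6.1.4.1.311.20.2.3;UTF-8:user@example.com" +//		} +//	}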
+func GetOtherSANsFromX509Extensions(exts []pkix.Extension) ([]OtherNameUtf8, error) { + var ret []OtherNameUtf8 + for _, ext := range exts { + if !ext.Id.Equal(OidExtensionSubjectAltName) { + continue + } + err := forEachSAN(ext.Value, func(tag int, data []byte) error { + if tag != 0 { + return nil + } + + var other OtherNameRaw + _, err := asn1.UnmarshalWithParams(data, &other, "tag:0") + if err != nil { + return fmt.Errorf("could not parse requested other SAN: %w", err) + } + val, err := other.ExtractUTF8String() + if err != nil { + return err + } + ret = append(ret, *val) + return nil + }) + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + rest, err := asn1.Unmarshal(extension, &seq) + if err != nil { + return err + } else if len(rest) != 0 { + return fmt.Errorf("x509: trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { + return asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return err + } + + if err := callback(v.Tag, v.FullBytes); err != nil { + return err + } + } + + return nil +} + +// OtherNameRaw describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +type OtherNameRaw struct { + TypeID asn1.ObjectIdentifier + Value asn1.RawValue +} + +type OtherNameUtf8 struct { + Oid string + Value string +} + +// String() turns an OtherNameUtf8 object into the storage or field-value used to assign that name +// to a certificate in an API call. +func (o OtherNameUtf8) String() string { + return fmt.Sprintf("%s;%s:%s", o.Oid, "UTF-8", o.Value) +} + +// ExtractUTF8String returns the UTF-8 string contained in the Value, or an error +// if none is present.
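+// For example, a hypothetical sketch, where oraw holds an OtherNameRaw unmarshalled from a SAN extension: +// +//	if name, err := oraw.ExtractUTF8String(); err == nil { +//		fmt.Println(name.Oid, name.Value) +//	}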
+func (oraw *OtherNameRaw) ExtractUTF8String() (*OtherNameUtf8, error) { + svalue := cryptobyte.String(oraw.Value.Bytes) + var outTag cbasn1.Tag + var val cryptobyte.String + read := svalue.ReadAnyASN1(&val, &outTag) + + if read && outTag == asn1.TagUTF8String { + return &OtherNameUtf8{Oid: oraw.TypeID.String(), Value: string(val)}, nil + } + return nil, fmt.Errorf("no UTF-8 string found in OtherName") +} + +func getOtherSANsStringFromExtensions(exts []pkix.Extension) (string, error) { + otherNames, err := GetOtherSANsFromX509Extensions(exts) + if err != nil { + return "", err + } + + otherSansList := make([]string, len(otherNames)) + for i, otherName := range otherNames { + otherSansList[i] = otherName.String() + } + + otherSans := strings.Join(otherSansList, ",") + + return otherSans, nil +} + +func getOtherSANsMapFromExtensions(exts []pkix.Extension) (map[string][]string, error) { + otherNames, err := GetOtherSANsFromX509Extensions(exts) + if err != nil { + return nil, err + } + + otherSans := make(map[string][]string) + for _, name := range otherNames { + if otherSans[name.Oid] == nil { + otherSans[name.Oid] = []string{name.Value} + } else { + otherSans[name.Oid] = append(otherSans[name.Oid], name.Value) + } + } + + return otherSans, nil +} + +func getKeyUsage(exts []pkix.Extension) (x509.KeyUsage, error) { + keyUsage := x509.KeyUsage(0) + for _, ext := range exts { + if ext.Id.Equal(KeyUsageOID) { + return parseKeyUsageExtension(ext.Value) + } + } + return keyUsage, nil +} + +// Taken from: https://cs.opensource.google/go/go/+/master:src/crypto/x509/parser.go;drc=dd84bb682482390bb8465482cb7b13d2e3b17297;l=319 +func parseKeyUsageExtension(der cryptobyte.String) (x509.KeyUsage, error) { + var usageBits asn1.BitString + if !der.ReadASN1BitString(&usageBits) { + return 0, errors.New("x509: invalid key usage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + return x509.KeyUsage(usage), nil +} + +func getExtKeyUsageOids(exts []pkix.Extension) ([]string, error) { + keyUsageOidStrings := make([]string, 0) + keyUsageOids := make([]asn1.ObjectIdentifier, 0) + for _, ext := range exts { + if ext.Id.Equal(ExtendedKeyUsageOID) { + _, err := asn1.Unmarshal(ext.Value, &keyUsageOids) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal KeyUsageOid extension: %w", err) + } + for _, oid := range keyUsageOids { + keyUsageOidStrings = append(keyUsageOidStrings, oid.String()) + } + return keyUsageOidStrings, nil + } + } + return nil, nil +} + +func getPolicyIdentifiers(exts []pkix.Extension) ([]string, error) { + policyIdentifiers := make([]string, 0) + for _, ext := range exts { + if ext.Id.Equal(policyInformationOid) { + // PolicyInformation ::= SEQUENCE { + // policyIdentifier CertPolicyId, + // policyQualifiers SEQUENCE SIZE (1..MAX) OF + // PolicyQualifierInfo OPTIONAL } + type policyInformation struct { + PolicyIdentifier asn1.ObjectIdentifier `asn1:"optional"` + PolicyQualifier any `asn1:"optional"` + } + policies := make([]policyInformation, 0) + _, err := asn1.Unmarshal(ext.Value, &policies) + if err != nil { + return nil, err + } + for _, policy := range policies { + policyIdentifiers = append(policyIdentifiers, policy.PolicyIdentifier.String()) + } + return policyIdentifiers, nil + } + } + return nil, nil +} + +// Translate Certificates and CSRs into Certificate Template +// Four "Types" Here: Certificates; Certificate Signing Requests; Fields map[string]interface{}; Creation Parameters + +func 
ParseCertificateToCreationParameters(certificate x509.Certificate) (creationParameters CreationParameters, err error) { + otherSans, err := getOtherSANsMapFromExtensions(certificate.Extensions) + if err != nil { + return CreationParameters{}, err + } + + creationParameters = CreationParameters{ + Subject: removeNames(certificate.Subject), + DNSNames: certificate.DNSNames, + EmailAddresses: certificate.EmailAddresses, + IPAddresses: certificate.IPAddresses, + URIs: certificate.URIs, + OtherSANs: otherSans, + IsCA: certificate.IsCA, + KeyType: GetKeyType(certificate.PublicKeyAlgorithm.String()), + KeyBits: FindBitLength(certificate.PublicKey), + NotAfter: certificate.NotAfter, + KeyUsage: certificate.KeyUsage, + // ExtKeyUsage: We use ExtKeyUsageOIDs instead as the more general field + // ExtKeyUsageOIDs: this is an extension that may not be set, so is handled below + // PolicyIdentifiers: this is an extension that may not be set, so is handled below + BasicConstraintsValidForNonCA: certificate.BasicConstraintsValid, + SignatureBits: FindSignatureBits(certificate.SignatureAlgorithm), + UsePSS: IsPSS(certificate.SignatureAlgorithm), + // The following two values are on creation parameters, but are impossible to parse from the certificate + // ForceAppendCaChain + // UseCSRValues + PermittedDNSDomains: certificate.PermittedDNSDomains, + // URLs: punting on this for now + MaxPathLength: certificate.MaxPathLen, + NotBeforeDuration: time.Now().Sub(certificate.NotBefore), // Assumes Certificate was created this moment + SKID: certificate.SubjectKeyId, + } + + extKeyUsageOIDS, err := getExtKeyUsageOids(certificate.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.ExtKeyUsageOIDs = extKeyUsageOIDS + + policyInformationOids, err := getPolicyIdentifiers(certificate.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.PolicyIdentifiers = policyInformationOids + + return creationParameters, err +} + +func removeNames(name pkix.Name) pkix.Name { + name.Names = nil + return name +} + +func ParseCsrToCreationParameters(csr x509.CertificateRequest) (CreationParameters, error) { + otherSANs, err := getOtherSANsMapFromExtensions(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + + creationParameters := CreationParameters{ + Subject: removeNames(csr.Subject), + DNSNames: csr.DNSNames, + EmailAddresses: csr.EmailAddresses, + IPAddresses: csr.IPAddresses, + URIs: csr.URIs, + OtherSANs: otherSANs, + // IsCA: is handled below since the basic constraint it comes from might not be set on the CSR + KeyType: GetKeyType(csr.PublicKeyAlgorithm.String()), + KeyBits: FindBitLength(csr.PublicKey), + // NotAfter: this is not set on a CSR + // KeyUsage: handled below since this may not be set + // ExtKeyUsage: We use exclusively ExtKeyUsageOIDs here + // ExtKeyUsageOIDs: handled below since this may not be set + // PolicyIdentifiers: handled below since this may not be set + // BasicConstraintsValidForNonCA is handled below, since it may or may not be set on the CSR + SignatureBits: FindSignatureBits(csr.SignatureAlgorithm), + UsePSS: IsPSS(csr.SignatureAlgorithm), + // The following two values are on creation parameters, but are impossible to parse from the csr + // ForceAppendCaChain + // UseCSRValues + // PermittedDNSDomains : omitted, this generally isn't on a CSR + // URLs : omitted, this generally isn't on a CSR + // MaxPathLength is handled below since the basic constraint it comes from may not be set on the CSR + // 
NotBeforeDuration : this is not set on a CSR + // SKID: this is generally not set on a CSR, but calculated from the Key information itself + } + + keyUsage, err := getKeyUsage(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.KeyUsage = keyUsage + + extKeyUsageOIDS, err := getExtKeyUsageOids(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.ExtKeyUsageOIDs = extKeyUsageOIDS + + policyInformationOids, err := getPolicyIdentifiers(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.PolicyIdentifiers = policyInformationOids + + found, isCA, maxPathLength, err := getBasicConstraintsFromExtension(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + if found { + creationParameters.IsCA = isCA + creationParameters.BasicConstraintsValidForNonCA = (isCA && maxPathLength != 0) || (!isCA && (maxPathLength == 0)) + if isCA { // MaxPathLength Only Has a Meaning on a Certificate Authority + creationParameters.MaxPathLength = maxPathLength + } + } + + return creationParameters, err +} + +func ParseCsrToFields(csr x509.CertificateRequest) (map[string]interface{}, error) { + otherSans, err := getOtherSANsStringFromExtensions(csr.Extensions) + if err != nil { + return nil, err + } + + templateData := map[string]interface{}{ + "common_name": csr.Subject.CommonName, + "alt_names": MakeAltNamesCommaSeparatedString(csr.DNSNames, csr.EmailAddresses), + "ip_sans": MakeIpAddressCommaSeparatedString(csr.IPAddresses), + "uri_sans": MakeUriCommaSeparatedString(csr.URIs), + "other_sans": otherSans, + "signature_bits": FindSignatureBits(csr.SignatureAlgorithm), + "exclude_cn_from_sans": DetermineExcludeCnFromCsrSans(csr), + "ou": makeCommaSeparatedString(csr.Subject.OrganizationalUnit), + "organization": makeCommaSeparatedString(csr.Subject.Organization), + "country": makeCommaSeparatedString(csr.Subject.Country), + "locality": makeCommaSeparatedString(csr.Subject.Locality), + "province": makeCommaSeparatedString(csr.Subject.Province), + "street_address": makeCommaSeparatedString(csr.Subject.StreetAddress), + "postal_code": makeCommaSeparatedString(csr.Subject.PostalCode), + "serial_number": csr.Subject.SerialNumber, + // There is no "TTL" on a CSR, that is always set by the signer + // max_path_length is handled below + // permitted_dns_domains is a CA thing, it generally does not appear on a CSR + "use_pss": IsPSS(csr.SignatureAlgorithm), + // skid could be calculated, but does not directly exist on a csr, so punting for now + "key_type": GetKeyType(csr.PublicKeyAlgorithm.String()), + "key_bits": FindBitLength(csr.PublicKey), + } + + // isCA is not a field in our data call - that is represented inside vault by using a different endpoint + found, _, _, err := getBasicConstraintsFromExtension(csr.Extensions) + if err != nil { + return nil, err + } + templateData["add_basic_constraints"] = found + + return templateData, nil +} + +func ParseCertificateToFields(certificate x509.Certificate) (map[string]interface{}, error) { + otherSans, err := getOtherSANsStringFromExtensions(certificate.Extensions) + if err != nil { + return nil, err + } + + templateData := map[string]interface{}{ + "common_name": certificate.Subject.CommonName, + "alt_names": MakeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": MakeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": MakeUriCommaSeparatedString(certificate.URIs), + "other_sans": 
otherSans, + "signature_bits": FindSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": DetermineExcludeCnFromCertSans(certificate), + "ou": makeCommaSeparatedString(certificate.Subject.OrganizationalUnit), + "organization": makeCommaSeparatedString(certificate.Subject.Organization), + "country": makeCommaSeparatedString(certificate.Subject.Country), + "locality": makeCommaSeparatedString(certificate.Subject.Locality), + "province": makeCommaSeparatedString(certificate.Subject.Province), + "street_address": makeCommaSeparatedString(certificate.Subject.StreetAddress), + "postal_code": makeCommaSeparatedString(certificate.Subject.PostalCode), + "serial_number": certificate.Subject.SerialNumber, + "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), + "max_path_length": certificate.MaxPathLen, + "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), + "use_pss": IsPSS(certificate.SignatureAlgorithm), + "skid": hex.EncodeToString(certificate.SubjectKeyId), + "key_type": GetKeyType(certificate.PublicKeyAlgorithm.String()), + "key_bits": FindBitLength(certificate.PublicKey), + } + + return templateData, nil +} + +func getBasicConstraintsFromExtension(exts []pkix.Extension) (found bool, isCA bool, maxPathLength int, err error) { + for _, ext := range exts { + if ext.Id.Equal(ExtensionBasicConstraintsOID) { + isCA, maxPathLength, err = ParseBasicConstraintExtension(ext) + if err != nil { + return false, false, -1, err + } + return true, isCA, maxPathLength, nil + } + } + + return false, false, -1, nil +} + +func MakeAltNamesCommaSeparatedString(names []string, emails []string) string { + return strings.Join(append(names, emails...), ",") +} + +func MakeUriCommaSeparatedString(uris []*url.URL) string { + stringAddresses := make([]string, len(uris)) + for i, uri := range uris { + stringAddresses[i] = uri.String() + } + return strings.Join(stringAddresses, ",") +} + +func MakeIpAddressCommaSeparatedString(addresses []net.IP) string { + stringAddresses := make([]string, len(addresses)) + for i, address := range addresses { + stringAddresses[i] = address.String() + } + return strings.Join(stringAddresses, ",") +} + +func makeCommaSeparatedString(values []string) string { + return strings.Join(values, ",") +} + +func DetermineExcludeCnFromCertSans(certificate x509.Certificate) bool { + cn := certificate.Subject.CommonName + if cn == "" { + return false + } + + emails := certificate.EmailAddresses + for _, email := range emails { + if email == cn { + return false + } + } + + dnses := certificate.DNSNames + for _, dns := range dnses { + if dns == cn { + return false + } + } + + return true +} + +func DetermineExcludeCnFromCsrSans(csr x509.CertificateRequest) bool { + cn := csr.Subject.CommonName + if cn == "" { + return false + } + + emails := csr.EmailAddresses + for _, email := range emails { + if email == cn { + return false + } + } + + dnses := csr.DNSNames + for _, dns := range dnses { + if dns == cn { + return false + } + } + + return true +} + +func FindBitLength(publicKey any) int { + if publicKey == nil { + return 0 + } + switch pub := publicKey.(type) { + case *rsa.PublicKey: + return pub.N.BitLen() + case *ecdsa.PublicKey: + switch pub.Curve { + case elliptic.P224(): + return 224 + case elliptic.P256(): + return 256 + case elliptic.P384(): + return 384 + case elliptic.P521(): + return 521 + default: + return 0 + } + default: + return 0 + } +} + +func FindSignatureBits(algo x509.SignatureAlgorithm) int { + switch algo { + case 
x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1: + return -1 + case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256, x509.SHA256WithRSAPSS: + return 256 + case x509.SHA384WithRSA, x509.ECDSAWithSHA384, x509.SHA384WithRSAPSS: + return 384 + case x509.SHA512WithRSA, x509.SHA512WithRSAPSS, x509.ECDSAWithSHA512: + return 512 + case x509.PureEd25519: + return 0 + default: + return -1 + } +} + +func GetKeyType(goKeyType string) string { + switch goKeyType { + case "RSA": + return "rsa" + case "ECDSA": + return "ec" + case "Ed25519": + return "ed25519" + default: + return "" + } +} + +func IsPSS(algorithm x509.SignatureAlgorithm) bool { + switch algorithm { + case x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS, x509.SHA256WithRSAPSS: + return true + default: + return false + } +} diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 039ff8a52291..cdfc912e9289 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -163,6 +163,21 @@ func GetPrivateKeyTypeFromSigner(signer crypto.Signer) PrivateKeyType { return UnknownPrivateKey } +// GetPrivateKeyTypeFromPublicKey based on the public key, return the PrivateKeyType +// that would be associated with it, returning UnknownPrivateKey for unsupported types +func GetPrivateKeyTypeFromPublicKey(pubKey crypto.PublicKey) PrivateKeyType { + switch pubKey.(type) { + case *rsa.PublicKey: + return RSAPrivateKey + case *ecdsa.PublicKey: + return ECPrivateKey + case ed25519.PublicKey: + return Ed25519PrivateKey + default: + return UnknownPrivateKey + } +} + // ToPEMBundle converts a string-based certificate bundle // to a PEM-based string certificate bundle in trust path // order, leaf certificate first @@ -804,6 +819,11 @@ type CreationParameters struct { // The explicit SKID to use; especially useful for cross-signing. SKID []byte + + // Ignore validating the CSR's signature. This should only be enabled if the + // sender of the CSR has proven proof of possession of the associated + // private key by some other means, otherwise keep this set to false. + IgnoreCSRSignature bool } type CreationBundle struct { diff --git a/sdk/helper/certutil/types_test.go b/sdk/helper/certutil/types_test.go new file mode 100644 index 000000000000..2cf383afaa02 --- /dev/null +++ b/sdk/helper/certutil/types_test.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package certutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "testing" +) + +func TestGetPrivateKeyTypeFromPublicKey(t *testing.T) { + rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("error generating rsa key: %s", err) + } + + ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + t.Fatalf("error generating ecdsa key: %s", err) + } + + publicKey, _, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatalf("error generating ed25519 key: %s", err) + } + + testCases := map[string]struct { + publicKey crypto.PublicKey + expectedKeyType PrivateKeyType + }{ + "rsa": { + publicKey: rsaKey.Public(), + expectedKeyType: RSAPrivateKey, + }, + "ecdsa": { + publicKey: ecdsaKey.Public(), + expectedKeyType: ECPrivateKey, + }, + "ed25519": { + publicKey: publicKey, + expectedKeyType: Ed25519PrivateKey, + }, + "bad key type": { + publicKey: []byte{}, + expectedKeyType: UnknownPrivateKey, + }, + } + + for name, tt := range testCases { + t.Run(name, func(t *testing.T) { + keyType := GetPrivateKeyTypeFromPublicKey(tt.publicKey) + + if keyType != tt.expectedKeyType { + t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType) + } + }) + } +} diff --git a/sdk/helper/clientcountutil/clientcountutil.go b/sdk/helper/clientcountutil/clientcountutil.go new file mode 100644 index 000000000000..7d0be5526e1b --- /dev/null +++ b/sdk/helper/clientcountutil/clientcountutil.go @@ -0,0 +1,408 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package clientcountutil provides a library to generate activity log data for +// testing. +package clientcountutil + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/clientcountutil/generation" + "google.golang.org/protobuf/encoding/protojson" +) + +// ActivityLogDataGenerator holds an ActivityLogMockInput. Users can create the +// generator with NewActivityLogData(), add content to the generator using +// the fluent API methods, and generate and write the JSON representation of the +// input to the Vault API. +type ActivityLogDataGenerator struct { + data *generation.ActivityLogMockInput + addingToMonth *generation.Data + addingToSegment *generation.Segment + client *api.Client +} + +// NewActivityLogData creates a new instance of an activity log data generator +// The type returned by this function cannot be called concurrently +func NewActivityLogData(client *api.Client) *ActivityLogDataGenerator { + return &ActivityLogDataGenerator{ + client: client, + data: new(generation.ActivityLogMockInput), + } +} + +// NewCurrentMonthData opens a new month of data for the current month. All +// clients will continue to be added to this month until a new month is created +// with NewPreviousMonthData. +func (d *ActivityLogDataGenerator) NewCurrentMonthData() *ActivityLogDataGenerator { + return d.newMonth(&generation.Data{Month: &generation.Data_CurrentMonth{CurrentMonth: true}}) +} + +// NewPreviousMonthData opens a new month of data, where the clients will be +// recorded as having been seen monthsAgo months ago. All clients will continue +// to be added to this month until a new month is created with +// NewPreviousMonthData or NewCurrentMonthData. 
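+// For example, a hypothetical sketch, where client is an *api.Client pointed at a Vault binary built with the "testonly" flag and ctx is a context.Context: +// +//	paths, err := NewActivityLogData(client). +//		NewPreviousMonthData(1). +//		NewClientsSeen(10). +//		NewCurrentMonthData(). +//		RepeatedClientsSeen(4). +//		Write(ctx, generation.WriteOptions_WRITE_ENTITIES)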
+func (d *ActivityLogDataGenerator) NewPreviousMonthData(monthsAgo int) *ActivityLogDataGenerator { + return d.newMonth(&generation.Data{Month: &generation.Data_MonthsAgo{MonthsAgo: int32(monthsAgo)}}) +} + +func (d *ActivityLogDataGenerator) newMonth(newMonth *generation.Data) *ActivityLogDataGenerator { + d.data.Data = append(d.data.Data, newMonth) + d.addingToMonth = newMonth + d.addingToSegment = nil + return d +} + +// MonthOption holds an option that can be set for the entire month +type MonthOption func(m *generation.Data) + +// WithMaximumSegmentIndex sets the maximum segment index for the segments in +// the open month. Set this value in order to set how many indexes the data +// should be split across. This must include any empty or skipped indexes. For +// example, say that you would like all of your data split across indexes 0 and +// 3, with the following empty and skipped indexes: +// +// empty indexes: [2] +// skipped indexes: [1] +// +// To accomplish that, you will need to call WithMaximumSegmentIndex(3). +// This value will be ignored if you have called Segment() for the open month +// If not set, all data will be in 1 segment. +func WithMaximumSegmentIndex(n int) MonthOption { + return func(m *generation.Data) { + m.NumSegments = int32(n) + } +} + +// WithEmptySegmentIndexes sets which segment indexes should be empty for the +// segments in the open month. If you use this option, you must either: +// 1. ensure that you've called Segment() for the open month +// 2. use WithMaximumSegmentIndex() to set the total number of segments +// +// If you haven't set either of those values then this option will be ignored, +// unless you included 0 as an empty segment index in which case only an empty +// segment will be created. +func WithEmptySegmentIndexes(i ...int) MonthOption { + return func(m *generation.Data) { + indexes := make([]int32, 0, len(i)) + for _, index := range i { + indexes = append(indexes, int32(index)) + } + m.EmptySegmentIndexes = indexes + } +} + +// WithSkipSegmentIndexes sets which segment indexes should be skipped for the +// segments in the open month. If you use this option, you must either: +// 1. ensure that you've called Segment() for the open month +// 2. use WithMaximumSegmentIndex() to set the total number of segments +// +// If you haven't set either of those values then this option will be ignored, +// unless you included 0 as a skipped segment index in which case no segments +// will be created. +func WithSkipSegmentIndexes(i ...int) MonthOption { + return func(m *generation.Data) { + indexes := make([]int32, 0, len(i)) + for _, index := range i { + indexes = append(indexes, int32(index)) + } + m.SkipSegmentIndexes = indexes + } +} + +// SetMonthOptions can be called at any time to set options for the open month +func (d *ActivityLogDataGenerator) SetMonthOptions(opts ...MonthOption) *ActivityLogDataGenerator { + for _, opt := range opts { + opt(d.addingToMonth) + } + return d +} + +// ClientOption defines additional options for the client +// This type and the functions that return it are here for ease of use. 
A user +// could also choose to create the *generation.Client themselves, without using +// a ClientOption +type ClientOption func(client *generation.Client) + +// WithClientNamespace sets the namespace for the client +func WithClientNamespace(n string) ClientOption { + return func(client *generation.Client) { + client.Namespace = n + } +} + +// WithClientMount sets the mount path for the client +func WithClientMount(m string) ClientOption { + return func(client *generation.Client) { + client.Mount = m + } +} + +// WithClientIsNonEntity sets whether the client is an entity client or a non- +// entity token client +func WithClientIsNonEntity() ClientOption { + return WithClientType("non-entity") +} + +// WithClientType sets the client type to the given string. If this client type +// is not "entity", then the client will be counted in the activity log as a +// non-entity client +func WithClientType(typ string) ClientOption { + return func(client *generation.Client) { + client.ClientType = typ + } +} + +// WithClientID sets the ID for the client +func WithClientID(id string) ClientOption { + return func(client *generation.Client) { + client.Id = id + } +} + +// ClientsSeen adds clients to the month that was most recently opened with +// NewPreviousMonthData or NewCurrentMonthData. +func (d *ActivityLogDataGenerator) ClientsSeen(clients ...*generation.Client) *ActivityLogDataGenerator { + if d.addingToSegment == nil { + if d.addingToMonth.Clients == nil { + d.addingToMonth.Clients = &generation.Data_All{All: &generation.Clients{}} + } + d.addingToMonth.GetAll().Clients = append(d.addingToMonth.GetAll().Clients, clients...) + return d + } + d.addingToSegment.Clients.Clients = append(d.addingToSegment.Clients.Clients, clients...) + return d +} + +// NewClientSeen adds 1 new client with the given options to the most recently +// opened month. +func (d *ActivityLogDataGenerator) NewClientSeen(opts ...ClientOption) *ActivityLogDataGenerator { + return d.NewClientsSeen(1, opts...) +} + +// NewClientsSeen adds n new clients with the given options to the most recently +// opened month. +func (d *ActivityLogDataGenerator) NewClientsSeen(n int, opts ...ClientOption) *ActivityLogDataGenerator { + c := new(generation.Client) + for _, opt := range opts { + opt(c) + } + c.Count = int32(n) + return d.ClientsSeen(c) +} + +// RepeatedClientSeen adds 1 client that was seen in the previous month to +// the month that was most recently opened. This client will have the attributes +// described by the provided options. +func (d *ActivityLogDataGenerator) RepeatedClientSeen(opts ...ClientOption) *ActivityLogDataGenerator { + return d.RepeatedClientsSeen(1, opts...) +} + +// RepeatedClientsSeen adds n clients that were seen in the previous month to +// the month that was most recently opened. These clients will have the +// attributes described by provided options. +func (d *ActivityLogDataGenerator) RepeatedClientsSeen(n int, opts ...ClientOption) *ActivityLogDataGenerator { + c := new(generation.Client) + for _, opt := range opts { + opt(c) + } + c.Repeated = true + c.Count = int32(n) + return d.ClientsSeen(c) +} + +// RepeatedClientSeenFromMonthsAgo adds 1 client that was seen in monthsAgo +// month to the month that was most recently opened. This client will have the +// attributes described by provided options. +func (d *ActivityLogDataGenerator) RepeatedClientSeenFromMonthsAgo(monthsAgo int, opts ...ClientOption) *ActivityLogDataGenerator { + return d.RepeatedClientsSeenFromMonthsAgo(1, monthsAgo, opts...) 
+} + +// RepeatedClientsSeenFromMonthsAgo adds n clients that were seen in monthsAgo +// month to the month that was most recently opened. These clients will have the +// attributes described by provided options. +func (d *ActivityLogDataGenerator) RepeatedClientsSeenFromMonthsAgo(n, monthsAgo int, opts ...ClientOption) *ActivityLogDataGenerator { + c := new(generation.Client) + for _, opt := range opts { + opt(c) + } + c.RepeatedFromMonth = int32(monthsAgo) + c.Count = int32(n) + return d.ClientsSeen(c) +} + +// SegmentOption defines additional options for the segment +type SegmentOption func(segment *generation.Segment) + +// WithSegmentIndex sets the index for the segment to n. If this option is not +// provided, the segment will be given the next consecutive index +func WithSegmentIndex(n int) SegmentOption { + return func(segment *generation.Segment) { + index := int32(n) + segment.SegmentIndex = &index + } +} + +// Segment starts a segment within the current month. All clients will be added +// to this segment, until either Segment is called again to create a new open +// segment, or NewPreviousMonthData or NewCurrentMonthData is called to open a +// new month. +func (d *ActivityLogDataGenerator) Segment(opts ...SegmentOption) *ActivityLogDataGenerator { + s := &generation.Segment{ + Clients: &generation.Clients{}, + } + for _, opt := range opts { + opt(s) + } + if d.addingToMonth.GetSegments() == nil { + d.addingToMonth.Clients = &generation.Data_Segments{Segments: &generation.Segments{}} + } + d.addingToMonth.GetSegments().Segments = append(d.addingToMonth.GetSegments().Segments, s) + d.addingToSegment = s + return d +} + +// ToJSON returns the JSON representation of the data +func (d *ActivityLogDataGenerator) ToJSON(writeOptions ...generation.WriteOptions) ([]byte, error) { + if len(writeOptions) > 0 { + d.data.Write = writeOptions + } + return protojson.Marshal(d.data) +} + +// ToProto returns the ActivityLogMockInput protobuf +func (d *ActivityLogDataGenerator) ToProto() *generation.ActivityLogMockInput { + return d.data +} + +// Write writes the data to the API with the given write options. The method +// returns the new paths that have been written. Note that the API endpoint will +// only be present when Vault has been compiled with the "testonly" flag. +func (d *ActivityLogDataGenerator) Write(ctx context.Context, writeOptions ...generation.WriteOptions) ([]string, error) { + d.data.Write = writeOptions + err := VerifyInput(d.data) + if err != nil { + return nil, err + } + data, err := d.ToJSON() + if err != nil { + return nil, err + } + resp, err := d.client.Logical().WriteWithContext(ctx, "sys/internal/counters/activity/write", map[string]interface{}{"input": string(data)}) + if err != nil { + return nil, err + } + if resp.Data == nil { + return nil, fmt.Errorf("received no data") + } + paths := resp.Data["paths"] + castedPaths, ok := paths.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid paths data: %v", paths) + } + returnPaths := make([]string, 0, len(castedPaths)) + for _, path := range castedPaths { + returnPaths = append(returnPaths, path.(string)) + } + return returnPaths, nil +} + +// VerifyInput checks that the input data is valid +func VerifyInput(input *generation.ActivityLogMockInput) error { + // mapping from monthsAgo to the month's data + months := make(map[int32]*generation.Data) + + // this keeps track of the index of the earliest month. 
We need to verify + // that this month doesn't have any repeated clients + earliestMonthsAgo := int32(0) + + // this map holds a set of the month indexes for any RepeatedFromMonth + // values. Each element will be checked to ensure month that should be + // repeated from exists in the input data + repeatedFromMonths := make(map[int32]struct{}) + + for _, month := range input.Data { + monthsAgo := month.GetMonthsAgo() + if monthsAgo > earliestMonthsAgo { + earliestMonthsAgo = monthsAgo + } + + // verify that no monthsAgo value is repeated + if _, seen := months[monthsAgo]; seen { + return fmt.Errorf("multiple months with monthsAgo %d", monthsAgo) + } + months[monthsAgo] = month + + // the number of segments should be correct + if month.NumSegments > 0 && int(month.NumSegments)-len(month.GetSkipSegmentIndexes())-len(month.GetEmptySegmentIndexes()) <= 0 { + return fmt.Errorf("number of segments %d is too small. It must be large enough to include the empty (%v) and skipped (%v) segments", month.NumSegments, month.GetSkipSegmentIndexes(), month.GetEmptySegmentIndexes()) + } + + if segments := month.GetSegments(); segments != nil { + if month.NumSegments > 0 { + return errors.New("cannot specify both number of segments and create segmented data") + } + + segmentIndexes := make(map[int32]struct{}) + for _, segment := range segments.Segments { + + // collect any RepeatedFromMonth values + for _, client := range segment.GetClients().GetClients() { + if repeatFrom := client.RepeatedFromMonth; repeatFrom > 0 { + repeatedFromMonths[repeatFrom] = struct{}{} + } + } + + // verify that no segment indexes are repeated + segmentIndex := segment.SegmentIndex + if segmentIndex == nil { + continue + } + if _, seen := segmentIndexes[*segmentIndex]; seen { + return fmt.Errorf("cannot have repeated segment index %d", *segmentIndex) + } + segmentIndexes[*segmentIndex] = struct{}{} + } + } else { + for _, client := range month.GetAll().GetClients() { + // collect any RepeatedFromMonth values + if repeatFrom := client.RepeatedFromMonth; repeatFrom > 0 { + repeatedFromMonths[repeatFrom] = struct{}{} + } + } + } + } + + // check that the corresponding month exists for all the RepeatedFromMonth + // values + for repeated := range repeatedFromMonths { + if _, ok := months[repeated]; !ok { + return fmt.Errorf("cannot repeat from %d months ago", repeated) + } + } + // the earliest month can't have any repeated clients, because there are no + // earlier months to repeat from + earliestMonth := months[earliestMonthsAgo] + repeatedClients := false + if all := earliestMonth.GetAll(); all != nil { + for _, client := range all.GetClients() { + repeatedClients = repeatedClients || client.Repeated || client.RepeatedFromMonth != 0 + } + } else { + for _, segment := range earliestMonth.GetSegments().GetSegments() { + for _, client := range segment.GetClients().GetClients() { + repeatedClients = repeatedClients || client.Repeated || client.RepeatedFromMonth != 0 + } + } + } + + if repeatedClients { + return fmt.Errorf("%d months ago cannot have repeated clients, because it is the earliest month", earliestMonthsAgo) + } + + return nil +} diff --git a/sdk/helper/clientcountutil/clientcountutil_test.go b/sdk/helper/clientcountutil/clientcountutil_test.go new file mode 100644 index 000000000000..6a5b224bc675 --- /dev/null +++ b/sdk/helper/clientcountutil/clientcountutil_test.go @@ -0,0 +1,279 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package clientcountutil + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/clientcountutil/generation" + "github.com/stretchr/testify/require" +) + +// TestNewCurrentMonthData verifies that current month is set correctly and that +// there are no open segments +func TestNewCurrentMonthData(t *testing.T) { + generator := NewActivityLogData(nil).NewCurrentMonthData() + require.True(t, generator.data.Data[0].GetCurrentMonth()) + require.True(t, generator.addingToMonth.GetCurrentMonth()) + require.Nil(t, generator.addingToSegment) +} + +// TestNewMonthDataMonthsAgo verifies that months ago is set correctly and that +// there are no open segments +func TestNewMonthDataMonthsAgo(t *testing.T) { + generator := NewActivityLogData(nil).NewPreviousMonthData(3) + require.Equal(t, int32(3), generator.data.Data[0].GetMonthsAgo()) + require.Equal(t, int32(3), generator.addingToMonth.GetMonthsAgo()) + require.Nil(t, generator.addingToSegment) +} + +// TestNewMonthData_MultipleMonths opens a month 3 months ago then 2 months ago. +// The test verifies that the generator is set to add to the correct month. We +// then open a current month, and verify that the generator will add to the +// current month. +func TestNewMonthData_MultipleMonths(t *testing.T) { + generator := NewActivityLogData(nil).NewPreviousMonthData(3).NewPreviousMonthData(2) + require.Equal(t, int32(2), generator.data.Data[1].GetMonthsAgo()) + require.Equal(t, int32(2), generator.addingToMonth.GetMonthsAgo()) + generator = generator.NewCurrentMonthData() + require.True(t, generator.data.Data[2].GetCurrentMonth()) + require.True(t, generator.addingToMonth.GetCurrentMonth()) +} + +// TestNewCurrentMonthData_ClientsSeen calls ClientsSeen with 3 clients, and +// verifies that they are added to the input data +func TestNewCurrentMonthData_ClientsSeen(t *testing.T) { + wantClients := []*generation.Client{ + { + Id: "1", + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }, + { + Id: "2", + }, + { + Id: "3", + Count: int32(3), + }, + } + generator := NewActivityLogData(nil).NewCurrentMonthData().ClientsSeen(wantClients...) + require.Equal(t, generator.data.Data[0].GetAll().Clients, wantClients) + require.True(t, generator.data.Data[0].GetCurrentMonth()) +} + +// TestSegment_AddClients adds clients in a variety of ways to an open segment +// and verifies that the clients are present in the segment with the correct +// options +func TestSegment_AddClients(t *testing.T) { + testAddClients(t, func() *ActivityLogDataGenerator { + return NewActivityLogData(nil).NewCurrentMonthData().Segment() + }, func(g *ActivityLogDataGenerator) *generation.Client { + return g.data.Data[0].GetSegments().Segments[0].Clients.Clients[0] + }) +} + +// TestSegment_MultipleSegments opens a current month and adds a client to an +// un-indexed segment, then opens an indexed segment and adds a client. 
The test // verifies that clients are present in both segments, and that the segment // index is correctly recorded +func TestSegment_MultipleSegments(t *testing.T) { + generator := NewActivityLogData(nil).NewCurrentMonthData().Segment().NewClientSeen().Segment(WithSegmentIndex(2)).NewClientSeen() + require.Len(t, generator.data.Data[0].GetSegments().Segments[0].Clients.Clients, 1) + require.Len(t, generator.data.Data[0].GetSegments().Segments[1].Clients.Clients, 1) + require.Equal(t, int32(2), *generator.data.Data[0].GetSegments().Segments[1].SegmentIndex) + require.Equal(t, int32(2), *generator.addingToSegment.SegmentIndex) +} + +// TestSegment_NewMonth adds a client to a segment, then starts a new month. The +// test verifies that there are no open segments +func TestSegment_NewMonth(t *testing.T) { + generator := NewActivityLogData(nil).NewCurrentMonthData().Segment().NewClientSeen().NewPreviousMonthData(1) + require.Nil(t, generator.addingToSegment) +} + +// TestNewCurrentMonthData_AddClients adds clients in a variety of ways to +// the current month and verifies that the clients are present in the month with +// the correct options +func TestNewCurrentMonthData_AddClients(t *testing.T) { + testAddClients(t, func() *ActivityLogDataGenerator { + return NewActivityLogData(nil).NewCurrentMonthData() + }, func(g *ActivityLogDataGenerator) *generation.Client { + return g.data.Data[0].GetAll().Clients[0] + }) +} + +// TestWrite creates a mock HTTP server and writes generated data to it. The +// test verifies that the returned paths are parsed correctly, and that the JSON +// sent to the server is correct. +func TestWrite(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := io.WriteString(w, `{"data":{"paths":["path1","path2"]}}`) + require.NoError(t, err) + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + raw := map[string]string{} + err = json.Unmarshal(body, &raw) + require.NoError(t, err) + require.JSONEq(t, `{"write":["WRITE_ENTITIES"],"data":[{"monthsAgo":3,"all":{"clients":[{"count":1}]}},{"monthsAgo":2,"segments":{"segments":[{"segmentIndex":2,"clients":{"clients":[{"count":1,"repeated":true}]}}]}},{"currentMonth":true}]}`, raw["input"]) + })) + defer ts.Close() + + client, err := api.NewClient(&api.Config{ + Address: ts.URL, + }) + require.NoError(t, err) + paths, err := NewActivityLogData(client). + NewPreviousMonthData(3). + NewClientSeen(). + NewPreviousMonthData(2). + Segment(WithSegmentIndex(2)). + RepeatedClientSeen(). + NewCurrentMonthData().Write(context.Background(), generation.WriteOptions_WRITE_ENTITIES) + + require.NoError(t, err) + require.Equal(t, []string{"path1", "path2"}, paths) +} + +func testAddClients(t *testing.T, makeGenerator func() *ActivityLogDataGenerator, getClient func(data *ActivityLogDataGenerator) *generation.Client) { + t.Helper() + clientOptions := []ClientOption{ + WithClientNamespace("ns"), WithClientMount("mount"), WithClientIsNonEntity(), WithClientID("1"), + } + generator := makeGenerator().NewClientSeen(clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 1, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().NewClientsSeen(4, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 4, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientSeen(clientOptions...)
+ require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 1, + Repeated: true, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientsSeen(4, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 4, + Repeated: true, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientSeenFromMonthsAgo(3, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 1, + RepeatedFromMonth: 3, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientsSeenFromMonthsAgo(4, 3, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 4, + RepeatedFromMonth: 3, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) +} + +// TestSetMonthOptions sets month options and verifies that they are saved +func TestSetMonthOptions(t *testing.T) { + generator := NewActivityLogData(nil).NewCurrentMonthData().SetMonthOptions(WithEmptySegmentIndexes(3, 4), + WithMaximumSegmentIndex(7), WithSkipSegmentIndexes(1, 2)) + require.Equal(t, int32(7), generator.data.Data[0].NumSegments) + require.Equal(t, []int32{3, 4}, generator.data.Data[0].EmptySegmentIndexes) + require.Equal(t, []int32{1, 2}, generator.data.Data[0].SkipSegmentIndexes) +} + +// TestVerifyInput constructs invalid inputs and ensures that VerifyInput +// returns an error +func TestVerifyInput(t *testing.T) { + cases := []struct { + name string + generator *ActivityLogDataGenerator + }{ + { + name: "repeated client with only 1 month", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + RepeatedClientSeen(), + }, + { + name: "repeated client with segment", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + Segment(). + RepeatedClientSeen(), + }, + { + name: "repeated client with earliest month", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + NewClientSeen(). + NewPreviousMonthData(2). + RepeatedClientSeen(), + }, + { + name: "repeated month", + generator: NewActivityLogData(nil). + NewPreviousMonthData(1). + NewPreviousMonthData(1), + }, + { + name: "repeated current month", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + NewCurrentMonthData(), + }, + { + name: "repeated segment index", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + Segment(WithSegmentIndex(1)). + Segment(WithSegmentIndex(1)), + }, + { + name: "segment with num segments", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + Segment(). + SetMonthOptions(WithMaximumSegmentIndex(1)), + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + require.Error(t, VerifyInput(tc.generator.data)) + }) + } +} diff --git a/sdk/helper/clientcountutil/generation/generate_data.pb.go b/sdk/helper/clientcountutil/generation/generate_data.pb.go new file mode 100644 index 000000000000..29267282e90f --- /dev/null +++ b/sdk/helper/clientcountutil/generation/generate_data.pb.go @@ -0,0 +1,753 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: sdk/helper/clientcountutil/generation/generate_data.proto + +package generation + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type WriteOptions int32 + +const ( + WriteOptions_WRITE_UNKNOWN WriteOptions = 0 + WriteOptions_WRITE_PRECOMPUTED_QUERIES WriteOptions = 1 + WriteOptions_WRITE_DISTINCT_CLIENTS WriteOptions = 2 + WriteOptions_WRITE_ENTITIES WriteOptions = 3 + WriteOptions_WRITE_DIRECT_TOKENS WriteOptions = 4 + WriteOptions_WRITE_INTENT_LOGS WriteOptions = 5 +) + +// Enum value maps for WriteOptions. +var ( + WriteOptions_name = map[int32]string{ + 0: "WRITE_UNKNOWN", + 1: "WRITE_PRECOMPUTED_QUERIES", + 2: "WRITE_DISTINCT_CLIENTS", + 3: "WRITE_ENTITIES", + 4: "WRITE_DIRECT_TOKENS", + 5: "WRITE_INTENT_LOGS", + } + WriteOptions_value = map[string]int32{ + "WRITE_UNKNOWN": 0, + "WRITE_PRECOMPUTED_QUERIES": 1, + "WRITE_DISTINCT_CLIENTS": 2, + "WRITE_ENTITIES": 3, + "WRITE_DIRECT_TOKENS": 4, + "WRITE_INTENT_LOGS": 5, + } +) + +func (x WriteOptions) Enum() *WriteOptions { + p := new(WriteOptions) + *p = x + return p +} + +func (x WriteOptions) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WriteOptions) Descriptor() protoreflect.EnumDescriptor { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes[0].Descriptor() +} + +func (WriteOptions) Type() protoreflect.EnumType { + return &file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes[0] +} + +func (x WriteOptions) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WriteOptions.Descriptor instead. +func (WriteOptions) EnumDescriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{0} +} + +type ActivityLogMockInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Write []WriteOptions `protobuf:"varint,1,rep,packed,name=write,proto3,enum=generation.WriteOptions" json:"write,omitempty"` + Data []*Data `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty"` +} + +func (x *ActivityLogMockInput) Reset() { + *x = ActivityLogMockInput{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ActivityLogMockInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityLogMockInput) ProtoMessage() {} + +func (x *ActivityLogMockInput) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityLogMockInput.ProtoReflect.Descriptor instead. 
+func (*ActivityLogMockInput) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{0} +} + +func (x *ActivityLogMockInput) GetWrite() []WriteOptions { + if x != nil { + return x.Write + } + return nil +} + +func (x *ActivityLogMockInput) GetData() []*Data { + if x != nil { + return x.Data + } + return nil +} + +type Data struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Month: + // + // *Data_CurrentMonth + // *Data_MonthsAgo + Month isData_Month `protobuf_oneof:"month"` + // Types that are assignable to Clients: + // + // *Data_All + // *Data_Segments + Clients isData_Clients `protobuf_oneof:"clients"` + EmptySegmentIndexes []int32 `protobuf:"varint,5,rep,packed,name=empty_segment_indexes,json=emptySegmentIndexes,proto3" json:"empty_segment_indexes,omitempty"` + SkipSegmentIndexes []int32 `protobuf:"varint,6,rep,packed,name=skip_segment_indexes,json=skipSegmentIndexes,proto3" json:"skip_segment_indexes,omitempty"` + NumSegments int32 `protobuf:"varint,7,opt,name=num_segments,json=numSegments,proto3" json:"num_segments,omitempty"` +} + +func (x *Data) Reset() { + *x = Data{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Data) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Data) ProtoMessage() {} + +func (x *Data) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Data.ProtoReflect.Descriptor instead. 
+func (*Data) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{1} +} + +func (m *Data) GetMonth() isData_Month { + if m != nil { + return m.Month + } + return nil +} + +func (x *Data) GetCurrentMonth() bool { + if x, ok := x.GetMonth().(*Data_CurrentMonth); ok { + return x.CurrentMonth + } + return false +} + +func (x *Data) GetMonthsAgo() int32 { + if x, ok := x.GetMonth().(*Data_MonthsAgo); ok { + return x.MonthsAgo + } + return 0 +} + +func (m *Data) GetClients() isData_Clients { + if m != nil { + return m.Clients + } + return nil +} + +func (x *Data) GetAll() *Clients { + if x, ok := x.GetClients().(*Data_All); ok { + return x.All + } + return nil +} + +func (x *Data) GetSegments() *Segments { + if x, ok := x.GetClients().(*Data_Segments); ok { + return x.Segments + } + return nil +} + +func (x *Data) GetEmptySegmentIndexes() []int32 { + if x != nil { + return x.EmptySegmentIndexes + } + return nil +} + +func (x *Data) GetSkipSegmentIndexes() []int32 { + if x != nil { + return x.SkipSegmentIndexes + } + return nil +} + +func (x *Data) GetNumSegments() int32 { + if x != nil { + return x.NumSegments + } + return 0 +} + +type isData_Month interface { + isData_Month() +} + +type Data_CurrentMonth struct { + CurrentMonth bool `protobuf:"varint,1,opt,name=current_month,json=currentMonth,proto3,oneof"` +} + +type Data_MonthsAgo struct { + MonthsAgo int32 `protobuf:"varint,2,opt,name=months_ago,json=monthsAgo,proto3,oneof"` +} + +func (*Data_CurrentMonth) isData_Month() {} + +func (*Data_MonthsAgo) isData_Month() {} + +type isData_Clients interface { + isData_Clients() +} + +type Data_All struct { + All *Clients `protobuf:"bytes,3,opt,name=all,proto3,oneof"` // you can’t have repeated fields in a oneof, which is why these are separate message types +} + +type Data_Segments struct { + Segments *Segments `protobuf:"bytes,4,opt,name=segments,proto3,oneof"` +} + +func (*Data_All) isData_Clients() {} + +func (*Data_Segments) isData_Clients() {} + +type Segments struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Segments []*Segment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"` +} + +func (x *Segments) Reset() { + *x = Segments{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Segments) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Segments) ProtoMessage() {} + +func (x *Segments) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Segments.ProtoReflect.Descriptor instead. 
+func (*Segments) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{2} +} + +func (x *Segments) GetSegments() []*Segment { + if x != nil { + return x.Segments + } + return nil +} + +type Segment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SegmentIndex *int32 `protobuf:"varint,1,opt,name=segment_index,json=segmentIndex,proto3,oneof" json:"segment_index,omitempty"` + Clients *Clients `protobuf:"bytes,2,opt,name=clients,proto3" json:"clients,omitempty"` +} + +func (x *Segment) Reset() { + *x = Segment{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Segment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Segment) ProtoMessage() {} + +func (x *Segment) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Segment.ProtoReflect.Descriptor instead. +func (*Segment) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{3} +} + +func (x *Segment) GetSegmentIndex() int32 { + if x != nil && x.SegmentIndex != nil { + return *x.SegmentIndex + } + return 0 +} + +func (x *Segment) GetClients() *Clients { + if x != nil { + return x.Clients + } + return nil +} + +type Clients struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Clients []*Client `protobuf:"bytes,1,rep,name=clients,proto3" json:"clients,omitempty"` +} + +func (x *Clients) Reset() { + *x = Clients{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Clients) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Clients) ProtoMessage() {} + +func (x *Clients) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Clients.ProtoReflect.Descriptor instead. 
+func (*Clients) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{4} +} + +func (x *Clients) GetClients() []*Client { + if x != nil { + return x.Clients + } + return nil +} + +type Client struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Repeated bool `protobuf:"varint,3,opt,name=repeated,proto3" json:"repeated,omitempty"` + RepeatedFromMonth int32 `protobuf:"varint,4,opt,name=repeated_from_month,json=repeatedFromMonth,proto3" json:"repeated_from_month,omitempty"` + Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"` + Mount string `protobuf:"bytes,6,opt,name=mount,proto3" json:"mount,omitempty"` + ClientType string `protobuf:"bytes,7,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` +} + +func (x *Client) Reset() { + *x = Client{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Client) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Client) ProtoMessage() {} + +func (x *Client) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Client.ProtoReflect.Descriptor instead. 
+func (*Client) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{5} +} + +func (x *Client) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Client) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Client) GetRepeated() bool { + if x != nil { + return x.Repeated + } + return false +} + +func (x *Client) GetRepeatedFromMonth() int32 { + if x != nil { + return x.RepeatedFromMonth + } + return 0 +} + +func (x *Client) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Client) GetMount() string { + if x != nil { + return x.Mount + } + return "" +} + +func (x *Client) GetClientType() string { + if x != nil { + return x.ClientType + } + return "" +} + +var File_sdk_helper_clientcountutil_generation_generate_data_proto protoreflect.FileDescriptor + +var file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x14, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, + 0x2e, 0x0a, 0x05, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x24, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc8, 0x02, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x25, + 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x4d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x5f, + 0x61, 0x67, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x6f, 0x6e, + 0x74, 0x68, 0x73, 0x41, 0x67, 0x6f, 0x12, 0x27, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x48, 0x01, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, + 0x32, 0x0a, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, + 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x48, 0x01, 0x52, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x73, 0x65, 0x67, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x05, 0x52, 0x13, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x6b, 0x69, 
0x70, 0x5f, + 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, + 0x5f, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0b, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x07, 0x0a, 0x05, + 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x3b, 0x0a, 0x08, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2f, 0x0a, 0x08, + 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, + 0x07, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x00, 0x52, 0x0c, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, + 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x73, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x22, 0x37, 0x0a, 0x07, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, + 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x52, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, + 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6d, 0x6f, 0x6e, 0x74, 0x68, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x72, 0x6f, 0x6d, 0x4d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2a, 0xa0, + 0x01, 0x0a, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x11, 0x0a, 0x0d, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 
0x12, 0x1d, 0x0a, 0x19, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x43, + 0x4f, 0x4d, 0x50, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x49, 0x45, 0x53, 0x10, + 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x49, + 0x4e, 0x43, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x53, 0x10, 0x02, 0x12, 0x12, 0x0a, + 0x0e, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x49, 0x45, 0x53, 0x10, + 0x03, 0x12, 0x17, 0x0a, 0x13, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, + 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x53, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x57, 0x52, + 0x49, 0x54, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x4f, 0x47, 0x53, 0x10, + 0x05, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, + 0x73, 0x64, 0x6b, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x75, + 0x74, 0x69, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescOnce sync.Once + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData = file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc +) + +func file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP() []byte { + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescOnce.Do(func() { + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData) + }) + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData +} + +var file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_sdk_helper_clientcountutil_generation_generate_data_proto_goTypes = []any{ + (WriteOptions)(0), // 0: generation.WriteOptions + (*ActivityLogMockInput)(nil), // 1: generation.ActivityLogMockInput + (*Data)(nil), // 2: generation.Data + (*Segments)(nil), // 3: generation.Segments + (*Segment)(nil), // 4: generation.Segment + (*Clients)(nil), // 5: generation.Clients + (*Client)(nil), // 6: generation.Client +} +var file_sdk_helper_clientcountutil_generation_generate_data_proto_depIdxs = []int32{ + 0, // 0: generation.ActivityLogMockInput.write:type_name -> generation.WriteOptions + 2, // 1: generation.ActivityLogMockInput.data:type_name -> generation.Data + 5, // 2: generation.Data.all:type_name -> generation.Clients + 3, // 3: generation.Data.segments:type_name -> generation.Segments + 4, // 4: generation.Segments.segments:type_name -> generation.Segment + 5, // 5: generation.Segment.clients:type_name -> generation.Clients + 6, // 6: generation.Clients.clients:type_name -> generation.Client + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_sdk_helper_clientcountutil_generation_generate_data_proto_init() } +func 
file_sdk_helper_clientcountutil_generation_generate_data_proto_init() { + if File_sdk_helper_clientcountutil_generation_generate_data_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ActivityLogMockInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Data); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Segments); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Segment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Clients); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Client); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1].OneofWrappers = []any{ + (*Data_CurrentMonth)(nil), + (*Data_MonthsAgo)(nil), + (*Data_All)(nil), + (*Data_Segments)(nil), + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc, + NumEnums: 1, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_helper_clientcountutil_generation_generate_data_proto_goTypes, + DependencyIndexes: file_sdk_helper_clientcountutil_generation_generate_data_proto_depIdxs, + EnumInfos: file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes, + MessageInfos: file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes, + }.Build() + File_sdk_helper_clientcountutil_generation_generate_data_proto = out.File + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc = nil + file_sdk_helper_clientcountutil_generation_generate_data_proto_goTypes = nil + file_sdk_helper_clientcountutil_generation_generate_data_proto_depIdxs = nil +} diff --git a/sdk/helper/clientcountutil/generation/generate_data.proto b/sdk/helper/clientcountutil/generation/generate_data.proto new file mode 100644 index 000000000000..0a48b7bcafe7 --- /dev/null +++ b/sdk/helper/clientcountutil/generation/generate_data.proto @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +package generation; + +option go_package = "github.com/hashicorp/vault/sdk/clientcountutil/generation"; +enum WriteOptions { + WRITE_UNKNOWN = 0; + WRITE_PRECOMPUTED_QUERIES = 1; + WRITE_DISTINCT_CLIENTS = 2; + WRITE_ENTITIES = 3; + WRITE_DIRECT_TOKENS = 4; + WRITE_INTENT_LOGS = 5; +} +message ActivityLogMockInput { + repeated WriteOptions write = 1; + repeated Data data = 2; +} +message Data { + oneof month { + bool current_month = 1; + int32 months_ago = 2; + } + oneof clients { + Clients all = 3; // you can’t have repeated fields in a oneof, which is why these are separate message types + Segments segments = 4; + } + repeated int32 empty_segment_indexes = 5; + repeated int32 skip_segment_indexes = 6; + int32 num_segments = 7; +} + +message Segments { + repeated Segment segments = 1; +} + +message Segment { + optional int32 segment_index = 1; + Clients clients = 2; +} + +message Clients { + repeated Client clients = 1; +} + +message Client { + string id = 1; + int32 count = 2; + bool repeated = 3; + int32 repeated_from_month = 4; + string namespace = 5; + string mount = 6; + string client_type = 7; +} diff --git a/sdk/helper/compressutil/compress.go b/sdk/helper/compressutil/compress.go index 9e96d8dd32ec..2e096f1509ce 100644 --- a/sdk/helper/compressutil/compress.go +++ b/sdk/helper/compressutil/compress.go @@ -11,7 +11,6 @@ import ( "io" "github.com/golang/snappy" - "github.com/hashicorp/errwrap" "github.com/pierrec/lz4" ) @@ -34,7 +33,7 @@ const ( CompressionCanaryLZ4 byte = '4' ) -// SnappyReadCloser embeds the snappy reader which implements the io.Reader +// CompressUtilReadCloser embeds the snappy reader which implements the io.Reader // interface. The decompress procedure in this utility expects an // io.ReadCloser. This type implements the io.Closer interface to retain the // generic way of decompression. @@ -98,7 +97,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { // These are valid compression levels default: // If compression level is set to NoCompression or to - // any invalid value, fallback to Defaultcompression + // any invalid value, fallback to DefaultCompression config.GzipCompressionLevel = gzip.DefaultCompression } writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) @@ -116,7 +115,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { } if err != nil { - return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) + return nil, fmt.Errorf("failed to create a compression writer: %w", err) } if writer == nil { @@ -126,7 +125,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { // Compress the input and place it in the same buffer containing the // canary byte. 
 	if _, err = writer.Write(data); err != nil {
-		return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err)
+		return nil, fmt.Errorf("failed to compress input data: %w", err)
 	}
 
 	// Close the io.WriteCloser
@@ -206,7 +205,7 @@ func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
 			return nil, "", true, nil
 		}
 		if err != nil {
-			return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
+			return nil, "", false, fmt.Errorf("failed to create a compression reader: %w", err)
 		}
 		if reader == nil {
 			return nil, "", false, fmt.Errorf("failed to create a compression reader")
@@ -217,8 +216,18 @@ func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
 
 	// Read all the compressed data into a buffer
 	var buf bytes.Buffer
-	if _, err = io.Copy(&buf, reader); err != nil {
-		return nil, "", false, err
+
+	// Read the compressed data into the buffer in 1KiB chunks, so that a
+	// huge decompressed payload is accumulated incrementally rather than
+	// in one read (a guard against e.g. zip bombs).
+	for {
+		_, err := io.CopyN(&buf, reader, 1024)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, "", false, err
+		}
+	}
 
 	return buf.Bytes(), compressionType, false, nil
diff --git a/sdk/helper/compressutil/compress_test.go b/sdk/helper/compressutil/compress_test.go
index 7d90ce87e982..28117d8c2258 100644
--- a/sdk/helper/compressutil/compress_test.go
+++ b/sdk/helper/compressutil/compress_test.go
@@ -116,3 +116,40 @@ func TestCompressUtil_InvalidConfigurations(t *testing.T) {
 		t.Fatal("expected an error")
 	}
 }
+
+// TestDecompressWithCanaryLargeInput tests that DecompressWithCanary works
+// as expected even with large values.
+func TestDecompressWithCanaryLargeInput(t *testing.T) {
+	t.Parallel()
+
+	inputJSON := `{"sample":"data`
+	for i := 0; i < 100000; i++ {
+		inputJSON += " and data"
+	}
+	inputJSON += `"}`
+	inputJSONBytes := []byte(inputJSON)
+
+	compressedJSONBytes, err := Compress(inputJSONBytes, &CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestCompression})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that the input to Decompress was in fact compressed
+	if wasNotCompressed {
+		t.Fatalf("bytes were not compressed as expected")
+	}
+
+	if len(decompressedJSONBytes) == 0 {
+		t.Fatalf("decompressed bytes are unexpectedly empty")
+	}
+
+	// Compare the value after decompression
+	if !bytes.Equal(inputJSONBytes, decompressedJSONBytes) {
+		t.Fatalf("decompressed value differs from input;\nexpected: %q\nactual: %q", string(inputJSONBytes), string(decompressedJSONBytes))
+	}
+}
diff --git a/sdk/helper/consts/agent.go b/sdk/helper/consts/agent.go
index 53b8b8e2e76e..ff2b043749f2 100644
--- a/sdk/helper/consts/agent.go
+++ b/sdk/helper/consts/agent.go
@@ -3,6 +3,8 @@
 
 package consts
 
+import "time"
+
 // AgentPathCacheClear is the path that the agent will use as its cache-clear
 // endpoint.
 const AgentPathCacheClear = "/agent/v1/cache-clear"
@@ -13,3 +15,9 @@ const AgentPathMetrics = "/agent/v1/metrics"
 
 // AgentPathQuit is the path that the agent will use to trigger stopping it.
 const AgentPathQuit = "/agent/v1/quit"
+
+// DefaultMinBackoff is the default minimum backoff time for agent and proxy.
+const DefaultMinBackoff = 1 * time.Second
+
+// DefaultMaxBackoff is the default maximum backoff time for agent and proxy.
+const DefaultMaxBackoff = 5 * time.Minute
diff --git a/sdk/helper/consts/consts.go b/sdk/helper/consts/consts.go
index b51191050748..ccc7494a281c 100644
--- a/sdk/helper/consts/consts.go
+++ b/sdk/helper/consts/consts.go
@@ -19,6 +19,10 @@ const (
 	// SSRF protection.
 	RequestHeaderName = "X-Vault-Request"
 
+	// WrapTTLHeaderName is the name of the header containing a directive to
+	// wrap the response.
+	WrapTTLHeaderName = "X-Vault-Wrap-TTL"
+
 	// PerformanceReplicationALPN is the negotiated protocol used for
 	// performance replication.
 	PerformanceReplicationALPN = "replication_v1"
@@ -39,4 +43,8 @@ const (
 	VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK"
 
 	VaultDisableUserLockout = "VAULT_DISABLE_USER_LOCKOUT"
+
+	PerformanceReplicationPathTarget = "performance"
+
+	DRReplicationPathTarget = "dr"
 )
diff --git a/sdk/helper/consts/error.go b/sdk/helper/consts/error.go
index 5bd3f5e6e261..c7e2b51f4e01 100644
--- a/sdk/helper/consts/error.go
+++ b/sdk/helper/consts/error.go
@@ -25,4 +25,7 @@ var (
 	// ErrInvalidWrappingToken is returned when checking for the validity of
 	// a wrapping token that turns out to be invalid.
 	ErrInvalidWrappingToken = errors.New("wrapping token is not valid or does not exist")
+
+	// ErrOverloaded indicates the Vault server is at capacity.
+	ErrOverloaded = errors.New("overloaded, try again later")
 )
diff --git a/sdk/helper/consts/plugin_runtime_types.go b/sdk/helper/consts/plugin_runtime_types.go
new file mode 100644
index 000000000000..1b7714d0b11f
--- /dev/null
+++ b/sdk/helper/consts/plugin_runtime_types.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consts
+
+// NOTE: this file has been copied to
+// https://github.com/hashicorp/vault/blob/main/api/plugin_runtime_types.go
+// Any changes made should be made to both files at the same time.
+
+import "fmt"
+
+var PluginRuntimeTypes = _PluginRuntimeTypeValues
+
+//go:generate enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake
+type PluginRuntimeType uint32
+
+// Plugin runtime types used by Vault. (The iota-based values start at 1 because DefaultContainerPluginOCIRuntime occupies the block's first const spec, matching the generated enumer values.)
+const (
+	DefaultContainerPluginOCIRuntime = "runsc"
+
+	PluginRuntimeTypeUnsupported PluginRuntimeType = iota
+	PluginRuntimeTypeContainer
+)
+
+// ParsePluginRuntimeType is a wrapper around PluginRuntimeTypeString kept for backwards compatibility.
+func ParsePluginRuntimeType(pluginRuntimeType string) (PluginRuntimeType, error) {
+	t, err := PluginRuntimeTypeString(pluginRuntimeType)
+	if err != nil {
+		return PluginRuntimeTypeUnsupported, fmt.Errorf("%q is not a supported plugin runtime type", pluginRuntimeType)
+	}
+	return t, nil
+}
diff --git a/sdk/helper/consts/plugin_types.go b/sdk/helper/consts/plugin_types.go
index 6bc14b54f716..a7a383827312 100644
--- a/sdk/helper/consts/plugin_types.go
+++ b/sdk/helper/consts/plugin_types.go
@@ -7,7 +7,10 @@ package consts
 // https://github.com/hashicorp/vault/blob/main/api/plugin_types.go
 // Any changes made should be made to both files at the same time.
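+//
+// A note on wire formats (see UnmarshalJSON/MarshalJSON below): a PluginType
+// now marshals to its string form, e.g. {"plugin_type":"auth"}, while the
+// legacy integer form, e.g. {"plugin_type":1}, is still accepted when
+// unmarshaling, for backwards compatibility.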
-import "fmt" +import ( + "encoding/json" + "fmt" +) var PluginTypes = []PluginType{ PluginTypeUnknown, @@ -64,3 +67,34 @@ func ParsePluginType(pluginType string) (PluginType, error) { return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) } } + +// UnmarshalJSON implements json.Unmarshaler. It supports unmarshaling either a +// string or a uint32. All new serialization will be as a string, but we +// previously serialized as a uint32 so we need to support that for backwards +// compatibility. +func (p *PluginType) UnmarshalJSON(data []byte) error { + var asString string + err := json.Unmarshal(data, &asString) + if err == nil { + *p, err = ParsePluginType(asString) + return err + } + + var asUint32 uint32 + err = json.Unmarshal(data, &asUint32) + if err != nil { + return err + } + *p = PluginType(asUint32) + switch *p { + case PluginTypeUnknown, PluginTypeCredential, PluginTypeDatabase, PluginTypeSecrets: + return nil + default: + return fmt.Errorf("%d is not a supported plugin type", asUint32) + } +} + +// MarshalJSON implements json.Marshaler. +func (p PluginType) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} diff --git a/sdk/helper/consts/plugin_types_test.go b/sdk/helper/consts/plugin_types_test.go new file mode 100644 index 000000000000..ff1299f2e465 --- /dev/null +++ b/sdk/helper/consts/plugin_types_test.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// NOTE: this file has been copied to +// https://github.com/hashicorp/vault/blob/main/api/plugin_types_test.go +// Any changes made should be made to both files at the same time. + +import ( + "encoding/json" + "testing" +) + +type testType struct { + PluginType PluginType `json:"plugin_type"` +} + +func TestPluginTypeJSONRoundTrip(t *testing.T) { + for _, pluginType := range PluginTypes { + original := testType{ + PluginType: pluginType, + } + asBytes, err := json.Marshal(original) + if err != nil { + t.Fatal(err) + } + + var roundTripped testType + err = json.Unmarshal(asBytes, &roundTripped) + if err != nil { + t.Fatal(err) + } + + if original != roundTripped { + t.Fatalf("expected %v, got %v", original, roundTripped) + } + } +} + +func TestPluginTypeJSONUnmarshal(t *testing.T) { + // Failure/unsupported cases. + for name, tc := range map[string]string{ + "unsupported": `{"plugin_type":"unsupported"}`, + "random string": `{"plugin_type":"foo"}`, + "boolean": `{"plugin_type":true}`, + "empty": `{"plugin_type":""}`, + "negative": `{"plugin_type":-1}`, + "out of range": `{"plugin_type":10}`, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc), &result) + if err == nil { + t.Fatal("expected error") + } + }) + } + + // Valid cases. 
+ for name, tc := range map[string]struct { + json string + expected PluginType + }{ + "unknown": {`{"plugin_type":"unknown"}`, PluginTypeUnknown}, + "auth": {`{"plugin_type":"auth"}`, PluginTypeCredential}, + "secret": {`{"plugin_type":"secret"}`, PluginTypeSecrets}, + "database": {`{"plugin_type":"database"}`, PluginTypeDatabase}, + "absent": {`{}`, PluginTypeUnknown}, + "integer unknown": {`{"plugin_type":0}`, PluginTypeUnknown}, + "integer auth": {`{"plugin_type":1}`, PluginTypeCredential}, + "integer db": {`{"plugin_type":2}`, PluginTypeDatabase}, + "integer secret": {`{"plugin_type":3}`, PluginTypeSecrets}, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc.json), &result) + if err != nil { + t.Fatal(err) + } + if tc.expected != result.PluginType { + t.Fatalf("expected %v, got %v", tc.expected, result.PluginType) + } + }) + } +} + +func TestUnknownTypeExcludedWithOmitEmpty(t *testing.T) { + type testTypeOmitEmpty struct { + Type PluginType `json:"type,omitempty"` + } + bytes, err := json.Marshal(testTypeOmitEmpty{}) + if err != nil { + t.Fatal(err) + } + m := map[string]any{} + json.Unmarshal(bytes, &m) + if _, exists := m["type"]; exists { + t.Fatal("type should not be present") + } +} diff --git a/sdk/helper/consts/pluginruntimetype_enumer.go b/sdk/helper/consts/pluginruntimetype_enumer.go new file mode 100644 index 000000000000..337afc29c3e2 --- /dev/null +++ b/sdk/helper/consts/pluginruntimetype_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake"; DO NOT EDIT. + +package consts + +import ( + "fmt" +) + +const _PluginRuntimeTypeName = "unsupportedcontainer" + +var _PluginRuntimeTypeIndex = [...]uint8{0, 11, 20} + +func (i PluginRuntimeType) String() string { + i -= 1 + if i >= PluginRuntimeType(len(_PluginRuntimeTypeIndex)-1) { + return fmt.Sprintf("PluginRuntimeType(%d)", i+1) + } + return _PluginRuntimeTypeName[_PluginRuntimeTypeIndex[i]:_PluginRuntimeTypeIndex[i+1]] +} + +var _PluginRuntimeTypeValues = []PluginRuntimeType{1, 2} + +var _PluginRuntimeTypeNameToValueMap = map[string]PluginRuntimeType{ + _PluginRuntimeTypeName[0:11]: 1, + _PluginRuntimeTypeName[11:20]: 2, +} + +// PluginRuntimeTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func PluginRuntimeTypeString(s string) (PluginRuntimeType, error) { + if val, ok := _PluginRuntimeTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to PluginRuntimeType values", s) +} + +// PluginRuntimeTypeValues returns all values of the enum +func PluginRuntimeTypeValues() []PluginRuntimeType { + return _PluginRuntimeTypeValues +} + +// IsAPluginRuntimeType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i PluginRuntimeType) IsAPluginRuntimeType() bool { + for _, v := range _PluginRuntimeTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/consts/proxy.go b/sdk/helper/consts/proxy.go new file mode 100644 index 000000000000..0fc4117ccc1d --- /dev/null +++ b/sdk/helper/consts/proxy.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// ProxyPathCacheClear is the path that the proxy will use as its cache-clear +// endpoint. +const ProxyPathCacheClear = "/proxy/v1/cache-clear" + +// ProxyPathMetrics is the path the proxy will use to expose its internal +// metrics. 
+const ProxyPathMetrics = "/proxy/v1/metrics" + +// ProxyPathQuit is the path that the proxy will use to trigger stopping it. +const ProxyPathQuit = "/proxy/v1/quit" diff --git a/sdk/helper/custommetadata/custom_metadata.go b/sdk/helper/custommetadata/custom_metadata.go index 81d4c27035d2..d33370b510b1 100644 --- a/sdk/helper/custommetadata/custom_metadata.go +++ b/sdk/helper/custommetadata/custom_metadata.go @@ -6,10 +6,9 @@ package custommetadata import ( "fmt" - "github.com/mitchellh/mapstructure" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/mitchellh/mapstructure" ) // The following constants are used by Validate and are meant to be imposed diff --git a/sdk/helper/docker/testhelpers.go b/sdk/helper/docker/testhelpers.go new file mode 100644 index 000000000000..af1f3f72a666 --- /dev/null +++ b/sdk/helper/docker/testhelpers.go @@ -0,0 +1,935 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-uuid" +) + +const DockerAPIVersion = "1.40" + +type Runner struct { + DockerAPI *client.Client + RunOptions RunOptions +} + +type RunOptions struct { + ImageRepo string + ImageTag string + ContainerName string + Cmd []string + Entrypoint []string + Env []string + NetworkName string + NetworkID string + CopyFromTo map[string]string + Ports []string + DoNotAutoRemove bool + AuthUsername string + AuthPassword string + OmitLogTimestamps bool + LogConsumer func(string) + Capabilities []string + PreDelete bool + PostStart func(string, string) error + LogStderr io.Writer + LogStdout io.Writer + VolumeNameToMountPoint map[string]string +} + +func NewDockerAPI() (*client.Client, error) { + return client.NewClientWithOpts(client.FromEnv, client.WithVersion(DockerAPIVersion)) +} + +func NewServiceRunner(opts RunOptions) (*Runner, error) { + dapi, err := NewDockerAPI() + if err != nil { + return nil, err + } + + if opts.NetworkName == "" { + opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") + } + if opts.NetworkName != "" { + nets, err := dapi.NetworkList(context.TODO(), types.NetworkListOptions{ + Filters: filters.NewArgs(filters.Arg("name", opts.NetworkName)), + }) + if err != nil { + return nil, err + } + if len(nets) != 1 { + return nil, fmt.Errorf("expected exactly one docker network named %q, got %d", opts.NetworkName, len(nets)) + } + opts.NetworkID = nets[0].ID + } + if opts.NetworkID == "" { + opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") + } + if opts.ContainerName == "" { + if strings.Contains(opts.ImageRepo, "/") { + return nil, fmt.Errorf("ContainerName is required for non-library images") + } + // If there's no slash in the repo it's almost certainly going to be + // a good container name. 
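+		// (For example, an ImageRepo of "postgres" yields a container
+		// named "postgres".)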
+		opts.ContainerName = opts.ImageRepo
+	}
+	return &Runner{
+		DockerAPI:  dapi,
+		RunOptions: opts,
+	}, nil
+}
+
+type ServiceConfig interface {
+	Address() string
+	URL() *url.URL
+}
+
+func NewServiceHostPort(host string, port int) *ServiceHostPort {
+	return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)}
+}
+
+func NewServiceHostPortParse(s string) (*ServiceHostPort, error) {
+	pieces := strings.Split(s, ":")
+	if len(pieces) != 2 {
+		return nil, fmt.Errorf("address must be of the form host:port, got: %v", s)
+	}
+
+	port, err := strconv.Atoi(pieces[1])
+	if err != nil || port < 1 {
+		return nil, fmt.Errorf("address must be of the form host:port, got: %v", s)
+	}
+
+	return &ServiceHostPort{s}, nil
+}
+
+type ServiceHostPort struct {
+	address string
+}
+
+func (s ServiceHostPort) Address() string {
+	return s.address
+}
+
+func (s ServiceHostPort) URL() *url.URL {
+	return &url.URL{Host: s.address}
+}
+
+func NewServiceURLParse(s string) (*ServiceURL, error) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+	return &ServiceURL{u: *u}, nil
+}
+
+func NewServiceURL(u url.URL) *ServiceURL {
+	return &ServiceURL{u: u}
+}
+
+type ServiceURL struct {
+	u url.URL
+}
+
+func (s ServiceURL) Address() string {
+	return s.u.Host
+}
+
+func (s ServiceURL) URL() *url.URL {
+	return &s.u
+}
+
+// ServiceAdapter verifies connectivity to the service, then returns either a
+// ServiceConfig (typically wrapping a URL) and nil, or nil and an error.
+type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error)
+
+// StartService will start the runner's configured docker container with a
+// random UUID suffix appended to the name to make it unique and will return
+// either a hostname or local address depending on whether a Docker network was given.
+//
+// Most tests can default to using this.
+func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) {
+	serv, _, err := d.StartNewService(ctx, true, false, connect)
+
+	return serv, err
+}
+
+type LogConsumerWriter struct {
+	consumer func(string)
+}
+
+func (l LogConsumerWriter) Write(p []byte) (n int, err error) {
+	// TODO this assumes that we're never passed partial log lines, which
+	// seems a safe assumption for now based on how docker appears to
+	// implement logging, but might change in the future.
+	scanner := bufio.NewScanner(bytes.NewReader(p))
+	scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize)
+	for scanner.Scan() {
+		l.consumer(scanner.Text())
+	}
+	return len(p), nil
+}
+
+var _ io.Writer = &LogConsumerWriter{}
+
+// StartNewService will start the runner's configured docker container but with the
+// ability to control adding a name suffix or forcing a local address to be returned.
+// 'addSuffix' will add a random UUID to the end of the container name.
+// 'forceLocalAddr' will force the container address returned to be in the
+// form of '127.0.0.1:1234' where 1234 is the mapped container port.
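+//
+// A minimal usage sketch (the adapter below is illustrative only; a real
+// adapter should probe the service and only return a config once it is
+// actually reachable):
+//
+//	svc, _, err := runner.StartNewService(ctx, true, false,
+//		func(ctx context.Context, host string, port int) (ServiceConfig, error) {
+//			return NewServiceHostPort(host, port), nil
+//		})
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer svc.Cleanup()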
+func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) {
+	if d.RunOptions.PreDelete {
+		name := d.RunOptions.ContainerName
+		matches, err := d.DockerAPI.ContainerList(ctx, container.ListOptions{
+			All: true,
+			// TODO use labels to ensure we don't delete anything we shouldn't
+			Filters: filters.NewArgs(
+				filters.Arg("name", name),
+			),
+		})
+		if err != nil {
+			return nil, "", fmt.Errorf("failed to list containers named %q: %w", name, err)
+		}
+		for _, cont := range matches {
+			err = d.DockerAPI.ContainerRemove(ctx, cont.ID, container.RemoveOptions{Force: true})
+			if err != nil {
+				return nil, "", fmt.Errorf("failed to pre-delete container named %q: %w", name, err)
+			}
+		}
+	}
+	result, err := d.Start(context.Background(), addSuffix, forceLocalAddr)
+	if err != nil {
+		return nil, "", err
+	}
+
+	// The waitgroup wg is used here to support the PostStart hook passed in
+	// by NewDockerCluster.
+	// We can't generate the PKI cert for the https listener until we know the
+	// container's address, meaning we must first start the container, then
+	// generate the cert, then copy it into the container, then signal Vault
+	// to reload its config/certs. However, if we SIGHUP Vault before Vault
+	// has installed its signal handler, that will kill Vault, since the default
+	// behaviour for HUP is termination. So the PostStart that NewDockerCluster
+	// passes in (which does all that PKI cert stuff) waits to see output from
+	// Vault on stdout/stderr before it sends the signal, and we don't want to
+	// run the PostStart until we've hooked into the docker logs.
+	var wg sync.WaitGroup
+	logConsumer := d.createLogConsumer(result.Container.ID, &wg)
+
+	if logConsumer != nil {
+		wg.Add(1)
+		go logConsumer()
+	}
+	wg.Wait()
+
+	if d.RunOptions.PostStart != nil {
+		if err := d.RunOptions.PostStart(result.Container.ID, result.RealIP); err != nil {
+			return nil, "", fmt.Errorf("poststart failed: %w", err)
+		}
+	}
+
+	cleanup := func() {
+		for i := 0; i < 10; i++ {
+			err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, container.RemoveOptions{Force: true})
+			if err == nil || client.IsErrNotFound(err) {
+				return
+			}
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	bo := backoff.NewExponentialBackOff()
+	bo.MaxInterval = time.Second * 5
+	bo.MaxElapsedTime = 2 * time.Minute
+
+	pieces := strings.Split(result.Addrs[0], ":")
+	portInt, err := strconv.Atoi(pieces[1])
+	if err != nil {
+		return nil, "", err
+	}
+
+	var config ServiceConfig
+	err = backoff.Retry(func() error {
+		inspected, err := d.DockerAPI.ContainerInspect(ctx, result.Container.ID)
+		if err != nil || !inspected.State.Running {
+			return backoff.Permanent(fmt.Errorf("failed to inspect container %q, or it is not running: %w", result.Container.ID, err))
+		}
+
+		c, err := connect(ctx, pieces[0], portInt)
+		if err != nil {
+			return err
+		}
+		if c == nil {
+			return fmt.Errorf("service adapter returned a nil config without an error")
+		}
+		config = c
+		return nil
+	}, bo)
+	if err != nil {
+		if !d.RunOptions.DoNotAutoRemove {
+			cleanup()
+		}
+		return nil, "", err
+	}
+
+	return &Service{
+		Config:      config,
+		Cleanup:     cleanup,
+		Container:   result.Container,
+		StartResult: result,
+	}, result.Container.ID, nil
+}
+
+// createLogConsumer returns a function to consume the logs of the container with the given ID.
+// If a wait group is given, `WaitGroup.Done()` will be called as soon as the
+// ContainerLogs Docker API call has been made.
+// The returned function will block, so it should be run on a goroutine.
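+//
+// Typical use, mirroring StartNewService above:
+//
+//	var wg sync.WaitGroup
+//	if consume := d.createLogConsumer(containerID, &wg); consume != nil {
+//		wg.Add(1)
+//		go consume()
+//	}
+//	wg.Wait() // returns once the ContainerLogs call has been issued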
+func (d *Runner) createLogConsumer(containerId string, wg *sync.WaitGroup) func() { + if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil { + return func() { + d.consumeLogs(containerId, wg, d.RunOptions.LogStdout, d.RunOptions.LogStderr) + } + } + if d.RunOptions.LogConsumer != nil { + return func() { + d.consumeLogs(containerId, wg, &LogConsumerWriter{d.RunOptions.LogConsumer}, &LogConsumerWriter{d.RunOptions.LogConsumer}) + } + } + return nil +} + +// consumeLogs is the function called by the function returned by createLogConsumer. +func (d *Runner) consumeLogs(containerId string, wg *sync.WaitGroup, logStdout, logStderr io.Writer) { + // We must run inside a goroutine because we're using Follow:true, + // and StdCopy will block until the log stream is closed. + stream, err := d.DockerAPI.ContainerLogs(context.Background(), containerId, container.LogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: !d.RunOptions.OmitLogTimestamps, + Details: true, + Follow: true, + }) + wg.Done() + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err)) + } else { + _, err := stdcopy.StdCopy(logStdout, logStderr, stream) + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err)) + } + } +} + +type Service struct { + Config ServiceConfig + Cleanup func() + Container *types.ContainerJSON + StartResult *StartResult +} + +type StartResult struct { + Container *types.ContainerJSON + Addrs []string + RealIP string +} + +func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*StartResult, error) { + name := d.RunOptions.ContainerName + if addSuffix { + suffix, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + name += "-" + suffix + } + + cfg := &container.Config{ + Hostname: name, + Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), + Env: d.RunOptions.Env, + Cmd: d.RunOptions.Cmd, + } + if len(d.RunOptions.Ports) > 0 { + cfg.ExposedPorts = make(map[nat.Port]struct{}) + for _, p := range d.RunOptions.Ports { + cfg.ExposedPorts[nat.Port(p)] = struct{}{} + } + } + if len(d.RunOptions.Entrypoint) > 0 { + cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) + } + + hostConfig := &container.HostConfig{ + AutoRemove: !d.RunOptions.DoNotAutoRemove, + PublishAllPorts: true, + } + if len(d.RunOptions.Capabilities) > 0 { + hostConfig.CapAdd = d.RunOptions.Capabilities + } + + netConfig := &network.NetworkingConfig{} + if d.RunOptions.NetworkID != "" { + netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + d.RunOptions.NetworkID: {}, + } + } + + // best-effort pull + var opts types.ImageCreateOptions + if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { + var buf bytes.Buffer + auth := map[string]string{ + "username": d.RunOptions.AuthUsername, + "password": d.RunOptions.AuthPassword, + } + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, err + } + opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes()) + } + resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts) + if resp != nil { + _, _ = io.ReadAll(resp) + } + + for vol, mtpt := range d.RunOptions.VolumeNameToMountPoint { + hostConfig.Mounts = append(hostConfig.Mounts, mount.Mount{ + Type: "volume", + Source: vol, + Target: mtpt, + ReadOnly: false, + }) + } + + c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname) + if err != nil { + return nil, fmt.Errorf("container create 
failed: %v", err) + } + + for from, to := range d.RunOptions.CopyFromTo { + if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{}) + return nil, err + } + } + + err = d.DockerAPI.ContainerStart(ctx, c.ID, container.StartOptions{}) + if err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{}) + return nil, fmt.Errorf("container start failed: %v", err) + } + + inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID) + if err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{}) + return nil, err + } + + var addrs []string + for _, port := range d.RunOptions.Ports { + pieces := strings.Split(port, "/") + if len(pieces) < 2 { + return nil, fmt.Errorf("expected port of the form 1234/tcp, got: %s", port) + } + if d.RunOptions.NetworkID != "" && !forceLocalAddr { + addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0])) + } else { + mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)] + if !ok || len(mapped) == 0 { + return nil, fmt.Errorf("no port mapping found for %s", port) + } + addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort)) + } + } + + var realIP string + if d.RunOptions.NetworkID == "" { + if len(inspect.NetworkSettings.Networks) > 1 { + return nil, fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", inspect.NetworkSettings.Networks) + } + for _, network := range inspect.NetworkSettings.Networks { + realIP = network.IPAddress + break + } + } else { + realIP = inspect.NetworkSettings.Networks[d.RunOptions.NetworkName].IPAddress + } + + return &StartResult{ + Container: &inspect, + Addrs: addrs, + RealIP: realIP, + }, nil +} + +func (d *Runner) RefreshFiles(ctx context.Context, containerID string) error { + for from, to := range d.RunOptions.CopyFromTo { + if err := copyToContainer(ctx, d.DockerAPI, containerID, from, to); err != nil { + // TODO too drastic? + _ = d.DockerAPI.ContainerRemove(ctx, containerID, container.RemoveOptions{}) + return err + } + } + return d.DockerAPI.ContainerKill(ctx, containerID, "SIGHUP") +} + +func (d *Runner) Stop(ctx context.Context, containerID string) error { + if d.RunOptions.NetworkID != "" { + if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil { + return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err) + } + } + + // timeout in seconds + timeout := 5 + options := container.StopOptions{ + Timeout: &timeout, + } + if err := d.DockerAPI.ContainerStop(ctx, containerID, options); err != nil { + return fmt.Errorf("error stopping container: %v", err) + } + + return nil +} + +func (d *Runner) RestartContainerWithTimeout(ctx context.Context, containerID string, timeout int) error { + err := d.DockerAPI.ContainerRestart(ctx, containerID, container.StopOptions{Timeout: &timeout}) + if err != nil { + return fmt.Errorf("failed to restart container: %s", err) + } + var wg sync.WaitGroup + logConsumer := d.createLogConsumer(containerID, &wg) + if logConsumer != nil { + wg.Add(1) + go logConsumer() + } + // we don't really care about waiting for logs to start showing up, do we? 
+ return nil +} + +func (d *Runner) Restart(ctx context.Context, containerID string) error { + if err := d.DockerAPI.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil { + return err + } + + ends := &network.EndpointSettings{ + NetworkID: d.RunOptions.NetworkID, + } + + return d.DockerAPI.NetworkConnect(ctx, d.RunOptions.NetworkID, containerID, ends) +} + +func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { + srcInfo, err := archive.CopyInfoSourcePath(from, false) + if err != nil { + return fmt.Errorf("error copying from source %q: %v", from, err) + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return fmt.Errorf("error creating tar from source %q: %v", from, err) + } + defer srcArchive.Close() + + dstInfo := archive.CopyInfo{Path: to} + + dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) + } + defer content.Close() + err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) + if err != nil { + return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) + } + + return nil +} + +type RunCmdOpt interface { + Apply(cfg *types.ExecConfig) error +} + +type RunCmdUser string + +var _ RunCmdOpt = (*RunCmdUser)(nil) + +func (u RunCmdUser) Apply(cfg *types.ExecConfig) error { + cfg.User = string(u) + return nil +} + +func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { + return RunCmdWithOutput(d.DockerAPI, ctx, container, cmd, opts...) +} + +func RunCmdWithOutput(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { + runCfg := types.ExecConfig{ + AttachStdout: true, + AttachStderr: true, + Cmd: cmd, + } + + for index, opt := range opts { + if err := opt.Apply(&runCfg); err != nil { + return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) + } + } + + ret, err := api.ContainerExecCreate(ctx, container, runCfg) + if err != nil { + return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg) + } + + resp, err := api.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{}) + if err != nil { + return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret) + } + defer resp.Close() + + var stdoutB bytes.Buffer + var stderrB bytes.Buffer + if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil { + return nil, nil, -1, fmt.Errorf("error reading command output: %v", err) + } + + stdout := stdoutB.Bytes() + stderr := stderrB.Bytes() + + // Fetch return code. + info, err := api.ContainerExecInspect(ctx, ret.ID) + if err != nil { + return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err) + } + + return stdout, stderr, info.ExitCode, nil +} + +func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { + return RunCmdInBackground(d.DockerAPI, ctx, container, cmd, opts...) 
+} + +func RunCmdInBackground(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { + runCfg := types.ExecConfig{ + AttachStdout: true, + AttachStderr: true, + Cmd: cmd, + } + + for index, opt := range opts { + if err := opt.Apply(&runCfg); err != nil { + return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) + } + } + + ret, err := api.ContainerExecCreate(ctx, container, runCfg) + if err != nil { + return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg) + } + + err = api.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{}) + if err != nil { + return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret) + } + + return ret.ID, nil +} + +// Mapping of path->contents +type PathContents interface { + UpdateHeader(header *tar.Header) error + Get() ([]byte, error) + SetMode(mode int64) + SetOwners(uid int, gid int) +} + +type FileContents struct { + Data []byte + Mode int64 + UID int + GID int +} + +func (b FileContents) UpdateHeader(header *tar.Header) error { + header.Mode = b.Mode + header.Uid = b.UID + header.Gid = b.GID + return nil +} + +func (b FileContents) Get() ([]byte, error) { + return b.Data, nil +} + +func (b *FileContents) SetMode(mode int64) { + b.Mode = mode +} + +func (b *FileContents) SetOwners(uid int, gid int) { + b.UID = uid + b.GID = gid +} + +func PathContentsFromBytes(data []byte) PathContents { + return &FileContents{ + Data: data, + Mode: 0o644, + } +} + +func PathContentsFromString(data string) PathContents { + return PathContentsFromBytes([]byte(data)) +} + +type BuildContext map[string]PathContents + +func NewBuildContext() BuildContext { + return BuildContext{} +} + +func BuildContextFromTarball(reader io.Reader) (BuildContext, error) { + archive := tar.NewReader(reader) + bCtx := NewBuildContext() + + for true { + header, err := archive.Next() + if err != nil { + if err == io.EOF { + break + } + + return nil, fmt.Errorf("failed to parse provided tarball: %v", err) + } + + data := make([]byte, int(header.Size)) + read, err := archive.Read(data) + if err != nil { + return nil, fmt.Errorf("failed to parse read from provided tarball: %v", err) + } + + if read != int(header.Size) { + return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size) + } + + bCtx[header.Name] = &FileContents{ + Data: data, + Mode: header.Mode, + UID: header.Uid, + GID: header.Gid, + } + } + + return bCtx, nil +} + +func (bCtx *BuildContext) ToTarball() (io.Reader, error) { + var err error + buffer := new(bytes.Buffer) + tarBuilder := tar.NewWriter(buffer) + defer tarBuilder.Close() + + now := time.Now() + for filepath, contents := range *bCtx { + fileHeader := &tar.Header{ + Name: filepath, + ModTime: now, + AccessTime: now, + ChangeTime: now, + } + if contents == nil && !strings.HasSuffix(filepath, "/") { + return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath) + } + + if err := contents.UpdateHeader(fileHeader); err != nil { + return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err) + } + + var rawContents []byte + if contents != nil { + rawContents, err = contents.Get() + if err != nil { + return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err) + } + + fileHeader.Size = int64(len(rawContents)) + } + + if err := tarBuilder.WriteHeader(fileHeader); err != nil { + return nil, fmt.Errorf("failed 
to write tar header entry for %v: %w", filepath, err) + } + + if contents != nil { + if _, err := tarBuilder.Write(rawContents); err != nil { + return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err) + } + } + } + + return bytes.NewReader(buffer.Bytes()), nil +} + +type BuildOpt interface { + Apply(cfg *types.ImageBuildOptions) error +} + +type BuildRemove bool + +var _ BuildOpt = (*BuildRemove)(nil) + +func (u BuildRemove) Apply(cfg *types.ImageBuildOptions) error { + cfg.Remove = bool(u) + return nil +} + +type BuildForceRemove bool + +var _ BuildOpt = (*BuildForceRemove)(nil) + +func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error { + cfg.ForceRemove = bool(u) + return nil +} + +type BuildPullParent bool + +var _ BuildOpt = (*BuildPullParent)(nil) + +func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error { + cfg.PullParent = bool(u) + return nil +} + +type BuildArgs map[string]*string + +var _ BuildOpt = (*BuildArgs)(nil) + +func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error { + cfg.BuildArgs = u + return nil +} + +type BuildTags []string + +var _ BuildOpt = (*BuildTags)(nil) + +func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error { + cfg.Tags = u + return nil +} + +const containerfilePath = "_containerfile" + +func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { + return BuildImage(ctx, d.DockerAPI, containerfile, containerContext, opts...) +} + +func BuildImage(ctx context.Context, api *client.Client, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { + var cfg types.ImageBuildOptions + + // Build container context tarball, provisioning containerfile in. + containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile)) + tar, err := containerContext.ToTarball() + if err != nil { + return nil, fmt.Errorf("failed to create build image context tarball: %w", err) + } + cfg.Dockerfile = "/" + containerfilePath + + // Apply all given options + for index, opt := range opts { + if err := opt.Apply(&cfg); err != nil { + return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err) + } + } + + resp, err := api.ImageBuild(ctx, tar, cfg) + if err != nil { + return nil, fmt.Errorf("failed to build image: %v", err) + } + + output, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read image build output: %w", err) + } + + return output, nil +} + +func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error { + // XXX: currently we use the default options but we might want to allow + // modifying cfg.CopyUIDGID in the future. + var cfg types.CopyToContainerOptions + + // Convert our provided contents to a tarball to ship up. 
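+	// (Docker extracts this archive into the destination path, which generally
+	// must already exist as a directory inside the container.)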
+ tar, err := contents.ToTarball() + if err != nil { + return fmt.Errorf("failed to build contents into tarball: %v", err) + } + + return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg) +} + +func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) { + reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source) + if err != nil { + return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err) + } + + result, err := BuildContextFromTarball(reader) + if err != nil { + return nil, nil, fmt.Errorf("failed to build archive from result: %v", err) + } + + return result, &stat, nil +} + +func (d *Runner) GetNetworkAndAddresses(container string) (map[string]string, error) { + response, err := d.DockerAPI.ContainerInspect(context.Background(), container) + if err != nil { + return nil, fmt.Errorf("failed to fetch container inspection data: %v", err) + } + + if response.NetworkSettings == nil || len(response.NetworkSettings.Networks) == 0 { + return nil, fmt.Errorf("container (%v) had no associated network settings: %v", container, response) + } + + ret := make(map[string]string) + ns := response.NetworkSettings.Networks + for network, data := range ns { + if data == nil { + continue + } + + ret[network] = data.IPAddress + } + + if len(ret) == 0 { + return nil, fmt.Errorf("no valid network data for container (%v): %v", container, response) + } + + return ret, nil +} diff --git a/sdk/helper/identitytpl/templating.go b/sdk/helper/identitytpl/templating.go index 124a27c920c3..4cbf1e22f07d 100644 --- a/sdk/helper/identitytpl/templating.go +++ b/sdk/helper/identitytpl/templating.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -330,7 +331,7 @@ func performTemplating(input string, p *PopulateStringInput) (string, error) { return "", errors.New("missing time operand") case 3: - duration, err := time.ParseDuration(opsSplit[2]) + duration, err := parseutil.ParseDurationSecond(opsSplit[2]) if err != nil { return "", errwrap.Wrapf("invalid duration: {{err}}", err) } diff --git a/sdk/helper/keysutil/consts.go b/sdk/helper/keysutil/consts.go index b68423242321..6262b477c220 100644 --- a/sdk/helper/keysutil/consts.go +++ b/sdk/helper/keysutil/consts.go @@ -28,11 +28,12 @@ const ( HashTypeSHA3512 ) +//go:generate enumer -type=MarshalingType -trimprefix=MarshalingType -transform=snake type MarshalingType uint32 const ( - _ = iota - MarshalingTypeASN1 MarshalingType = iota + _ MarshalingType = iota + MarshalingTypeASN1 MarshalingTypeJWS ) @@ -76,8 +77,5 @@ var ( HashTypeSHA3512: crypto.SHA3_512, } - MarshalingTypeMap = map[string]MarshalingType{ - "asn1": MarshalingTypeASN1, - "jws": MarshalingTypeJWS, - } + MarshalingTypeMap = _MarshalingTypeNameToValueMap ) diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go index 306dd1693878..35f29203c355 100644 --- a/sdk/helper/keysutil/lock_manager.go +++ b/sdk/helper/keysutil/lock_manager.go @@ -63,6 +63,9 @@ type PolicyRequest struct { // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault AllowImportedKeyRotation bool + // Indicates whether a private or public key is imported/upserted + IsPrivateKey bool + // The UUID of the managed key, if using one ManagedKeyUUID string } @@ -394,6 +397,12 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req 
PolicyRequest, rand io return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) } + case KeyType_AES128_CMAC, KeyType_AES256_CMAC: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + default: cleanup() return nil, false, fmt.Errorf("unsupported key type %v", req.KeyType) @@ -511,7 +520,7 @@ func (lm *LockManager) ImportPolicy(ctx context.Context, req PolicyRequest, key } } - err = p.Import(ctx, req.Storage, key, rand) + err = p.ImportPublicOrPrivate(ctx, req.Storage, key, req.IsPrivateKey, rand) if err != nil { return fmt.Errorf("error importing key: %s", err) } diff --git a/sdk/helper/keysutil/marshalingtype_enumer.go b/sdk/helper/keysutil/marshalingtype_enumer.go new file mode 100644 index 000000000000..93b5c2f1f94b --- /dev/null +++ b/sdk/helper/keysutil/marshalingtype_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=MarshalingType -trimprefix=MarshalingType -transform=snake"; DO NOT EDIT. + +package keysutil + +import ( + "fmt" +) + +const _MarshalingTypeName = "asn1jws" + +var _MarshalingTypeIndex = [...]uint8{0, 4, 7} + +func (i MarshalingType) String() string { + i -= 1 + if i >= MarshalingType(len(_MarshalingTypeIndex)-1) { + return fmt.Sprintf("MarshalingType(%d)", i+1) + } + return _MarshalingTypeName[_MarshalingTypeIndex[i]:_MarshalingTypeIndex[i+1]] +} + +var _MarshalingTypeValues = []MarshalingType{1, 2} + +var _MarshalingTypeNameToValueMap = map[string]MarshalingType{ + _MarshalingTypeName[0:4]: 1, + _MarshalingTypeName[4:7]: 2, +} + +// MarshalingTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func MarshalingTypeString(s string) (MarshalingType, error) { + if val, ok := _MarshalingTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to MarshalingType values", s) +} + +// MarshalingTypeValues returns all values of the enum +func MarshalingTypeValues() []MarshalingType { + return _MarshalingTypeValues +} + +// IsAMarshalingType returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i MarshalingType) IsAMarshalingType() bool { + for _, v := range _MarshalingTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go index d5620e31c1c8..873f99149eda 100644 --- a/sdk/helper/keysutil/policy.go +++ b/sdk/helper/keysutil/policy.go @@ -22,6 +22,7 @@ import ( "encoding/pem" "errors" "fmt" + "hash" "io" "math/big" "path" @@ -31,16 +32,16 @@ import ( "sync/atomic" "time" - "golang.org/x/crypto/chacha20poly1305" - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/hkdf" - "github.com/hashicorp/errwrap" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/kdf" "github.com/hashicorp/vault/sdk/logical" + "github.com/tink-crypto/tink-go/v2/kwp/subtle" + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/hkdf" ) // Careful with iota; don't put anything before it in this const block because @@ -67,6 +68,9 @@ const ( KeyType_RSA3072 KeyType_MANAGED_KEY KeyType_HMAC + KeyType_AES128_CMAC + KeyType_AES256_CMAC + // If adding to this list please update allTestKeyTypes in policy_test.go ) const ( @@ -145,7 +149,7 @@ func (kt KeyType) SigningSupported() bool { func (kt KeyType) HashSignatureInput() bool { switch kt { - case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: return true } return false @@ -167,6 +171,34 @@ func (kt KeyType) AssociatedDataSupported() bool { return false } +func (kt KeyType) CMACSupported() bool { + switch kt { + case KeyType_AES128_CMAC, KeyType_AES256_CMAC: + return true + default: + return false + } +} + +func (kt KeyType) HMACSupported() bool { + switch { + case kt.CMACSupported(): + return false + case kt == KeyType_MANAGED_KEY: + return false + default: + return true + } +} + +func (kt KeyType) ImportPublicKeySupported() bool { + switch kt { + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519: + return true + } + return false +} + func (kt KeyType) String() string { switch kt { case KeyType_AES128_GCM96: @@ -193,6 +225,10 @@ func (kt KeyType) String() string { return "hmac" case KeyType_MANAGED_KEY: return "managed_key" + case KeyType_AES128_CMAC: + return "aes128-cmac" + case KeyType_AES256_CMAC: + return "aes256-cmac" } return "[unknown]" @@ -218,7 +254,8 @@ type KeyEntry struct { EC_Y *big.Int `json:"ec_y"` EC_D *big.Int `json:"ec_d"` - RSAKey *rsa.PrivateKey `json:"rsa_key"` + RSAKey *rsa.PrivateKey `json:"rsa_key"` + RSAPublicKey *rsa.PublicKey `json:"rsa_public_key"` // The public key in an appropriate format for the type of key FormattedPublicKey string `json:"public_key"` @@ -232,6 +269,18 @@ type KeyEntry struct { DeprecatedCreationTime int64 `json:"creation_time"` ManagedKeyUUID string `json:"managed_key_id,omitempty"` + + // Key entry certificate chain. 
If set, leaf certificate key matches the + // KeyEntry key + CertificateChain [][]byte `json:"certificate_chain"` +} + +func (ke *KeyEntry) IsPrivateKeyMissing() bool { + if ke.RSAKey != nil || ke.EC_D != nil || len(ke.Key) != 0 || len(ke.ManagedKeyUUID) != 0 { + return false + } + + return true } // deprecatedKeyEntryMap is used to allow JSON marshal/unmarshal @@ -335,6 +384,19 @@ func LoadPolicy(ctx context.Context, s logical.Storage, path string) (*Policy, e return nil, err } + // Migrate RSA private keys to include their private counterpart. This lets + // us reference RSAPublicKey whenever we need to, without necessarily + // needing the private key handy, synchronizing the behavior with EC and + // Ed25519 key pairs. + switch policy.Type { + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + for _, entry := range policy.Keys { + if entry.RSAPublicKey == nil && entry.RSAKey != nil { + entry.RSAPublicKey = entry.RSAKey.Public().(*rsa.PublicKey) + } + } + } + policy.l = new(sync.RWMutex) return &policy, nil @@ -663,8 +725,10 @@ func (p *Policy) NeedsUpgrade() bool { return true } - if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { - return true + if p.Type.HMACSupported() { + if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { + return true + } } return false @@ -725,15 +789,21 @@ func (p *Policy) Upgrade(ctx context.Context, storage logical.Storage, randReade persistNeeded = true } - if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { - entry := p.Keys[strconv.Itoa(p.LatestVersion)] - hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) - if err != nil { - return err + if p.Type.HMACSupported() { + if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { + entry := p.Keys[strconv.Itoa(p.LatestVersion)] + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey + p.Keys[strconv.Itoa(p.LatestVersion)] = entry + persistNeeded = true + + if p.Type == KeyType_HMAC { + entry.HMACKey = entry.Key + } } - entry.HMACKey = hmacKey - p.Keys[strconv.Itoa(p.LatestVersion)] = entry - persistNeeded = true } if persistNeeded { @@ -969,6 +1039,9 @@ func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factori return "", err } key := keyEntry.RSAKey + if key == nil { + return "", errutil.InternalError{Err: fmt.Sprintf("cannot decrypt ciphertext, key version does not have a private counterpart")} + } plain, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, key, decoded, nil) if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA decrypt the ciphertext: %v", err)} @@ -1029,6 +1102,25 @@ func (p *Policy) HMACKey(version int) ([]byte, error) { return keyEntry.HMACKey, nil } +func (p *Policy) CMACKey(version int) ([]byte, error) { + switch { + case version < 0: + return nil, fmt.Errorf("key version does not exist (cannot be negative)") + case version > p.LatestVersion: + return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion) + } + keyEntry, err := p.safeGetKeyEntry(version) + if err != nil { + return nil, err + } + + if p.Type.CMACSupported() { + return keyEntry.Key, nil + } + + return nil, fmt.Errorf("key type %s does not support CMAC operations", p.Type) +} + func (p *Policy) 
Sign(ver int, context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType) (*SigningResult, error) { return p.SignWithOptions(ver, context, input, &SigningOptions{ HashAlgorithm: hashAlgorithm, @@ -1043,13 +1135,13 @@ func (p *Policy) minRSAPSSSaltLength() int { return rsa.PSSSaltLengthEqualsHash } -func (p *Policy) maxRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash) int { +func (p *Policy) maxRSAPSSSaltLength(keyBitLen int, hash crypto.Hash) int { // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=288 - return (priv.N.BitLen()-1+7)/8 - 2 - hash.Size() + return (keyBitLen-1+7)/8 - 2 - hash.Size() } -func (p *Policy) validRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash, saltLength int) bool { - return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(priv, hash) +func (p *Policy) validRSAPSSSaltLength(keyBitLen int, hash crypto.Hash, saltLength int) bool { + return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(keyBitLen, hash) } func (p *Policy) SignWithOptions(ver int, context, input []byte, options *SigningOptions) (*SigningResult, error) { @@ -1076,6 +1168,11 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin return nil, err } + // Before signing, check if key has its private part, if not return error + if keyParams.IsPrivateKeyMissing() { + return nil, errutil.UserError{Err: "requested version for signing does not contain a private part"} + } + hashAlgorithm := options.HashAlgorithm marshaling := options.Marshaling saltLength := options.SaltLength @@ -1182,7 +1279,7 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin switch sigAlgorithm { case "pss": - if !p.validRSAPSSSaltLength(key, algo, saltLength) { + if !p.validRSAPSSSaltLength(key.N.BitLen(), algo, saltLength) { return nil, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} } sig, err = rsa.SignPSS(rand.Reader, key, algo, input, &rsa.PSSOptions{SaltLength: saltLength}) @@ -1336,20 +1433,30 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil case KeyType_ED25519: - var key ed25519.PrivateKey + var pub ed25519.PublicKey if p.Derived { // Derive the key that should be used - var err error - key, err = p.GetKey(context, ver, 32) + key, err := p.GetKey(context, ver, 32) if err != nil { return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} } + pub = ed25519.PrivateKey(key).Public().(ed25519.PublicKey) } else { - key = ed25519.PrivateKey(p.Keys[strconv.Itoa(ver)].Key) + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + + raw, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return false, err + } + + pub = ed25519.PublicKey(raw) } - return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil + return ed25519.Verify(pub, input, sigBytes), nil case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: keyEntry, err := p.safeGetKeyEntry(ver) @@ -1357,8 +1464,6 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return false, err } - key := keyEntry.RSAKey - algo, ok := CryptoHashMap[hashAlgorithm] if !ok { return false, errutil.InternalError{Err: "unsupported hash algorithm"} @@ -1370,12 +1475,20 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, 
sig string, o switch sigAlgorithm { case "pss": - if !p.validRSAPSSSaltLength(key, algo, saltLength) { + publicKey := keyEntry.RSAPublicKey + if !keyEntry.IsPrivateKeyMissing() { + publicKey = &keyEntry.RSAKey.PublicKey + } + if !p.validRSAPSSSaltLength(publicKey.N.BitLen(), algo, saltLength) { return false, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} } - err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) + err = rsa.VerifyPSS(publicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) case "pkcs1v15": - err = rsa.VerifyPKCS1v15(&key.PublicKey, algo, input, sigBytes) + publicKey := keyEntry.RSAPublicKey + if !keyEntry.IsPrivateKeyMissing() { + publicKey = &keyEntry.RSAKey.PublicKey + } + err = rsa.VerifyPKCS1v15(publicKey, algo, input, sigBytes) default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} } @@ -1396,12 +1509,28 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o } func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte, randReader io.Reader) error { + return p.ImportPublicOrPrivate(ctx, storage, key, true, randReader) +} + +func (p *Policy) ImportPublicOrPrivate(ctx context.Context, storage logical.Storage, key []byte, isPrivateKey bool, randReader io.Reader) error { now := time.Now() entry := KeyEntry{ CreationTime: now, DeprecatedCreationTime: now.Unix(), } + // Before we insert this entry, check if the latest version is incomplete + // and this entry matches the current version; if so, return without + // updating to the next version. + if p.LatestVersion > 0 { + latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] + if latestKey.IsPrivateKeyMissing() && isPrivateKey { + if err := p.ImportPrivateKeyForVersion(ctx, storage, p.LatestVersion, key); err == nil { + return nil + } + } + } + if p.Type != KeyType_HMAC { hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) if err != nil { @@ -1410,103 +1539,63 @@ func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte entry.HMACKey = hmacKey } - if (p.Type == KeyType_AES128_GCM96 && len(key) != 16) || - ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305) && len(key) != 32) || + if p.Type == KeyType_ED25519 && p.Derived && !isPrivateKey { + return fmt.Errorf("unable to import only public key for derived Ed25519 key: imported key should not be an Ed25519 key pair but is instead an HKDF key") + } + + if ((p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES128_CMAC) && len(key) != 16) || + ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305 || p.Type == KeyType_AES256_CMAC) && len(key) != 32) || (p.Type == KeyType_HMAC && (len(key) < HmacMinKeySize || len(key) > HmacMaxKeySize)) { return fmt.Errorf("invalid key size %d bytes for key type %s", len(key), p.Type) } - if p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305 || p.Type == KeyType_HMAC { + if p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305 || p.Type == KeyType_HMAC || p.Type == KeyType_AES128_CMAC || p.Type == KeyType_AES256_CMAC { entry.Key = key if p.Type == KeyType_HMAC { p.KeySize = len(key) + entry.HMACKey = key } } else { - parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - if strings.Contains(err.Error(), "unknown elliptic curve") { - var 
edErr error - parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) - if edErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %v\n - original error: %w", edErr, err) - } - - // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! - } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { - var rsaErr error - parsedPrivateKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) - if rsaErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + var parsedKey any + var err error + if isPrivateKey { + parsedKey, err = x509.ParsePKCS8PrivateKey(key) + if err != nil { + if strings.Contains(err.Error(), "unknown elliptic curve") { + var edErr error + parsedKey, edErr = ParsePKCS8Ed25519PrivateKey(key) + if edErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) + } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { + var rsaErr error + parsedKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) + if rsaErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + } + + // Parsing as RSA-PSS in PKCS8 succeeded! + } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) } - - // Parsing as RSA-PSS in PKCS8 succeeded! - } else { - return fmt.Errorf("error parsing asymmetric key: %s", err) - } - } - - switch parsedPrivateKey.(type) { - case *ecdsa.PrivateKey: - if p.Type != KeyType_ECDSA_P256 && p.Type != KeyType_ECDSA_P384 && p.Type != KeyType_ECDSA_P521 { - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) - } - - ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) - curve := elliptic.P256() - if p.Type == KeyType_ECDSA_P384 { - curve = elliptic.P384() - } else if p.Type == KeyType_ECDSA_P521 { - curve = elliptic.P521() } - - if ecdsaKey.Curve != curve { - return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } else { + pemBlock, _ := pem.Decode(key) + if pemBlock == nil { + return fmt.Errorf("error parsing public key: not in PEM format") } - entry.EC_D = ecdsaKey.D - entry.EC_X = ecdsaKey.X - entry.EC_Y = ecdsaKey.Y - derBytes, err := x509.MarshalPKIXPublicKey(ecdsaKey.Public()) + parsedKey, err = x509.ParsePKIXPublicKey(pemBlock.Bytes) if err != nil { - return errwrap.Wrapf("error marshaling public key: {{err}}", err) - } - pemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: derBytes, - } - pemBytes := pem.EncodeToMemory(pemBlock) - if pemBytes == nil || len(pemBytes) == 0 { - return fmt.Errorf("error PEM-encoding public key") - } - entry.FormattedPublicKey = string(pemBytes) - case ed25519.PrivateKey: - if p.Type != KeyType_ED25519 { - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) - } - privateKey := parsedPrivateKey.(ed25519.PrivateKey) - - entry.Key = privateKey - publicKey := privateKey.Public().(ed25519.PublicKey) - entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) - case *rsa.PrivateKey: - if p.Type != KeyType_RSA2048 && p.Type != KeyType_RSA3072 && p.Type != KeyType_RSA4096 { - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) - } - - keyBytes := 256 - if p.Type == 
KeyType_RSA3072 { - keyBytes = 384 - } else if p.Type == KeyType_RSA4096 { - keyBytes = 512 - } - rsaKey := parsedPrivateKey.(*rsa.PrivateKey) - if rsaKey.Size() != keyBytes { - return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + return fmt.Errorf("error parsing public key: %w", err) } + } - entry.RSAKey = rsaKey - default: - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) + err = entry.parseFromKey(p.Type, parsedKey) + if err != nil { + return err } } @@ -1572,20 +1661,23 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { DeprecatedCreationTime: now.Unix(), } - hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) - if err != nil { - return err + if p.Type != KeyType_AES128_CMAC && p.Type != KeyType_AES256_CMAC && p.Type != KeyType_HMAC { + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey } - entry.HMACKey = hmacKey + var err error switch p.Type { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC, KeyType_AES128_CMAC, KeyType_AES256_CMAC: // Default to 256 bit key numBytes := 32 - if p.Type == KeyType_AES128_GCM96 { + if p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES128_CMAC { numBytes = 16 } else if p.Type == KeyType_HMAC { - numBytes := p.KeySize + numBytes = p.KeySize if numBytes < HmacMinKeySize || numBytes > HmacMaxKeySize { return fmt.Errorf("invalid key size for HMAC key, must be between %d and %d bytes", HmacMinKeySize, HmacMaxKeySize) } @@ -1596,6 +1688,11 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { } entry.Key = newKey + if p.Type == KeyType_HMAC { + // To avoid causing problems, ensure HMACKey = Key. + entry.HMACKey = newKey + } + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: var curve elliptic.Curve switch p.Type { @@ -1629,13 +1726,19 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { entry.FormattedPublicKey = string(pemBytes) case KeyType_ED25519: + // Go uses a 64-byte private key for Ed25519 keys (private+public, each + // 32-bytes long). When we do Key derivation, we still generate a 32-byte + // random value (and compute the corresponding Ed25519 public key), but + // use this entire 64-byte key as if it was an HKDF key. The corresponding + // underlying public key is never returned (which is probably good, because + // doing so would leak half of our HKDF key...), but means we cannot import + // derived-enabled Ed25519 public key components. 
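Aside, illustrating the comment above with a tiny standalone program (not part of the diff): a Go Ed25519 private key is the 32-byte seed followed by the 32-byte public key, which is why exposing the second half of a derived key would reveal part of the HKDF output:

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// A Go Ed25519 private key is 64 bytes: seed (32) || public key (32).
	fmt.Println(len(priv), len(pub))                 // 64 32
	fmt.Println(bytes.Equal(priv[32:], []byte(pub))) // true

	// The whole key pair is recoverable from the seed half alone.
	fmt.Println(bytes.Equal(priv, ed25519.NewKeyFromSeed(priv.Seed()))) // true
}
```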
pub, pri, err := ed25519.GenerateKey(randReader) if err != nil { return err } entry.Key = pri entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub) - case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: bitSize := 2048 if p.Type == KeyType_RSA3072 { @@ -1649,6 +1752,8 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { if err != nil { return err } + + entry.RSAPublicKey = entry.RSAKey.Public().(*rsa.PublicKey) } if p.ConvergentEncryption { @@ -1961,9 +2066,15 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value encBytes := 32 hmacBytes := 0 - if p.convergentVersion(ver) > 2 { + convergentVersion := p.convergentVersion(ver) + if convergentVersion > 2 { deriveHMAC = true hmacBytes = 32 + if len(nonce) > 0 { + return "", errutil.UserError{Err: "nonce provided when not allowed"} + } + } else if len(nonce) > 0 && (!p.ConvergentEncryption || convergentVersion != 1) { + return "", errutil.UserError{Err: "nonce provided when not allowed"} } if p.Type == KeyType_AES128_GCM96 { encBytes = 16 @@ -2021,8 +2132,13 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value if err != nil { return "", err } - key := keyEntry.RSAKey - ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) + var publicKey *rsa.PublicKey + if keyEntry.RSAKey != nil { + publicKey = &keyEntry.RSAKey.PublicKey + } else { + publicKey = keyEntry.RSAPublicKey + } + ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, publicKey, plaintext, nil) if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} } @@ -2067,3 +2183,470 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value return encoded, nil } + +func (p *Policy) KeyVersionCanBeUpdated(keyVersion int, isPrivateKey bool) error { + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return err + } + + if !p.Type.ImportPublicKeySupported() { + return errors.New("provided type does not support importing key versions") + } + + isPrivateKeyMissing := keyEntry.IsPrivateKeyMissing() + if isPrivateKeyMissing && !isPrivateKey { + return errors.New("cannot add a public key to a key version that already has a public key set") + } + + if !isPrivateKeyMissing { + return errors.New("private key imported, key version cannot be updated") + } + + return nil +} + +func (p *Policy) ImportPrivateKeyForVersion(ctx context.Context, storage logical.Storage, keyVersion int, key []byte) error { + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return err + } + + // Parse key + parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + if strings.Contains(err.Error(), "unknown elliptic curve") { + var edErr error + parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) + if edErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) + } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { + var rsaErr error + parsedPrivateKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) + if rsaErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + } + + // Parsing as RSA-PSS in PKCS8 succeeded! 
+ } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) + } + } + + switch parsedPrivateKey.(type) { + case *ecdsa.PrivateKey: + ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) + pemBlock, _ := pem.Decode([]byte(keyEntry.FormattedPublicKey)) + if pemBlock == nil { + return fmt.Errorf("failed to parse key entry public key: invalid PEM blob") + } + publicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil || publicKey == nil { + return fmt.Errorf("failed to parse key entry public key: %v", err) + } + if !publicKey.(*ecdsa.PublicKey).Equal(&ecdsaKey.PublicKey) { + return fmt.Errorf("cannot import key, key pair does not match") + } + case *rsa.PrivateKey: + rsaKey := parsedPrivateKey.(*rsa.PrivateKey) + if !rsaKey.PublicKey.Equal(keyEntry.RSAPublicKey) { + return fmt.Errorf("cannot import key, key pair does not match") + } + case ed25519.PrivateKey: + ed25519Key := parsedPrivateKey.(ed25519.PrivateKey) + publicKey, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return fmt.Errorf("failed to parse key entry public key: %v", err) + } + if !ed25519.PublicKey(publicKey).Equal(ed25519Key.Public()) { + return fmt.Errorf("cannot import key, key pair does not match") + } + } + + err = keyEntry.parseFromKey(p.Type, parsedPrivateKey) + if err != nil { + return err + } + + p.Keys[strconv.Itoa(keyVersion)] = keyEntry + + return p.Persist(ctx, storage) +} + +func (ke *KeyEntry) parseFromKey(PolKeyType KeyType, parsedKey any) error { + switch parsedKey.(type) { + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + if PolKeyType != KeyType_ECDSA_P256 && PolKeyType != KeyType_ECDSA_P384 && PolKeyType != KeyType_ECDSA_P521 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + curve := elliptic.P256() + if PolKeyType == KeyType_ECDSA_P384 { + curve = elliptic.P384() + } else if PolKeyType == KeyType_ECDSA_P521 { + curve = elliptic.P521() + } + + var derBytes []byte + var err error + ecdsaKey, ok := parsedKey.(*ecdsa.PrivateKey) + if ok { + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + ke.EC_D = ecdsaKey.D + ke.EC_X = ecdsaKey.X + ke.EC_Y = ecdsaKey.Y + + derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey.Public()) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + } else { + ecdsaKey := parsedKey.(*ecdsa.PublicKey) + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + ke.EC_X = ecdsaKey.X + ke.EC_Y = ecdsaKey.Y + + derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + } + + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + ke.FormattedPublicKey = string(pemBytes) + case ed25519.PrivateKey, ed25519.PublicKey: + if PolKeyType != KeyType_ED25519 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + privateKey, ok := parsedKey.(ed25519.PrivateKey) + if ok { + ke.Key = privateKey + publicKey := privateKey.Public().(ed25519.PublicKey) + ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) + } else { + publicKey := parsedKey.(ed25519.PublicKey) + ke.FormattedPublicKey = 
base64.StdEncoding.EncodeToString(publicKey) + } + case *rsa.PrivateKey, *rsa.PublicKey: + if PolKeyType != KeyType_RSA2048 && PolKeyType != KeyType_RSA3072 && PolKeyType != KeyType_RSA4096 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + keyBytes := 256 + if PolKeyType == KeyType_RSA3072 { + keyBytes = 384 + } else if PolKeyType == KeyType_RSA4096 { + keyBytes = 512 + } + + rsaKey, ok := parsedKey.(*rsa.PrivateKey) + if ok { + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + ke.RSAKey = rsaKey + ke.RSAPublicKey = rsaKey.Public().(*rsa.PublicKey) + } else { + rsaKey := parsedKey.(*rsa.PublicKey) + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + ke.RSAPublicKey = rsaKey + } + default: + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + return nil +} + +func (p *Policy) WrapKey(ver int, targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { + if !p.Type.SigningSupported() { + return "", fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return "", errutil.UserError{Err: "requested version for key wrapping is negative"} + case ver > p.LatestVersion: + return "", errutil.UserError{Err: "requested version for key wrapping is higher than the latest key version"} + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return "", errutil.UserError{Err: "requested version for key wrapping is less than the minimum encryption key version"} + } + + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + + return keyEntry.WrapKey(targetKey, targetKeyType, hash) +} + +func (ke *KeyEntry) WrapKey(targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { + // Presently this method implements a CKM_RSA_AES_KEY_WRAP-compatible + // wrapping interface and only works on RSA keyEntries as a result. 
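+	// (Flow: generate an ephemeral AES-256 key, RSA-OAEP-encrypt it to this
+	// entry's public key, KWP-wrap the target key under the ephemeral key,
+	// then concatenate the two blobs; see wrapTargetPKCS8ForImport below.)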
+	if ke.RSAPublicKey == nil {
+		return "", fmt.Errorf("unsupported key type in use; must be an RSA key")
+	}
+
+	var preppedTargetKey []byte
+	switch targetKeyType {
+	case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC, KeyType_AES128_CMAC, KeyType_AES256_CMAC:
+		var ok bool
+		preppedTargetKey, ok = targetKey.([]byte)
+		if !ok {
+			return "", fmt.Errorf("failed to wrap target key for import: symmetric key not provided in byte format (%T)", targetKey)
+		}
+	default:
+		var err error
+		preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey)
+		if err != nil {
+			return "", fmt.Errorf("failed to wrap target key for import: %w", err)
+		}
+	}
+
+	result, err := wrapTargetPKCS8ForImport(ke.RSAPublicKey, preppedTargetKey, hash)
+	if err != nil {
+		return result, fmt.Errorf("failed to wrap target key for import: %w", err)
+	}
+
+	return result, nil
+}
+
+func wrapTargetPKCS8ForImport(wrappingKey *rsa.PublicKey, preppedTargetKey []byte, hash hash.Hash) (string, error) {
+	// Generate an ephemeral AES-256 key
+	ephKey, err := uuid.GenerateRandomBytes(32)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate an ephemeral AES wrapping key: %w", err)
+	}
+
+	// Wrap ephemeral AES key with public wrapping key
+	ephKeyWrapped, err := rsa.EncryptOAEP(hash, rand.Reader, wrappingKey, ephKey, []byte{} /* label */)
+	if err != nil {
+		return "", fmt.Errorf("failed to encrypt ephemeral wrapping key with public key: %w", err)
+	}
+
+	// Create KWP instance for wrapping target key
+	kwp, err := subtle.NewKWP(ephKey)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate new KWP from AES key: %w", err)
+	}
+
+	// Wrap target key with KWP
+	targetKeyWrapped, err := kwp.Wrap(preppedTargetKey)
+	if err != nil {
+		return "", fmt.Errorf("failed to wrap target key with KWP: %w", err)
+	}
+
+	// Combine the wrapped keys into a single blob and base64 encode
+	wrappedKeys := append(ephKeyWrapped, targetKeyWrapped...)
+	return base64.StdEncoding.EncodeToString(wrappedKeys), nil
+}
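Aside, for orientation rather than part of the change: the receiver of such a blob undoes both layers. A minimal sketch of that inverse, assuming the layout produced above (an OAEP ciphertext of exactly the RSA modulus size followed by the KWP-wrapped key) and the same imports as this file:

```go
func unwrapImportBlob(priv *rsa.PrivateKey, blob []byte, h hash.Hash) ([]byte, error) {
	n := priv.Size() // the OAEP segment is exactly the RSA modulus size
	if len(blob) < n {
		return nil, fmt.Errorf("wrapped blob shorter than the RSA modulus")
	}

	// Recover the ephemeral AES key from the first segment.
	ephKey, err := rsa.DecryptOAEP(h, nil, priv, blob[:n], []byte{})
	if err != nil {
		return nil, err
	}

	// KWP-unwrap the target key using the recovered ephemeral key.
	kwp, err := subtle.NewKWP(ephKey)
	if err != nil {
		return nil, err
	}
	return kwp.Unwrap(blob[n:])
}
```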
+
+func (p *Policy) CreateCsr(keyVersion int, csrTemplate *x509.CertificateRequest) ([]byte, error) {
+	if !p.Type.SigningSupported() {
+		return nil, errutil.UserError{Err: fmt.Sprintf("key type '%s' does not support signing", p.Type)}
+	}
+
+	keyEntry, err := p.safeGetKeyEntry(keyVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	if keyEntry.IsPrivateKeyMissing() {
+		return nil, errutil.UserError{Err: "private key not imported for key version selected"}
+	}
+
+	csrTemplate.Signature = nil
+	csrTemplate.SignatureAlgorithm = x509.UnknownSignatureAlgorithm
+
+	var key crypto.Signer
+	switch p.Type {
+	case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521:
+		var curve elliptic.Curve
+		switch p.Type {
+		case KeyType_ECDSA_P384:
+			curve = elliptic.P384()
+		case KeyType_ECDSA_P521:
+			curve = elliptic.P521()
+		default:
+			curve = elliptic.P256()
+		}
+
+		key = &ecdsa.PrivateKey{
+			PublicKey: ecdsa.PublicKey{
+				Curve: curve,
+				X:     keyEntry.EC_X,
+				Y:     keyEntry.EC_Y,
+			},
+			D: keyEntry.EC_D,
+		}
+
+	case KeyType_ED25519:
+		if p.Derived {
+			return nil, errutil.UserError{Err: "operation not supported on keys with derivation enabled"}
+		}
+		key = ed25519.PrivateKey(keyEntry.Key)
+
+	case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096:
+		key = keyEntry.RSAKey
+
+	default:
+		return nil, errutil.InternalError{Err: fmt.Sprintf("selected key type '%s' does not support signing", p.Type.String())}
+	}
+	csrBytes, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, key)
+	if err != nil {
+		return nil, fmt.Errorf("could not create the certificate request: %w", err)
+	}
+
+	pemCsr := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE REQUEST",
+		Bytes: csrBytes,
+	})
+
+	return pemCsr, nil
+}
+
+func (p *Policy) ValidateLeafCertKeyMatch(keyVersion int, certPublicKeyAlgorithm x509.PublicKeyAlgorithm, certPublicKey any) (bool, error) {
+	if !p.Type.SigningSupported() {
+		return false, errutil.UserError{Err: fmt.Sprintf("key type '%s' does not support signing", p.Type)}
+	}
+
+	var keyTypeMatches bool
+	switch p.Type {
+	case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521:
+		if certPublicKeyAlgorithm == x509.ECDSA {
+			keyTypeMatches = true
+		}
+	case KeyType_ED25519:
+		if certPublicKeyAlgorithm == x509.Ed25519 {
+			keyTypeMatches = true
+		}
+	case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096:
+		if certPublicKeyAlgorithm == x509.RSA {
+			keyTypeMatches = true
+		}
+	}
+	if !keyTypeMatches {
+		return false, errutil.UserError{Err: fmt.Sprintf("provided leaf certificate public key algorithm '%s' does not match the transit key type '%s'",
+			certPublicKeyAlgorithm, p.Type)}
+	}
+
+	keyEntry, err := p.safeGetKeyEntry(keyVersion)
+	if err != nil {
+		return false, err
+	}
+
+	switch certPublicKeyAlgorithm {
+	case x509.ECDSA:
+		certPublicKey := certPublicKey.(*ecdsa.PublicKey)
+		var curve elliptic.Curve
+		switch p.Type {
+		case KeyType_ECDSA_P384:
+			curve = elliptic.P384()
+		case KeyType_ECDSA_P521:
+			curve = elliptic.P521()
+		default:
+			curve = elliptic.P256()
+		}
+
+		publicKey := &ecdsa.PublicKey{
+			Curve: curve,
+			X:     keyEntry.EC_X,
+			Y:     keyEntry.EC_Y,
+		}
+
+		return publicKey.Equal(certPublicKey), nil
+
+	case x509.Ed25519:
+		if p.Derived {
+			return false, errutil.UserError{Err: "operation not supported on keys with derivation enabled"}
+		}
+		certPublicKey := certPublicKey.(ed25519.PublicKey)
+
+		raw, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey)
+		if err != nil {
+			return false, err
+		}
+		publicKey := ed25519.PublicKey(raw)
+
+		return publicKey.Equal(certPublicKey), nil
+
+	case x509.RSA:
+		certPublicKey := certPublicKey.(*rsa.PublicKey)
+		publicKey := keyEntry.RSAKey.PublicKey
+		return publicKey.Equal(certPublicKey), nil
+
+	case x509.UnknownPublicKeyAlgorithm:
+		return false, errutil.InternalError{Err: fmt.Sprint("certificate signed with an unknown algorithm")}
+	}
+
+	return false, nil
+}
+
+func (p *Policy) ValidateAndPersistCertificateChain(ctx context.Context, keyVersion int, certChain []*x509.Certificate, storage logical.Storage) error {
+	if len(certChain) == 0 {
+		return errutil.UserError{Err: "expected at least one certificate in the parsed certificate chain"}
+	}
+
+	if certChain[0].BasicConstraintsValid && certChain[0].IsCA {
+		return errutil.UserError{Err: "certificate in the first position is not a leaf certificate"}
+	}
+
+	for _, cert := range certChain[1:] {
+		if cert.BasicConstraintsValid && !cert.IsCA {
+			return errutil.UserError{Err: "provided certificate chain contains more than one leaf certificate"}
+		}
+	}
+
+	valid, err := p.ValidateLeafCertKeyMatch(keyVersion, certChain[0].PublicKeyAlgorithm, certChain[0].PublicKey)
+	if err != nil {
+		prefixedErr := fmt.Errorf("could not validate key match between leaf certificate key and key version in transit: %w", err)
+		switch err.(type) {
+		case errutil.UserError:
+			return errutil.UserError{Err: prefixedErr.Error()}
+		default:
+			return prefixedErr
+		}
+	}
+	if !valid {
+		return fmt.Errorf("leaf certificate public key does not match the key version selected")
+	}
+
+	keyEntry, err := p.safeGetKeyEntry(keyVersion)
+	if err != nil {
+		return err
+	}
+
+	// Convert the certificate chain to DER format
+	derCertificates := make([][]byte, len(certChain))
+	for i, cert := range certChain {
+		derCertificates[i] = cert.Raw
+	}
+
+	keyEntry.CertificateChain = derCertificates
+
+	p.Keys[strconv.Itoa(keyVersion)] = keyEntry
+	return p.Persist(ctx, storage)
+}
diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go
index daf19a8258a7..fd753f22ba7e 100644
--- a/sdk/helper/keysutil/policy_test.go
+++ b/sdk/helper/keysutil/policy_test.go
@@ -21,14 +21,127 @@ import (
 	"testing"
 	"time"
 
-	"golang.org/x/crypto/ed25519"
-
 	"github.com/hashicorp/vault/sdk/helper/errutil"
 	"github.com/hashicorp/vault/sdk/helper/jsonutil"
 	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/mitchellh/copystructure"
+	"golang.org/x/crypto/ed25519"
 )
 
+// Ordering of these items needs to match the iota order defined in policy.go. Ordering changes
+// should never occur, as it would lead to a key type change within existing stored policies.
+var allTestKeyTypes = []KeyType{
+	KeyType_AES256_GCM96, KeyType_ECDSA_P256, KeyType_ED25519, KeyType_RSA2048,
+	KeyType_RSA4096, KeyType_ChaCha20_Poly1305, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_AES128_GCM96,
+	KeyType_RSA3072, KeyType_MANAGED_KEY, KeyType_HMAC, KeyType_AES128_CMAC, KeyType_AES256_CMAC,
+}
+
+func TestPolicy_KeyTypes(t *testing.T) {
+	// Make sure the iota values never change for key types, as existing storage would be affected
+	for i, keyType := range allTestKeyTypes {
+		if int(keyType) != i {
+			t.Fatalf("iota of keytype %s changed, expected %d got %d", keyType.String(), i, keyType)
+		}
+	}
+
+	// Make sure we have a string representation for all types
+	for _, keyType := range allTestKeyTypes {
+		if strings.Contains(keyType.String(), "unknown") {
+			t.Fatalf("keytype with iota of %d should not contain 'unknown', missing in String() switch statement", keyType)
+		}
+	}
+}
+
+func TestPolicy_HmacCmacSupported(t *testing.T) {
+	// Test HMAC supported feature
+	for _, keyType := range allTestKeyTypes {
+		switch keyType {
+		case KeyType_MANAGED_KEY:
+			if keyType.HMACSupported() {
+				t.Fatalf("hmac should not be supported for keytype %s", keyType.String())
+			}
+			if keyType.CMACSupported() {
+				t.Fatalf("cmac should not be supported for keytype %s", keyType.String())
+			}
+		case KeyType_AES128_CMAC, KeyType_AES256_CMAC:
+			if keyType.HMACSupported() {
+				t.Fatalf("hmac should not be supported for keytype %s", keyType.String())
+			}
+			if !keyType.CMACSupported() {
+				t.Fatalf("cmac should be supported for keytype %s", keyType.String())
+			}
+		default:
+			if !keyType.HMACSupported() {
+				t.Fatalf("hmac should have been supported for keytype %s", keyType.String())
+			}
+			if keyType.CMACSupported() {
+				t.Fatalf("cmac should not have been supported for keytype %s", keyType.String())
+			}
+		}
+	}
+}
+
+func TestPolicy_CMACKeyUpgrade(t *testing.T) {
+	ctx := context.Background()
+	lm, _ := NewLockManager(false, 0)
+	storage := &logical.InmemStorage{}
+	p, upserted, err := lm.GetPolicy(ctx, PolicyRequest{
+		Upsert:  true,
+		Storage: storage,
+		KeyType: KeyType_AES256_CMAC,
+		Name:    "test",
+	}, rand.Reader)
+	if err != nil {
+		t.Fatalf("failed loading policy: %v", err)
+	}
+	if p == nil {
+		t.Fatal("nil policy")
+	}
+	if !upserted {
+		t.Fatal("expected an upsert")
+	}
+
+	// This verifies we don't have a hmac key
+	_, err = p.HMACKey(1)
+	if err == nil {
+		t.Fatal("cmac key should not return an hmac key but did on initial creation")
+	}
+
+	if p.NeedsUpgrade() {
+		t.Fatal("cmac key should not require an upgrade after initial key creation")
+	}
+
+	err = p.Upgrade(ctx, storage, rand.Reader)
+	if err != nil {
+		t.Fatalf("an error was returned from upgrade method: %v", err)
+	}
+	p.Unlock()
+
+	// Now reload our policy from disk and make sure we still don't have a hmac key
+	p, upserted, err = lm.GetPolicy(ctx, PolicyRequest{
+		Upsert:  true,
+		Storage: storage,
+		KeyType: KeyType_AES256_CMAC,
+		Name:    "test",
+	}, rand.Reader)
+	if err != nil {
+		t.Fatalf("failed loading policy: %v", err)
+	}
+	if p == nil {
+		t.Fatal("nil policy")
+	}
+	if upserted {
+		t.Fatal("expected the key to exist but upserted was true")
+	}
+
+	p.Unlock()
+
+	_, err = p.HMACKey(1)
+	if err == nil {
+		t.Fatal("cmac key should not return an hmac key post upgrade")
+	}
+}
+
 func TestPolicy_KeyEntryMapUpgrade(t *testing.T) {
 	now := time.Now()
 	old := map[int]KeyEntry{
@@ -846,7 +959,7 @@ func Test_RSA_PSS(t *testing.T) {
 		}
 		cryptoHash := CryptoHashMap[hashType]
 		minSaltLength := 
p.minRSAPSSSaltLength() - maxSaltLength := p.maxRSAPSSSaltLength(rsaKey, cryptoHash) + maxSaltLength := p.maxRSAPSSSaltLength(rsaKey.N.BitLen(), cryptoHash) hash := cryptoHash.New() hash.Write(input) input = hash.Sum(nil) diff --git a/sdk/helper/keysutil/util.go b/sdk/helper/keysutil/util.go index 94a56d42c573..dbba7ec1fb70 100644 --- a/sdk/helper/keysutil/util.go +++ b/sdk/helper/keysutil/util.go @@ -66,7 +66,7 @@ func isEd25519OID(oid asn1.ObjectIdentifier) bool { return oidNSSPKIXEd25519.Equal(oid) || oidRFC8410Ed25519.Equal(oid) } -// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form. +// ParsePKCS8Ed25519PrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form. // // It returns a *rsa.PrivateKey, a *ecdsa.PrivateKey, or a ed25519.PrivateKey. // More types might be supported in the future. @@ -121,7 +121,7 @@ func ParsePKCS8Ed25519PrivateKey(der []byte) (key interface{}, err error) { return ed25519.NewKeyFromSeed(ed25519Key.PrivateKey), nil } -// ParsePKCS8PrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form. +// ParsePKCS8RSAPSSPrivateKey parses an unencrypted private key in PKCS #8, ASN.1 DER form. // // This helper only supports RSA/PSS keys (with OID 1.2.840.113549.1.1.10). // diff --git a/sdk/helper/ldaputil/client.go b/sdk/helper/ldaputil/client.go index b0e1187d56d8..a1901fdcb6dc 100644 --- a/sdk/helper/ldaputil/client.go +++ b/sdk/helper/ldaputil/client.go @@ -8,11 +8,13 @@ import ( "crypto/tls" "crypto/x509" "encoding/binary" + "encoding/hex" "fmt" "math" "net" "net/url" "strings" + "sync" "text/template" "time" @@ -31,6 +33,7 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { var retErr *multierror.Error var conn Connection urls := strings.Split(cfg.Url, ",") + for _, uut := range urls { u, err := url.Parse(uut) if err != nil { @@ -43,12 +46,20 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { } var tlsConfig *tls.Config + dialer := net.Dialer{ + Timeout: time.Duration(cfg.ConnectionTimeout) * time.Second, + } + switch u.Scheme { case "ldap": if port == "" { port = "389" } - conn, err = c.LDAP.Dial("tcp", net.JoinHostPort(host, port)) + + fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port)) + opt := ldap.DialWithDialer(&dialer) + + conn, err = c.LDAP.DialURL(fullAddr, opt) if err != nil { break } @@ -71,7 +82,15 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { if err != nil { break } - conn, err = c.LDAP.DialTLS("tcp", net.JoinHostPort(host, port), tlsConfig) + + fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port)) + opt := ldap.DialWithDialer(&dialer) + tls := ldap.DialWithTLSConfig(tlsConfig) + + conn, err = c.LDAP.DialURL(fullAddr, opt, tls) + if err != nil { + break + } default: retErr = multierror.Append(retErr, fmt.Errorf("invalid LDAP scheme in url %q", net.JoinHostPort(host, port))) continue @@ -210,7 +229,11 @@ func (c *Client) RenderUserSearchFilter(cfg *ConfigEntry, username string) (stri } if cfg.UPNDomain != "" { context.UserAttr = "userPrincipalName" - context.Username = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain) + // Intentionally, calling EscapeFilter(...) (vs EscapeValue) since the + // username is being injected into a search filter. 
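+	// (For example, ldap.EscapeFilter("foo)(x=*") yields "foo\29\28x=\2a",
+	// neutralizing the RFC 4515 filter metacharacters.)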
+	// As an untrusted string, the username must be escaped according to RFC
+	// 4515, in order to prevent attackers from injecting characters that could modify the filter.
+	context.Username = fmt.Sprintf("%s@%s", ldap.EscapeFilter(username), cfg.UPNDomain)
 	}
 
 	// Execute the template. Note that the template context contains escaped input and does
@@ -415,7 +438,7 @@ func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn Pagi
 			cfg.GroupAttr,
 		},
 		SizeLimit: math.MaxInt32,
-	}, math.MaxInt32)
+	}, uint32(cfg.MaximumPageSize))
 	if err != nil {
 		return nil, fmt.Errorf("LDAP search failed: %w", err)
 	}
@@ -456,6 +479,11 @@ func sidBytesToString(b []byte) (string, error) {
 }
 
 func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection, userDN string) ([]*ldap.Entry, error) {
+	var wg sync.WaitGroup
+	var lock sync.Mutex
+	taskChan := make(chan string)
+	maxWorkers := 10
+
 	result, err := conn.Search(&ldap.SearchRequest{
 		BaseDN:       userDN,
 		Scope:        ldap.ScopeBaseObject,
@@ -476,37 +504,53 @@ func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection,
 	userEntry := result.Entries[0]
 	groupAttrValues := userEntry.GetRawAttributeValues("tokenGroups")
-	groupEntries := make([]*ldap.Entry, 0, len(groupAttrValues))
+	groupEntries := make([]*ldap.Entry, 0, len(groupAttrValues))
+
+	for i := 0; i < maxWorkers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			for sid := range taskChan {
+				groupResult, err := conn.Search(&ldap.SearchRequest{
+					BaseDN:       fmt.Sprintf("<SID=%s>", sid),
+					Scope:        ldap.ScopeBaseObject,
+					DerefAliases: ldapDerefAliasMap[cfg.DerefAliases],
+					Filter:       "(objectClass=*)",
+					Attributes: []string{
+						"1.1", // RFC no attributes
+					},
+					SizeLimit: 1,
+				})
+				if err != nil {
+					c.Logger.Warn("unable to read the group sid", "sid", sid)
+					continue
+				}
+
+				if len(groupResult.Entries) == 0 {
+					c.Logger.Warn("unable to find the group", "sid", sid)
+					continue
+				}
+
+				lock.Lock()
+				groupEntries = append(groupEntries, groupResult.Entries[0])
+				lock.Unlock()
+			}
+		}()
+	}
+
 	for _, sidBytes := range groupAttrValues {
 		sidString, err := sidBytesToString(sidBytes)
 		if err != nil {
 			c.Logger.Warn("unable to read sid", "err", err)
 			continue
 		}
-
-		groupResult, err := conn.Search(&ldap.SearchRequest{
-			BaseDN:       fmt.Sprintf("<SID=%s>", sidString),
-			Scope:        ldap.ScopeBaseObject,
-			DerefAliases: ldapDerefAliasMap[cfg.DerefAliases],
-			Filter:       "(objectClass=*)",
-			Attributes: []string{
-				"1.1", // RFC no attributes
-			},
-			SizeLimit: 1,
-		})
-		if err != nil {
-			c.Logger.Warn("unable to read the group sid", "sid", sidString)
-			continue
-		}
-		if len(groupResult.Entries) == 0 {
-			c.Logger.Warn("unable to find the group", "sid", sidString)
-			continue
-		}
-
-		groupEntries = append(groupEntries, groupResult.Entries[0])
+		taskChan <- sidString
 	}
 
+	close(taskChan)
+	wg.Wait()
+
 	return groupEntries, nil
 }
 
@@ -536,7 +580,7 @@ func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string,
 	if cfg.UseTokenGroups {
 		entries, err = c.performLdapTokenGroupsSearch(cfg, conn, userDN)
 	} else {
-		if paging, ok := conn.(PagingConnection); ok {
+		if paging, ok := conn.(PagingConnection); ok && cfg.MaximumPageSize > 0 {
 			entries, err = c.performLdapFilterGroupsSearchPaging(cfg, paging, userDN, username)
 		} else {
 			entries, err = c.performLdapFilterGroupsSearch(cfg, conn, userDN, username)
@@ -578,42 +622,59 @@ func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string,
 }
 
 // EscapeLDAPValue is exported because a plugin uses it outside this package.
@@ -578,42 +622,59 @@ func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string,
 }
 
 // EscapeLDAPValue is exported because a plugin uses it outside this package.
+// EscapeLDAPValue will properly escape the input string as an LDAP value.
+// RFC 4514 states the following must be escaped:
+// - leading space or hash
+// - trailing space
+// - special characters '"', '+', ',', ';', '<', '>', '\\'
+// - any byte outside printable ASCII, which is written as escaped hex
 func EscapeLDAPValue(input string) string {
 	if input == "" {
 		return ""
 	}
-	// RFC4514 forbids un-escaped:
-	// - leading space or hash
-	// - trailing space
-	// - special characters '"', '+', ',', ';', '<', '>', '\\'
-	// - null
-	for i := 0; i < len(input); i++ {
-		escaped := false
-		if input[i] == '\\' && i+1 < len(input)-1 {
-			i++
-			escaped = true
-		}
-		switch input[i] {
-		case '"', '+', ',', ';', '<', '>', '\\':
-			if !escaped {
-				input = input[0:i] + "\\" + input[i:]
-				i++
-			}
+	buf := bytes.Buffer{}
+
+	escFn := func(c byte) {
+		buf.WriteByte('\\')
+		buf.WriteByte(c)
+	}
+
+	inputLen := len(input)
+	for i := 0; i < inputLen; i++ {
+		char := input[i]
+		switch {
+		case i == 0 && (char == ' ' || char == '#'):
+			// leading space or hash.
+			escFn(char)
 			continue
+		case i == inputLen-1 && char == ' ':
+			// trailing space.
+			escFn(char)
+			continue
+		case specialChar(char):
+			escFn(char)
+			continue
+		case char < ' ' || char > '~':
+			// anything outside the ASCII range from space to tilde must be hex escaped
+			buf.WriteByte('\\')
+			buf.WriteString(hex.EncodeToString([]byte{char}))
+			continue
+		default:
+			// everything remaining doesn't need to be escaped
+			buf.WriteByte(char)
 		}
-		if escaped {
-			input = input[0:i] + "\\" + input[i:]
-			i++
-		}
-	}
-	if input[0] == ' ' || input[0] == '#' {
-		input = "\\" + input
 	}
-	if input[len(input)-1] == ' ' {
-		input = input[0:len(input)-1] + "\\ "
+	return buf.String()
+}
+
+func specialChar(char byte) bool {
+	switch char {
+	case '"', '+', ',', ';', '<', '>', '\\':
+		return true
+	default:
+		return false
 	}
-	return input
 }
 
 /*
diff --git a/sdk/helper/ldaputil/client_test.go b/sdk/helper/ldaputil/client_test.go
index 167d50f22d62..dcce9c6e0e85 100644
--- a/sdk/helper/ldaputil/client_test.go
+++ b/sdk/helper/ldaputil/client_test.go
@@ -7,6 +7,8 @@ import (
 	"testing"
 
 	"github.com/hashicorp/go-hclog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // TestDialLDAP duplicates a potential panic that was
@@ -29,15 +31,20 @@ func TestDialLDAP(t *testing.T) {
 
 func TestLDAPEscape(t *testing.T) {
 	testcases := map[string]string{
-		"#test":       "\\#test",
-		"test,hello":  "test\\,hello",
-		"test,hel+lo": "test\\,hel\\+lo",
-		"test\\hello": "test\\\\hello",
-		" test ":      "\\ test \\ ",
-		"":            "",
-		"\\test":      "\\\\test",
-		"test\\":      "test\\\\",
-		"test\\ ":     "test\\\\\\ ",
+		"#test":        "\\#test",
+		"test,hello":   "test\\,hello",
+		"test,hel+lo":  "test\\,hel\\+lo",
+		"test\\hello":  "test\\\\hello",
+		" test ":       "\\ test \\ ",
+		"":             "",
+		`\`:            `\\`,
+		"trailing\000": `trailing\00`,
+		"mid\000dle":   `mid\00dle`,
+		"\000":         `\00`,
+		"multiple\000\000":            `multiple\00\00`,
+		"backslash-before-null\\\000": `backslash-before-null\\\00`,
+		"trailing\\":                  `trailing\\`,
+		"double-escaping\\>":          `double-escaping\\\>`,
 	}
 
 	for test, answer := range testcases {
@@ -88,3 +95,58 @@ func TestSIDBytesToString(t *testing.T) {
 		}
 	}
 }
+
+func TestClient_renderUserSearchFilter(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name        string
+		conf        *ConfigEntry
+		username    string
+		want        string
+		errContains string
+	}{
+		{
+			name:     "valid-default",
+			username: "alice",
+			conf: &ConfigEntry{
+				UserAttr: "cn",
+			},
+			want: "(cn=alice)",
+		},
+		{
+			name: "escaped-malicious-filter",
+			username: 
"foo@example.com)((((((((((((((((((((((((((((((((((((((userPrincipalName=foo", + conf: &ConfigEntry{ + UPNDomain: "example.com", + UserFilter: "(&({{.UserAttr}}={{.Username}})({{.UserAttr}}=admin@example.com))", + }, + want: "(&(userPrincipalName=foo@example.com\\29\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28userPrincipalName=foo@example.com)(userPrincipalName=admin@example.com))", + }, + { + name: "bad-filter-unclosed-action", + username: "alice", + conf: &ConfigEntry{ + UserFilter: "hello{{range", + }, + errContains: "search failed due to template compilation error", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + c := Client{ + Logger: hclog.NewNullLogger(), + LDAP: NewLDAP(), + } + + f, err := c.RenderUserSearchFilter(tc.conf, tc.username) + if tc.errContains != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errContains) + return + } + require.NoError(t, err) + assert.NotEmpty(t, f) + assert.Equal(t, tc.want, f) + }) + } +} diff --git a/sdk/helper/ldaputil/config.go b/sdk/helper/ldaputil/config.go index df4996af7511..9044b19fdaa1 100644 --- a/sdk/helper/ldaputil/config.go +++ b/sdk/helper/ldaputil/config.go @@ -12,12 +12,11 @@ import ( "strings" "text/template" + "github.com/go-ldap/ldap/v3" + capldap "github.com/hashicorp/cap/ldap" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/tlsutil" "github.com/hashicorp/vault/sdk/framework" - - "github.com/hashicorp/errwrap" - - "github.com/go-ldap/ldap/v3" ) var ldapDerefAliasMap = map[string]int{ @@ -239,12 +238,24 @@ Default: ({{.UserAttr}}={{.Username}})`, Default: "90s", }, + "connection_timeout": { + Type: framework.TypeDurationSecond, + Description: "Timeout, in seconds, when attempting to connect to the LDAP server before trying the next URL in the configuration.", + Default: "30s", + }, + "dereference_aliases": { Type: framework.TypeString, Description: "When aliases should be dereferenced on search operations. Accepted values are 'never', 'finding', 'searching', 'always'. Defaults to 'never'.", Default: "never", AllowedValues: []interface{}{"never", "finding", "searching", "always"}, }, + + "max_page_size": { + Type: framework.TypeInt, + Description: "If set to a value greater than 0, the LDAP backend will use the LDAP server's paged search control to request pages of up to the given size. This can be used to avoid hitting the LDAP server's maximum result size limit. 
Otherwise, the LDAP backend will not use the paged search control.", + Default: 0, + }, } } @@ -411,10 +422,18 @@ func NewConfigEntry(existing *ConfigEntry, d *framework.FieldData) (*ConfigEntry cfg.RequestTimeout = d.Get("request_timeout").(int) } + if _, ok := d.Raw["connection_timeout"]; ok || !hadExisting { + cfg.ConnectionTimeout = d.Get("connection_timeout").(int) + } + if _, ok := d.Raw["dereference_aliases"]; ok || !hadExisting { cfg.DerefAliases = d.Get("dereference_aliases").(string) } + if _, ok := d.Raw["max_page_size"]; ok || !hadExisting { + cfg.MaximumPageSize = d.Get("max_page_size").(int) + } + return cfg, nil } @@ -441,7 +460,9 @@ type ConfigEntry struct { UseTokenGroups bool `json:"use_token_groups"` UsePre111GroupCNBehavior *bool `json:"use_pre111_group_cn_behavior"` RequestTimeout int `json:"request_timeout"` + ConnectionTimeout int `json:"connection_timeout"` // deprecated: use RequestTimeout DerefAliases string `json:"dereference_aliases"` + MaximumPageSize int `json:"max_page_size"` // These json tags deviate from snake case because there was a past issue // where the tag was being ignored, causing it to be jsonified as "CaseSensitiveNames", etc. @@ -479,8 +500,10 @@ func (c *ConfigEntry) PasswordlessMap() map[string]interface{} { "use_token_groups": c.UseTokenGroups, "anonymous_group_search": c.AnonymousGroupSearch, "request_timeout": c.RequestTimeout, + "connection_timeout": c.ConnectionTimeout, "username_as_alias": c.UsernameAsAlias, "dereference_aliases": c.DerefAliases, + "max_page_size": c.MaximumPageSize, } if c.CaseSensitiveNames != nil { m["case_sensitive_names"] = *c.CaseSensitiveNames @@ -535,3 +558,55 @@ func (c *ConfigEntry) Validate() error { } return nil } + +func ConvertConfig(cfg *ConfigEntry) *capldap.ClientConfig { + // cap/ldap doesn't have a notion of connection_timeout, and uses a single timeout value for + // both the net.Dialer and ldap connection timeout. + // So take the smaller of the two values and use that as the timeout value. 
+ minTimeout := min(cfg.ConnectionTimeout, cfg.RequestTimeout) + urls := strings.Split(cfg.Url, ",") + config := &capldap.ClientConfig{ + URLs: urls, + UserDN: cfg.UserDN, + AnonymousGroupSearch: cfg.AnonymousGroupSearch, + GroupDN: cfg.GroupDN, + GroupFilter: cfg.GroupFilter, + GroupAttr: cfg.GroupAttr, + UPNDomain: cfg.UPNDomain, + UserFilter: cfg.UserFilter, + UserAttr: cfg.UserAttr, + ClientTLSCert: cfg.ClientTLSCert, + ClientTLSKey: cfg.ClientTLSKey, + InsecureTLS: cfg.InsecureTLS, + StartTLS: cfg.StartTLS, + BindDN: cfg.BindDN, + BindPassword: cfg.BindPassword, + AllowEmptyPasswordBinds: !cfg.DenyNullBind, + DiscoverDN: cfg.DiscoverDN, + TLSMinVersion: cfg.TLSMinVersion, + TLSMaxVersion: cfg.TLSMaxVersion, + UseTokenGroups: cfg.UseTokenGroups, + RequestTimeout: minTimeout, + IncludeUserAttributes: true, + ExcludedUserAttributes: nil, + IncludeUserGroups: true, + LowerUserAttributeKeys: true, + AllowEmptyAnonymousGroupSearch: true, + MaximumPageSize: cfg.MaximumPageSize, + DerefAliases: cfg.DerefAliases, + DeprecatedVaultPre111GroupCNBehavior: cfg.UsePre111GroupCNBehavior, + } + + if cfg.Certificate != "" { + config.Certificates = []string{cfg.Certificate} + } + + return config +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/sdk/helper/ldaputil/config_test.go b/sdk/helper/ldaputil/config_test.go index 62be2a182fd6..b7fd22ccbb2d 100644 --- a/sdk/helper/ldaputil/config_test.go +++ b/sdk/helper/ldaputil/config_test.go @@ -74,15 +74,16 @@ func testConfig(t *testing.T) *ConfigEntry { t.Helper() return &ConfigEntry{ - Url: "ldap://138.91.247.105", - UserDN: "example,com", - BindDN: "kitty", - BindPassword: "cats", - TLSMaxVersion: "tls12", - TLSMinVersion: "tls12", - RequestTimeout: 30, - ClientTLSCert: "", - ClientTLSKey: "", + Url: "ldap://138.91.247.105", + UserDN: "example,com", + BindDN: "kitty", + BindPassword: "cats", + TLSMaxVersion: "tls12", + TLSMinVersion: "tls12", + RequestTimeout: 30, + ConnectionTimeout: 15, + ClientTLSCert: "", + ClientTLSKey: "", } } @@ -141,6 +142,7 @@ var jsonConfig = []byte(`{ "tls_max_version": "tls12", "tls_min_version": "tls12", "request_timeout": 30, + "connection_timeout": 15, "ClientTLSCert": "", "ClientTLSKey": "" }`) @@ -171,7 +173,9 @@ var jsonConfigDefault = []byte(` "use_pre111_group_cn_behavior": null, "username_as_alias": false, "request_timeout": 90, + "connection_timeout": 30, "dereference_aliases": "never", + "max_page_size": 0, "CaseSensitiveNames": false, "ClientTLSCert": "", "ClientTLSKey": "" diff --git a/sdk/helper/ldaputil/connection.go b/sdk/helper/ldaputil/connection.go index c33ad403f78e..2e4ab54ee1ef 100644 --- a/sdk/helper/ldaputil/connection.go +++ b/sdk/helper/ldaputil/connection.go @@ -14,7 +14,7 @@ import ( // but through an interface. type Connection interface { Bind(username, password string) error - Close() + Close() error Add(addRequest *ldap.AddRequest) error Modify(modifyRequest *ldap.ModifyRequest) error Del(delRequest *ldap.DelRequest) error diff --git a/sdk/helper/ldaputil/ldap.go b/sdk/helper/ldaputil/ldap.go index f03fa8948460..bdf746e5c8cd 100644 --- a/sdk/helper/ldaputil/ldap.go +++ b/sdk/helper/ldaputil/ldap.go @@ -4,8 +4,6 @@ package ldaputil import ( - "crypto/tls" - "github.com/go-ldap/ldap/v3" ) @@ -16,16 +14,11 @@ func NewLDAP() LDAP { // LDAP provides ldap functionality, but through an interface // rather than statically. This allows faking it for tests. 
type LDAP interface { - Dial(network, addr string) (Connection, error) - DialTLS(network, addr string, config *tls.Config) (Connection, error) + DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) } type ldapIfc struct{} -func (l *ldapIfc) Dial(network, addr string) (Connection, error) { - return ldap.Dial(network, addr) -} - -func (l *ldapIfc) DialTLS(network, addr string, config *tls.Config) (Connection, error) { - return ldap.DialTLS(network, addr, config) +func (l *ldapIfc) DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) { + return ldap.DialURL(addr, opts...) } diff --git a/sdk/helper/locksutil/locks.go b/sdk/helper/locksutil/locks.go index c7538b63b4f7..2711098247a3 100644 --- a/sdk/helper/locksutil/locks.go +++ b/sdk/helper/locksutil/locks.go @@ -7,12 +7,18 @@ import ( "sync" "github.com/hashicorp/vault/sdk/helper/cryptoutil" + "github.com/sasha-s/go-deadlock" ) const ( LockCount = 256 ) +// DeadlockRWMutex is the RW version of DeadlockMutex. +type DeadlockRWMutex struct { + deadlock.RWMutex +} + type LockEntry struct { sync.RWMutex } @@ -36,6 +42,14 @@ func CreateLocks() []*LockEntry { return ret } +func CreateLocksWithDeadlockDetection() []*DeadlockRWMutex { + ret := make([]*DeadlockRWMutex, LockCount) + for i := range ret { + ret[i] = new(DeadlockRWMutex) + } + return ret +} + func LockIndexForKey(key string) uint8 { return uint8(cryptoutil.Blake2b256Hash(key)[0]) } @@ -59,3 +73,23 @@ func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry { return locksToReturn } + +func LockForKeyWithDeadLockDetection(locks []*DeadlockRWMutex, key string) *DeadlockRWMutex { + return locks[LockIndexForKey(key)] +} + +func LocksForKeysWithDeadLockDetection(locks []*DeadlockRWMutex, keys []string) []*DeadlockRWMutex { + lockIndexes := make(map[uint8]struct{}, len(keys)) + for _, k := range keys { + lockIndexes[LockIndexForKey(k)] = struct{}{} + } + + locksToReturn := make([]*DeadlockRWMutex, 0, len(keys)) + for i, l := range locks { + if _, ok := lockIndexes[uint8(i)]; ok { + locksToReturn = append(locksToReturn, l) + } + } + + return locksToReturn +} diff --git a/sdk/helper/metricregistry/metricregistry.go b/sdk/helper/metricregistry/metricregistry.go new file mode 100644 index 000000000000..30e8d44cb19b --- /dev/null +++ b/sdk/helper/metricregistry/metricregistry.go @@ -0,0 +1,107 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package metricregistry is a helper that allows Vault code or plugins that are +// compiled into Vault to pre-define any metrics they will emit to go-metrics at +// init time. Metrics registered this way will always be reported by the +// go-metrics PrometheusSink if it is used so infrequently updated metrics are +// always present. It is not required to pre-register metrics to use go-metrics +// with Prometheus, but it's preferable as it makes them behave more like the +// Prometheus ecosystem expects, being always present and with a helpful +// description in the output which some systems use to help operators explore +// metrics. +// +// Note that this will not work for external Vault plugins since they are in a +// separate process and only started after Vault's metrics sink is already +// configured. 
+package metricregistry
+
+import promsink "github.com/armon/go-metrics/prometheus"
+
+var Registry definitionRegistry
+
+// Re-export these types so that we don't have the whole of Vault depending
+// directly on the go-metrics prometheus sink, and so we can buffer changes if needed
+type (
+	// GaugeDefinition provides the name and help text of a gauge metric that will
+	// be exported via go-metrics' Prometheus sink if enabled.
+	GaugeDefinition promsink.GaugeDefinition
+
+	// CounterDefinition provides the name and help text of a counter metric that
+	// will be exported via go-metrics' Prometheus sink if enabled.
+	CounterDefinition promsink.CounterDefinition
+
+	// SummaryDefinition provides the name and help text of a summary metric that
+	// will be exported via go-metrics' Prometheus sink if enabled.
+	SummaryDefinition promsink.SummaryDefinition
+)
+
+// definitionRegistry is a central place for packages to register their metrics
+// definitions during init so that we can correctly report metrics to Prometheus
+// even before they are observed. Typically there is one global instance.
+type definitionRegistry struct {
+	gauges    []GaugeDefinition
+	counters  []CounterDefinition
+	summaries []SummaryDefinition
+}
+
+// RegisterGauges is intended to be called during init. It accesses global state
+// without synchronization. Statically defined definitions should be registered
+// during `init` of a package so they are ready if the prometheus sink is
+// enabled in configuration. Registering metrics is not mandatory, but it is
+// strongly preferred as it ensures they are always output even before they are
+// observed, which makes dashboards much easier to work with, provides helpful
+// descriptions, and matches Prometheus ecosystem expectations. It also prevents
+// the metrics from ever being expired, which means users don't need to work around
+// that quirk of go-metrics by setting long prometheus retention times. All
+// registered metrics will report 0 until an actual observation is made.
+func RegisterGauges(defs []GaugeDefinition) {
+	Registry.gauges = append(Registry.gauges, defs...)
+}
+
+// RegisterCounters is intended to be called during init. It accesses global
+// state without synchronization. Statically defined definitions should be
+// registered during `init` of a package so they are ready if the prometheus
+// sink is enabled in configuration. Registering metrics is not mandatory, but it
+// is strongly preferred as it ensures they are always output even before they
+// are observed, which makes dashboards much easier to work with, provides
+// helpful descriptions, and matches Prometheus ecosystem expectations. It also
+// prevents the metrics from ever being expired, which means users don't need to work
+// around that quirk of go-metrics by setting long prometheus retention times.
+// All registered metrics will report 0 until an actual observation is made.
+func RegisterCounters(defs []CounterDefinition) {
+	Registry.counters = append(Registry.counters, defs...)
+}
+
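As a usage sketch (hypothetical package and metric names; assumes the go-metrics prometheus sink's NewPrometheusSinkFrom constructor): a package pre-registers its definitions in init, and the process merges them into the sink options once, at startup.

package mypkg

import (
	promsink "github.com/armon/go-metrics/prometheus"

	"github.com/hashicorp/vault/sdk/helper/metricregistry"
)

func init() {
	// Pre-register so the gauge is always exported (reporting 0) even
	// before the first observation.
	metricregistry.RegisterGauges([]metricregistry.GaugeDefinition{
		{Name: []string{"mypkg", "active_widgets"}, Help: "Number of active widgets"},
	})
}

// buildSink runs once at startup, after all init functions have registered.
func buildSink() (*promsink.PrometheusSink, error) {
	var opts promsink.PrometheusOpts
	metricregistry.MergeDefinitions(&opts)
	return promsink.NewPrometheusSinkFrom(opts)
}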
+// RegisterSummaries is intended to be called during init. It accesses global
+// state without synchronization. Statically defined definitions should be
+// registered during `init` of a package so they are ready if the prometheus
+// sink is enabled in configuration. Registering metrics is not mandatory, but it
+// is strongly preferred as it ensures they are always output even before they
+// are observed, which makes dashboards much easier to work with, provides
+// helpful descriptions, and matches Prometheus ecosystem expectations. It also
+// prevents the metrics from ever being expired, which means users don't need to work
+// around that quirk of go-metrics by setting long prometheus retention times.
+// All registered metrics will report 0 until an actual observation is made.
+func RegisterSummaries(defs []SummaryDefinition) {
+	Registry.summaries = append(Registry.summaries, defs...)
+}
+
+// MergeDefinitions adds all registered metrics to any already present in `cfg`,
+// ready to be passed to the go-metrics prometheus sink. Note that it is not safe
+// to call this concurrently with registrations or other calls; it is intended to
+// be called only once, after all registrations (which should be in init
+// functions), just before the PrometheusSink is created. Calling it more than once
+// could result in duplicate metrics definitions being passed, unless the cfg is
+// different each time for different Prometheus sinks.
+func MergeDefinitions(cfg *promsink.PrometheusOpts) {
+	for _, g := range Registry.gauges {
+		cfg.GaugeDefinitions = append(cfg.GaugeDefinitions, promsink.GaugeDefinition(g))
+	}
+	for _, c := range Registry.counters {
+		cfg.CounterDefinitions = append(cfg.CounterDefinitions, promsink.CounterDefinition(c))
+	}
+	for _, s := range Registry.summaries {
+		cfg.SummaryDefinitions = append(cfg.SummaryDefinitions, promsink.SummaryDefinition(s))
+	}
+}
diff --git a/sdk/helper/metricregistry/metricregistry_test.go b/sdk/helper/metricregistry/metricregistry_test.go
new file mode 100644
index 000000000000..c94e8c821dcc
--- /dev/null
+++ b/sdk/helper/metricregistry/metricregistry_test.go
@@ -0,0 +1,122 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package metricregistry + +import ( + "testing" + + promsink "github.com/armon/go-metrics/prometheus" + "github.com/stretchr/testify/require" +) + +var testGauges = []GaugeDefinition{ + { + Name: []string{"test_gauge"}, + Help: "A test gauge", + }, + { + Name: []string{"test_gauge2"}, + Help: "Another test gauge", + }, +} + +var testCounters = []CounterDefinition{ + { + Name: []string{"test_counter"}, + Help: "A test counter", + }, + { + Name: []string{"test_counter2"}, + Help: "Another test counter", + }, +} + +var testSummaries = []SummaryDefinition{ + { + Name: []string{"test_summary"}, + Help: "A test summary", + }, + { + Name: []string{"test_summary2"}, + Help: "Another test summary", + }, +} + +func TestMetricRegistry(t *testing.T) { + // Register some metrics + RegisterGauges(testGauges) + RegisterCounters(testCounters) + RegisterSummaries(testSummaries) + + var opts promsink.PrometheusOpts + + // Add some pre-existing metrics to ensure merge is really a merge + opts.GaugeDefinitions = []promsink.GaugeDefinition{ + { + Name: []string{"preexisting_gauge"}, + Help: "A pre-existing gauge", + }, + } + opts.CounterDefinitions = []promsink.CounterDefinition{ + { + Name: []string{"preexisting_counter"}, + Help: "A pre-existing counter", + }, + } + opts.SummaryDefinitions = []promsink.SummaryDefinition{ + { + Name: []string{"preexisting_summary"}, + Help: "A pre-existing summary", + }, + } + + MergeDefinitions(&opts) + + require.Len(t, opts.GaugeDefinitions, 3) + require.Len(t, opts.CounterDefinitions, 3) + require.Len(t, opts.SummaryDefinitions, 3) + + wantGauges := []string{"test_gauge", "test_gauge2", "preexisting_gauge"} + wantGaugeHelp := []string{"A test gauge", "Another test gauge", "A pre-existing gauge"} + gotGauges := reduce(opts.GaugeDefinitions, nil, func(r []string, d promsink.GaugeDefinition) []string { + return append(r, d.Name[0]) + }) + gotGaugeHelp := reduce(opts.GaugeDefinitions, nil, func(r []string, d promsink.GaugeDefinition) []string { + return append(r, d.Help) + }) + + require.ElementsMatch(t, wantGauges, gotGauges) + require.ElementsMatch(t, wantGaugeHelp, gotGaugeHelp) + + wantCounters := []string{"test_counter", "test_counter2", "preexisting_counter"} + wantCounterHelp := []string{"A test counter", "Another test counter", "A pre-existing counter"} + gotCounters := reduce(opts.CounterDefinitions, nil, func(r []string, d promsink.CounterDefinition) []string { + return append(r, d.Name[0]) + }) + gotCounterHelp := reduce(opts.CounterDefinitions, nil, func(r []string, d promsink.CounterDefinition) []string { + return append(r, d.Help) + }) + + require.ElementsMatch(t, wantCounters, gotCounters) + require.ElementsMatch(t, wantCounterHelp, gotCounterHelp) + + wantSummaries := []string{"test_summary", "test_summary2", "preexisting_summary"} + wantSummaryHelp := []string{"A test summary", "Another test summary", "A pre-existing summary"} + gotSummaries := reduce(opts.SummaryDefinitions, nil, func(r []string, d promsink.SummaryDefinition) []string { + return append(r, d.Name[0]) + }) + gotSummaryHelp := reduce(opts.SummaryDefinitions, nil, func(r []string, d promsink.SummaryDefinition) []string { + return append(r, d.Help) + }) + + require.ElementsMatch(t, wantSummaries, gotSummaries) + require.ElementsMatch(t, wantSummaryHelp, gotSummaryHelp) +} + +func reduce[T, R any](s []T, r R, f func(R, T) R) R { + for _, v := range s { + r = f(r, v) + } + return r +} diff --git a/sdk/helper/ocsp/client.go b/sdk/helper/ocsp/client.go index 
f30da3ec5f14..71f75f168a4a 100644
--- a/sdk/helper/ocsp/client.go
+++ b/sdk/helper/ocsp/client.go
@@ -31,6 +31,8 @@ import (
 	"golang.org/x/crypto/ocsp"
 )
 
+//go:generate enumer -type=FailOpenMode -trimprefix=FailOpen
+
 // FailOpenMode is OCSP fail open mode. FailOpenTrue by default; it may
 // be set to ocspModeFailClosed for fail-closed mode
 type FailOpenMode uint32
@@ -76,6 +78,15 @@ const (
 	cacheExpire = float64(24 * 60 * 60)
 )
 
+// ErrOcspIssuerVerification indicates that an error occurred while verifying the identity of an OCSP response
+type ErrOcspIssuerVerification struct {
+	Err error
+}
+
+func (e *ErrOcspIssuerVerification) Error() string {
+	return fmt.Sprintf("ocsp response verification error: %v", e.Err)
+}
+
 type ocspCachedResponse struct {
 	time       float64
 	producedAt float64
@@ -162,9 +173,27 @@ func (c *Client) getHashAlgorithmFromOID(target pkix.AlgorithmIdentifier) crypto
 	return crypto.SHA1
 }
 
-// isInValidityRange checks the validity
-func isInValidityRange(currTime, nextUpdate time.Time) bool {
-	return !nextUpdate.IsZero() && !currTime.After(nextUpdate)
+// isInValidityRange checks the validity times of the OCSP response, making sure
+// that currTime falls within the window bounded by thisUpdate and nextUpdate
+func isInValidityRange(currTime time.Time, ocspRes *ocsp.Response) bool {
+	thisUpdate := ocspRes.ThisUpdate
+
+	// If the thisUpdate value in the OCSP response wasn't set, or is greater than the current time, fail this check
+	if thisUpdate.IsZero() || thisUpdate.After(currTime) {
+		return false
+	}
+
+	nextUpdate := ocspRes.NextUpdate
+	if nextUpdate.IsZero() {
+		// We don't have a nextUpdate field set, assume we are okay.
+		return true
+	}
+
+	if currTime.After(nextUpdate) || thisUpdate.After(nextUpdate) {
+		return false
+	}
+
+	return true
 }
 
 func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) {
@@ -209,7 +238,7 @@ func (c *Client) encodeCertIDKey(certIDKeyBase64 string) (*certIDKey, error) {
 	}, nil
 }
 
-func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) (*ocspStatus, error) {
+func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate, config *VerifyConfig) (*ocspStatus, error) {
 	c.ocspResponseCacheLock.RLock()
 	var cacheValue *ocspCachedResponse
 	v, ok := c.ocspResponseCache.Get(*encodedCertID)
@@ -218,7 +247,7 @@ func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issue
 	}
 	c.ocspResponseCacheLock.RUnlock()
 
-	status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer)
+	status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer, config)
 	if err != nil {
 		return nil, err
 	}
@@ -235,18 +264,25 @@ func (c *Client) deleteOCSPCache(encodedCertID *certIDKey) {
 	c.ocspResponseCacheLock.Unlock()
 }
 
-func validateOCSP(ocspRes *ocsp.Response) (*ocspStatus, error) {
+func validateOCSP(conf *VerifyConfig, ocspRes *ocsp.Response) (*ocspStatus, error) {
 	curTime := time.Now()
 
 	if ocspRes == nil {
 		return nil, errors.New("OCSP Response is nil")
 	}
-	if !isInValidityRange(curTime, ocspRes.NextUpdate) {
+	if !isInValidityRange(curTime, ocspRes) {
 		return &ocspStatus{
 			code: ocspInvalidValidity,
 			err:  fmt.Errorf("invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v", ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate),
 		}, nil
 	}
+
+	if conf.OcspThisUpdateMaxAge > 0 && curTime.Sub(ocspRes.ThisUpdate) > conf.OcspThisUpdateMaxAge {
+		return &ocspStatus{
+			code: ocspInvalidValidity,
+			err:  fmt.Errorf("invalid validity: thisUpdate: %v is greater than
max age: %s", ocspRes.ThisUpdate, conf.OcspThisUpdateMaxAge), + }, nil + } return returnOCSPStatus(ocspRes), nil } @@ -283,7 +319,9 @@ func (c *Client) retryOCSP( ocspHost *url.URL, headers map[string]string, reqBody []byte, + subject, issuer *x509.Certificate, + extraCas []*x509.Certificate, ) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, retErr error) { doRequest := func(request *retryablehttp.Request) (*http.Response, error) { if request != nil { @@ -347,13 +385,28 @@ func (c *Client) retryOCSP( // endpoint might return invalid results for e.g., GET but return // valid results for POST on retry. This could happen if e.g., the // server responds with JSON. - ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) + ocspRes, err = ocsp.ParseResponse(ocspResBytes /*issuer = */, nil /* !!unsafe!! */) if err != nil { err = fmt.Errorf("error parsing %v OCSP response: %w", method, err) retErr = multierror.Append(retErr, err) continue } + if err := validateOCSPParsedResponse(ocspRes, subject, issuer, extraCas); err != nil { + err = fmt.Errorf("error validating %v OCSP response: %w", method, err) + + if IsOcspVerificationError(err) { + // We want to immediately give up on a verification error to a response + // and inform the user something isn't correct + return nil, nil, nil, err + } + + retErr = multierror.Append(retErr, err) + // Clear the response out as we can't trust it. + ocspRes = nil + continue + } + // While we haven't validated the signature on the OCSP response, we // got what we presume is a definitive answer and simply changing // methods will likely not help us in that regard. Use this status @@ -386,9 +439,143 @@ func (c *Client) retryOCSP( return } +func IsOcspVerificationError(err error) bool { + errOcspIssuer := &ErrOcspIssuerVerification{} + return errors.As(err, &errOcspIssuer) +} + +func validateOCSPParsedResponse(ocspRes *ocsp.Response, subject, issuer *x509.Certificate, extraCas []*x509.Certificate) error { + // Above, we use the unsafe issuer=nil parameter to ocsp.ParseResponse + // because Go's library does the wrong thing. + // + // Here, we lack a full chain, but we know we trust the parent issuer, + // so if the Go library incorrectly discards useful certificates, we + // likely cannot verify this without passing through the full chain + // back to the root. + // + // Instead, take one of two paths: 1. if there is no certificate in + // the ocspRes, verify the OCSP response directly with our trusted + // issuer certificate, or 2. if there is a certificate, either verify + // it directly matches our trusted issuer certificate, or verify it + // is signed by our trusted issuer certificate. + // + // See also: https://github.com/golang/go/issues/59641 + // + // This addresses the !!unsafe!! behavior above. 
+	if ocspRes.Certificate == nil {
+		// With no certificate, we need to validate that the response is signed by the issuer or an extra CA
+		if err := ocspRes.CheckSignatureFrom(issuer); err != nil {
+			if len(extraCas) > 0 {
+				// Perhaps it was signed by one of the extra configured OCSP CAs
+				matchedCA, overallErr := verifySignature(ocspRes, extraCas)
+
+				if overallErr != nil {
+					return &ErrOcspIssuerVerification{fmt.Errorf("error checking chain of trust for %v: %w", issuer.Subject.String(), overallErr)}
+				}
+
+				err := validateSigner(matchedCA)
+				if err != nil {
+					return err
+				}
+			} else {
+				return &ErrOcspIssuerVerification{fmt.Errorf("error directly verifying signature: %w", err)}
+			}
+		}
+	} else {
+		// Because we have at least one certificate here, we know that
+		// Go's ocsp library verified the signature from this certificate
+		// onto the response and it was valid. Now we need to know we trust
+		// this certificate. There are three ways we can do this:
+		//
+		// 1. Via confirming issuer == ocspRes.Certificate, or
+		// 2. Via confirming ocspRes.Certificate.CheckSignatureFrom(issuer).
+		// 3. Trusting extra configured OCSP CAs
+		if !bytes.Equal(issuer.Raw, ocspRes.Certificate.Raw) {
+			var overallErr error
+			var matchedCA *x509.Certificate
+
+			// Assumption 1 failed, try 2
+			if sigFromIssuerErr := ocspRes.Certificate.CheckSignatureFrom(issuer); sigFromIssuerErr != nil {
+				if len(extraCas) > 0 {
+					// Assumption 2 failed, try 3
+					m, err := verifySignature(ocspRes, extraCas)
+					if err != nil {
+						overallErr = multierror.Append(overallErr, sigFromIssuerErr)
+						overallErr = multierror.Append(overallErr, err)
+					} else {
+						overallErr = nil
+						matchedCA = m
+					}
+				} else {
+					overallErr = multierror.Append(overallErr, sigFromIssuerErr)
+				}
+			} else {
+				matchedCA = ocspRes.Certificate
+			}
+
+			if overallErr != nil {
+				return &ErrOcspIssuerVerification{fmt.Errorf("error checking chain of trust for %v: %w", issuer.Subject.String(), overallErr)}
+			}
+
+			err := validateSigner(matchedCA)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Verify the response was for our original subject
+	if ocspRes.SerialNumber == nil || subject.SerialNumber == nil {
+		return &ErrOcspIssuerVerification{fmt.Errorf("OCSP response or cert did not contain a serial number")}
+	}
+	if ocspRes.SerialNumber.Cmp(subject.SerialNumber) != 0 {
+		return &ErrOcspIssuerVerification{fmt.Errorf(
+			"OCSP response serial number %s did not match the leaf certificate serial number %s",
+			certutil.GetHexFormatted(ocspRes.SerialNumber.Bytes(), ":"),
+			certutil.GetHexFormatted(subject.SerialNumber.Bytes(), ":"))}
+	}
+
+	return nil
+}
+
+func verifySignature(res *ocsp.Response, extraCas []*x509.Certificate) (*x509.Certificate, error) {
+	var overallErr error
+	var matchedCA *x509.Certificate
+	for _, ca := range extraCas {
+		if err := res.CheckSignatureFrom(ca); err != nil {
+			overallErr = multierror.Append(overallErr, err)
+		} else {
+			matchedCA = ca
+			overallErr = nil
+			break
+		}
+	}
+	return matchedCA, overallErr
+}
+
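validateSigner below enforces the RFC 6960 delegated-responder requirements: the matched signer must be unexpired and must carry the OCSP Signing EKU. A sketch of a responder certificate template that would satisfy those two checks (all values illustrative; self-signed only to keep the example short, where a real responder certificate is issued by the CA it answers for):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Unexpired, and carrying the OCSP Signing EKU that validateSigner requires.
	tmpl := &x509.Certificate{
		Subject:      pkix.Name{CommonName: "Delegated OCSP Responder"},
		SerialNumber: big.NewInt(42),
		NotBefore:    time.Now().Add(-1 * time.Hour),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, _ := x509.ParseCertificate(der)
	fmt.Println(cert.NotAfter, cert.ExtKeyUsage) // would pass both validateSigner checks
}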
+func validateSigner(matchedCA *x509.Certificate) error {
+	// Verify the OCSP responder certificate is still valid and
+	// contains the required EKU since it is a delegated OCSP
+	// responder certificate.
+	if matchedCA.NotAfter.Before(time.Now()) {
+		return &ErrOcspIssuerVerification{fmt.Errorf("error checking delegated OCSP responder OCSP response: certificate has expired")}
+	}
+	haveEKU := false
+	for _, ku := range matchedCA.ExtKeyUsage {
+		if ku == x509.ExtKeyUsageOCSPSigning {
+			haveEKU = true
+			break
+		}
+	}
+	if !haveEKU {
+		return &ErrOcspIssuerVerification{fmt.Errorf("error checking delegated OCSP responder: certificate lacks the OCSP Signing EKU")}
+	}
+	return nil
+}
+
 // GetRevocationStatus checks the certificate revocation status for subject using issuer certificate.
 func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) {
-	status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer)
+	status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer, conf)
 	if err != nil {
 		return nil, err
 	}
@@ -429,15 +616,17 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.
 		timeout := defaultOCSPResponderTimeout
 
 		ocspClient := retryablehttp.NewClient()
+		ocspClient.Logger = c.Logger()
+		ocspClient.RetryMax = conf.OcspMaxRetries
 		ocspClient.HTTPClient.Timeout = timeout
 		ocspClient.HTTPClient.Transport = newInsecureOcspTransport(conf.ExtraCas)
 
-		doRequest := func() error {
+		doRequest := func(i int) error {
 			if conf.QueryAllServers {
 				defer wg.Done()
 			}
 			ocspRes, _, ocspS, err := c.retryOCSP(
-				ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, issuer)
+				ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, subject, issuer, conf.ExtraCas)
 			ocspResponses[i] = ocspRes
 			if err != nil {
 				errors[i] = err
@@ -448,21 +637,26 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.
 				return nil
 			}
 
-			ret, err := validateOCSP(ocspRes)
+			ret, err := validateOCSP(conf, ocspRes)
 			if err != nil {
 				errors[i] = err
 				return err
 			}
 			if isValidOCSPStatus(ret.code) {
 				ocspStatuses[i] = ret
+			} else if ret.err != nil {
+				// This check needs to occur after the isValidOCSPStatus check, as the unknown
+				// status also sets an err value within ret.
+				errors[i] = ret.err
+				return ret.err
 			}
 			return nil
 		}
 		if conf.QueryAllServers {
 			wg.Add(1)
-			go doRequest()
+			go doRequest(i)
 		} else {
-			err = doRequest()
+			err = doRequest(i)
 			if err == nil {
 				break
 			}
@@ -477,6 +671,9 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.
 	var firstError error
 	for i := range ocspHosts {
 		if errors[i] != nil {
+			if IsOcspVerificationError(errors[i]) {
+				return nil, errors[i]
+			}
 			if firstError == nil {
 				firstError = errors[i]
 			}
@@ -502,15 +699,33 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.
 		}
 	}
 
+	// If querying all servers is enabled and we got an error from one host, we
+	// can't trust a good status from the others, since we can't confirm that they
+	// would have returned the same response; we do still allow revoked responses through
+	if conf.QueryAllServers && firstError != nil && (ret != nil && ret.code == ocspStatusGood) {
+		return nil, fmt.Errorf("encountered an error on a server, "+
+			"ignoring good response status as ocsp_query_all_servers is set to true: %w", firstError)
+	}
+
 	// If no server reported the cert revoked, but we did have an error, report it
 	if (ret == nil || ret.code == ocspStatusUnknown) && firstError != nil {
 		return nil, firstError
 	}
-	// otherwise ret should contain a response for the overall request
+	// An extra safety check in case ret and firstError are both nil
+	if ret == nil {
+		return nil, fmt.Errorf("failed to extract a known response code or error from the OCSP server")
+	}
 
+	// otherwise ret should contain a response for the overall request
 	if !isValidOCSPStatus(ret.code) {
 		return ret, nil
 	}
+
+	if ocspRes.NextUpdate.IsZero() {
+		// We should not cache values with no NextUpdate value set
+		return ret, nil
+	}
+
 	v := ocspCachedResponse{
 		status: ret.code,
 		time:   float64(time.Now().UTC().Unix()),
@@ -531,11 +746,13 @@ func isValidOCSPStatus(status ocspStatusCode) bool {
 }
 
 type VerifyConfig struct {
-	OcspEnabled         bool
-	ExtraCas            []*x509.Certificate
-	OcspServersOverride []string
-	OcspFailureMode     FailOpenMode
-	QueryAllServers     bool
+	OcspEnabled          bool
+	ExtraCas             []*x509.Certificate
+	OcspServersOverride  []string
+	OcspFailureMode      FailOpenMode
+	QueryAllServers      bool
+	OcspThisUpdateMaxAge time.Duration
+	OcspMaxRetries       int
 }
 
 // VerifyLeafCertificate verifies just the subject against its direct issuer
@@ -547,12 +764,12 @@ func (c *Client) VerifyLeafCertificate(ctx context.Context, subject, issuer *x50
 	if results.code == ocspStatusGood {
 		return nil
 	} else {
-		serial := issuer.SerialNumber
+		serial := subject.SerialNumber
 		serialHex := strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":"))
 		if results.code == ocspStatusRevoked {
 			return fmt.Errorf("certificate with serial number %s has been revoked", serialHex)
 		} else if conf.OcspFailureMode == FailOpenFalse {
-			return fmt.Errorf("unknown OCSP status for cert with serial number %s", strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")))
+			return fmt.Errorf("unknown OCSP status for cert with serial number %s", serialHex)
 		} else {
 			c.Logger().Warn("could not validate OCSP status for cert, but continuing in fail open mode", "serial", serialHex)
 		}
@@ -621,12 +838,12 @@ func (c *Client) canEarlyExitForOCSP(results []*ocspStatus, chainSize int, conf
 	return nil
 }
 
-func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate) (bool, error) {
+func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate, config *VerifyConfig) (bool, error) {
 	n := len(verifiedChains) - 1
 	for j := 0; j < n; j++ {
 		subject := verifiedChains[j]
 		issuer := verifiedChains[j+1]
-		status, _, _, err := c.validateWithCache(subject, issuer)
+		status, _, _, err := c.validateWithCache(subject, issuer, config)
 		if err != nil {
 			return false, err
 		}
@@ -637,7 +854,7 @@ func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Cert
 	return true, nil
 }
 
-func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStatus, []byte, *certIDKey, error) {
+func (c *Client) validateWithCache(subject, issuer *x509.Certificate, config *VerifyConfig) (*ocspStatus,
[]byte, *certIDKey, error) { ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) if err != nil { return nil, nil, nil, fmt.Errorf("failed to create OCSP request from the certificates: %v", err) @@ -646,7 +863,7 @@ func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStat if ocspS.code != ocspSuccess { return nil, nil, nil, fmt.Errorf("failed to extract CertID from OCSP Request: %v", err) } - status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer) + status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer, config) if err != nil { return nil, nil, nil, err } @@ -654,7 +871,7 @@ func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStat } func (c *Client) GetAllRevocationStatus(ctx context.Context, verifiedChains []*x509.Certificate, conf *VerifyConfig) ([]*ocspStatus, error) { - _, err := c.validateWithCacheForAllCertificates(verifiedChains) + _, err := c.validateWithCacheForAllCertificates(verifiedChains, conf) if err != nil { return nil, err } @@ -679,11 +896,11 @@ func (c *Client) verifyPeerCertificateSerial(conf *VerifyConfig) func(_ [][]byte } } -func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse) (*ocspStatus, error) { - return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil) +func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse, conf *VerifyConfig) (*ocspStatus, error) { + return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil, conf) } -func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate) (*ocspStatus, error) { +func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) { subjectName := "Unknown" if subject != nil { subjectName = subject.Subject.CommonName @@ -705,14 +922,29 @@ func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, s }, nil } - return validateOCSP(&ocsp.Response{ + sdkOcspStatus := internalStatusCodeToSDK(cacheValue.status) + + return validateOCSP(conf, &ocsp.Response{ ProducedAt: time.Unix(int64(cacheValue.producedAt), 0).UTC(), ThisUpdate: time.Unix(int64(cacheValue.thisUpdate), 0).UTC(), NextUpdate: time.Unix(int64(cacheValue.nextUpdate), 0).UTC(), - Status: int(cacheValue.status), + Status: sdkOcspStatus, }) } +func internalStatusCodeToSDK(internalStatusCode ocspStatusCode) int { + switch internalStatusCode { + case ocspStatusGood: + return ocsp.Good + case ocspStatusRevoked: + return ocsp.Revoked + case ocspStatusUnknown: + return ocsp.Unknown + default: + return int(internalStatusCode) + } +} + /* // writeOCSPCache writes a OCSP Response cache func (c *Client) writeOCSPCache(ctx context.Context, storage logical.Storage) error { diff --git a/sdk/helper/ocsp/failopenmode_enumer.go b/sdk/helper/ocsp/failopenmode_enumer.go new file mode 100644 index 000000000000..d0cf9f5e9240 --- /dev/null +++ b/sdk/helper/ocsp/failopenmode_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=FailOpenMode -trimprefix=FailOpen"; DO NOT EDIT. 
+ +package ocsp + +import ( + "fmt" +) + +const _FailOpenModeName = "ocspFailOpenNotSetTrueFalse" + +var _FailOpenModeIndex = [...]uint8{0, 18, 22, 27} + +func (i FailOpenMode) String() string { + if i >= FailOpenMode(len(_FailOpenModeIndex)-1) { + return fmt.Sprintf("FailOpenMode(%d)", i) + } + return _FailOpenModeName[_FailOpenModeIndex[i]:_FailOpenModeIndex[i+1]] +} + +var _FailOpenModeValues = []FailOpenMode{0, 1, 2} + +var _FailOpenModeNameToValueMap = map[string]FailOpenMode{ + _FailOpenModeName[0:18]: 0, + _FailOpenModeName[18:22]: 1, + _FailOpenModeName[22:27]: 2, +} + +// FailOpenModeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func FailOpenModeString(s string) (FailOpenMode, error) { + if val, ok := _FailOpenModeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to FailOpenMode values", s) +} + +// FailOpenModeValues returns all values of the enum +func FailOpenModeValues() []FailOpenMode { + return _FailOpenModeValues +} + +// IsAFailOpenMode returns "true" if the value is listed in the enum definition. "false" otherwise +func (i FailOpenMode) IsAFailOpenMode() bool { + for _, v := range _FailOpenModeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/ocsp/ocsp_test.go b/sdk/helper/ocsp/ocsp_test.go index 2f3f1976d2a8..fcd868e2d613 100644 --- a/sdk/helper/ocsp/ocsp_test.go +++ b/sdk/helper/ocsp/ocsp_test.go @@ -6,21 +6,28 @@ import ( "bytes" "context" "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" "errors" "fmt" "io" - "io/ioutil" + "math/big" "net" "net/http" + "net/http/httptest" "net/url" + "sync/atomic" "testing" "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-retryablehttp" lru "github.com/hashicorp/golang-lru" + "github.com/stretchr/testify/require" "golang.org/x/crypto/ocsp" ) @@ -43,20 +50,21 @@ func TestOCSP(t *testing.T) { for _, tgt := range targetURL { c.ocspResponseCache, _ = lru.New2Q(10) for _, tr := range transports { - c := &http.Client{ - Transport: tr, - Timeout: 30 * time.Second, - } - req, err := http.NewRequest("GET", tgt, bytes.NewReader(nil)) + ocspClient := retryablehttp.NewClient() + ocspClient.Logger = c.Logger() + ocspClient.RetryMax = conf.OcspMaxRetries + ocspClient.HTTPClient.Timeout = 30 * time.Second + ocspClient.HTTPClient.Transport = tr + req, err := retryablehttp.NewRequest("GET", tgt, bytes.NewReader(nil)) if err != nil { t.Fatalf("fail to create a request. err: %v", err) } - res, err := c.Do(req) + res, err := ocspClient.Do(req) if err != nil { t.Fatalf("failed to GET contents. err: %v", err) } defer res.Body.Close() - _, err = ioutil.ReadAll(res.Body) + _, err = io.ReadAll(res.Body) if err != nil { t.Fatalf("failed to read content body for %v", tgt) } @@ -105,7 +113,7 @@ func TestMultiOCSP(t *testing.T) { t.Fatalf("failed to GET contents. 
err: %v", err) } defer res.Body.Close() - _, err = ioutil.ReadAll(res.Body) + _, err = io.ReadAll(res.Body) if err != nil { t.Fatalf("failed to read content body for %v", tgt) } @@ -161,6 +169,7 @@ func TestUnitEncodeCertIDGood(t *testing.T) { } func TestUnitCheckOCSPResponseCache(t *testing.T) { + conf := &VerifyConfig{OcspEnabled: true} c := New(testLogFactory, 10) dummyKey0 := certIDKey{ NameHash: "dummy0", @@ -176,7 +185,7 @@ func TestUnitCheckOCSPResponseCache(t *testing.T) { c.ocspResponseCache.Add(dummyKey0, &ocspCachedResponse{time: currentTime}) subject := &x509.Certificate{} issuer := &x509.Certificate{} - ost, err := c.checkOCSPResponseCache(&dummyKey, subject, issuer) + ost, err := c.checkOCSPResponseCache(&dummyKey, subject, issuer, conf) if err != nil { t.Fatal(err) } @@ -185,7 +194,7 @@ func TestUnitCheckOCSPResponseCache(t *testing.T) { } // old timestamp c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(1395054952)}) - ost, err = c.checkOCSPResponseCache(&dummyKey, subject, issuer) + ost, err = c.checkOCSPResponseCache(&dummyKey, subject, issuer, conf) if err != nil { t.Fatal(err) } @@ -195,15 +204,385 @@ func TestUnitCheckOCSPResponseCache(t *testing.T) { // invalid validity c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(currentTime - 1000)}) - ost, err = c.checkOCSPResponseCache(&dummyKey, subject, nil) + ost, err = c.checkOCSPResponseCache(&dummyKey, subject, nil, conf) if err == nil && isValidOCSPStatus(ost.code) { t.Fatalf("should have failed.") } } +// TestUnitValidOCSPResponse validates various combinations of acceptable OCSP responses +func TestUnitValidOCSPResponse(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + + type tests struct { + name string + ocspRes ocsp.Response + expectedStatus ocspStatusCode + } + + now := time.Now() + ctx := context.Background() + + tt := []tests{ + { + name: "normal", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + expectedStatus: ocspStatusGood, + }, + { + name: "no-next-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + Status: ocsp.Good, + }, + expectedStatus: ocspStatusGood, + }, + { + name: "revoked-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + Status: ocsp.Revoked, + }, + expectedStatus: ocspStatusRevoked, + }, + { + name: "revoked-update-with-next-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(1 * time.Hour), + Status: ocsp.Revoked, + }, + expectedStatus: ocspStatusRevoked, + }, + } + for _, tc := range tt { + for _, maxAge := range []time.Duration{time.Duration(0), time.Duration(2 * time.Hour)} { + t.Run(tc.name+"-max-age-"+maxAge.String(), func(t *testing.T) { + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := buildOcspResponse(t, rootCa, rootCaKey, tc.ocspRes) + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts.URL}, + OcspFailureMode: FailOpenFalse, + QueryAllServers: false, + OcspThisUpdateMaxAge: maxAge, + } + + status, err := 
client.GetRevocationStatus(ctx, leafCert, rootCa, config) + require.NoError(t, err, "ocsp response should have been considered valid") + require.NoError(t, status.err, "ocsp status should not contain an error") + require.Equal(t, &ocspStatus{code: tc.expectedStatus}, status) + }) + } + } +} + +// TestUnitBadOCSPResponses verifies that we fail properly on a bunch of different +// OCSP response conditions +func TestUnitBadOCSPResponses(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + rootCaKey2, rootCa2, _ := createCaLeafCerts(t) + + type tests struct { + name string + ocspRes ocsp.Response + maxAge time.Duration + ca *x509.Certificate + caKey *ecdsa.PrivateKey + errContains string + } + + now := time.Now() + ctx := context.Background() + + tt := []tests{ + { + name: "bad-signing-issuer", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + ca: rootCa2, + caKey: rootCaKey2, + errContains: "error directly verifying signature", + }, + { + name: "incorrect-serial-number", + ocspRes: ocsp.Response{ + SerialNumber: big.NewInt(1000), + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + ca: rootCa, + caKey: rootCaKey, + errContains: "did not match the leaf certificate serial number", + }, + { + name: "expired-next-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(-30 * time.Minute), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "this-update-in-future", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(1 * time.Hour), + NextUpdate: now.Add(2 * time.Hour), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "next-update-before-this-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(-2 * time.Hour), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "missing-this-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + NextUpdate: now.Add(2 * time.Hour), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "unknown-status", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Unknown, + }, + errContains: "OCSP status unknown", + }, + { + name: "over-max-age", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + maxAge: 10 * time.Minute, + errContains: "is greater than max age", + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + useCa := rootCa + useCaKey := rootCaKey + if tc.ca != nil { + useCa = tc.ca + } + if tc.caKey != nil { + useCaKey = tc.caKey + } + response := buildOcspResponse(t, useCa, useCaKey, tc.ocspRes) + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts.URL}, + OcspFailureMode: FailOpenFalse, + QueryAllServers: 
false,
+				OcspThisUpdateMaxAge: tc.maxAge,
+			}
+
+			status, err := client.GetRevocationStatus(ctx, leafCert, rootCa, config)
+			if (err == nil && status == nil) || (status != nil && status.err == nil) {
+				t.Fatalf("expected an error, got none")
+			}
+			if err != nil {
+				require.ErrorContains(t, err, tc.errContains,
+					"Expected error, got response: %v, %v", status, err)
+			}
+			if status != nil && status.err != nil {
+				require.ErrorContains(t, status.err, tc.errContains,
+					"Expected error, got response: %v, %v", status, err)
+			}
+		})
+	}
+}
+
+// TestUnitZeroNextUpdateAreNotCached verifies that we are not caching responses
+// with no NextUpdate field set, as per RFC 6960 4.2.2.1:
+// "If nextUpdate is not set, the responder is indicating that newer
+// revocation information is available all the time."
+func TestUnitZeroNextUpdateAreNotCached(t *testing.T) {
+	rootCaKey, rootCa, leafCert := createCaLeafCerts(t)
+	numQueries := &atomic.Uint32{}
+	ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		numQueries.Add(1)
+		now := time.Now()
+		ocspRes := ocsp.Response{
+			SerialNumber: leafCert.SerialNumber,
+			ThisUpdate:   now.Add(-1 * time.Hour),
+			Status:       ocsp.Good,
+		}
+		response := buildOcspResponse(t, rootCa, rootCaKey, ocspRes)
+		_, _ = w.Write(response)
+	})
+	ts := httptest.NewServer(ocspHandler)
+	defer ts.Close()
+
+	logFactory := func() hclog.Logger {
+		return hclog.NewNullLogger()
+	}
+	client := New(logFactory, 100)
+
+	config := &VerifyConfig{
+		OcspEnabled:         true,
+		OcspServersOverride: []string{ts.URL},
+	}
+
+	_, err := client.GetRevocationStatus(context.Background(), leafCert, rootCa, config)
+	require.NoError(t, err, "Failed fetching revocation status")
+
+	_, err = client.GetRevocationStatus(context.Background(), leafCert, rootCa, config)
+	require.NoError(t, err, "Failed fetching revocation status second time")
+
+	require.Equal(t, uint32(2), numQueries.Load())
+}
+
+// TestUnitResponsesAreCached verifies that the OCSP responses are properly cached when
+// querying for the same leaf certificates
+func TestUnitResponsesAreCached(t *testing.T) {
+	rootCaKey, rootCa, leafCert := createCaLeafCerts(t)
+	numQueries := &atomic.Uint32{}
+	ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		numQueries.Add(1)
+		now := time.Now()
+		ocspRes := ocsp.Response{
+			SerialNumber: leafCert.SerialNumber,
+			ThisUpdate:   now.Add(-1 * time.Hour),
+			NextUpdate:   now.Add(1 * time.Hour),
+			Status:       ocsp.Good,
+		}
+		response := buildOcspResponse(t, rootCa, rootCaKey, ocspRes)
+		_, _ = w.Write(response)
+	})
+	ts1 := httptest.NewServer(ocspHandler)
+	ts2 := httptest.NewServer(ocspHandler)
+	defer ts1.Close()
+	defer ts2.Close()
+
+	logFactory := func() hclog.Logger {
+		return hclog.NewNullLogger()
+	}
+	client := New(logFactory, 100)
+
+	config := &VerifyConfig{
+		OcspEnabled:         true,
+		OcspServersOverride: []string{ts1.URL, ts2.URL},
+		QueryAllServers:     true,
+	}
+
+	_, err := client.GetRevocationStatus(context.Background(), leafCert, rootCa, config)
+	require.NoError(t, err, "Failed fetching revocation status")
+	// Make sure that we queried both servers and not the cache
+	require.Equal(t, uint32(2), numQueries.Load())
+
+	// These queries should be served from the cache and not influence our counter
+	_, err = client.GetRevocationStatus(context.Background(), leafCert, rootCa, config)
+	require.NoError(t, err, "Failed fetching revocation status second time")
+
+	require.Equal(t, uint32(2), numQueries.Load())
+}
+
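For reference, the new VerifyConfig knobs exercised by these tests compose like this (a sketch; the values are illustrative, not recommendations, and the responder URL is hypothetical):

package example

import (
	"time"

	"github.com/hashicorp/vault/sdk/helper/ocsp"
)

// exampleVerifyConfig shows the new knobs side by side: bounded retries,
// a freshness cap on thisUpdate, and fan-out to every configured server.
func exampleVerifyConfig() *ocsp.VerifyConfig {
	return &ocsp.VerifyConfig{
		OcspEnabled:          true,
		OcspServersOverride:  []string{"http://ocsp.example.com"}, // hypothetical responder URL
		OcspFailureMode:      ocsp.FailOpenFalse,
		QueryAllServers:      true,          // good statuses must be unanimous
		OcspThisUpdateMaxAge: 2 * time.Hour, // reject responses older than this
		OcspMaxRetries:       2,
	}
}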
+func buildOcspResponse(t *testing.T, ca *x509.Certificate, caKey *ecdsa.PrivateKey, ocspRes ocsp.Response) []byte {
+	response, err := ocsp.CreateResponse(ca, ca, ocspRes, caKey)
+	if err != nil {
+		t.Fatalf("failed generating OCSP response: %v", err)
+	}
+	return response
+}
+
+func createCaLeafCerts(t *testing.T) (*ecdsa.PrivateKey, *x509.Certificate, *x509.Certificate) {
+	rootCaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating root key for CA")
+
+	// Build a self-signed root CA that is also authorized to sign OCSP responses
+	cr := &x509.Certificate{
+		Subject:               pkix.Name{CommonName: "Root Cert"},
+		SerialNumber:          big.NewInt(1),
+		IsCA:                  true,
+		BasicConstraintsValid: true,
+		SignatureAlgorithm:    x509.ECDSAWithSHA256,
+		NotBefore:             time.Now().Add(-1 * time.Second),
+		NotAfter:              time.Now().AddDate(1, 0, 0),
+		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning},
+	}
+	rootCaBytes, err := x509.CreateCertificate(rand.Reader, cr, cr, &rootCaKey.PublicKey, rootCaKey)
+	require.NoError(t, err, "failed generating root ca")
+
+	rootCa, err := x509.ParseCertificate(rootCaBytes)
+	require.NoError(t, err, "failed parsing root ca")
+
+	leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating leaf key")
+
+	cr = &x509.Certificate{
+		Subject:            pkix.Name{CommonName: "Leaf Cert"},
+		SerialNumber:       big.NewInt(2),
+		SignatureAlgorithm: x509.ECDSAWithSHA256,
+		NotBefore:          time.Now().Add(-1 * time.Second),
+		NotAfter:           time.Now().AddDate(1, 0, 0),
+		KeyUsage:           x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:        []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+	}
+	leafCertBytes, err := x509.CreateCertificate(rand.Reader, cr, rootCa, &leafKey.PublicKey, rootCaKey)
+	require.NoError(t, err, "failed generating leaf cert")
+
+	leafCert, err := x509.ParseCertificate(leafCertBytes)
+	require.NoError(t, err, "failed parsing leaf cert")
+	return rootCaKey, rootCa, leafCert
+}
+
 func TestUnitValidateOCSP(t *testing.T) {
+	conf := &VerifyConfig{OcspEnabled: true}
 	ocspRes := &ocsp.Response{}
-	ost, err := validateOCSP(ocspRes)
+	ost, err := validateOCSP(conf, ocspRes)
 	if err == nil && isValidOCSPStatus(ost.code) {
 		t.Fatalf("should have failed.")
 	}
@@ -212,7 +591,7 @@ func TestUnitValidateOCSP(t *testing.T) {
 	ocspRes.ThisUpdate = currentTime.Add(-2 * time.Hour)
 	ocspRes.NextUpdate = currentTime.Add(2 * time.Hour)
 	ocspRes.Status = ocsp.Revoked
-	ost, err = validateOCSP(ocspRes)
+	ost, err = validateOCSP(conf, ocspRes)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -221,7 +600,7 @@
 		t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusRevoked, ost.code)
 	}
 	ocspRes.Status = ocsp.Good
-	ost, err = validateOCSP(ocspRes)
+	ost, err = validateOCSP(conf, ocspRes)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -230,7 +609,7 @@
 		t.Fatalf("should have success. expected: %v, got: %v", ocspStatusGood, ost.code)
 	}
 	ocspRes.Status = ocsp.Unknown
-	ost, err = validateOCSP(ocspRes)
+	ost, err = validateOCSP(conf, ocspRes)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -238,7 +617,7 @@
 		t.Fatalf("should have failed.
expected: %v, got: %v", ocspStatusUnknown, ost.code) } ocspRes.Status = ocsp.ServerFailed - ost, err = validateOCSP(ocspRes) + ost, err = validateOCSP(conf, ocspRes) if err != nil { t.Fatal(err) } @@ -299,7 +678,7 @@ func TestOCSPRetry(t *testing.T) { context.TODO(), client, fakeRequestFunc, dummyOCSPHost, - make(map[string]string), []byte{0}, certs[len(certs)-1]) + make(map[string]string), []byte{0}, certs[0], certs[len(certs)-1], nil) if err == nil { fmt.Printf("should fail: %v, %v, %v\n", res, b, st) } @@ -314,7 +693,7 @@ func TestOCSPRetry(t *testing.T) { context.TODO(), client, fakeRequestFunc, dummyOCSPHost, - make(map[string]string), []byte{0}, certs[len(certs)-1]) + make(map[string]string), []byte{0}, certs[0], certs[len(certs)-1], nil) if err == nil { fmt.Printf("should fail: %v, %v, %v\n", res, b, st) } diff --git a/sdk/helper/pluginidentityutil/errors.go b/sdk/helper/pluginidentityutil/errors.go new file mode 100644 index 000000000000..92a6ed6f5d26 --- /dev/null +++ b/sdk/helper/pluginidentityutil/errors.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginidentityutil + +import "errors" + +var ErrPluginWorkloadIdentityUnsupported = errors.New("plugin workload identity not supported in Vault community edition") diff --git a/sdk/helper/pluginidentityutil/fields.go b/sdk/helper/pluginidentityutil/fields.go new file mode 100644 index 000000000000..3d97537ecc94 --- /dev/null +++ b/sdk/helper/pluginidentityutil/fields.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginidentityutil + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" +) + +// PluginIdentityTokenParams contains a set of common parameters that plugins +// can use for setting plugin identity token behavior. +type PluginIdentityTokenParams struct { + // IdentityTokenTTL is the duration that tokens will be valid for + IdentityTokenTTL time.Duration `json:"identity_token_ttl"` + // IdentityTokenAudience identifies the recipient of the token + IdentityTokenAudience string `json:"identity_token_audience"` +} + +// ParsePluginIdentityTokenFields provides common field parsing to embedding structs. +func (p *PluginIdentityTokenParams) ParsePluginIdentityTokenFields(d *framework.FieldData) error { + if tokenTTLRaw, ok := d.GetOk("identity_token_ttl"); ok { + p.IdentityTokenTTL = time.Duration(tokenTTLRaw.(int)) * time.Second + } + + if tokenAudienceRaw, ok := d.GetOk("identity_token_audience"); ok { + p.IdentityTokenAudience = tokenAudienceRaw.(string) + } + + return nil +} + +// PopulatePluginIdentityTokenData adds PluginIdentityTokenParams info into the given map. +func (p *PluginIdentityTokenParams) PopulatePluginIdentityTokenData(m map[string]interface{}) { + m["identity_token_ttl"] = int64(p.IdentityTokenTTL.Seconds()) + m["identity_token_audience"] = p.IdentityTokenAudience +} + +// AddPluginIdentityTokenFields adds plugin identity token fields to the given +// field schema map. 
+func AddPluginIdentityTokenFields(m map[string]*framework.FieldSchema) { + fields := map[string]*framework.FieldSchema{ + "identity_token_audience": { + Type: framework.TypeString, + Description: "Audience of plugin identity tokens", + Default: "", + }, + "identity_token_ttl": { + Type: framework.TypeDurationSecond, + Description: "Time-to-live of plugin identity tokens", + Default: 3600, + }, + } + + for name, schema := range fields { + if _, ok := m[name]; ok { + panic(fmt.Sprintf("adding field %q would overwrite existing field", name)) + } + m[name] = schema + } +} diff --git a/sdk/helper/pluginidentityutil/fields_test.go b/sdk/helper/pluginidentityutil/fields_test.go new file mode 100644 index 000000000000..a64f1aca439b --- /dev/null +++ b/sdk/helper/pluginidentityutil/fields_test.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginidentityutil + +import ( + "testing" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/stretchr/testify/assert" +) + +const ( + fieldIDTokenTTL = "identity_token_ttl" + fieldIDTokenAudience = "identity_token_audience" +) + +func identityTokenFieldData(raw map[string]interface{}) *framework.FieldData { + return &framework.FieldData{ + Raw: raw, + Schema: map[string]*framework.FieldSchema{ + fieldIDTokenTTL: { + Type: framework.TypeDurationSecond, + }, + fieldIDTokenAudience: { + Type: framework.TypeString, + }, + }, + } +} + +func TestParsePluginIdentityTokenFields(t *testing.T) { + testcases := []struct { + name string + d *framework.FieldData + wantErr bool + want map[string]interface{} + }{ + { + name: "all input", + d: identityTokenFieldData(map[string]interface{}{ + fieldIDTokenTTL: 10, + fieldIDTokenAudience: "test-aud", + }), + want: map[string]interface{}{ + fieldIDTokenTTL: time.Duration(10) * time.Second, + fieldIDTokenAudience: "test-aud", + }, + }, + { + name: "empty ttl", + d: identityTokenFieldData(map[string]interface{}{ + fieldIDTokenAudience: "test-aud", + }), + want: map[string]interface{}{ + fieldIDTokenTTL: time.Duration(0), + fieldIDTokenAudience: "test-aud", + }, + }, + { + name: "empty audience", + d: identityTokenFieldData(map[string]interface{}{ + fieldIDTokenTTL: 10, + }), + want: map[string]interface{}{ + fieldIDTokenTTL: time.Duration(10) * time.Second, + fieldIDTokenAudience: "", + }, + }, + } + + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + p := new(PluginIdentityTokenParams) + err := p.ParsePluginIdentityTokenFields(tt.d) + if tt.wantErr { + assert.Error(t, err) + return + } + got := map[string]interface{}{ + fieldIDTokenTTL: p.IdentityTokenTTL, + fieldIDTokenAudience: p.IdentityTokenAudience, + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestPopulatePluginIdentityTokenData(t *testing.T) { + testcases := []struct { + name string + p *PluginIdentityTokenParams + want map[string]interface{} + }{ + { + name: "basic", + p: &PluginIdentityTokenParams{ + IdentityTokenAudience: "test-aud", + IdentityTokenTTL: time.Duration(10) * time.Second, + }, + want: map[string]interface{}{ + fieldIDTokenTTL: int64(10), + fieldIDTokenAudience: "test-aud", + }, + }, + } + + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + got := make(map[string]interface{}) + tt.p.PopulatePluginIdentityTokenData(got) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestAddPluginIdentityTokenFields(t *testing.T) { + testcases := []struct { + name string + input map[string]*framework.FieldSchema + want 
map[string]*framework.FieldSchema + }{ + { + name: "basic", + input: map[string]*framework.FieldSchema{}, + want: map[string]*framework.FieldSchema{ + fieldIDTokenAudience: { + Type: framework.TypeString, + Description: "Audience of plugin identity tokens", + Default: "", + }, + fieldIDTokenTTL: { + Type: framework.TypeDurationSecond, + Description: "Time-to-live of plugin identity tokens", + Default: 3600, + }, + }, + }, + { + name: "additional-fields", + input: map[string]*framework.FieldSchema{ + "test": { + Type: framework.TypeString, + Description: "Test description", + Default: "default", + }, + }, + want: map[string]*framework.FieldSchema{ + fieldIDTokenAudience: { + Type: framework.TypeString, + Description: "Audience of plugin identity tokens", + Default: "", + }, + fieldIDTokenTTL: { + Type: framework.TypeDurationSecond, + Description: "Time-to-live of plugin identity tokens", + Default: 3600, + }, + "test": { + Type: framework.TypeString, + Description: "Test description", + Default: "default", + }, + }, + }, + } + + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + got := tt.input + AddPluginIdentityTokenFields(got) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/sdk/helper/pluginruntimeutil/config.go b/sdk/helper/pluginruntimeutil/config.go new file mode 100644 index 000000000000..f674c7df3625 --- /dev/null +++ b/sdk/helper/pluginruntimeutil/config.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginruntimeutil + +import "github.com/hashicorp/vault/sdk/helper/consts" + +// PluginRuntimeConfig defines the metadata needed to run a plugin runtime +type PluginRuntimeConfig struct { + Name string `json:"name" structs:"name"` + Type consts.PluginRuntimeType `json:"type" structs:"type"` + OCIRuntime string `json:"oci_runtime" structs:"oci_runtime"` + CgroupParent string `json:"cgroup_parent" structs:"cgroup_parent"` + CPU int64 `json:"cpu" structs:"cpu"` + Memory int64 `json:"memory" structs:"memory"` + Rootless bool `json:"rootless" structs:"rootless"` +} diff --git a/sdk/helper/pluginutil/env.go b/sdk/helper/pluginutil/env.go index 1b45ef32dca2..ea05e8462c70 100644 --- a/sdk/helper/pluginutil/env.go +++ b/sdk/helper/pluginutil/env.go @@ -38,6 +38,17 @@ const ( // PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names // opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT" + + // PluginUseLegacyEnvLayering opts out of new environment variable precedence. + // If set to true, Vault process environment variables take precedence over any + // colliding plugin-specific environment variables. Otherwise, plugin-specific + // environment variables take precedence over Vault process environment variables. + PluginUseLegacyEnvLayering = "VAULT_PLUGIN_USE_LEGACY_ENV_LAYERING" + + // PluginUsePostgresSSLInline enables the usage of a custom sslinline + // configuration as a shim to the pgx postgres library. + // Deprecated: VAULT_PLUGIN_USE_POSTGRES_SSLINLINE will be removed in a future version of the Vault SDK.
+ PluginUsePostgresSSLInline = "VAULT_PLUGIN_USE_POSTGRES_SSLINLINE" ) // OptionallyEnableMlock determines if mlock should be called, and if so enables diff --git a/sdk/helper/pluginutil/identity_token.go b/sdk/helper/pluginutil/identity_token.go new file mode 100644 index 000000000000..7e764bb1e137 --- /dev/null +++ b/sdk/helper/pluginutil/identity_token.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "time" +) + +const redactedTokenString = "ey***" + +type IdentityTokenRequest struct { + // Audience identifies the recipient of the token. The requested + // value will be in the "aud" claim. Required. + Audience string + // TTL is the requested duration that the token will be valid for. + // Optional with a default of 1hr. + TTL time.Duration +} + +type IdentityTokenResponse struct { + // Token is the plugin identity token. + Token IdentityToken + // TTL is the duration that the token is valid for after truncation is applied. + // The TTL may be truncated depending on the lifecycle of its signing key. + TTL time.Duration +} + +type IdentityToken string + +// String returns a redacted token string. Use the Token() method +// to obtain the non-redacted token contents. +func (t IdentityToken) String() string { + return redactedTokenString +} + +// Token returns the non-redacted token contents. +func (t IdentityToken) Token() string { + return string(t) +} diff --git a/sdk/helper/pluginutil/identity_token_test.go b/sdk/helper/pluginutil/identity_token_test.go new file mode 100644 index 000000000000..d0c01c390b30 --- /dev/null +++ b/sdk/helper/pluginutil/identity_token_test.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestIdentityToken_Stringer ensures that plugin identity tokens that +// are printed in formatted strings or errors are redacted and getters +// return expected values. +func TestIdentityToken_Stringer(t *testing.T) { + contents := "header.payload.signature" + tk := IdentityToken(contents) + + // token getters + assert.Equal(t, contents, tk.Token()) + assert.Equal(t, redactedTokenString, tk.String()) + + // formatted strings and errors + assert.NotContains(t, fmt.Sprintf("%v", tk), tk.Token()) + assert.NotContains(t, fmt.Sprintf("%s", tk), tk.Token()) + assert.NotContains(t, fmt.Errorf("%v", tk).Error(), tk.Token()) + assert.NotContains(t, fmt.Errorf("%s", tk).Error(), tk.Token()) +} diff --git a/sdk/helper/pluginutil/multiplexing.pb.go b/sdk/helper/pluginutil/multiplexing.pb.go index d7663b90215f..4e6c9f46a2e7 100644 --- a/sdk/helper/pluginutil/multiplexing.pb.go +++ b/sdk/helper/pluginutil/multiplexing.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/helper/pluginutil/multiplexing.proto package pluginutil @@ -150,7 +150,7 @@ func file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP() []byte { } var file_sdk_helper_pluginutil_multiplexing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_sdk_helper_pluginutil_multiplexing_proto_goTypes = []interface{}{ +var file_sdk_helper_pluginutil_multiplexing_proto_goTypes = []any{ (*MultiplexingSupportRequest)(nil), // 0: pluginutil.multiplexing.MultiplexingSupportRequest (*MultiplexingSupportResponse)(nil), // 1: pluginutil.multiplexing.MultiplexingSupportResponse } @@ -170,7 +170,7 @@ func file_sdk_helper_pluginutil_multiplexing_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*MultiplexingSupportRequest); i { case 0: return &v.state @@ -182,7 +182,7 @@ func file_sdk_helper_pluginutil_multiplexing_proto_init() { return nil } } - file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*MultiplexingSupportResponse); i { case 0: return &v.state diff --git a/sdk/helper/pluginutil/multiplexing.proto b/sdk/helper/pluginutil/multiplexing.proto index c1a2ca0a4aa4..3b5d19198417 100644 --- a/sdk/helper/pluginutil/multiplexing.proto +++ b/sdk/helper/pluginutil/multiplexing.proto @@ -8,9 +8,9 @@ option go_package = "github.com/hashicorp/vault/sdk/helper/pluginutil"; message MultiplexingSupportRequest {} message MultiplexingSupportResponse { - bool supported = 1; + bool supported = 1; } service PluginMultiplexing { - rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); + rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); } diff --git a/sdk/helper/pluginutil/multiplexing_grpc.pb.go b/sdk/helper/pluginutil/multiplexing_grpc.pb.go index aa8d0e47ba84..0f0df2128ba7 100644 --- a/sdk/helper/pluginutil/multiplexing_grpc.pb.go +++ b/sdk/helper/pluginutil/multiplexing_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc (unknown) +// source: sdk/helper/pluginutil/multiplexing.proto package pluginutil @@ -11,8 +18,12 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + PluginMultiplexing_MultiplexingSupport_FullMethodName = "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport" +) // PluginMultiplexingClient is the client API for PluginMultiplexing service. 
// @@ -30,8 +41,9 @@ func NewPluginMultiplexingClient(cc grpc.ClientConnInterface) PluginMultiplexing } func (c *pluginMultiplexingClient) MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(MultiplexingSupportResponse) - err := c.cc.Invoke(ctx, "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", in, out, opts...) + err := c.cc.Invoke(ctx, PluginMultiplexing_MultiplexingSupport_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -76,7 +88,7 @@ func _PluginMultiplexing_MultiplexingSupport_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", + FullMethod: PluginMultiplexing_MultiplexingSupport_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, req.(*MultiplexingSupportRequest)) diff --git a/sdk/helper/pluginutil/run_config.go b/sdk/helper/pluginutil/run_config.go index be34fa9dc09b..1af71d09b75c 100644 --- a/sdk/helper/pluginutil/run_config.go +++ b/sdk/helper/pluginutil/run_config.go @@ -8,11 +8,25 @@ import ( "crypto/sha256" "crypto/tls" "fmt" + "os" "os/exec" + "strconv" + "strings" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-secure-stdlib/plugincontainer" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" +) + +const ( + // Labels for plugin container ownership + labelVaultPID = "com.hashicorp.vault.pid" + labelVaultClusterID = "com.hashicorp.vault.cluster.id" + labelVaultPluginName = "com.hashicorp.vault.plugin.name" + labelVaultPluginVersion = "com.hashicorp.vault.plugin.version" + labelVaultPluginType = "com.hashicorp.vault.plugin.type" ) type PluginClientConfig struct { @@ -30,85 +44,188 @@ type PluginClientConfig struct { type runConfig struct { // Provided by PluginRunner - command string - args []string - sha256 []byte + command string + image string + imageTag string + args []string + sha256 []byte // Initialized with what's in PluginRunner.Env, but can be added to env []string + runtimeConfig *pluginruntimeutil.PluginRuntimeConfig + PluginClientConfig + tmpdir string } -func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { - cmd := exec.Command(rc.command, rc.args...) - cmd.Env = append(cmd.Env, rc.env...) +func (rc runConfig) mlockEnabled() bool { + return rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) +} + +func (rc runConfig) generateCmd(ctx context.Context) (cmd *exec.Cmd, clientTLSConfig *tls.Config, err error) { + cmd = exec.Command(rc.command, rc.args...) 
+ env := rc.env // Add the mlock setting to the ENV of the plugin - if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + if rc.mlockEnabled() { + env = append(env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) } version, err := rc.Wrapper.VaultVersion(ctx) if err != nil { - return nil, err + return nil, nil, err } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version)) + env = append(env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version)) if rc.IsMetadataMode { rc.Logger = rc.Logger.With("metadata", "true") } metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) - cmd.Env = append(cmd.Env, metadataEnv) + env = append(env, metadataEnv) automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS) - cmd.Env = append(cmd.Env, automtlsEnv) + env = append(env, automtlsEnv) - var clientTLSConfig *tls.Config if !rc.AutoMTLS && !rc.IsMetadataMode { // Get a CA TLS Certificate certBytes, key, err := generateCert() if err != nil { - return nil, err + return nil, nil, err } // Use CA to sign a client cert and return a configured TLS config clientTLSConfig, err = createClientTLSConfig(certBytes, key) if err != nil { - return nil, err + return nil, nil, err } // Use CA to sign a server cert and wrap the values in a response wrapped // token. wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key) if err != nil { - return nil, err + return nil, nil, err } // Add the response wrap token to the ENV of the plugin - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + env = append(env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) } - secureConfig := &plugin.SecureConfig{ - Checksum: rc.sha256, - Hash: sha256.New(), + if rc.image == "" { + // go-plugin has always overridden user-provided env vars with the OS + // (Vault process) env vars, but we want plugins to be able to override + // the Vault process env. We don't want to make a breaking change in + // go-plugin so always set SkipHostEnv and replicate the legacy behavior + // ourselves if user opts in. + if legacy, _ := strconv.ParseBool(os.Getenv(PluginUseLegacyEnvLayering)); legacy { + // Env vars are layered as follows, with later entries overriding + // earlier entries if there are duplicate keys: + // 1. Env specified at plugin registration + // 2. Env from Vault SDK + // 3. Env from Vault process (OS) + // 4. Env from go-plugin + cmd.Env = append(env, os.Environ()...) + } else { + // Env vars are layered as follows, with later entries overriding + // earlier entries if there are duplicate keys: + // 1. Env from Vault process (OS) + // 2. Env specified at plugin registration + // 3. Env from Vault SDK + // 4. Env from go-plugin + cmd.Env = append(os.Environ(), env...) + } + } else { + // Containerized plugins do not inherit any env vars from Vault. 
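Both layering branches rely on a documented property of os/exec: when Cmd.Env contains duplicate keys, only the last value in the slice for each key is used. A standalone sketch of that rule, not part of this change (FOO and its values are invented):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Later duplicate keys win, so appending plugin env after os.Environ()
	// gives the plugin-specific value precedence (sh is assumed available).
	cmd := exec.Command("sh", "-c", "echo $FOO")
	cmd.Env = append(os.Environ(),
		"FOO=from-vault-process", // earlier entry loses
		"FOO=from-plugin",        // later entry wins
	)
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // prints "from-plugin"
}
```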
+ cmd.Env = env + } + + return cmd, clientTLSConfig, nil +} + +func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { + cmd, clientTLSConfig, err := rc.generateCmd(ctx) + if err != nil { + return nil, err } clientConfig := &plugin.ClientConfig{ HandshakeConfig: rc.HandshakeConfig, VersionedPlugins: rc.PluginSets, - Cmd: cmd, - SecureConfig: secureConfig, TLSConfig: clientTLSConfig, Logger: rc.Logger, AllowedProtocols: []plugin.Protocol{ plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - AutoMTLS: rc.AutoMTLS, + AutoMTLS: rc.AutoMTLS, + SkipHostEnv: true, + } + if rc.image == "" { + clientConfig.Cmd = cmd + clientConfig.SecureConfig = &plugin.SecureConfig{ + Checksum: rc.sha256, + Hash: sha256.New(), + } + } else { + containerCfg, err := rc.containerConfig(ctx, cmd.Env) + if err != nil { + return nil, err + } + clientConfig.RunnerFunc = containerCfg.NewContainerRunner + clientConfig.UnixSocketConfig = &plugin.UnixSocketConfig{ + Group: strconv.Itoa(containerCfg.GroupAdd), + TempDir: rc.tmpdir, + } + clientConfig.GRPCBrokerMultiplex = true } return clientConfig, nil } +func (rc runConfig) containerConfig(ctx context.Context, env []string) (*plugincontainer.Config, error) { + clusterID, err := rc.Wrapper.ClusterID(ctx) + if err != nil { + return nil, err + } + cfg := &plugincontainer.Config{ + Image: rc.image, + Tag: rc.imageTag, + SHA256: fmt.Sprintf("%x", rc.sha256), + + Env: env, + GroupAdd: os.Getegid(), + Runtime: consts.DefaultContainerPluginOCIRuntime, + CapIPCLock: rc.mlockEnabled(), + Labels: map[string]string{ + labelVaultPID: strconv.Itoa(os.Getpid()), + labelVaultClusterID: clusterID, + labelVaultPluginName: rc.PluginClientConfig.Name, + labelVaultPluginType: rc.PluginClientConfig.PluginType.String(), + labelVaultPluginVersion: rc.PluginClientConfig.Version, + }, + } + + // Use rc.command and rc.args directly instead of cmd.Path and cmd.Args, as + // exec.Command may mutate the provided command. 
+ if rc.command != "" { + cfg.Entrypoint = []string{rc.command} + } + if len(rc.args) > 0 { + cfg.Args = rc.args + } + if rc.runtimeConfig != nil { + cfg.CgroupParent = rc.runtimeConfig.CgroupParent + cfg.NanoCpus = rc.runtimeConfig.CPU + cfg.Memory = rc.runtimeConfig.Memory + if rc.runtimeConfig.OCIRuntime != "" { + cfg.Runtime = rc.runtimeConfig.OCIRuntime + } + if rc.runtimeConfig.Rootless { + cfg.Rootless = true + } + } + + return cfg, nil +} + func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { clientConfig, err := rc.makeConfig(ctx) if err != nil { @@ -170,11 +287,25 @@ func MLock(mlock bool) RunOpt { } func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) { + var image, imageTag string + if r.OCIImage != "" { + image = r.OCIImage + imageTag = strings.TrimPrefix(r.Version, "v") + } rc := runConfig{ - command: r.Command, - args: r.Args, - sha256: r.Sha256, - env: r.Env, + command: r.Command, + image: image, + imageTag: imageTag, + args: r.Args, + sha256: r.Sha256, + env: r.Env, + runtimeConfig: r.RuntimeConfig, + tmpdir: r.Tmpdir, + PluginClientConfig: PluginClientConfig{ + Name: r.Name, + PluginType: r.Type, + Version: r.Version, + }, } for _, opt := range opts { diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go index e64057783ad0..6bb840f462d9 100644 --- a/sdk/helper/pluginutil/run_config_test.go +++ b/sdk/helper/pluginutil/run_config_test.go @@ -5,13 +5,19 @@ package pluginutil import ( "context" + "encoding/hex" "fmt" + "os" "os/exec" + "strconv" "testing" "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-secure-stdlib/plugincontainer" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -28,8 +34,11 @@ func TestMakeConfig(t *testing.T) { mlockEnabled bool mlockEnabledTimes int - expectedConfig *plugin.ClientConfig - expectTLSConfig bool + expectedConfig *plugin.ClientConfig + expectTLSConfig bool + expectRunnerFunc bool + skipSecureConfig bool + useLegacyEnvLayering bool } tests := map[string]testCase{ @@ -58,8 +67,9 @@ func TestMakeConfig(t *testing.T) { responseWrapInfoTimes: 0, - mlockEnabled: false, - mlockEnabledTimes: 1, + mlockEnabled: false, + mlockEnabledTimes: 1, + useLegacyEnvLayering: true, expectedConfig: &plugin.ClientConfig{ HandshakeConfig: plugin.HandshakeConfig{ @@ -75,12 +85,12 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(append([]string{ "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), - }, + }, os.Environ()...), PluginUseLegacyEnvLayering+"=true"), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -90,8 +100,9 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: false, + Logger: hclog.NewNullLogger(), + AutoMTLS: false, + SkipHostEnv: true, }, expectTLSConfig: false, }, @@ -140,14 +151,14 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(os.Environ(), []string{ "initial=true", fmt.Sprintf("%s=%t", PluginMlockEnabled, true), fmt.Sprintf("%s=%s", PluginVaultVersionEnv, 
"dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, "testtoken"), - }, + }...), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -157,8 +168,9 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: false, + Logger: hclog.NewNullLogger(), + AutoMTLS: false, + SkipHostEnv: true, }, expectTLSConfig: true, }, @@ -204,12 +216,12 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(os.Environ(), []string{ "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), - }, + }...), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -219,8 +231,9 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: true, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + SkipHostEnv: true, }, expectTLSConfig: false, }, @@ -266,12 +279,12 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(os.Environ(), []string{ "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), - }, + }...), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -281,11 +294,71 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: true, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + SkipHostEnv: true, }, expectTLSConfig: false, }, + "image set": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: []byte("some_sha256"), + env: []string{"initial=true"}, + image: "some-image", + imageTag: "0.1.0", + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + IsMetadataMode: false, + AutoMTLS: true, + }, + }, + + responseWrapInfoTimes: 0, + + mlockEnabled: false, + mlockEnabledTimes: 2, + + expectedConfig: &plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + Cmd: nil, + SecureConfig: nil, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + SkipHostEnv: true, + GRPCBrokerMultiplex: true, + UnixSocketConfig: &plugin.UnixSocketConfig{ + Group: strconv.Itoa(os.Getgid()), + }, + }, + expectTLSConfig: false, + expectRunnerFunc: true, + skipSecureConfig: true, + }, } for name, test := range tests { @@ -302,6 +375,10 @@ func TestMakeConfig(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + if test.useLegacyEnvLayering { + t.Setenv(PluginUseLegacyEnvLayering, "true") + } + config, err := test.rc.makeConfig(ctx) if err != nil { t.Fatalf("no error expected, got: %s", err) @@ -309,11 +386,13 @@ func 
TestMakeConfig(t *testing.T) { // The following fields are generated, so we just need to check for existence, not specific value // The value must be nilled out before performing a DeepEqual check - hsh := config.SecureConfig.Hash - if hsh == nil { - t.Fatalf("Missing SecureConfig.Hash") + if !test.skipSecureConfig { + hsh := config.SecureConfig.Hash + if hsh == nil { + t.Fatalf("Missing SecureConfig.Hash") + } + config.SecureConfig.Hash = nil } - config.SecureConfig.Hash = nil if test.expectTLSConfig && config.TLSConfig == nil { t.Fatalf("TLS config expected, got nil") @@ -323,6 +402,11 @@ func TestMakeConfig(t *testing.T) { } config.TLSConfig = nil + if test.expectRunnerFunc != (config.RunnerFunc != nil) { + t.Fatalf("expected RunnerFunc: %v, actual: %v", test.expectRunnerFunc, config.RunnerFunc != nil) + } + config.RunnerFunc = nil + require.Equal(t, test.expectedConfig, config) }) } @@ -358,3 +442,137 @@ func (m *mockRunnerUtil) MlockEnabled() bool { args := m.Called() return args.Bool(0) } + +func (m *mockRunnerUtil) ClusterID(ctx context.Context) (string, error) { + return "1234", nil +} + +func TestContainerConfig(t *testing.T) { + dummySHA, err := hex.DecodeString("abc123") + if err != nil { + t.Fatal(err) + } + myPID := strconv.Itoa(os.Getpid()) + for name, tc := range map[string]struct { + rc runConfig + expected plugincontainer.Config + }{ + "image set, no runtime": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: dummySHA, + env: []string{"initial=true"}, + image: "some-image", + imageTag: "0.1.0", + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + Name: "some-plugin", + PluginType: consts.PluginTypeCredential, + Version: "v0.1.0", + }, + }, + expected: plugincontainer.Config{ + Image: "some-image", + Tag: "0.1.0", + SHA256: "abc123", + Entrypoint: []string{"echo"}, + Args: []string{"foo", "bar"}, + Env: []string{ + "initial=true", + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), + }, + Labels: map[string]string{ + labelVaultPID: myPID, + labelVaultClusterID: "1234", + labelVaultPluginName: "some-plugin", + labelVaultPluginType: "auth", + labelVaultPluginVersion: "v0.1.0", + }, + Runtime: consts.DefaultContainerPluginOCIRuntime, + GroupAdd: os.Getgid(), + }, + }, + "image set, with runtime": { + rc: runConfig{ + sha256: dummySHA, + image: "some-image", + imageTag: "0.1.0", + runtimeConfig: &pluginruntimeutil.PluginRuntimeConfig{ + OCIRuntime: "some-oci-runtime", + CgroupParent: "/cgroup/parent", + CPU: 1000, + Memory: 2000, + }, + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + Name: "some-plugin", + PluginType: consts.PluginTypeCredential, + Version: "v0.1.0", + }, + }, + expected: plugincontainer.Config{ + Image: "some-image", + Tag: "0.1.0", + SHA256: "abc123", + Env: []string{ + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), + 
fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), + }, + Labels: map[string]string{ + labelVaultPID: myPID, + labelVaultClusterID: "1234", + labelVaultPluginName: "some-plugin", + labelVaultPluginType: "auth", + labelVaultPluginVersion: "v0.1.0", + }, + Runtime: "some-oci-runtime", + GroupAdd: os.Getgid(), + CgroupParent: "/cgroup/parent", + NanoCpus: 1000, + Memory: 2000, + }, + }, + } { + t.Run(name, func(t *testing.T) { + mockWrapper := new(mockRunnerUtil) + mockWrapper.On("ResponseWrapData", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, nil) + mockWrapper.On("MlockEnabled"). + Return(false) + tc.rc.Wrapper = mockWrapper + cmd, _, err := tc.rc.generateCmd(context.Background()) + if err != nil { + t.Fatal(err) + } + cfg, err := tc.rc.containerConfig(context.Background(), cmd.Env) + require.NoError(t, err) + require.Equal(t, tc.expected, *cfg) + }) + } +} diff --git a/sdk/helper/pluginutil/runner.go b/sdk/helper/pluginutil/runner.go index 977f95d72208..ebbe110c3474 100644 --- a/sdk/helper/pluginutil/runner.go +++ b/sdk/helper/pluginutil/runner.go @@ -5,16 +5,22 @@ package pluginutil import ( "context" + "errors" + "strings" "time" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/go-version" "github.com/hashicorp/vault/sdk/helper/consts" + prutil "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" "github.com/hashicorp/vault/sdk/helper/wrapping" "google.golang.org/grpc" ) +// ErrPluginNotFound is returned when a plugin does not have a pinned version. +var ErrPinnedVersionNotFound = errors.New("pinned version not found") + // Looker defines the plugin Lookup function that looks into the plugin catalog // for available plugins and returns a PluginRunner type Looker interface { @@ -31,6 +37,7 @@ type RunnerUtil interface { ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) MlockEnabled() bool VaultVersion(ctx context.Context) (string, error) + ClusterID(ctx context.Context) (string, error) } // LookRunnerUtil defines the functions for both Looker and Wrapper @@ -53,12 +60,49 @@ type PluginRunner struct { Name string `json:"name" structs:"name"` Type consts.PluginType `json:"type" structs:"type"` Version string `json:"version" structs:"version"` + OCIImage string `json:"oci_image" structs:"oci_image"` + Runtime string `json:"runtime" structs:"runtime"` Command string `json:"command" structs:"command"` Args []string `json:"args" structs:"args"` Env []string `json:"env" structs:"env"` Sha256 []byte `json:"sha256" structs:"sha256"` Builtin bool `json:"builtin" structs:"builtin"` BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` + RuntimeConfig *prutil.PluginRuntimeConfig `json:"-" structs:"-"` + Tmpdir string `json:"-" structs:"-"` +} + +// BinaryReference returns either the OCI image reference if it's a container +// plugin or the path to the binary if it's a plain process plugin. +func (p *PluginRunner) BinaryReference() string { + if p.Builtin { + return "" + } + if p.OCIImage == "" { + return p.Command + } + + imageRef := p.OCIImage + if p.Version != "" { + imageRef += ":" + strings.TrimPrefix(p.Version, "v") + } + + return imageRef +} + +// SetPluginInput is only used as input for the plugin catalog's set methods. +// We don't use the very similar PluginRunner struct to avoid confusion about +// what's settable, which does not include the builtin fields. 
+type SetPluginInput struct { + Name string + Type consts.PluginType + Version string + Command string + OCIImage string + Runtime string + Args []string + Env []string + Sha256 []byte } // Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and @@ -95,6 +139,8 @@ type VersionedPlugin struct { Type string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses. Name string `json:"name"` Version string `json:"version"` + OCIImage string `json:"oci_image,omitempty"` + Runtime string `json:"runtime,omitempty"` SHA256 string `json:"sha256,omitempty"` Builtin bool `json:"builtin"` DeprecationStatus string `json:"deprecation_status,omitempty"` @@ -103,6 +149,12 @@ type VersionedPlugin struct { SemanticVersion *version.Version `json:"-"` } +type PinnedVersion struct { + Name string `json:"name"` + Type consts.PluginType `json:"type"` + Version string `json:"version"` +} + // CtxCancelIfCanceled takes a context cancel func and a context. If the context is // shutdown the cancelfunc is called. This is useful for merging two cancel // functions. diff --git a/sdk/helper/pointerutil/pointer.go b/sdk/helper/pointerutil/pointer.go index b4bfe114cfdf..a3cb55898207 100644 --- a/sdk/helper/pointerutil/pointer.go +++ b/sdk/helper/pointerutil/pointer.go @@ -6,6 +6,8 @@ package pointerutil import ( "os" "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" ) // StringPtr returns a pointer to a string value @@ -20,7 +22,7 @@ func BoolPtr(b bool) *bool { // TimeDurationPtr returns a pointer to a time duration value func TimeDurationPtr(duration string) *time.Duration { - d, _ := time.ParseDuration(duration) + d, _ := parseutil.ParseDurationSecond(duration) return &d } diff --git a/sdk/helper/policyutil/policyutil.go b/sdk/helper/policyutil/policyutil.go index a5a8082e13c2..a8e6214ab0b6 100644 --- a/sdk/helper/policyutil/policyutil.go +++ b/sdk/helper/policyutil/policyutil.go @@ -79,33 +79,25 @@ func SanitizePolicies(policies []string, addDefault bool) []string { // EquivalentPolicies checks whether the given policy sets are equivalent, as in, // they contain the same values. The benefit of this method is that it leaves // the "default" policy out of its comparisons as it may be added later by core -// after a set of policies has been saved by a backend. +// after a set of policies has been saved by a backend. It also normalizes +// policy names before comparing them.
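In practice the rewritten comparison behaves as in this sketch: "default" is ignored on either side and names are compared case-insensitively.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/policyutil"
)

func main() {
	// Order, case, and the implicit "default" policy don't matter.
	fmt.Println(policyutil.EquivalentPolicies(
		[]string{"foo", "bar"},
		[]string{"BAR", "foo", "default"},
	)) // true

	// nil and a lone "default" are equivalent empty sets.
	fmt.Println(policyutil.EquivalentPolicies(nil, []string{"default"})) // true

	// A genuinely missing policy still fails the comparison.
	fmt.Println(policyutil.EquivalentPolicies([]string{"foo"}, []string{"foo", "bar"})) // false
}
```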
func EquivalentPolicies(a, b []string) bool { - switch { - case a == nil && b == nil: - return true - case a == nil && len(b) == 1 && b[0] == "default": - return true - case b == nil && len(a) == 1 && a[0] == "default": - return true - case a == nil || b == nil: - return false - } - // First we'll build maps to ensure unique values and filter default - mapA := map[string]bool{} - mapB := map[string]bool{} + mapA := map[string]struct{}{} + mapB := map[string]struct{}{} for _, keyA := range a { + keyA := strings.ToLower(keyA) if keyA == "default" { continue } - mapA[keyA] = true + mapA[keyA] = struct{}{} } for _, keyB := range b { + keyB := strings.ToLower(keyB) if keyB == "default" { continue } - mapB[keyB] = true + mapB[keyB] = struct{}{} } // Now we'll build our checking slices diff --git a/sdk/helper/policyutil/policyutil_test.go b/sdk/helper/policyutil/policyutil_test.go index 2280ba93eed8..04aedfae727e 100644 --- a/sdk/helper/policyutil/policyutil_test.go +++ b/sdk/helper/policyutil/policyutil_test.go @@ -56,24 +56,48 @@ func TestParsePolicies(t *testing.T) { } func TestEquivalentPolicies(t *testing.T) { - a := []string{"foo", "bar"} - var b []string - if EquivalentPolicies(a, b) { - t.Fatal("bad") + testCases := map[string]struct { + A []string + B []string + Expected bool + }{ + "nil": { + A: nil, + B: nil, + Expected: true, + }, + "empty": { + A: []string{"foo", "bar"}, + B: []string{}, + Expected: false, + }, + "missing": { + A: []string{"foo", "bar"}, + B: []string{"foo"}, + Expected: false, + }, + "equal": { + A: []string{"bar", "foo"}, + B: []string{"bar", "foo"}, + Expected: true, + }, + "default": { + A: []string{"bar", "foo"}, + B: []string{"foo", "default", "bar"}, + Expected: true, + }, + "case-insensitive": { + A: []string{"test"}, + B: []string{"Test"}, + Expected: true, + }, } - b = []string{"foo"} - if EquivalentPolicies(a, b) { - t.Fatal("bad") - } - - b = []string{"bar", "foo"} - if !EquivalentPolicies(a, b) { - t.Fatal("bad") - } - - b = []string{"foo", "default", "bar"} - if !EquivalentPolicies(a, b) { - t.Fatal("bad") + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + if EquivalentPolicies(tc.A, tc.B) != tc.Expected { + t.Fatal("bad") + } + }) } } diff --git a/sdk/helper/template/template.go b/sdk/helper/template/template.go index dea65f3f5ed3..ade9cde85ae3 100644 --- a/sdk/helper/template/template.go +++ b/sdk/helper/template/template.go @@ -129,6 +129,10 @@ func NewTemplate(opts ...Opt) (up StringTemplate, err error) { return StringTemplate{}, fmt.Errorf("missing template") } + if len(up.rawTemplate) >= 100000 { + return StringTemplate{}, fmt.Errorf("template too large, length of template must be less than 100,000") + } + tmpl, err := template.New("template"). Funcs(up.funcMap). 
Parse(up.rawTemplate) diff --git a/sdk/helper/template/template_test.go b/sdk/helper/template/template_test.go index 2f66bf36fe03..fcea1f0cfcf4 100644 --- a/sdk/helper/template/template_test.go +++ b/sdk/helper/template/template_test.go @@ -5,6 +5,7 @@ package template import ( "fmt" + "strings" "testing" "github.com/stretchr/testify/require" @@ -150,6 +151,16 @@ Some string 6841cf80`, require.Regexp(t, `^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$`, actual) } }) + + t.Run("too-large-overflow", func(t *testing.T) { + data := "{{" + strings.Repeat("(", 1000000) + _, err := NewTemplate( + Template(data), + ) + // We expect an error due to it being too large; + // this test should not fail with an overflow + require.Error(t, err) + }) } func TestBadConstructorArguments(t *testing.T) { diff --git a/sdk/helper/testcluster/consts.go b/sdk/helper/testcluster/consts.go new file mode 100644 index 000000000000..b736b5f88f7f --- /dev/null +++ b/sdk/helper/testcluster/consts.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +const ( + // EnvVaultLicenseCI is the name of an environment variable that contains + // a signed license string used for Vault Enterprise binary-based tests. + // The binary will be run with the env var VAULT_LICENSE set to this value. + EnvVaultLicenseCI = "VAULT_LICENSE_CI" + + // DefaultCAFile is the path to the CA file. This is a docker-specific + // constant. TODO: needs to be moved to a more relevant place + DefaultCAFile = "/vault/config/ca.pem" +) diff --git a/sdk/helper/testcluster/docker/cert.go b/sdk/helper/testcluster/docker/cert.go new file mode 100644 index 000000000000..4704030cb52f --- /dev/null +++ b/sdk/helper/testcluster/docker/cert.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "sync" + + "github.com/hashicorp/errwrap" +) + +// ReloadFunc is a function that is called when a reload is requested +type ReloadFunc func() error + +// CertificateGetter satisfies ReloadFunc and its GetCertificate method +// satisfies the tls.GetCertificate function signature. Currently it does not +// allow changing paths after the fact.
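A minimal usage sketch for the getter (file paths are assumed for illustration; the passphrase is empty for an unencrypted key):

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
)

func main() {
	cg := docker.NewCertificateGetter("/vault/config/cert.pem", "/vault/config/key.pem", "")
	if err := cg.Reload(); err != nil {
		log.Fatalf("initial certificate load failed: %v", err)
	}
	cfg := &tls.Config{
		// Each TLS handshake fetches whatever pair is currently cached,
		// so calling cg.Reload() after the files change rotates the
		// certificate without restarting the listener.
		GetCertificate: cg.GetCertificate,
	}
	_ = cfg
}
```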
+type CertificateGetter struct { + sync.RWMutex + + cert *tls.Certificate + + certFile string + keyFile string + passphrase string +} + +func NewCertificateGetter(certFile, keyFile, passphrase string) *CertificateGetter { + return &CertificateGetter{ + certFile: certFile, + keyFile: keyFile, + passphrase: passphrase, + } +} + +func (cg *CertificateGetter) Reload() error { + certPEMBlock, err := ioutil.ReadFile(cg.certFile) + if err != nil { + return err + } + keyPEMBlock, err := ioutil.ReadFile(cg.keyFile) + if err != nil { + return err + } + + // Check for encrypted pem block + keyBlock, _ := pem.Decode(keyPEMBlock) + if keyBlock == nil { + return errors.New("decoded PEM is blank") + } + + if x509.IsEncryptedPEMBlock(keyBlock) { + keyBlock.Bytes, err = x509.DecryptPEMBlock(keyBlock, []byte(cg.passphrase)) + if err != nil { + return errwrap.Wrapf("Decrypting PEM block failed {{err}}", err) + } + keyPEMBlock = pem.EncodeToMemory(keyBlock) + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return err + } + + cg.Lock() + defer cg.Unlock() + + cg.cert = &cert + + return nil +} + +func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + cg.RLock() + defer cg.RUnlock() + + if cg.cert == nil { + return nil, fmt.Errorf("nil certificate") + } + + return cg.cert, nil +} diff --git a/sdk/helper/testcluster/docker/environment.go b/sdk/helper/testcluster/docker/environment.go new file mode 100644 index 000000000000..8dd40904f7d9 --- /dev/null +++ b/sdk/helper/testcluster/docker/environment.go @@ -0,0 +1,1341 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "bufio" + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/volume" + docker "github.com/docker/docker/client" + "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + dockhelper "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + uberAtomic "go.uber.org/atomic" + "golang.org/x/net/http2" +) + +var ( + _ testcluster.VaultCluster = &DockerCluster{} + _ testcluster.VaultClusterNode = &DockerClusterNode{} +) + +const MaxClusterNameLength = 52 + +// DockerCluster is used to manage the lifecycle of the test Vault cluster +type DockerCluster struct { + ClusterName string + + ClusterNodes []*DockerClusterNode + + // Certificate fields + *testcluster.CA + RootCAs *x509.CertPool + + barrierKeys [][]byte + recoveryKeys [][]byte + tmpDir string + + // rootToken is the initial root token created when the Vault cluster is + // created.
+ rootToken string + DockerAPI *docker.Client + ID string + Logger log.Logger + builtTags map[string]struct{} + + storage testcluster.ClusterStorage +} + +func (dc *DockerCluster) NamedLogger(s string) log.Logger { + return dc.Logger.Named(s) +} + +func (dc *DockerCluster) ClusterID() string { + return dc.ID +} + +func (dc *DockerCluster) Nodes() []testcluster.VaultClusterNode { + ret := make([]testcluster.VaultClusterNode, len(dc.ClusterNodes)) + for i := range dc.ClusterNodes { + ret[i] = dc.ClusterNodes[i] + } + return ret +} + +func (dc *DockerCluster) GetBarrierKeys() [][]byte { + return dc.barrierKeys +} + +func testKeyCopy(key []byte) []byte { + result := make([]byte, len(key)) + copy(result, key) + return result +} + +func (dc *DockerCluster) GetRecoveryKeys() [][]byte { + ret := make([][]byte, len(dc.recoveryKeys)) + for i, k := range dc.recoveryKeys { + ret[i] = testKeyCopy(k) + } + return ret +} + +func (dc *DockerCluster) GetBarrierOrRecoveryKeys() [][]byte { + return dc.GetBarrierKeys() +} + +func (dc *DockerCluster) SetBarrierKeys(keys [][]byte) { + dc.barrierKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.barrierKeys[i] = testKeyCopy(k) + } +} + +func (dc *DockerCluster) SetRecoveryKeys(keys [][]byte) { + dc.recoveryKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.recoveryKeys[i] = testKeyCopy(k) + } +} + +func (dc *DockerCluster) GetCACertPEMFile() string { + return testcluster.DefaultCAFile +} + +func (dc *DockerCluster) Cleanup() { + dc.cleanup() +} + +func (dc *DockerCluster) cleanup() error { + var result *multierror.Error + for _, node := range dc.ClusterNodes { + if err := node.cleanup(); err != nil { + result = multierror.Append(result, err) + } + } + + return result.ErrorOrNil() +} + +// GetRootToken returns the root token of the cluster, if set +func (dc *DockerCluster) GetRootToken() string { + return dc.rootToken +} + +func (dc *DockerCluster) SetRootToken(s string) { + dc.Logger.Trace("cluster root token changed", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", s)) + dc.rootToken = s +} + +func (n *DockerClusterNode) Name() string { + return n.Cluster.ClusterName + "-" + n.NodeID +} + +func (dc *DockerCluster) setupNode0(ctx context.Context) error { + client := dc.ClusterNodes[0].client + + var resp *api.InitResponse + var err error + for ctx.Err() == nil { + resp, err = client.Sys().Init(&api.InitRequest{ + SecretShares: 3, + SecretThreshold: 3, + }) + if err == nil && resp != nil { + break + } + time.Sleep(500 * time.Millisecond) + } + if err != nil { + return err + } + if resp == nil { + return fmt.Errorf("nil response to init request") + } + + for _, k := range resp.Keys { + raw, err := hex.DecodeString(k) + if err != nil { + return err + } + dc.barrierKeys = append(dc.barrierKeys, raw) + } + + for _, k := range resp.RecoveryKeys { + raw, err := hex.DecodeString(k) + if err != nil { + return err + } + dc.recoveryKeys = append(dc.recoveryKeys, raw) + } + + dc.rootToken = resp.RootToken + client.SetToken(dc.rootToken) + dc.ClusterNodes[0].client = client + + err = testcluster.UnsealNode(ctx, dc, 0) + if err != nil { + return err + } + + err = ensureLeaderMatches(ctx, client, func(leader *api.LeaderResponse) error { + if !leader.IsSelf { + return fmt.Errorf("node %d leader=%v, expected=%v", 0, leader.IsSelf, true) + } + + return nil + }) + + status, err := client.Sys().SealStatusWithContext(ctx) + if err != nil { + return err + } + dc.ID = status.ClusterID + return err +} + +func (dc 
*DockerCluster) clusterReady(ctx context.Context) error { + for i, node := range dc.ClusterNodes { + expectLeader := i == 0 + err := ensureLeaderMatches(ctx, node.client, func(leader *api.LeaderResponse) error { + if expectLeader != leader.IsSelf { + return fmt.Errorf("node %d leader=%v, expected=%v", i, leader.IsSelf, expectLeader) + } + + return nil + }) + if err != nil { + return err + } + } + + return nil +} + +func (dc *DockerCluster) setupCA(opts *DockerClusterOptions) error { + var err error + var ca testcluster.CA + + if opts != nil && opts.CAKey != nil { + ca.CAKey = opts.CAKey + } else { + ca.CAKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + } + + var caBytes []byte + if opts != nil && len(opts.CACert) > 0 { + caBytes = opts.CACert + } else { + serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() + CACertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + SerialNumber: big.NewInt(serialNumber), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err = x509.CreateCertificate(rand.Reader, CACertTemplate, CACertTemplate, ca.CAKey.Public(), ca.CAKey) + if err != nil { + return err + } + } + CACert, err := x509.ParseCertificate(caBytes) + if err != nil { + return err + } + ca.CACert = CACert + ca.CACertBytes = caBytes + + CACertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + ca.CACertPEM = pem.EncodeToMemory(CACertPEMBlock) + + ca.CACertPEMFile = filepath.Join(dc.tmpDir, "ca", "ca.pem") + err = os.WriteFile(ca.CACertPEMFile, ca.CACertPEM, 0o755) + if err != nil { + return err + } + + marshaledCAKey, err := x509.MarshalECPrivateKey(ca.CAKey) + if err != nil { + return err + } + CAKeyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledCAKey, + } + ca.CAKeyPEM = pem.EncodeToMemory(CAKeyPEMBlock) + + dc.CA = &ca + + return nil +} + +func (n *DockerClusterNode) setupCert(ip string) error { + var err error + + n.ServerKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + + serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: n.Name(), + }, + DNSNames: []string{"localhost", n.Name()}, + IPAddresses: []net.IP{net.IPv6loopback, net.ParseIP("127.0.0.1"), net.ParseIP(ip)}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(serialNumber), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + n.ServerCertBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, n.Cluster.CACert, n.ServerKey.Public(), n.Cluster.CAKey) + if err != nil { + return err + } + n.ServerCert, err = x509.ParseCertificate(n.ServerCertBytes) + if err != nil { + return err + } + n.ServerCertPEM = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: n.ServerCertBytes, + }) + + marshaledKey, err := x509.MarshalECPrivateKey(n.ServerKey) + if err != nil { + return err + } + n.ServerKeyPEM = pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + }) + + n.ServerCertPEMFile = filepath.Join(n.WorkDir, "cert.pem") + err = 
os.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0o755) + if err != nil { + return err + } + + n.ServerKeyPEMFile = filepath.Join(n.WorkDir, "key.pem") + err = os.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0o755) + if err != nil { + return err + } + + tlsCert, err := tls.X509KeyPair(n.ServerCertPEM, n.ServerKeyPEM) + if err != nil { + return err + } + + certGetter := NewCertificateGetter(n.ServerCertPEMFile, n.ServerKeyPEMFile, "") + if err := certGetter.Reload(); err != nil { + return err + } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + RootCAs: n.Cluster.RootCAs, + ClientCAs: n.Cluster.RootCAs, + ClientAuth: tls.RequestClientCert, + NextProtos: []string{"h2", "http/1.1"}, + GetCertificate: certGetter.GetCertificate, + } + + n.tlsConfig = tlsConfig + + err = os.WriteFile(filepath.Join(n.WorkDir, "ca.pem"), n.Cluster.CACertPEM, 0o755) + if err != nil { + return err + } + return nil +} + +func NewTestDockerCluster(t *testing.T, opts *DockerClusterOptions) *DockerCluster { + if opts == nil { + opts = &DockerClusterOptions{} + } + if opts.ClusterName == "" { + opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + if opts.Logger == nil { + opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) + } + if opts.NetworkName == "" { + opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") + } + + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + t.Cleanup(cancel) + + dc, err := NewDockerCluster(ctx, opts) + if err != nil { + t.Fatal(err) + } + dc.Logger.Trace("cluster started", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", dc.GetRootToken())) + return dc +} + +func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerCluster, error) { + api, err := dockhelper.NewDockerAPI() + if err != nil { + return nil, err + } + + if opts == nil { + opts = &DockerClusterOptions{} + } + if opts.Logger == nil { + opts.Logger = log.NewNullLogger() + } + if opts.VaultLicense == "" { + opts.VaultLicense = os.Getenv(testcluster.EnvVaultLicenseCI) + } + + dc := &DockerCluster{ + DockerAPI: api, + ClusterName: opts.ClusterName, + Logger: opts.Logger, + builtTags: map[string]struct{}{}, + CA: opts.CA, + storage: opts.Storage, + } + + if err := dc.setupDockerCluster(ctx, opts); err != nil { + dc.Cleanup() + return nil, err + } + + return dc, nil +} + +// DockerClusterNode represents a single instance of Vault in a cluster +type DockerClusterNode struct { + NodeID string + HostPort string + client *api.Client + ServerCert *x509.Certificate + ServerCertBytes []byte + ServerCertPEM []byte + ServerCertPEMFile string + ServerKey *ecdsa.PrivateKey + ServerKeyPEM []byte + ServerKeyPEMFile string + tlsConfig *tls.Config + WorkDir string + Cluster *DockerCluster + Container *types.ContainerJSON + DockerAPI *docker.Client + runner *dockhelper.Runner + Logger log.Logger + cleanupContainer func() + RealAPIAddr string + ContainerNetworkName string + ContainerIPAddress string + ImageRepo string + ImageTag string + DataVolumeName string + cleanupVolume func() + AllClients []*api.Client +} + +func (n *DockerClusterNode) TLSConfig() *tls.Config { + return n.tlsConfig.Clone() +} + +func (n *DockerClusterNode) APIClient() *api.Client { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. 
We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + cfg := n.client.CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? + panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(n.Cluster.rootToken) + return client +} + +func (n *DockerClusterNode) APIClientN(listenerNumber int) (*api.Client, error) { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + if listenerNumber >= len(n.AllClients) { + return nil, fmt.Errorf("invalid listener number %d", listenerNumber) + } + cfg := n.AllClients[listenerNumber].CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? 
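+ /*
+    To illustrate the isolation this buys: each APIClient() call can be
+    customized freely without affecting other callers. A minimal sketch,
+    assuming a started cluster dc inside a test body (names hypothetical):
+
+    node := dc.ClusterNodes[0]
+    a := node.APIClient()
+    b := node.APIClient()
+    a.SetNamespace("ns1") // affects only a
+    a.SetToken("")        // affects only a
+    // b still carries the cluster root token and the default namespace.
+    if _, err := b.Sys().ListMounts(); err != nil {
+        t.Fatal(err)
+    }
+ */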
+ panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(n.Cluster.rootToken) + return client, nil +} + +// apiConfig builds the api.Config used to communicate with +// the running Vault server in this DockerClusterNode +func (n *DockerClusterNode) apiConfig() (*api.Config, error) { + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = n.TLSConfig() + if err := http2.ConfigureTransport(transport); err != nil { + return nil, err + } + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + return nil, config.Error + } + + protocol := "https" + if n.tlsConfig == nil { + protocol = "http" + } + config.Address = fmt.Sprintf("%s://%s", protocol, n.HostPort) + + config.HttpClient = client + config.MaxRetries = 0 + return config, nil +} + +func (n *DockerClusterNode) newAPIClient() (*api.Client, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + client.SetToken(n.Cluster.GetRootToken()) + return client, nil +} + +func (n *DockerClusterNode) newAPIClientForAddress(address string) (*api.Client, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + config.Address = fmt.Sprintf("https://%s", address) + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + client.SetToken(n.Cluster.GetRootToken()) + return client, nil +} + +// Cleanup kills the container of the node and deletes its data volume +func (n *DockerClusterNode) Cleanup() { + n.cleanup() +} + +// Stop kills the container of the node +func (n *DockerClusterNode) Stop() { + n.cleanupContainer() +} + +func (n *DockerClusterNode) cleanup() error { + if n.Container == nil || n.Container.ID == "" { + return nil + } + n.cleanupContainer() + n.cleanupVolume() + return nil +} + +func (n *DockerClusterNode) createDefaultListenerConfig() map[string]interface{} { + return map[string]interface{}{"tcp": map[string]interface{}{ + "address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200), + "tls_cert_file": "/vault/config/cert.pem", + "tls_key_file": "/vault/config/key.pem", + "telemetry": map[string]interface{}{ + "unauthenticated_metrics_access": true, + }, + }} +} + +func (n *DockerClusterNode) createTLSDisabledListenerConfig() map[string]interface{} { + return map[string]interface{}{"tcp": map[string]interface{}{ + "address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200), + "telemetry": map[string]interface{}{ + "unauthenticated_metrics_access": true, + }, + "tls_disable": true, + }} +} + +func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error { + if n.DataVolumeName == "" { + vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{}) + if err != nil { + return err + } + n.DataVolumeName = vol.Name + n.cleanupVolume = func() { + _ = n.DockerAPI.VolumeRemove(ctx, vol.Name, false) + } + } + vaultCfg := map[string]interface{}{} + var listenerConfig []map[string]interface{} + + var defaultListenerConfig map[string]interface{} + if opts.DisableTLS { + defaultListenerConfig = n.createTLSDisabledListenerConfig() + } else { + defaultListenerConfig = n.createDefaultListenerConfig() + } + + listenerConfig = append(listenerConfig, defaultListenerConfig) + 
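+ /*
+    The listener maps assembled above end up marshaled into the node's
+    system.json. A minimal sketch of what the default TLS listener renders
+    to, given the values in createDefaultListenerConfig:
+
+    {
+      "listener": [{
+        "tcp": {
+          "address": "0.0.0.0:8200",
+          "tls_cert_file": "/vault/config/cert.pem",
+          "tls_key_file": "/vault/config/key.pem",
+          "telemetry": { "unauthenticated_metrics_access": true }
+        }
+      }]
+    }
+ */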
ports := []string{"8200/tcp", "8201/tcp"} + + if opts.VaultNodeConfig != nil && opts.VaultNodeConfig.AdditionalListeners != nil { + for _, config := range opts.VaultNodeConfig.AdditionalListeners { + cfg := n.createDefaultListenerConfig() + listener := cfg["tcp"].(map[string]interface{}) + listener["address"] = fmt.Sprintf("%s:%d", "0.0.0.0", config.Port) + listener["chroot_namespace"] = config.ChrootNamespace + listener["redact_addresses"] = config.RedactAddresses + listener["redact_cluster_name"] = config.RedactClusterName + listener["redact_version"] = config.RedactVersion + listenerConfig = append(listenerConfig, cfg) + portStr := fmt.Sprintf("%d/tcp", config.Port) + if strutil.StrListContains(ports, portStr) { + return fmt.Errorf("duplicate port %d specified", config.Port) + } + ports = append(ports, portStr) + } + } + vaultCfg["listener"] = listenerConfig + vaultCfg["telemetry"] = map[string]interface{}{ + "disable_hostname": true, + } + + // Setup storage. Default is raft. + storageType := "raft" + storageOpts := map[string]interface{}{ + // TODO add options from vnc + "path": "/vault/file", + "node_id": n.NodeID, + } + + if opts.Storage != nil { + storageType = opts.Storage.Type() + storageOpts = opts.Storage.Opts() + } + + if opts != nil && opts.VaultNodeConfig != nil { + for k, v := range opts.VaultNodeConfig.StorageOptions { + if _, ok := storageOpts[k].(string); !ok { + storageOpts[k] = v + } + } + } + vaultCfg["storage"] = map[string]interface{}{ + storageType: storageOpts, + } + + //// disable_mlock is required for working in the Docker environment with + //// custom plugins + vaultCfg["disable_mlock"] = true + + protocol := "https" + if opts.DisableTLS { + protocol = "http" + } + vaultCfg["api_addr"] = fmt.Sprintf(`%s://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8200`, protocol) + vaultCfg["cluster_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8201` + + vaultCfg["administrative_namespace_path"] = opts.AdministrativeNamespacePath + + systemJSON, err := json.Marshal(vaultCfg) + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(n.WorkDir, "system.json"), systemJSON, 0o644) + if err != nil { + return err + } + + if opts.VaultNodeConfig != nil { + localCfg := *opts.VaultNodeConfig + if opts.VaultNodeConfig.LicensePath != "" { + b, err := os.ReadFile(opts.VaultNodeConfig.LicensePath) + if err != nil || len(b) == 0 { + return fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err) + } + localCfg.LicensePath = "/vault/config/license" + dest := filepath.Join(n.WorkDir, "license") + err = os.WriteFile(dest, b, 0o644) + if err != nil { + return fmt.Errorf("error writing license to %q: %w", dest, err) + } + + } + userJSON, err := json.Marshal(localCfg) + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(n.WorkDir, "user.json"), userJSON, 0o644) + if err != nil { + return err + } + } + + if !opts.DisableTLS { + // Create a temporary cert so vault will start up + err = n.setupCert("127.0.0.1") + if err != nil { + return err + } + } + + caDir := filepath.Join(n.Cluster.tmpDir, "ca") + + // setup plugin bin copy if needed + copyFromTo := map[string]string{ + n.WorkDir: "/vault/config", + caDir: "/usr/local/share/ca-certificates/", + } + + var wg sync.WaitGroup + wg.Add(1) + var seenLogs uberAtomic.Bool + logConsumer := func(s string) { + if seenLogs.CAS(false, true) { + wg.Done() + } + n.Logger.Trace(s) + } + logStdout := 
&LogConsumerWriter{logConsumer} + logStderr := &LogConsumerWriter{func(s string) { + if seenLogs.CAS(false, true) { + wg.Done() + } + testcluster.JSONLogNoTimestamp(n.Logger, s) + }} + + postStartFunc := func(containerID string, realIP string) error { + err := n.setupCert(realIP) + if err != nil { + return err + } + + // If we signal Vault before it installs its sighup handler, it'll die. + wg.Wait() + n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP) + return n.runner.RefreshFiles(ctx, containerID) + } + + if opts.DisableTLS { + postStartFunc = func(containerID string, realIP string) error { + // If we signal Vault before it installs its sighup handler, it'll die. + wg.Wait() + n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP) + return n.runner.RefreshFiles(ctx, containerID) + } + } + + envs := []string{ + // For now we're using disable_mlock, because this is for testing + // anyway, and because it prevents us using external plugins. + "SKIP_SETCAP=true", + "VAULT_LOG_FORMAT=json", + "VAULT_LICENSE=" + opts.VaultLicense, + } + envs = append(envs, opts.Envs...) + + r, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{ + ImageRepo: n.ImageRepo, + ImageTag: n.ImageTag, + // We don't need to run update-ca-certificates in the container, because + // we're providing the CA in the raft join call, and otherwise Vault + // servers don't talk to one another on the API port. + Cmd: append([]string{"server"}, opts.Args...), + Env: envs, + Ports: ports, + ContainerName: n.Name(), + NetworkName: opts.NetworkName, + CopyFromTo: copyFromTo, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + PreDelete: true, + DoNotAutoRemove: true, + PostStart: postStartFunc, + Capabilities: []string{"NET_ADMIN"}, + OmitLogTimestamps: true, + VolumeNameToMountPoint: map[string]string{ + n.DataVolumeName: "/vault/file", + }, + }) + if err != nil { + return err + } + n.runner = r + + probe := opts.StartProbe + if probe == nil { + probe = func(c *api.Client) error { + _, err = c.Sys().SealStatus() + return err + } + } + svc, _, err := r.StartNewService(ctx, false, false, func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + config.Address = fmt.Sprintf("%s://%s:%d", protocol, host, port) + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + err = probe(client) + if err != nil { + return nil, err + } + + return dockhelper.NewServiceHostPort(host, port), nil + }) + if err != nil { + return err + } + + n.HostPort = svc.Config.Address() + n.Container = svc.Container + netName := opts.NetworkName + if netName == "" { + if len(svc.Container.NetworkSettings.Networks) > 1 { + return fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", svc.Container.NetworkSettings.Networks) + } + for netName = range svc.Container.NetworkSettings.Networks { + // Networks above is a map; we just need to find the first and + // only key of this map (network name). The range handles this + // for us, but we need a loop construction in order to use range. 
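+ /*
+    The default StartProbe only requires sys/seal-status to answer. A
+    caller that needs a stronger readiness signal can supply its own
+    probe; a minimal sketch, assuming an image whose nodes come up
+    already initialized and unsealed:
+
+    opts.StartProbe = func(c *api.Client) error {
+        st, err := c.Sys().SealStatus()
+        if err != nil {
+            return err
+        }
+        if st.Sealed {
+            return fmt.Errorf("node still sealed")
+        }
+        return nil
+    }
+ */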
+ } + } + n.ContainerNetworkName = netName + n.ContainerIPAddress = svc.Container.NetworkSettings.Networks[netName].IPAddress + n.RealAPIAddr = protocol + "://" + n.ContainerIPAddress + ":8200" + n.cleanupContainer = svc.Cleanup + + client, err := n.newAPIClient() + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.client = client + + n.AllClients = append(n.AllClients, client) + + for _, addr := range svc.StartResult.Addrs[2:] { + // The second element of this list of addresses is the cluster address + // We do not want to create a client for the cluster address mapping + client, err := n.newAPIClientForAddress(addr) + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.AllClients = append(n.AllClients, client) + } + return nil +} + +func (n *DockerClusterNode) Pause(ctx context.Context) error { + return n.DockerAPI.ContainerPause(ctx, n.Container.ID) +} + +func (n *DockerClusterNode) Restart(ctx context.Context) error { + timeout := 5 + err := n.DockerAPI.ContainerRestart(ctx, n.Container.ID, container.StopOptions{Timeout: &timeout}) + if err != nil { + return err + } + + resp, err := n.DockerAPI.ContainerInspect(ctx, n.Container.ID) + if err != nil { + return fmt.Errorf("error inspecting container after restart: %s", err) + } + + var port int + if len(resp.NetworkSettings.Ports) > 0 { + for key, binding := range resp.NetworkSettings.Ports { + if len(binding) < 1 { + continue + } + + if key == "8200/tcp" { + port, err = strconv.Atoi(binding[0].HostPort) + } + } + } + + if port == 0 { + return fmt.Errorf("failed to find container port after restart") + } + + hostPieces := strings.Split(n.HostPort, ":") + if len(hostPieces) < 2 { + return errors.New("could not parse node hostname") + } + + n.HostPort = fmt.Sprintf("%s:%d", hostPieces[0], port) + + client, err := n.newAPIClient() + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.client = client + + return nil +} + +func (n *DockerClusterNode) AddNetworkDelay(ctx context.Context, delay time.Duration, targetIP string) error { + ip := net.ParseIP(targetIP) + if ip == nil { + return fmt.Errorf("targetIP %q is not an IP address", targetIP) + } + // Let's attempt to get a unique handle for the filter rule; we'll assume that + // every targetIP has a unique last octet, which is true currently for how + // we're doing docker networking. + lastOctet := ip.To4()[3] + + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo isolating node %s", targetIP), + "apk add iproute2", + // If we're running this script a second time on the same node, + // the add dev will fail; since we only want to run the netem + // command once, we'll do so in the case where the add dev doesn't fail. + "tc qdisc add dev eth0 root handle 1: prio && " + + fmt.Sprintf("tc qdisc add dev eth0 parent 1:1 handle 2: netem delay %dms", delay/time.Millisecond), + // Here we create a u32 filter as per https://man7.org/linux/man-pages/man8/tc-u32.8.html + // Its parent is 1:0 (which I guess is the root?) 
+ // Its handle must be unique, so we base it on targetIP + fmt.Sprintf("tc filter add dev eth0 parent 1:0 protocol ip pref 55 handle ::%x u32 match ip dst %s flowid 2:1", lastOctet, targetIP), + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +// PartitionFromCluster will cause the node to be disconnected at the network +// level from the rest of the docker cluster. It does so in a way that the node +// will not see TCP RSTs and all packets it sends will be "black holed". It +// attempts to keep packets to and from the host intact which allows docker +// daemon to continue streaming logs and any test code to continue making +// requests from the host to the partitioned node. +func (n *DockerClusterNode) PartitionFromCluster(ctx context.Context) error { + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo partitioning container from network"), + "apk add iproute2", + "apk add iptables", + // Get the gateway address for the bridge so we can allow host to + // container traffic still. + "GW=$(ip r | grep default | grep eth0 | cut -f 3 -d' ')", + // First delete the rules in case this is called twice otherwise we'll add + // multiple copies and only remove one in Unpartition (yay iptables). + // Ignore the error if it didn't exist. + "iptables -D INPUT -i eth0 ! -s \"$GW\" -j DROP | true", + "iptables -D OUTPUT -o eth0 ! -d \"$GW\" -j DROP | true", + // Add rules to drop all packets in and out of the docker network + // connection. + "iptables -I INPUT -i eth0 ! -s \"$GW\" -j DROP", + "iptables -I OUTPUT -o eth0 ! -d \"$GW\" -j DROP", + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +// UnpartitionFromCluster reverses a previous call to PartitionFromCluster and +// restores full connectivity. Currently assumes the default "bridge" network. +func (n *DockerClusterNode) UnpartitionFromCluster(ctx context.Context) error { + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo un-partitioning container from network"), + // Get the gateway address for the bridge so we can allow host to + // container traffic still. + "GW=$(ip r | grep default | grep eth0 | cut -f 3 -d' ')", + // Remove the rules, ignore if they are not present or iptables wasn't + // installed yet (i.e. no-one called PartitionFromCluster yet). + "iptables -D INPUT -i eth0 ! -s \"$GW\" -j DROP | true", + "iptables -D OUTPUT -o eth0 ! -d \"$GW\" -j DROP | true", + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +type LogConsumerWriter struct { + consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future. 
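+ /*
+    LogConsumerWriter adapts a line-oriented consumer func into an
+    io.Writer. A minimal sketch of wiring one up by hand, assuming an
+    hclog.Logger named logger (within this package, since the consumer
+    field is unexported):
+
+    w := &LogConsumerWriter{consumer: func(line string) {
+        logger.Trace(line)
+    }}
+    fmt.Fprintf(w, "line one\nline two\n") // produces two log entries
+ */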
+ scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.consumer(scanner.Text()) + } + return len(p), nil +} + +// DockerClusterOptions has options for setting up the docker cluster +type DockerClusterOptions struct { + testcluster.ClusterOptions + CAKey *ecdsa.PrivateKey + NetworkName string + ImageRepo string + ImageTag string + CA *testcluster.CA + VaultBinary string + Args []string + Envs []string + StartProbe func(*api.Client) error + Storage testcluster.ClusterStorage + DisableTLS bool +} + +func ensureLeaderMatches(ctx context.Context, client *api.Client, ready func(response *api.LeaderResponse) error) error { + var leader *api.LeaderResponse + var err error + for ctx.Err() == nil { + leader, err = client.Sys().Leader() + switch { + case err != nil: + case leader == nil: + err = fmt.Errorf("nil response to leader check") + default: + err = ready(leader) + if err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("error checking leader: %w", err) +} + +const DefaultNumCores = 3 + +// setupDockerCluster creates and starts the managed docker containers that make up the cluster +func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClusterOptions) error { + if opts.TmpDir != "" { + if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) { + if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { + return err + } + } + dc.tmpDir = opts.TmpDir + } else { + tempDir, err := ioutil.TempDir("", "vault-test-cluster-") + if err != nil { + return err + } + dc.tmpDir = tempDir + } + caDir := filepath.Join(dc.tmpDir, "ca") + if err := os.MkdirAll(caDir, 0o755); err != nil { + return err + } + + var numCores int + if opts.NumCores == 0 { + numCores = DefaultNumCores + } else { + numCores = opts.NumCores + } + + if !opts.DisableTLS { + if dc.CA == nil { + if err := dc.setupCA(opts); err != nil { + return err + } + } + dc.RootCAs = x509.NewCertPool() + dc.RootCAs.AddCert(dc.CA.CACert) + } + + if dc.storage != nil { + if err := dc.storage.Start(ctx, &opts.ClusterOptions); err != nil { + return err + } + } + + for i := 0; i < numCores; i++ { + if err := dc.addNode(ctx, opts); err != nil { + return err + } + if opts.SkipInit { + continue + } + if i == 0 { + if err := dc.setupNode0(ctx); err != nil { + return err + } + } else { + if err := dc.joinNode(ctx, i, 0); err != nil { + return err + } + } + } + + return nil +} + +func (dc *DockerCluster) AddNode(ctx context.Context, opts *DockerClusterOptions) error { + leaderIdx, err := testcluster.LeaderNode(ctx, dc) + if err != nil { + return err + } + if err := dc.addNode(ctx, opts); err != nil { + return err + } + + return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx) +} + +func (dc *DockerCluster) addNode(ctx context.Context, opts *DockerClusterOptions) error { + tag, err := dc.setupImage(ctx, opts) + if err != nil { + return err + } + i := len(dc.ClusterNodes) + nodeID := fmt.Sprintf("core-%d", i) + node := &DockerClusterNode{ + DockerAPI: dc.DockerAPI, + NodeID: nodeID, + Cluster: dc, + WorkDir: filepath.Join(dc.tmpDir, nodeID), + Logger: dc.Logger.Named(nodeID), + ImageRepo: opts.ImageRepo, + ImageTag: tag, + } + dc.ClusterNodes = append(dc.ClusterNodes, node) + if err := os.MkdirAll(node.WorkDir, 0o755); err != nil { + return err + } + if err := node.Start(ctx, opts); err != nil { + return err + } + return nil +} + +func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int) error { + if dc.storage != nil && 
dc.storage.Type() != "raft" { + // Storage is not raft so nothing to do but unseal. + return testcluster.UnsealNode(ctx, dc, nodeIdx) + } + + leader := dc.ClusterNodes[leaderIdx] + + if nodeIdx >= len(dc.ClusterNodes) { + return fmt.Errorf("invalid node %d", nodeIdx) + } + node := dc.ClusterNodes[nodeIdx] + client := node.APIClient() + + var resp *api.RaftJoinResponse + resp, err := client.Sys().RaftJoinWithContext(ctx, &api.RaftJoinRequest{ + // When running locally on a bridge network, the containers must use their + // actual (private) IP to talk to one another. Our code must instead use + // the portmapped address since we're not on their network in that case. + LeaderAPIAddr: leader.RealAPIAddr, + LeaderCACert: string(dc.CACertPEM), + LeaderClientCert: string(node.ServerCertPEM), + LeaderClientKey: string(node.ServerKeyPEM), + }) + if err != nil { + return fmt.Errorf("failed to join cluster: %w", err) + } + if resp == nil || !resp.Joined { + return fmt.Errorf("nil or negative response from raft join request: %v", resp) + } + + return testcluster.UnsealNode(ctx, dc, nodeIdx) +} + +func (dc *DockerCluster) setupImage(ctx context.Context, opts *DockerClusterOptions) (string, error) { + if opts == nil { + opts = &DockerClusterOptions{} + } + sourceTag := opts.ImageTag + if sourceTag == "" { + sourceTag = "latest" + } + + if opts.VaultBinary == "" { + return sourceTag, nil + } + + suffix := "testing" + if sha := os.Getenv("COMMIT_SHA"); sha != "" { + suffix = sha + } + tag := sourceTag + "-" + suffix + if _, ok := dc.builtTags[tag]; ok { + return tag, nil + } + + f, err := os.Open(opts.VaultBinary) + if err != nil { + return "", err + } + defer f.Close() + data, err := io.ReadAll(f) + if err != nil { + return "", err + } + bCtx := dockhelper.NewBuildContext() + bCtx["vault"] = &dockhelper.FileContents{ + Data: data, + Mode: 0o755, + } + + containerFile := fmt.Sprintf(` +FROM %s:%s +COPY vault /bin/vault +`, opts.ImageRepo, sourceTag) + + _, err = dockhelper.BuildImage(ctx, dc.DockerAPI, containerFile, bCtx, + dockhelper.BuildRemove(true), dockhelper.BuildForceRemove(true), + dockhelper.BuildPullParent(true), + dockhelper.BuildTags([]string{opts.ImageRepo + ":" + tag})) + if err != nil { + return "", err + } + dc.builtTags[tag] = struct{}{} + return tag, nil +} + +func (dc *DockerCluster) GetActiveClusterNode() *DockerClusterNode { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + node, err := testcluster.WaitForActiveNode(ctx, dc) + if err != nil { + panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err)) + } + + return dc.ClusterNodes[node] +} + +/* Notes on testing the non-bridge network case: +- you need the test itself to be running in a container so that it can use + the network; create the network using + docker network create testvault +- this means that you need to mount the docker socket in that test container, + but on macos there's stuff that prevents that from working; to hack that, + on the host run + sudo ln -s "$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock" /var/run/docker.sock.raw +- run the test container like + docker run --rm -it --network testvault \ + -v /var/run/docker.sock.raw:/var/run/docker.sock \ + -v $(pwd):/home/circleci/go/src/github.com/hashicorp/vault/ \ + -w /home/circleci/go/src/github.com/hashicorp/vault/ \ + "docker.mirror.hashicorp.services/cimg/go:1.19.2" /bin/bash +- in the container you may need to chown/chmod /var/run/docker.sock; use `docker ps` + to test if it's working + +*/ diff 
--git a/sdk/helper/testcluster/docker/environment_test.go b/sdk/helper/testcluster/docker/environment_test.go new file mode 100644 index 000000000000..bb2376405281 --- /dev/null +++ b/sdk/helper/testcluster/docker/environment_test.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "testing" +) + +func TestSettingEnvsToContainer(t *testing.T) { + expectedEnv := "TEST_ENV=value1" + expectedEnv2 := "TEST_ENV2=value2" + opts := &DockerClusterOptions{ + ImageRepo: "hashicorp/vault", + ImageTag: "latest", + Envs: []string{expectedEnv, expectedEnv2}, + } + cluster := NewTestDockerCluster(t, opts) + defer cluster.Cleanup() + + envs := cluster.GetActiveClusterNode().Container.Config.Env + + if !findEnv(envs, expectedEnv) { + t.Errorf("Missing ENV variable: %s", expectedEnv) + } + if !findEnv(envs, expectedEnv2) { + t.Errorf("Missing ENV variable: %s", expectedEnv2) + } +} + +func findEnv(envs []string, env string) bool { + for _, e := range envs { + if e == env { + return true + } + } + return false +} diff --git a/sdk/helper/testcluster/docker/replication.go b/sdk/helper/testcluster/docker/replication.go new file mode 100644 index 000000000000..c313e7af4d8d --- /dev/null +++ b/sdk/helper/testcluster/docker/replication.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/testcluster" +) + +func DefaultOptions(t *testing.T) *DockerClusterOptions { + return &DockerClusterOptions{ + ImageRepo: "hashicorp/vault", + ImageTag: "latest", + VaultBinary: os.Getenv("VAULT_BINARY"), + ClusterOptions: testcluster.ClusterOptions{ + NumCores: 3, + ClusterName: strings.ReplaceAll(t.Name(), "/", "-"), + VaultNodeConfig: &testcluster.VaultNodeConfig{ + LogLevel: "TRACE", + }, + }, + } +} + +func NewReplicationSetDocker(t *testing.T, opts *DockerClusterOptions) (*testcluster.ReplicationSet, error) { + binary := os.Getenv("VAULT_BINARY") + if binary == "" { + t.Skip("only running docker test when $VAULT_BINARY present") + } + + r := &testcluster.ReplicationSet{ + Clusters: map[string]testcluster.VaultCluster{}, + Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()), + } + + // clusterName is used for container name as well. + // A container name should not exceed 64 chars. + // There are additional chars that are added to the name as well + // like "-A-core0". So, setting a max limit for a cluster name. 
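+ /*
+    A minimal sketch of standing up a perf-replication pair with these
+    helpers (hypothetical test; assumes $VAULT_BINARY points at a Vault
+    Enterprise binary, since perf replication is an enterprise feature):
+
+    func TestPerfReplication(t *testing.T) {
+        r, err := NewReplicationSetDocker(t, DefaultOptions(t))
+        if err != nil {
+            t.Fatal(err)
+        }
+        defer r.Cleanup()
+        if err := r.StandardPerfReplication(context.Background()); err != nil {
+            t.Fatal(err)
+        }
+    }
+ */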
+ if len(opts.ClusterName) > MaxClusterNameLength { + return nil, fmt.Errorf("cluster name length exceeded the maximum allowed length of %v", MaxClusterNameLength) + } + + r.Builder = func(ctx context.Context, name string, baseLogger hclog.Logger) (testcluster.VaultCluster, error) { + myOpts := *opts + myOpts.Logger = baseLogger.Named(name) + if myOpts.ClusterName == "" { + myOpts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + myOpts.ClusterName += "-" + strings.ReplaceAll(name, "/", "-") + myOpts.CA = r.CA + return NewTestDockerCluster(t, &myOpts), nil + } + + a, err := r.Builder(context.TODO(), "A", r.Logger) + if err != nil { + return nil, err + } + r.Clusters["A"] = a + r.CA = a.(*DockerCluster).CA + + return r, err +} diff --git a/sdk/helper/testcluster/exec.go b/sdk/helper/testcluster/exec.go new file mode 100644 index 000000000000..f871e0dabb8a --- /dev/null +++ b/sdk/helper/testcluster/exec.go @@ -0,0 +1,323 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +type ExecDevCluster struct { + ID string + ClusterName string + ClusterNodes []*execDevClusterNode + CACertPEMFile string + barrierKeys [][]byte + recoveryKeys [][]byte + tmpDir string + clientAuthRequired bool + rootToken string + stop func() + stopCh chan struct{} + Logger log.Logger +} + +func (dc *ExecDevCluster) SetRootToken(token string) { + dc.rootToken = token +} + +func (dc *ExecDevCluster) NamedLogger(s string) log.Logger { + return dc.Logger.Named(s) +} + +var _ VaultCluster = &ExecDevCluster{} + +type ExecDevClusterOptions struct { + ClusterOptions + BinaryPath string + // this is -dev-listen-address, defaults to "127.0.0.1:8200" + BaseListenAddress string +} + +func NewTestExecDevServer(t *testing.T, opts *ExecDevClusterOptions) *ExecDevCluster { + if opts == nil { + opts = &ExecDevClusterOptions{} + } + if opts.ClusterName == "" { + opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + if opts.Logger == nil { + opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) // .Named("container") + } + if opts.VaultLicense == "" { + opts.VaultLicense = os.Getenv(EnvVaultLicenseCI) + } + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + t.Cleanup(cancel) + + dc, err := NewExecDevCluster(ctx, opts) + if err != nil { + t.Fatal(err) + } + return dc +} + +func NewExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (*ExecDevCluster, error) { + dc := &ExecDevCluster{ + ClusterName: opts.ClusterName, + stopCh: make(chan struct{}), + } + + if opts == nil { + opts = &ExecDevClusterOptions{} + } + if opts.NumCores == 0 { + opts.NumCores = 3 + } + if err := dc.setupExecDevCluster(ctx, opts); err != nil { + dc.Cleanup() + return nil, err + } + + return dc, nil +} + +func (dc *ExecDevCluster) setupExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (retErr error) { + if opts == nil { + opts = &ExecDevClusterOptions{} + } + if opts.Logger == nil { + opts.Logger = log.NewNullLogger() + } + dc.Logger = opts.Logger + + if opts.TmpDir != "" { + if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) { + if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { + return err + } + } + dc.tmpDir = opts.TmpDir + } else 
{ + tempDir, err := os.MkdirTemp("", "vault-test-cluster-") + if err != nil { + return err + } + dc.tmpDir = tempDir + } + + // This context is used to stop the subprocess + execCtx, cancel := context.WithCancel(context.Background()) + dc.stop = func() { + cancel() + close(dc.stopCh) + } + defer func() { + if retErr != nil { + cancel() + } + }() + + bin := opts.BinaryPath + if bin == "" { + bin = "vault" + } + + clusterJsonPath := filepath.Join(dc.tmpDir, "cluster.json") + args := []string{"server", "-dev", "-dev-cluster-json", clusterJsonPath} + switch { + case opts.NumCores == 1: + args = append(args, "-dev-tls") + default: + return fmt.Errorf("NumCores=1 is the only supported option right now") + } + if opts.BaseListenAddress != "" { + args = append(args, "-dev-listen-address", opts.BaseListenAddress) + } + cmd := exec.CommandContext(execCtx, bin, args...) + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "VAULT_LICENSE="+opts.VaultLicense) + cmd.Env = append(cmd.Env, "VAULT_LOG_FORMAT=json") + cmd.Env = append(cmd.Env, "VAULT_DEV_TEMP_DIR="+dc.tmpDir) + if opts.Logger != nil { + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + go func() { + outlog := opts.Logger.Named("stdout") + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + outlog.Trace(scanner.Text()) + } + }() + stderr, err := cmd.StderrPipe() + if err != nil { + return err + } + go func() { + errlog := opts.Logger.Named("stderr") + scanner := bufio.NewScanner(stderr) + // The default buffer is 4k, and Vault can emit bigger log lines + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + JSONLogNoTimestamp(errlog, scanner.Text()) + } + }() + } + + if err := cmd.Start(); err != nil { + return err + } + + for ctx.Err() == nil { + if b, err := os.ReadFile(clusterJsonPath); err == nil && len(b) > 0 { + var clusterJson ClusterJson + if err := jsonutil.DecodeJSON(b, &clusterJson); err != nil { + continue + } + dc.CACertPEMFile = clusterJson.CACertPath + dc.rootToken = clusterJson.RootToken + for i, node := range clusterJson.Nodes { + config := api.DefaultConfig() + config.Address = node.APIAddress + err := config.ConfigureTLS(&api.TLSConfig{ + CACert: clusterJson.CACertPath, + }) + if err != nil { + return err + } + client, err := api.NewClient(config) + if err != nil { + return err + } + client.SetToken(dc.rootToken) + _, err = client.Sys().ListMounts() + if err != nil { + return err + } + + dc.ClusterNodes = append(dc.ClusterNodes, &execDevClusterNode{ + name: fmt.Sprintf("core-%d", i), + client: client, + }) + } + return nil + } + time.Sleep(500 * time.Millisecond) + } + + return ctx.Err() +} + +type execDevClusterNode struct { + name string + client *api.Client +} + +var _ VaultClusterNode = &execDevClusterNode{} + +func (e *execDevClusterNode) Name() string { + return e.name +} + +func (e *execDevClusterNode) APIClient() *api.Client { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? 
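+ /*
+    A minimal sketch of using the exec-based dev cluster (hypothetical
+    test; assumes a vault binary reachable via BinaryPath or on $PATH):
+
+    func TestWithDevServer(t *testing.T) {
+        dc := NewTestExecDevServer(t, &ExecDevClusterOptions{
+            ClusterOptions: ClusterOptions{NumCores: 1},
+            BinaryPath:     "vault",
+        })
+        defer dc.Cleanup()
+        client := dc.ClusterNodes[0].APIClient()
+        if _, err := client.Sys().Health(); err != nil {
+            t.Fatal(err)
+        }
+    }
+ */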
+ cfg := e.client.CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? + panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(e.client.Token()) + return client +} + +func (e *execDevClusterNode) TLSConfig() *tls.Config { + return e.client.CloneConfig().TLSConfig() +} + +func (dc *ExecDevCluster) ClusterID() string { + return dc.ID +} + +func (dc *ExecDevCluster) Nodes() []VaultClusterNode { + ret := make([]VaultClusterNode, len(dc.ClusterNodes)) + for i := range dc.ClusterNodes { + ret[i] = dc.ClusterNodes[i] + } + return ret +} + +func (dc *ExecDevCluster) GetBarrierKeys() [][]byte { + return dc.barrierKeys +} + +func copyKey(key []byte) []byte { + result := make([]byte, len(key)) + copy(result, key) + return result +} + +func (dc *ExecDevCluster) GetRecoveryKeys() [][]byte { + ret := make([][]byte, len(dc.recoveryKeys)) + for i, k := range dc.recoveryKeys { + ret[i] = copyKey(k) + } + return ret +} + +func (dc *ExecDevCluster) GetBarrierOrRecoveryKeys() [][]byte { + return dc.GetBarrierKeys() +} + +func (dc *ExecDevCluster) SetBarrierKeys(keys [][]byte) { + dc.barrierKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.barrierKeys[i] = copyKey(k) + } +} + +func (dc *ExecDevCluster) SetRecoveryKeys(keys [][]byte) { + dc.recoveryKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.recoveryKeys[i] = copyKey(k) + } +} + +func (dc *ExecDevCluster) GetCACertPEMFile() string { + return dc.CACertPEMFile +} + +func (dc *ExecDevCluster) Cleanup() { + dc.stop() +} + +// GetRootToken returns the root token of the cluster, if set +func (dc *ExecDevCluster) GetRootToken() string { + return dc.rootToken +} diff --git a/sdk/helper/testcluster/generaterootkind_enumer.go b/sdk/helper/testcluster/generaterootkind_enumer.go new file mode 100644 index 000000000000..367c1a5df400 --- /dev/null +++ b/sdk/helper/testcluster/generaterootkind_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=GenerateRootKind -trimprefix=GenerateRoot"; DO NOT EDIT. + +package testcluster + +import ( + "fmt" +) + +const _GenerateRootKindName = "RegularDRGenerateRecovery" + +var _GenerateRootKindIndex = [...]uint8{0, 7, 9, 25} + +func (i GenerateRootKind) String() string { + if i < 0 || i >= GenerateRootKind(len(_GenerateRootKindIndex)-1) { + return fmt.Sprintf("GenerateRootKind(%d)", i) + } + return _GenerateRootKindName[_GenerateRootKindIndex[i]:_GenerateRootKindIndex[i+1]] +} + +var _GenerateRootKindValues = []GenerateRootKind{0, 1, 2} + +var _GenerateRootKindNameToValueMap = map[string]GenerateRootKind{ + _GenerateRootKindName[0:7]: 0, + _GenerateRootKindName[7:9]: 1, + _GenerateRootKindName[9:25]: 2, +} + +// GenerateRootKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func GenerateRootKindString(s string) (GenerateRootKind, error) { + if val, ok := _GenerateRootKindNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to GenerateRootKind values", s) +} + +// GenerateRootKindValues returns all values of the enum +func GenerateRootKindValues() []GenerateRootKind { + return _GenerateRootKindValues +} + +// IsAGenerateRootKind returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i GenerateRootKind) IsAGenerateRootKind() bool { + for _, v := range _GenerateRootKindValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/testcluster/logging.go b/sdk/helper/testcluster/logging.go new file mode 100644 index 000000000000..dda759c7f84f --- /dev/null +++ b/sdk/helper/testcluster/logging.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "encoding/json" + "strings" + + "github.com/hashicorp/go-hclog" +) + +func JSONLogNoTimestamp(outlog hclog.Logger, text string) { + d := json.NewDecoder(strings.NewReader(text)) + m := map[string]interface{}{} + if err := d.Decode(&m); err != nil { + outlog.Error("failed to decode json output from dev vault", "error", err, "input", text) + return + } + + delete(m, "@timestamp") + message := m["@message"].(string) + delete(m, "@message") + level := m["@level"].(string) + delete(m, "@level") + if module, ok := m["@module"]; ok { + delete(m, "@module") + outlog = outlog.Named(module.(string)) + } + + var pairs []interface{} + for k, v := range m { + pairs = append(pairs, k, v) + } + + outlog.Log(hclog.LevelFromString(level), message, pairs...) +} diff --git a/sdk/helper/testcluster/replication.go b/sdk/helper/testcluster/replication.go new file mode 100644 index 000000000000..1ab4485c50fa --- /dev/null +++ b/sdk/helper/testcluster/replication.go @@ -0,0 +1,918 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +func GetPerformanceToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { + client := pri.Nodes()[0].APIClient() + req := map[string]interface{}{ + "id": id, + } + if secondaryPublicKey != "" { + req["secondary_public_key"] = secondaryPublicKey + } + secret, err := client.Logical().Write("sys/replication/performance/primary/secondary-token", req) + if err != nil { + return "", err + } + + if secondaryPublicKey != "" { + return secret.Data["token"].(string), nil + } + return secret.WrapInfo.Token, nil +} + +func EnablePerfPrimary(ctx context.Context, pri VaultCluster) error { + client := pri.Nodes()[0].APIClient() + _, err := client.Logical().WriteWithContext(ctx, "sys/replication/performance/primary/enable", nil) + if err != nil { + return fmt.Errorf("error enabling perf primary: %w", err) + } + + err = WaitForPerfReplicationState(ctx, pri, consts.ReplicationPerformancePrimary) + if err != nil { + return fmt.Errorf("error waiting for perf primary to have the correct state: %w", err) + } + return WaitForActiveNodeAndPerfStandbys(ctx, pri) +} + +func WaitForPerfReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { + client := cluster.Nodes()[0].APIClient() + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.ReplicationPerformanceMode == state.GetPerformanceString() { + return nil + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func EnablePerformanceSecondaryNoWait(ctx context.Context, perfToken string, pri, sec VaultCluster, 
updatePrimary bool) error { + postData := map[string]interface{}{ + "token": perfToken, + "ca_file": pri.GetCACertPEMFile(), + } + path := "sys/replication/performance/secondary/enable" + if updatePrimary { + path = "sys/replication/performance/secondary/update-primary" + } + err := WaitForActiveNodeAndPerfStandbys(ctx, sec) + if err != nil { + return err + } + _, err = sec.Nodes()[0].APIClient().Logical().Write(path, postData) + if err != nil { + return err + } + + return WaitForPerfReplicationState(ctx, sec, consts.ReplicationPerformanceSecondary) +} + +func EnablePerformanceSecondary(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary, skipPoisonPill bool) (string, error) { + if err := EnablePerformanceSecondaryNoWait(ctx, perfToken, pri, sec, updatePrimary); err != nil { + return "", err + } + if err := WaitForMatchingMerkleRoots(ctx, "sys/replication/performance/", pri, sec); err != nil { + return "", err + } + root, err := WaitForPerformanceSecondary(ctx, pri, sec, skipPoisonPill) + if err != nil { + return "", err + } + if err := WaitForPerfReplicationWorking(ctx, pri, sec); err != nil { + return "", err + } + return root, nil +} + +func WaitForMatchingMerkleRoots(ctx context.Context, endpoint string, pri, sec VaultCluster) error { + return WaitForMatchingMerkleRootsClients(ctx, endpoint, pri.Nodes()[0].APIClient(), sec.Nodes()[0].APIClient()) +} + +func WaitForMatchingMerkleRootsClients(ctx context.Context, endpoint string, pri, sec *api.Client) error { + getRoot := func(mode string, cli *api.Client) (string, error) { + status, err := cli.Logical().Read(endpoint + "status") + if err != nil { + return "", err + } + if status == nil || status.Data == nil || status.Data["mode"] == nil { + return "", fmt.Errorf("got nil secret or data") + } + if status.Data["mode"].(string) != mode { + return "", fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string)) + } + return status.Data["merkle_root"].(string), nil + } + + var priRoot, secRoot string + var err error + genRet := func() error { + return fmt.Errorf("unequal merkle roots, pri=%s sec=%s, err=%w", priRoot, secRoot, err) + } + for ctx.Err() == nil { + secRoot, err = getRoot("secondary", sec) + if err != nil { + return genRet() + } + priRoot, err = getRoot("primary", pri) + if err != nil { + return genRet() + } + + if reflect.DeepEqual(priRoot, secRoot) { + return nil + } + time.Sleep(time.Second) + } + + return fmt.Errorf("roots did not become equal") +} + +func WaitForPerformanceWAL(ctx context.Context, pri, sec VaultCluster) error { + endpoint := "sys/replication/performance/" + if err := WaitForMatchingMerkleRoots(ctx, endpoint, pri, sec); err != nil { + return err + } + getWAL := func(mode, walKey string, cli *api.Client) (int64, error) { + status, err := cli.Logical().Read(endpoint + "status") + if err != nil { + return 0, err + } + if status == nil || status.Data == nil || status.Data["mode"] == nil { + return 0, fmt.Errorf("got nil secret or data") + } + if status.Data["mode"].(string) != mode { + return 0, fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string)) + } + return status.Data[walKey].(json.Number).Int64() + } + + secClient := sec.Nodes()[0].APIClient() + priClient := pri.Nodes()[0].APIClient() + for ctx.Err() == nil { + secLastRemoteWAL, err := getWAL("secondary", "last_remote_wal", secClient) + if err != nil { + return err + } + priLastPerfWAL, err := getWAL("primary", "last_performance_wal", priClient) + if err != nil { + return err + } + + if 
secLastRemoteWAL >= priLastPerfWAL { + return nil + } + time.Sleep(time.Second) + } + + return fmt.Errorf("performance WALs on the secondary did not catch up with the primary, context err: %w", ctx.Err()) +} + +func WaitForPerformanceSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) (string, error) { + if len(pri.GetRecoveryKeys()) > 0 { + sec.SetBarrierKeys(pri.GetRecoveryKeys()) + sec.SetRecoveryKeys(pri.GetRecoveryKeys()) + } else { + sec.SetBarrierKeys(pri.GetBarrierKeys()) + sec.SetRecoveryKeys(pri.GetBarrierKeys()) + } + + if len(sec.Nodes()) > 1 { + if skipPoisonPill { + // As part of prepareSecondary on the active node the keyring is + // deleted from storage. Its absence can cause standbys to seal + // themselves. But it's not reliable, so we'll seal them + // ourselves to force the issue. + for i := range sec.Nodes()[1:] { + if err := SealNode(ctx, sec, i+1); err != nil { + return "", err + } + } + } else { + // We want to make sure we unseal all the nodes so we first need to wait + // until two of the nodes seal due to the poison pill being written + if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { + return "", err + } + } + } + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return "", err + } + if err := UnsealAllNodes(ctx, sec); err != nil { + return "", err + } + + perfSecondaryRootToken, err := GenerateRoot(sec, GenerateRootRegular) + if err != nil { + return "", err + } + sec.SetRootToken(perfSecondaryRootToken) + if err := WaitForActiveNodeAndPerfStandbys(ctx, sec); err != nil { + return "", err + } + + return perfSecondaryRootToken, nil +} + +func WaitForPerfReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { + priActiveIdx, err := WaitForActiveNode(ctx, pri) + if err != nil { + return err + } + secActiveIdx, err := WaitForActiveNode(ctx, sec) + if err != nil { + return err + } + + priClient, secClient := pri.Nodes()[priActiveIdx].APIClient(), sec.Nodes()[secActiveIdx].APIClient() + mountPoint, err := uuid.GenerateUUID() + if err != nil { + return err + } + err = priClient.Sys().Mount(mountPoint, &api.MountInput{ + Type: "kv", + Local: false, + }) + if err != nil { + return fmt.Errorf("unable to mount KV engine on primary") + } + + path := mountPoint + "/foo" + _, err = priClient.Logical().Write(path, map[string]interface{}{ + "bar": 1, + }) + if err != nil { + return fmt.Errorf("unable to write KV on primary, path=%s", path) + } + + for ctx.Err() == nil { + var secret *api.Secret + secret, err = secClient.Logical().Read(path) + if err == nil && secret != nil { + err = priClient.Sys().Unmount(mountPoint) + if err != nil { + return fmt.Errorf("unable to unmount KV engine on primary") + } + return nil + } + time.Sleep(100 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return fmt.Errorf("unable to read replicated KV on secondary, path=%s, err=%v", path, err) +} + +func SetupTwoClusterPerfReplication(ctx context.Context, pri, sec VaultCluster) error { + if err := EnablePerfPrimary(ctx, pri); err != nil { + return fmt.Errorf("failed to enable perf primary: %w", err) + } + perfToken, err := GetPerformanceToken(pri, sec.ClusterID(), "") + if err != nil { + return fmt.Errorf("failed to get performance token from perf primary: %w", err) + } + + _, err = EnablePerformanceSecondary(ctx, perfToken, pri, sec, false, false) + if err != nil { + return fmt.Errorf("failed to enable perf secondary: %w", err) + } + return nil +} + +// PassiveWaitForActiveNodeAndPerfStandbys should be used 
instead of +// WaitForActiveNodeAndPerfStandbys when you don't want to do any writes +// as a side-effect. This returns perfStandby nodes in the cluster and +// an error. +func PassiveWaitForActiveNodeAndPerfStandbys(ctx context.Context, pri VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { + leaderNode, standbys, err := GetActiveAndStandbys(ctx, pri) + if err != nil { + return nil, nil, fmt.Errorf("failed to derive standby nodes, %w", err) + } + + for i, node := range standbys { + client := node.APIClient() + // Make sure we get perf standby nodes + if err = EnsureCoreIsPerfStandby(ctx, client); err != nil { + return nil, nil, fmt.Errorf("standby node %d is not a perfStandby, %w", i, err) + } + } + + return leaderNode, standbys, nil +} + +func GetActiveAndStandbys(ctx context.Context, cluster VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { + var leaderIndex int + var err error + if leaderIndex, err = WaitForActiveNode(ctx, cluster); err != nil { + return nil, nil, err + } + + var leaderNode VaultClusterNode + var nodes []VaultClusterNode + for i, node := range cluster.Nodes() { + if i == leaderIndex { + leaderNode = node + continue + } + nodes = append(nodes, node) + } + + return leaderNode, nodes, nil +} + +func EnsureCoreIsPerfStandby(ctx context.Context, client *api.Client) error { + var err error + var health *api.HealthResponse + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.PerformanceStandby { + return nil + } + time.Sleep(time.Millisecond * 500) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func WaitForDRReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { + client := cluster.Nodes()[0].APIClient() + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.ReplicationDRMode == state.GetDRString() { + return nil + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func EnableDrPrimary(ctx context.Context, pri VaultCluster) error { + client := pri.Nodes()[0].APIClient() + _, err := client.Logical().Write("sys/replication/dr/primary/enable", nil) + if err != nil { + return err + } + + err = WaitForDRReplicationState(ctx, pri, consts.ReplicationDRPrimary) + if err != nil { + return err + } + return WaitForActiveNodeAndPerfStandbys(ctx, pri) +} + +func GenerateDRActivationToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { + client := pri.Nodes()[0].APIClient() + req := map[string]interface{}{ + "id": id, + } + if secondaryPublicKey != "" { + req["secondary_public_key"] = secondaryPublicKey + } + secret, err := client.Logical().Write("sys/replication/dr/primary/secondary-token", req) + if err != nil { + return "", err + } + + if secondaryPublicKey != "" { + return secret.Data["token"].(string), nil + } + return secret.WrapInfo.Token, nil +} + +func WaitForDRSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) error { + if len(pri.GetRecoveryKeys()) > 0 { + sec.SetBarrierKeys(pri.GetRecoveryKeys()) + sec.SetRecoveryKeys(pri.GetRecoveryKeys()) + } else { + sec.SetBarrierKeys(pri.GetBarrierKeys()) + sec.SetRecoveryKeys(pri.GetBarrierKeys()) + } + + if len(sec.Nodes()) > 1 { + if skipPoisonPill { + // As part of prepareSecondary on the active node the keyring is + // deleted from storage. Its absence can cause standbys to seal + // themselves. 
But it's not reliable, so we'll seal them + // ourselves to force the issue. + for i := range sec.Nodes()[1:] { + if err := SealNode(ctx, sec, i+1); err != nil { + return err + } + } + } else { + // We want to make sure we unseal all the nodes so we first need to wait + // until two of the nodes seal due to the poison pill being written + if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { + return err + } + } + } + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + // unseal nodes + for i := range sec.Nodes() { + if err := UnsealNode(ctx, sec, i); err != nil { + // Sometimes when we get here it's already unsealed on its own + // and then this fails for DR secondaries so check again + // The error is "path disabled in replication DR secondary mode". + if healthErr := NodeHealthy(ctx, sec, i); healthErr != nil { + // return the original error + return err + } + } + } + + sec.SetRootToken(pri.GetRootToken()) + + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + return nil +} + +func EnableDRSecondaryNoWait(ctx context.Context, sec VaultCluster, drToken string) error { + postData := map[string]interface{}{ + "token": drToken, + "ca_file": sec.GetCACertPEMFile(), + } + + _, err := sec.Nodes()[0].APIClient().Logical().Write("sys/replication/dr/secondary/enable", postData) + if err != nil { + return err + } + + return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary) +} + +func WaitForReplicationStatus(ctx context.Context, client *api.Client, dr bool, accept func(map[string]interface{}) error) error { + url := "sys/replication/performance/status" + if dr { + url = "sys/replication/dr/status" + } + + var err error + var secret *api.Secret + for ctx.Err() == nil { + secret, err = client.Logical().Read(url) + if err == nil && secret != nil && secret.Data != nil { + if err = accept(secret.Data); err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + + return fmt.Errorf("unable to get acceptable replication status: error=%v secret=%#v", err, secret) +} + +func WaitForDRReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { + priClient := pri.Nodes()[0].APIClient() + secClient := sec.Nodes()[0].APIClient() + + // Make sure we've entered stream-wals mode + err := WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) error { + state := secret["state"] + if state == string("stream-wals") { + return nil + } + return fmt.Errorf("expected stream-wals replication state, got %v", state) + }) + if err != nil { + return err + } + + // Now write some data and make sure that we see last_remote_wal nonzero, i.e. + // at least one WAL has been streamed. + secret, err := priClient.Auth().Token().Create(&api.TokenCreateRequest{}) + if err != nil { + return err + } + + // Revoke the token since some tests won't be happy to see it. 
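+ /*
+    WaitForReplicationStatus takes an arbitrary predicate over the status
+    payload, so tests can wait on conditions other than the ones used in
+    this file; a minimal sketch, assuming a DR secondary client secClient:
+
+    err := WaitForReplicationStatus(ctx, secClient, true,
+        func(data map[string]interface{}) error {
+            if data["mode"] == "secondary" {
+                return nil
+            }
+            return fmt.Errorf("expected mode=secondary, got %v", data["mode"])
+        })
+ */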
+ err = priClient.Auth().Token().RevokeTree(secret.Auth.ClientToken) + if err != nil { + return err + } + + err = WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) error { + state := secret["state"] + if state != string("stream-wals") { + return fmt.Errorf("expected stream-wals replication state, got %v", state) + } + + if secret["last_remote_wal"] != nil { + lastRemoteWal, _ := secret["last_remote_wal"].(json.Number).Int64() + if lastRemoteWal <= 0 { + return fmt.Errorf("expected last_remote_wal to be greater than zero") + } + return nil + } + + return fmt.Errorf("replication is still catching up: last_remote_wal not yet reported") + }) + if err != nil { + return err + } + return nil +} + +func EnableDrSecondary(ctx context.Context, pri, sec VaultCluster, drToken string) error { + err := EnableDRSecondaryNoWait(ctx, sec, drToken) + if err != nil { + return err + } + + if err = WaitForMatchingMerkleRoots(ctx, "sys/replication/dr/", pri, sec); err != nil { + return err + } + + err = WaitForDRSecondary(ctx, pri, sec, false) + if err != nil { + return err + } + + if err = WaitForDRReplicationWorking(ctx, pri, sec); err != nil { + return err + } + return nil +} + +func SetupTwoClusterDRReplication(ctx context.Context, pri, sec VaultCluster) error { + if err := EnableDrPrimary(ctx, pri); err != nil { + return err + } + + drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "") + if err != nil { + return err + } + err = EnableDrSecondary(ctx, pri, sec, drToken) + if err != nil { + return err + } + return nil +} + +func DemoteDRPrimary(client *api.Client) error { + _, err := client.Logical().Write("sys/replication/dr/primary/demote", map[string]interface{}{}) + return err +} + +func createBatchToken(client *api.Client, path string) (string, error) { + // TODO: should these be more random in case more than one batch token needs to be created? + suffix := strings.Replace(path, "/", "", -1) + policyName := "path-batch-policy-" + suffix + roleName := "path-batch-role-" + suffix + + rules := fmt.Sprintf(`path "%s" { capabilities = [ "read", "update" ] }`, path) + + // create policy + _, err := client.Logical().Write("sys/policy/"+policyName, map[string]interface{}{ + "policy": rules, + }) + if err != nil { + return "", err + } + + // create a role + _, err = client.Logical().Write("auth/token/roles/"+roleName, map[string]interface{}{ + "allowed_policies": policyName, + "orphan": true, + "renewable": false, + "token_type": "batch", + }) + if err != nil { + return "", err + } + + // create batch token + secret, err := client.Logical().Write("auth/token/create/"+roleName, nil) + if err != nil { + return "", err + } + + return secret.Auth.ClientToken, nil +} + +// PromoteDRSecondaryWithBatchToken creates a batch token for DR promotion; +// before promoting, it demotes the primary cluster. The primary cluster needs +// to be functional to generate the batch token. +func PromoteDRSecondaryWithBatchToken(ctx context.Context, pri, sec VaultCluster) error { + client := pri.Nodes()[0].APIClient() + drToken, err := createBatchToken(client, "sys/replication/dr/secondary/promote") + if err != nil { + return err + } + + err = DemoteDRPrimary(client) + if err != nil { + return err + } + + return promoteDRSecondaryInternal(ctx, sec, drToken) +} + +// PromoteDRSecondary generates a DR operation token on the secondary using +// unseal/recovery keys. Therefore, the primary cluster could potentially +// be out of service. 
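+ /*
+    Putting the DR helpers together, a failover sketch (hypothetical test
+    body, assuming two running clusters pri and sec):
+
+    if err := SetupTwoClusterDRReplication(ctx, pri, sec); err != nil {
+        t.Fatal(err)
+    }
+    // Simulate losing the primary, then promote the DR secondary with a
+    // DR operation token generated from its recovery/unseal keys.
+    pri.Cleanup()
+    if err := PromoteDRSecondary(ctx, sec); err != nil {
+        t.Fatal(err)
+    }
+ */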
+func PromoteDRSecondary(ctx context.Context, sec VaultCluster) error {
+	// generate a DR operation token on the secondary using unseal/recovery
+	// keys, then use it to promote the secondary
+	drToken, err := GenerateRoot(sec, GenerateRootDR)
+	if err != nil {
+		return err
+	}
+	return promoteDRSecondaryInternal(ctx, sec, drToken)
+}
+
+func promoteDRSecondaryInternal(ctx context.Context, sec VaultCluster, drToken string) error {
+	secClient := sec.Nodes()[0].APIClient()
+
+	// Allow retries of 503s, e.g.: replication is still catching up,
+	// try again later or provide the "force" argument
+	oldMaxRetries := secClient.MaxRetries()
+	secClient.SetMaxRetries(10)
+	defer secClient.SetMaxRetries(oldMaxRetries)
+	resp, err := secClient.Logical().Write("sys/replication/dr/secondary/promote", map[string]interface{}{
+		"dr_operation_token": drToken,
+	})
+	if err != nil {
+		return err
+	}
+	if resp == nil {
+		return fmt.Errorf("nil status response during DR promotion")
+	}
+
+	if _, err := WaitForActiveNode(ctx, sec); err != nil {
+		return err
+	}
+
+	return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRPrimary)
+}
+
+func checkClusterAddr(ctx context.Context, pri, sec VaultCluster) error {
+	priClient := pri.Nodes()[0].APIClient()
+	priLeader, err := priClient.Sys().LeaderWithContext(ctx)
+	if err != nil {
+		return err
+	}
+	secClient := sec.Nodes()[0].APIClient()
+	endpoint := "sys/replication/dr/"
+	status, err := secClient.Logical().Read(endpoint + "status")
+	if err != nil {
+		return err
+	}
+	if status == nil || status.Data == nil {
+		return fmt.Errorf("got nil secret or data")
+	}
+
+	var priAddrs []string
+	err = mapstructure.Decode(status.Data["known_primary_cluster_addrs"], &priAddrs)
+	if err != nil {
+		return err
+	}
+	if !strutil.StrListContains(priAddrs, priLeader.LeaderClusterAddress) {
+		return fmt.Errorf("failed to find the expected primary cluster address %v in known_primary_cluster_addrs", priLeader.LeaderClusterAddress)
+	}
+
+	return nil
+}
+
+func UpdatePrimary(ctx context.Context, pri, sec VaultCluster) error {
+	// generate DR operation token to do update primary on vC to point to
+	// the new perfSec primary vD
+	rootToken, err := GenerateRoot(sec, GenerateRootDR)
+	if err != nil {
+		return err
+	}
+
+	// secondary activation token
+	drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "")
+	if err != nil {
+		return err
+	}
+
+	// update-primary on vC (new perfSec DR secondary) to point to
+	// the new perfSec DR primary
+	secClient := sec.Nodes()[0].APIClient()
+	resp, err := secClient.Logical().Write("sys/replication/dr/secondary/update-primary", map[string]interface{}{
+		"dr_operation_token": rootToken,
+		"token":              drToken,
+		"ca_file":            sec.GetCACertPEMFile(),
+	})
+	if err != nil {
+		return err
+	}
+	if resp == nil {
+		return fmt.Errorf("nil status response during update primary")
+	}
+
+	if _, err = WaitForActiveNode(ctx, sec); err != nil {
+		return err
+	}
+
+	if err = WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary); err != nil {
+		return err
+	}
+
+	if err = checkClusterAddr(ctx, pri, sec); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func SetupFourClusterReplication(ctx context.Context, pri, sec, pridr, secdr VaultCluster) error {
+	err := SetupTwoClusterPerfReplication(ctx, pri, sec)
+	if err != nil {
+		return err
+	}
+	err = SetupTwoClusterDRReplication(ctx, pri, pridr)
+	if err != nil {
+		return err
+	}
+	err = SetupTwoClusterDRReplication(ctx, sec, secdr)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+type ReplicationSet
struct { + // By convention, we recommend the following naming scheme for + // clusters in this map: + // A: perf primary + // B: primary's DR + // C: first perf secondary of A + // D: C's DR + // E: second perf secondary of A + // F: E's DR + // ... etc. + // + // We use generic names rather than role-specific names because + // that's less confusing when promotions take place that result in role + // changes. In other words, if D gets promoted to replace C as a perf + // secondary, and C gets demoted and updated to become D's DR secondary, + // they should maintain their initial names of D and C throughout. + Clusters map[string]VaultCluster + Builder ClusterBuilder + Logger hclog.Logger + CA *CA +} + +type ClusterBuilder func(ctx context.Context, name string, logger hclog.Logger) (VaultCluster, error) + +func NewReplicationSet(b ClusterBuilder) (*ReplicationSet, error) { + return &ReplicationSet{ + Clusters: map[string]VaultCluster{}, + Builder: b, + Logger: hclog.NewNullLogger(), + }, nil +} + +func (r *ReplicationSet) StandardPerfReplication(ctx context.Context) error { + for _, name := range []string{"A", "C"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := SetupTwoClusterPerfReplication(ctx, r.Clusters["A"], r.Clusters["C"]) + if err != nil { + return err + } + + return nil +} + +func (r *ReplicationSet) StandardDRReplication(ctx context.Context) error { + for _, name := range []string{"A", "B"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := SetupTwoClusterDRReplication(ctx, r.Clusters["A"], r.Clusters["B"]) + if err != nil { + return err + } + + return nil +} + +func (r *ReplicationSet) GetFourReplicationCluster(ctx context.Context) error { + for _, name := range []string{"A", "B", "C", "D"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := SetupFourClusterReplication(ctx, r.Clusters["A"], r.Clusters["C"], r.Clusters["B"], r.Clusters["D"]) + if err != nil { + return err + } + return nil +} + +func (r *ReplicationSet) Cleanup() { + for _, cluster := range r.Clusters { + cluster.Cleanup() + } +} + +func WaitForPerfReplicationConnectionStatus(ctx context.Context, client *api.Client) error { + type Primary struct { + APIAddress string `mapstructure:"api_address"` + ConnectionStatus string `mapstructure:"connection_status"` + ClusterAddress string `mapstructure:"cluster_address"` + LastHeartbeat string `mapstructure:"last_heartbeat"` + } + type Status struct { + Primaries []Primary `mapstructure:"primaries"` + } + return WaitForPerfReplicationStatus(ctx, client, func(m map[string]interface{}) error { + var status Status + err := mapstructure.Decode(m, &status) + if err != nil { + return err + } + if len(status.Primaries) == 0 { + return fmt.Errorf("primaries is zero") + } + for _, v := range status.Primaries { + if v.ConnectionStatus == "connected" { + return nil + } + } + return fmt.Errorf("no primaries connected") + }) +} + +func 
WaitForPerfReplicationStatus(ctx context.Context, client *api.Client, accept func(map[string]interface{}) error) error { + var err error + var secret *api.Secret + for ctx.Err() == nil { + secret, err = client.Logical().Read("sys/replication/performance/status") + if err == nil && secret != nil && secret.Data != nil { + if err = accept(secret.Data); err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("unable to get acceptable replication status within allotted time: error=%v secret=%#v", err, secret) +} diff --git a/sdk/helper/testcluster/types.go b/sdk/helper/testcluster/types.go new file mode 100644 index 000000000000..0c04c224c1de --- /dev/null +++ b/sdk/helper/testcluster/types.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "crypto/ecdsa" + "crypto/tls" + "crypto/x509" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" +) + +type VaultClusterNode interface { + APIClient() *api.Client + TLSConfig() *tls.Config +} + +type VaultCluster interface { + Nodes() []VaultClusterNode + GetBarrierKeys() [][]byte + GetRecoveryKeys() [][]byte + GetBarrierOrRecoveryKeys() [][]byte + SetBarrierKeys([][]byte) + SetRecoveryKeys([][]byte) + GetCACertPEMFile() string + Cleanup() + ClusterID() string + NamedLogger(string) hclog.Logger + SetRootToken(token string) + GetRootToken() string +} + +type VaultNodeConfig struct { + // Not configurable because cluster creator wants to control these: + // PluginDirectory string `hcl:"plugin_directory"` + // APIAddr string `hcl:"api_addr"` + // ClusterAddr string `hcl:"cluster_addr"` + // Storage *Storage `hcl:"-"` + // HAStorage *Storage `hcl:"-"` + // DisableMlock bool `hcl:"disable_mlock"` + // ClusterName string `hcl:"cluster_name"` + + // Not configurable yet: + // Listeners []*Listener `hcl:"-"` + // Seals []*KMS `hcl:"-"` + // Entropy *Entropy `hcl:"-"` + // Telemetry *Telemetry `hcl:"telemetry"` + // HCPLinkConf *HCPLinkConfig `hcl:"cloud"` + // PidFile string `hcl:"pid_file"` + // ServiceRegistrationType string + // ServiceRegistrationOptions map[string]string + + StorageOptions map[string]string + AdditionalListeners []VaultNodeListenerConfig + + DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"` + LogFormat string `json:"log_format"` + LogLevel string `json:"log_level"` + CacheSize int `json:"cache_size"` + DisableCache bool `json:"disable_cache"` + DisablePrintableCheck bool `json:"disable_printable_check"` + EnableUI bool `json:"ui"` + MaxLeaseTTL time.Duration `json:"max_lease_ttl"` + DefaultLeaseTTL time.Duration `json:"default_lease_ttl"` + ClusterCipherSuites string `json:"cluster_cipher_suites"` + PluginFileUid int `json:"plugin_file_uid"` + PluginFilePermissions int `json:"plugin_file_permissions"` + EnableRawEndpoint bool `json:"raw_storage_endpoint"` + DisableClustering bool `json:"disable_clustering"` + DisablePerformanceStandby bool `json:"disable_performance_standby"` + DisableSealWrap bool `json:"disable_sealwrap"` + DisableIndexing bool `json:"disable_indexing"` + DisableSentinelTrace bool `json:"disable_sentinel"` + EnableResponseHeaderHostname bool `json:"enable_response_header_hostname"` + LogRequestsLevel string `json:"log_requests_level"` + EnableResponseHeaderRaftNodeID bool `json:"enable_response_header_raft_node_id"` + LicensePath string `json:"license_path"` +} + +type ClusterNode struct { + APIAddress string `json:"api_address"` +} + +type 
ClusterJson struct { + Nodes []ClusterNode `json:"nodes"` + CACertPath string `json:"ca_cert_path"` + RootToken string `json:"root_token"` +} + +type ClusterOptions struct { + ClusterName string + KeepStandbysSealed bool + SkipInit bool + CACert []byte + NumCores int + TmpDir string + Logger hclog.Logger + VaultNodeConfig *VaultNodeConfig + VaultLicense string + AdministrativeNamespacePath string +} + +type VaultNodeListenerConfig struct { + Port int + ChrootNamespace string + RedactAddresses bool + RedactClusterName bool + RedactVersion bool +} + +type CA struct { + CACert *x509.Certificate + CACertBytes []byte + CACertPEM []byte + CACertPEMFile string + CAKey *ecdsa.PrivateKey + CAKeyPEM []byte +} + +type ClusterStorage interface { + Start(context.Context, *ClusterOptions) error + Cleanup() error + Opts() map[string]interface{} + Type() string +} diff --git a/sdk/helper/testcluster/util.go b/sdk/helper/testcluster/util.go new file mode 100644 index 000000000000..ec47e9b0809b --- /dev/null +++ b/sdk/helper/testcluster/util.go @@ -0,0 +1,463 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "encoding/base64" + "encoding/hex" + "fmt" + "sync/atomic" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/xor" +) + +// Note that OSS standbys will not accept seal requests. And ent perf standbys +// may fail it as well if they haven't yet been able to get "elected" as perf standbys. +func SealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + err := client.Sys().SealWithContext(ctx) + if err != nil { + return err + } + + return NodeSealed(ctx, cluster, nodeIdx) +} + +func SealAllNodes(ctx context.Context, cluster VaultCluster) error { + for i := range cluster.Nodes() { + if err := SealNode(ctx, cluster, i); err != nil { + return err + } + } + return nil +} + +func UnsealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + for _, key := range cluster.GetBarrierOrRecoveryKeys() { + _, err := client.Sys().UnsealWithContext(ctx, hex.EncodeToString(key)) + if err != nil { + return err + } + } + + return NodeHealthy(ctx, cluster, nodeIdx) +} + +func UnsealAllNodes(ctx context.Context, cluster VaultCluster) error { + for i := range cluster.Nodes() { + if err := UnsealNode(ctx, cluster, i); err != nil { + return err + } + } + return nil +} + +func NodeSealed(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + switch { + case err != nil: + case !health.Sealed: + err = fmt.Errorf("unsealed: %#v", health) + default: + return nil + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("node %d is not sealed: %v", nodeIdx, err) +} + +func WaitForNCoresSealed(ctx context.Context, cluster VaultCluster, n int) error { + ctx, cancel := 
context.WithCancel(ctx)
+	defer cancel()
+
+	errs := make(chan error)
+	for i := range cluster.Nodes() {
+		go func(i int) {
+			var err error
+			for ctx.Err() == nil {
+				err = NodeSealed(ctx, cluster, i)
+				if err == nil {
+					errs <- nil
+					return
+				}
+				time.Sleep(100 * time.Millisecond)
+			}
+			if err == nil {
+				err = ctx.Err()
+			}
+			errs <- err
+		}(i)
+	}
+
+	var merr *multierror.Error
+	var sealed int
+	for range cluster.Nodes() {
+		err := <-errs
+		if err != nil {
+			merr = multierror.Append(merr, err)
+		} else {
+			sealed++
+			if sealed == n {
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("%d cores were not sealed, errs: %v", n, merr.ErrorOrNil())
+}
+
+func NodeHealthy(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
+	if nodeIdx >= len(cluster.Nodes()) {
+		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
+	}
+	node := cluster.Nodes()[nodeIdx]
+	client := node.APIClient()
+
+	var health *api.HealthResponse
+	var err error
+	for ctx.Err() == nil {
+		health, err = client.Sys().HealthWithContext(ctx)
+		switch {
+		case err != nil:
+		case health == nil:
+			err = fmt.Errorf("nil response to health check")
+		case health.Sealed:
+			err = fmt.Errorf("sealed: %#v", health)
+		default:
+			return nil
+		}
+		time.Sleep(500 * time.Millisecond)
+	}
+	return fmt.Errorf("node %d is unhealthy: %v", nodeIdx, err)
+}
+
+func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) {
+	// Be robust to multiple nodes thinking they are active. This is possible in
+	// certain network partition situations where the old leader has not
+	// discovered it has lost leadership yet. In tests this is only likely to
+	// come up when we are specifically provoking it, but it's possible it could
+	// happen at any point if leadership flaps or connectivity suffers transient
+	// errors, so be robust against it. The best solution would be to have some
+	// sort of epoch like the raft term that is guaranteed to be monotonically
+	// increasing through elections, however we don't have that abstraction for
+	// all HABackends in general. The best we have is the ActiveTime. In a
+	// distributed systems textbook this would be bad to rely on due to clock
+	// sync issues etc., but for our tests it's likely fine because even if we
+	// are running separate Vault containers, they are all using the same
+	// hardware clock in the system.
+	leaderActiveTimes := make(map[int]time.Time)
+	for i, node := range cluster.Nodes() {
+		client := node.APIClient()
+		ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
+		resp, err := client.Sys().LeaderWithContext(ctx)
+		cancel()
+		if err != nil || resp == nil || !resp.IsSelf {
+			continue
+		}
+		leaderActiveTimes[i] = resp.ActiveTime
+	}
+	if len(leaderActiveTimes) == 0 {
+		return -1, fmt.Errorf("no leader found")
+	}
+	// At least one node thinks it is active. If multiple, pick the one with the
+	// most recent ActiveTime. Note if there is only one then this just returns
+	// it.
+ var newestLeaderIdx int + var newestActiveTime time.Time + for i, at := range leaderActiveTimes { + if at.After(newestActiveTime) { + newestActiveTime = at + newestLeaderIdx = i + } + } + return newestLeaderIdx, nil +} + +func WaitForActiveNode(ctx context.Context, cluster VaultCluster) (int, error) { + for ctx.Err() == nil { + if idx, _ := LeaderNode(ctx, cluster); idx != -1 { + return idx, nil + } + time.Sleep(500 * time.Millisecond) + } + return -1, ctx.Err() +} + +func WaitForStandbyNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + var err error + for ctx.Err() == nil { + var resp *api.LeaderResponse + + resp, err = client.Sys().LeaderWithContext(ctx) + switch { + case err != nil: + case resp.IsSelf: + return fmt.Errorf("waiting for standby but node is leader") + case resp.LeaderAddress == "": + err = fmt.Errorf("node doesn't know leader address") + default: + return nil + } + + time.Sleep(100 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func WaitForActiveNodeAndStandbys(ctx context.Context, cluster VaultCluster) (int, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + leaderIdx, err := WaitForActiveNode(ctx, cluster) + if err != nil { + return 0, err + } + + if len(cluster.Nodes()) == 1 { + return 0, nil + } + + errs := make(chan error) + for i := range cluster.Nodes() { + if i == leaderIdx { + continue + } + go func(i int) { + errs <- WaitForStandbyNode(ctx, cluster, i) + }(i) + } + + var merr *multierror.Error + expectedStandbys := len(cluster.Nodes()) - 1 + for i := 0; i < expectedStandbys; i++ { + merr = multierror.Append(merr, <-errs) + } + + return leaderIdx, merr.ErrorOrNil() +} + +func WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster) error { + logger := cluster.NamedLogger("WaitForActiveNodeAndPerfStandbys") + // This WaitForActiveNode was added because after a Raft cluster is sealed + // and then unsealed, when it comes up it may have a different leader than + // Core0, making this helper fail. + // A sleep before calling WaitForActiveNodeAndPerfStandbys seems to sort + // things out, but so apparently does this. We should be able to eliminate + // this call to WaitForActiveNode by reworking the logic in this method. 
+ leaderIdx, err := WaitForActiveNode(ctx, cluster) + if err != nil { + return fmt.Errorf("did not find leader: %w", err) + } + + if len(cluster.Nodes()) == 1 { + return nil + } + + expectedStandbys := len(cluster.Nodes()) - 1 + + mountPoint, err := uuid.GenerateUUID() + if err != nil { + return err + } + leaderClient := cluster.Nodes()[leaderIdx].APIClient() + + for ctx.Err() == nil { + err = leaderClient.Sys().MountWithContext(ctx, mountPoint, &api.MountInput{ + Type: "kv", + Local: true, + }) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } + if err != nil { + return fmt.Errorf("unable to mount KV engine: %w", err) + } + path := mountPoint + "/waitforactivenodeandperfstandbys" + var standbys, actives int64 + errchan := make(chan error, len(cluster.Nodes())) + for i := range cluster.Nodes() { + go func(coreNo int) { + node := cluster.Nodes()[coreNo] + client := node.APIClient() + val := 1 + var err error + defer func() { + errchan <- err + }() + + var lastWAL uint64 + for ctx.Err() == nil { + _, err = leaderClient.Logical().WriteWithContext(ctx, path, map[string]interface{}{ + "bar": val, + }) + val++ + time.Sleep(250 * time.Millisecond) + if err != nil { + continue + } + var leader *api.LeaderResponse + leader, err = client.Sys().LeaderWithContext(ctx) + if err != nil { + logger.Trace("waiting for core", "core", coreNo, "err", err) + continue + } + switch { + case leader.IsSelf: + logger.Trace("waiting for core", "core", coreNo, "isLeader", true) + atomic.AddInt64(&actives, 1) + return + case leader.PerfStandby && leader.PerfStandbyLastRemoteWAL > 0: + switch { + case lastWAL == 0: + lastWAL = leader.PerfStandbyLastRemoteWAL + logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) + case lastWAL < leader.PerfStandbyLastRemoteWAL: + logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) + atomic.AddInt64(&standbys, 1) + return + } + default: + logger.Trace("waiting for core", "core", coreNo, + "ha_enabled", leader.HAEnabled, + "is_self", leader.IsSelf, + "perf_standby", leader.PerfStandby, + "perf_standby_remote_wal", leader.PerfStandbyLastRemoteWAL) + } + } + }(i) + } + + errs := make([]error, 0, len(cluster.Nodes())) + for range cluster.Nodes() { + errs = append(errs, <-errchan) + } + if actives != 1 || int(standbys) != expectedStandbys { + return fmt.Errorf("expected 1 active core and %d standbys, got %d active and %d standbys, errs: %v", + expectedStandbys, actives, standbys, errs) + } + + for ctx.Err() == nil { + err = leaderClient.Sys().UnmountWithContext(ctx, mountPoint) + if err == nil { + break + } + time.Sleep(time.Second) + } + if err != nil { + return fmt.Errorf("unable to unmount KV engine: %w", err) + } + return nil +} + +func Clients(vc VaultCluster) []*api.Client { + var ret []*api.Client + for _, n := range vc.Nodes() { + ret = append(ret, n.APIClient()) + } + return ret +} + +//go:generate enumer -type=GenerateRootKind -trimprefix=GenerateRoot +type GenerateRootKind int + +const ( + GenerateRootRegular GenerateRootKind = iota + GenerateRootDR + GenerateRecovery +) + +func GenerateRoot(cluster VaultCluster, kind GenerateRootKind) (string, error) { + // If recovery keys supported, use those to perform root token generation instead + keys := cluster.GetBarrierOrRecoveryKeys() + + client := cluster.Nodes()[0].APIClient() + + var err error + var status *api.GenerateRootStatusResponse + switch kind { + case GenerateRootRegular: + 
status, err = client.Sys().GenerateRootInit("", "") + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenInit("", "") + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "") + } + if err != nil { + return "", err + } + + if status.Required > len(keys) { + return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys)) + } + + otp := status.OTP + + for i, key := range keys { + if i >= status.Required { + break + } + + strKey := base64.StdEncoding.EncodeToString(key) + switch kind { + case GenerateRootRegular: + status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce) + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce) + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce) + } + if err != nil { + return "", err + } + } + if !status.Complete { + return "", fmt.Errorf("generate root operation did not end successfully") + } + + tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken) + if err != nil { + return "", err + } + tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + return "", err + } + return string(tokenBytes), nil +} diff --git a/sdk/helper/testhelpers/namespaces/namespaces.go b/sdk/helper/testhelpers/namespaces/namespaces.go new file mode 100644 index 000000000000..9981104f2b56 --- /dev/null +++ b/sdk/helper/testhelpers/namespaces/namespaces.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package namespaces + +import ( + "context" + "errors" + "fmt" + "path" + "slices" + "strings" + "time" + + "github.com/hashicorp/vault/api" +) + +// RootNamespacePath is the path of the root namespace. +const RootNamespacePath = "" + +// RootNamespaceID is the ID of the root namespace. +const RootNamespaceID = "root" + +// ErrNotFound is returned by funcs in this package when something isn't found, +// instead of returning (nil, nil). +var ErrNotFound = errors.New("no namespaces found") + +// folderPath transforms an input path that refers to a namespace or mount point, +// such that it adheres to the norms Vault prefers. The result will have any +// leading "/" stripped, and, except for the root namespace which is always +// RootNamespacePath, will always end in a "/". +func folderPath(path string) string { + if !strings.HasSuffix(path, "/") { + path += "/" + } + + return strings.TrimPrefix(path, "/") +} + +// joinPath concatenates its inputs using "/" as a delimiter. The result will +// adhere to folderPath conventions. +func joinPath(s ...string) string { + return folderPath(path.Join(s...)) +} + +// GetNamespaceIDPaths does a namespace list and extracts the resulting paths +// and namespace IDs, returning a map from namespace ID to path. Returns +// ErrNotFound if no namespaces exist beneath the current namespace set on the +// client. 
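+//
+// A hypothetical usage sketch:
+//
+//	idPaths, err := GetNamespaceIDPaths(client)
+//	if errors.Is(err, ErrNotFound) {
+//		return nil // no child namespaces
+//	}
+//	for id, relPath := range idPaths {
+//		fmt.Println(id, relPath)
+//	}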
+func GetNamespaceIDPaths(client *api.Client) (map[string]string, error) { + secret, err := client.Logical().List("sys/namespaces") + if err != nil { + return nil, err + } + if secret == nil { + return nil, ErrNotFound + } + if _, ok := secret.Data["key_info"]; !ok { + return nil, ErrNotFound + } + + ret := map[string]string{} + for relNsPath, infoAny := range secret.Data["key_info"].(map[string]any) { + info := infoAny.(map[string]any) + id := info["id"].(string) + ret[id] = relNsPath + } + return ret, err +} + +// WalkNamespaces does recursive namespace list commands to discover the complete +// namespace hierarchy. This may yield an error or inconsistent results if +// namespaces change while we're querying them. +// The callback f is invoked for every namespace discovered. Namespace traversal +// is pre-order depth-first. If f returns an error, traversal is aborted and the +// error is returned. Otherwise, an error is only returned if a request results +// in an error. +func WalkNamespaces(client *api.Client, f func(id, apiPath string) error) error { + return walkNamespacesRecursive(client, RootNamespaceID, RootNamespacePath, f) +} + +func walkNamespacesRecursive(client *api.Client, startID, startApiPath string, f func(id, apiPath string) error) error { + if err := f(startID, startApiPath); err != nil { + return err + } + + idpaths, err := GetNamespaceIDPaths(client.WithNamespace(startApiPath)) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil + } + return err + } + + for id, path := range idpaths { + fullPath := joinPath(startApiPath, path) + + if err = walkNamespacesRecursive(client, id, fullPath, f); err != nil { + return err + } + } + return nil +} + +// PollDeleteNamespace issues a namespace delete request and waits for it +// to complete (since namespace deletes are asynchronous), at least until +// ctx expires. +func PollDeleteNamespace(ctx context.Context, client *api.Client, nsPath string) error { + _, err := client.Logical().Delete("sys/namespaces/" + nsPath) + if err != nil { + return err + } + +LOOP: + for ctx.Err() == nil { + resp, err := client.Logical().Delete("sys/namespaces/" + nsPath) + if err != nil { + return err + } + for _, warn := range resp.Warnings { + if strings.HasPrefix(warn, "Namespace is already being deleted") { + time.Sleep(10 * time.Millisecond) + continue LOOP + } + } + break + } + + return ctx.Err() +} + +// DeleteAllNamespaces uses WalkNamespaces to delete all namespaces, +// waiting for deletion to complete before returning. The same caveats about +// namespaces changing underneath us apply as in WalkNamespaces. +// Traversal is depth-first pre-order, but we must do the deletion in the reverse +// order, since a namespace containing namespaces cannot be deleted. +func DeleteAllNamespaces(ctx context.Context, client *api.Client) error { + var nss []string + err := WalkNamespaces(client, func(id, apiPath string) error { + if apiPath != RootNamespacePath { + nss = append(nss, apiPath) + } + return nil + }) + if err != nil { + return err + } + slices.Reverse(nss) + for _, apiPath := range nss { + if err := PollDeleteNamespace(ctx, client, apiPath); err != nil { + return fmt.Errorf("error deleting namespace %q: %v", apiPath, err) + } + } + + // Do a final check to make sure that we got everything, and so that the + // caller doesn't assume that all namespaces are deleted when a glitch + // occurred due to namespaces changing while we were traversing or deleting + // them. 
+ _, err = GetNamespaceIDPaths(client) + if err != nil && !errors.Is(err, ErrNotFound) { + return err + } + + return nil +} diff --git a/sdk/helper/testhelpers/output.go b/sdk/helper/testhelpers/output.go new file mode 100644 index 000000000000..9fde77f0d6c6 --- /dev/null +++ b/sdk/helper/testhelpers/output.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testhelpers + +import ( + "crypto/sha256" + "fmt" + "reflect" + "testing" + + "github.com/mitchellh/mapstructure" +) + +// ToMap renders an input value of any type as a map. This is intended for +// logging human-readable data dumps in test logs, so it uses the `json` +// tags on struct fields: this makes it easy to exclude `"-"` values that +// are typically not interesting, respect omitempty, etc. +// +// We also replace any []byte fields with a hash of their value. +// This is usually sufficient for test log purposes, and is a lot more readable +// than a big array of individual byte values like Go would normally stringify a +// byte slice. +func ToMap(in any) (map[string]any, error) { + temp := make(map[string]any) + cfg := &mapstructure.DecoderConfig{ + TagName: "json", + IgnoreUntaggedFields: true, + Result: &temp, + } + md, err := mapstructure.NewDecoder(cfg) + if err != nil { + return nil, err + } + err = md.Decode(in) + if err != nil { + return nil, err + } + + // mapstructure doesn't call the DecodeHook for each field when doing + // struct->map conversions, but it does for map->map, so call it a second + // time to convert each []byte field. + out := make(map[string]any) + md2, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &out, + DecodeHook: func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.Slice || from.Elem().Kind() != reflect.Uint8 { + return data, nil + } + b := data.([]byte) + return fmt.Sprintf("%x", sha256.Sum256(b)), nil + }, + }) + if err != nil { + return nil, err + } + err = md2.Decode(temp) + if err != nil { + return nil, err + } + + return out, nil +} + +// ToString renders its input using ToMap, and returns a string containing the +// result or an error if that fails. +func ToString(in any) string { + m, err := ToMap(in) + if err != nil { + return err.Error() + } + return fmt.Sprintf("%v", m) +} + +// StringOrDie renders its input using ToMap, and returns a string containing the +// result. If rendering yields an error, calls t.Fatal. +func StringOrDie(t testing.TB, in any) string { + t.Helper() + m, err := ToMap(in) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("%v", m) +} diff --git a/sdk/helper/testhelpers/output_test.go b/sdk/helper/testhelpers/output_test.go new file mode 100644 index 000000000000..ada51a1fe119 --- /dev/null +++ b/sdk/helper/testhelpers/output_test.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package testhelpers
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+)
+
+func TestToMap(t *testing.T) {
+	type s struct {
+		A string            `json:"a"`
+		B []byte            `json:"b"`
+		C map[string]string `json:"c"`
+		D string            `json:"-"`
+	}
+	type args struct {
+		in s
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name:    "basic",
+			args:    args{s{A: "a", B: []byte("bytes"), C: map[string]string{"k": "v"}, D: "d"}},
+			want:    "map[a:a b:277089d91c0bdf4f2e6862ba7e4a07605119431f5d13f726dd352b06f1b206a9 c:map[k:v]]",
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			m, err := ToMap(&tt.args.in)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ToMap() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			got := fmt.Sprintf("%s", m)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("ToMap() got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/sdk/helper/testhelpers/schema/response_validation.go b/sdk/helper/testhelpers/schema/response_validation.go
index febb857ecea0..8085b042b6dd 100644
--- a/sdk/helper/testhelpers/schema/response_validation.go
+++ b/sdk/helper/testhelpers/schema/response_validation.go
@@ -14,11 +14,11 @@ import (
 	"github.com/hashicorp/vault/sdk/logical"
 )
 
-// ValidateResponseData is a test helper that validates whether the given
-// response data map conforms to the response schema (schema.Fields). It cycles
-// through the data map and validates conversions in the schema. In "strict"
-// mode, this function will also ensure that the data map has all schema's
-// requred fields and does not have any fields outside of the schema.
+// ValidateResponse is a test helper that validates whether the given response
+// object conforms to the response schema (schema.Fields). It cycles through
+// the data map and validates conversions in the schema. In "strict" mode, this
+// function will also ensure that the data map has all schema-required fields
+// and does not have any fields outside of the schema.
 func ValidateResponse(t *testing.T, schema *framework.Response, response *logical.Response, strict bool) {
 	t.Helper()
 
@@ -29,11 +29,11 @@ func ValidateResponse(t *testing.T, schema *framework.Response, response *logica
 	}
 }
 
-// ValidateResponse is a test helper that validates whether the given response
-// object conforms to the response schema (schema.Fields). It cycles through
-// the data map and validates conversions in the schema. In "strict" mode, this
-// function will also ensure that the data map has all schema-required fields
-// and does not have any fields outside of the schema.
+// ValidateResponseData is a test helper that validates whether the given
+// response data map conforms to the response schema (schema.Fields). It cycles
+// through the data map and validates conversions in the schema. In "strict"
+// mode, this function will also ensure that the data map has all the schema's
+// required fields and does not have any fields outside of the schema.
 func ValidateResponseData(t *testing.T, schema *framework.Response, data map[string]interface{}, strict bool) {
 	t.Helper()
 
@@ -53,6 +53,16 @@ func validateResponseDataImpl(schema *framework.Response, data map[string]interf
 		return nil
 	}
 
+	// Certain responses may come through with non-2xx status codes. While
+	// these are not always errors (e.g.
3xx redirection codes), we don't + // consider them for the purposes of schema validation + if status, exists := data[logical.HTTPStatusCode]; exists { + s, ok := status.(int) + if ok && (s < 200 || s > 299) { + return nil + } + } + // Marshal the data to JSON and back to convert the map's values into // JSON strings expected by Validate() and ValidateStrict(). This is // not efficient and is done for testing purposes only. @@ -100,7 +110,8 @@ func validateResponseDataImpl(schema *framework.Response, data map[string]interf return fd.Validate() } -// FindResponseSchema is a test helper to extract response schema from the given framework path / operation +// FindResponseSchema is a test helper to extract response schema from the +// given framework path / operation. func FindResponseSchema(t *testing.T, paths []*framework.Path, pathIdx int, operation logical.Operation) *framework.Response { t.Helper() @@ -139,6 +150,18 @@ func GetResponseSchema(t *testing.T, path *framework.Path, operation logical.Ope } if len(schemaResponses) == 0 { + // ListOperations have a default response schema that is implicit unless overridden + if operation == logical.ListOperation { + return &framework.Response{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + }, + }, + } + } + t.Fatalf( "could not find response schema: %s: %q operation: no responses found", path.Pattern, @@ -149,8 +172,8 @@ func GetResponseSchema(t *testing.T, path *framework.Path, operation logical.Ope return &schemaResponses[0] } -// ResponseValidatingCallback can be used in setting up a [vault.TestCluster] that validates every response against the -// openapi specifications +// ResponseValidatingCallback can be used in setting up a [vault.TestCluster] +// that validates every response against the openapi specifications. 
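+//
+// A hypothetical wiring sketch (the RequestResponseCallback field name on
+// vault.CoreConfig is an assumption, not shown in this diff):
+//
+//	coreConfig := &vault.CoreConfig{
+//		RequestResponseCallback: schema.ResponseValidatingCallback(t),
+//	}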
// // [vault.TestCluster]: https://pkg.go.dev/github.com/hashicorp/vault/vault#TestCluster func ResponseValidatingCallback(t *testing.T) func(logical.Backend, *logical.Request, *logical.Response) { @@ -164,15 +187,16 @@ func ResponseValidatingCallback(t *testing.T) func(logical.Backend, *logical.Req if b == nil { t.Fatalf("non-nil backend required") } + backend, ok := b.(PathRouter) if !ok { t.Fatalf("could not cast %T to have `Route(string) *framework.Path`", b) } - // the full request path includes the backend - // but when passing to the backend, we have to trim the mount point - // `sys/mounts/secret` -> `mounts/secret` - // `auth/token/create` -> `create` + // The full request path includes the backend but when passing to the + // backend, we have to trim the mount point: + // `sys/mounts/secret` -> `mounts/secret` + // `auth/token/create` -> `create` requestPath := strings.TrimPrefix(req.Path, req.MountPoint) route := backend.Route(requestPath) diff --git a/sdk/helper/testhelpers/schema/response_validation_test.go b/sdk/helper/testhelpers/schema/response_validation_test.go index 98880d007606..4f4aa8b1cc3c 100644 --- a/sdk/helper/testhelpers/schema/response_validation_test.go +++ b/sdk/helper/testhelpers/schema/response_validation_test.go @@ -275,6 +275,36 @@ func TestValidateResponse(t *testing.T) { errorExpected: false, }, + "string schema field, response has non-200 http_status_code, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + }, + }, + }, + response: map[string]interface{}{ + "http_status_code": 304, + }, + strict: true, + errorExpected: false, + }, + + "string schema field, response has non-200 http_status_code, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + }, + }, + }, + response: map[string]interface{}{ + "http_status_code": 304, + }, + strict: false, + errorExpected: false, + }, + "schema has http_raw_body, strict": { schema: &framework.Response{ Fields: map[string]*framework.FieldSchema{ diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go index 0310b9db4602..4319bd182369 100644 --- a/sdk/helper/tokenutil/tokenutil.go +++ b/sdk/helper/tokenutil/tokenutil.go @@ -78,8 +78,9 @@ func TokenFields() map[string]*framework.FieldSchema { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Generated Token's Bound CIDRs", - Group: "Tokens", + Name: "Generated Token's Bound CIDRs", + Group: "Tokens", + Description: "A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.", }, }, @@ -123,8 +124,9 @@ func TokenFields() map[string]*framework.FieldSchema { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Generated Token's Policies", - Group: "Tokens", + Name: "Generated Token's Policies", + Group: "Tokens", + Description: "A list of policies that will apply to the generated token for this user.", }, }, diff --git a/sdk/logical/acme_billing.go b/sdk/logical/acme_billing.go new file mode 100644 index 000000000000..6e4f6ef398b8 --- /dev/null +++ b/sdk/logical/acme_billing.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import "context" + +type ACMEBillingSystemView interface { + CreateActivityCountEventForIdentifiers(ctx context.Context, identifiers []string) error +} diff --git a/sdk/logical/audit.go b/sdk/logical/audit.go index 30c03e6113ac..ecd1f5865a40 100644 --- a/sdk/logical/audit.go +++ b/sdk/logical/audit.go @@ -3,6 +3,13 @@ package logical +import ( + "fmt" + + "github.com/mitchellh/copystructure" +) + +// LogInput is used as the input to the audit system on which audit entries are based. type LogInput struct { Type string Auth *Auth @@ -20,3 +27,133 @@ type MarshalOptions struct { type OptMarshaler interface { MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) } + +// LogInputBexpr is used for evaluating boolean expressions with go-bexpr. +type LogInputBexpr struct { + MountPoint string `bexpr:"mount_point"` + MountType string `bexpr:"mount_type"` + Namespace string `bexpr:"namespace"` + Operation string `bexpr:"operation"` + Path string `bexpr:"path"` +} + +// BexprDatum returns values from a LogInput formatted for use in evaluating go-bexpr boolean expressions. +// The namespace should be supplied from the current request's context. +func (l *LogInput) BexprDatum(namespace string) *LogInputBexpr { + var mountPoint string + var mountType string + var operation string + var path string + + if l.Request != nil { + mountPoint = l.Request.MountPoint + mountType = l.Request.MountType + operation = string(l.Request.Operation) + path = l.Request.Path + } + + return &LogInputBexpr{ + MountPoint: mountPoint, + MountType: mountType, + Namespace: namespace, + Operation: operation, + Path: path, + } +} + +// Clone will attempt to create a deep copy (almost) of the LogInput. +// If the LogInput type or any of the subtypes referenced by LogInput fields are +// changed, then the Clone methods will need to be updated. +// NOTE: Does not deep clone the LogInput.OuterError field as it represents an +// error interface. +// NOTE: LogInput.Request.Connection (at the time of writing) is also not deep-copied +// and remains a pointer, see Request.Clone for more information. +func (l *LogInput) Clone() (*LogInput, error) { + // Clone Auth + auth, err := cloneAuth(l.Auth) + if err != nil { + return nil, err + } + + // Clone Request + var req *Request + if l.Request != nil { + req, err = l.Request.Clone() + if err != nil { + return nil, err + } + } + + // Clone Response + resp, err := cloneResponse(l.Response) + if err != nil { + return nil, err + } + + // Copy HMAC keys + reqDataKeys := make([]string, len(l.NonHMACReqDataKeys)) + copy(reqDataKeys, l.NonHMACReqDataKeys) + respDataKeys := make([]string, len(l.NonHMACRespDataKeys)) + copy(respDataKeys, l.NonHMACRespDataKeys) + + // OuterErr is just linked in a non-deep way as it's an interface, and we + // don't know for sure which type this might actually be. + // At the time of writing this code, OuterErr isn't modified by anything, + // so we shouldn't get any race issues. + cloned := &LogInput{ + Type: l.Type, + Auth: auth, + Request: req, + Response: resp, + OuterErr: l.OuterErr, + NonHMACReqDataKeys: reqDataKeys, + NonHMACRespDataKeys: respDataKeys, + } + + return cloned, nil +} + +// clone will deep-copy the supplied struct. +// However, it cannot copy unexported fields or evaluate methods. 
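+//
+// For example, cloneAuth below instantiates it as clone[*Auth](auth); the same
+// pattern works for any other struct made up of exported fields.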
+func clone[V any](s V) (V, error) { + var result V + + data, err := copystructure.Copy(s) + if err != nil { + return result, err + } + + result = data.(V) + + return result, err +} + +// cloneAuth deep copies an Auth struct. +func cloneAuth(auth *Auth) (*Auth, error) { + // If auth is nil, there's nothing to clone. + if auth == nil { + return nil, nil + } + + auth, err := clone[*Auth](auth) + if err != nil { + return nil, fmt.Errorf("unable to clone auth: %w", err) + } + + return auth, nil +} + +// cloneResponse deep copies a Response struct. +func cloneResponse(response *Response) (*Response, error) { + // If response is nil, there's nothing to clone. + if response == nil { + return nil, nil + } + + resp, err := clone[*Response](response) + if err != nil { + return nil, fmt.Errorf("unable to clone response: %w", err) + } + + return resp, nil +} diff --git a/sdk/logical/audit_test.go b/sdk/logical/audit_test.go new file mode 100644 index 000000000000..07623daab9e8 --- /dev/null +++ b/sdk/logical/audit_test.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestLogInput_BexprDatum ensures that we can transform a LogInput +// into a LogInputBexpr to be used in audit filtering. +func TestLogInput_BexprDatum(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Request *Request + Namespace string + ExpectedPath string + ExpectedMountPoint string + ExpectedMountType string + ExpectedNamespace string + ExpectedOperation string + }{ + "nil-no-namespace": { + Request: nil, + Namespace: "", + ExpectedPath: "", + ExpectedMountPoint: "", + ExpectedMountType: "", + ExpectedNamespace: "", + ExpectedOperation: "", + }, + "nil-namespace": { + Request: nil, + Namespace: "juan", + ExpectedPath: "", + ExpectedMountPoint: "", + ExpectedMountType: "", + ExpectedNamespace: "juan", + ExpectedOperation: "", + }, + "happy-path": { + Request: &Request{ + MountPoint: "IAmAMountPoint", + MountType: "IAmAMountType", + Operation: CreateOperation, + Path: "IAmAPath", + }, + Namespace: "juan", + ExpectedPath: "IAmAPath", + ExpectedMountPoint: "IAmAMountPoint", + ExpectedMountType: "IAmAMountType", + ExpectedNamespace: "juan", + ExpectedOperation: "create", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + l := &LogInput{Request: tc.Request} + + d := l.BexprDatum(tc.Namespace) + + require.Equal(t, tc.ExpectedPath, d.Path) + require.Equal(t, tc.ExpectedMountPoint, d.MountPoint) + require.Equal(t, tc.ExpectedMountType, d.MountType) + require.Equal(t, tc.ExpectedNamespace, d.Namespace) + require.Equal(t, tc.ExpectedOperation, d.Operation) + }) + } +} diff --git a/sdk/logical/auth.go b/sdk/logical/auth.go index 951116ee4be2..7cf7992346c2 100644 --- a/sdk/logical/auth.go +++ b/sdk/logical/auth.go @@ -114,6 +114,10 @@ type Auth struct { // EntityCreated is set to true if an entity is created as part of a login request EntityCreated bool `json:"entity_created"` + + // HTTPRequestPriority contains potential information about the request + // priority based on required path capabilities + HTTPRequestPriority *uint8 `json:"http_request_priority"` } func (a *Auth) GoString() string { @@ -126,7 +130,8 @@ type PolicyResults struct { } type PolicyInfo struct { - Name string `json:"name"` - NamespaceId string `json:"namespace_id"` - Type string `json:"type"` + Name string `json:"name"` + NamespaceId string 
`json:"namespace_id"` + NamespacePath string `json:"namespace_path"` + Type string `json:"type"` } diff --git a/sdk/logical/clienttokensource_enumer.go b/sdk/logical/clienttokensource_enumer.go new file mode 100644 index 000000000000..e930a3a0ddd9 --- /dev/null +++ b/sdk/logical/clienttokensource_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=ClientTokenSource -trimprefix=ClientTokenFrom -transform=snake"; DO NOT EDIT. + +package logical + +import ( + "fmt" +) + +const _ClientTokenSourceName = "no_client_tokenvault_headerauthz_headerinternal_auth" + +var _ClientTokenSourceIndex = [...]uint8{0, 15, 27, 39, 52} + +func (i ClientTokenSource) String() string { + if i >= ClientTokenSource(len(_ClientTokenSourceIndex)-1) { + return fmt.Sprintf("ClientTokenSource(%d)", i) + } + return _ClientTokenSourceName[_ClientTokenSourceIndex[i]:_ClientTokenSourceIndex[i+1]] +} + +var _ClientTokenSourceValues = []ClientTokenSource{0, 1, 2, 3} + +var _ClientTokenSourceNameToValueMap = map[string]ClientTokenSource{ + _ClientTokenSourceName[0:15]: 0, + _ClientTokenSourceName[15:27]: 1, + _ClientTokenSourceName[27:39]: 2, + _ClientTokenSourceName[39:52]: 3, +} + +// ClientTokenSourceString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ClientTokenSourceString(s string) (ClientTokenSource, error) { + if val, ok := _ClientTokenSourceNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ClientTokenSource values", s) +} + +// ClientTokenSourceValues returns all values of the enum +func ClientTokenSourceValues() []ClientTokenSource { + return _ClientTokenSourceValues +} + +// IsAClientTokenSource returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ClientTokenSource) IsAClientTokenSource() bool { + for _, v := range _ClientTokenSourceValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/logical/error.go b/sdk/logical/error.go index 5605784b3e13..dcd030dedf63 100644 --- a/sdk/logical/error.go +++ b/sdk/logical/error.go @@ -3,7 +3,10 @@ package logical -import "errors" +import ( + "context" + "errors" +) var ( // ErrUnsupportedOperation is returned if the operation is not supported @@ -20,12 +23,15 @@ var ( // ErrPermissionDenied is returned if the client is not authorized ErrPermissionDenied = errors.New("permission denied") + // ErrInvalidToken is returned if the token is revoked, expired, or non-existent + ErrInvalidToken = errors.New("invalid token") + // ErrInvalidCredentials is returned when the provided credentials are incorrect // This is used internally for user lockout purposes. This is not seen externally. // The status code returned does not change because of this error ErrInvalidCredentials = errors.New("invalid credentials") - // ErrMultiAuthzPending is returned if the the request needs more + // ErrMultiAuthzPending is returned if the request needs more // authorizations ErrMultiAuthzPending = errors.New("request needs further approval") @@ -59,8 +65,53 @@ var ( // Error indicating that the requested path used to serve a purpose in older // versions, but the functionality has now been removed ErrPathFunctionalityRemoved = errors.New("functionality on this path has been removed") + + // ErrNotFound is an error used to indicate that a particular resource was + // not found. 
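+	//
+	// Callers are expected to test for it with errors.Is; a hypothetical sketch:
+	//
+	//	if errors.Is(err, logical.ErrNotFound) {
+	//		// treat as a miss rather than a hard failure
+	//	}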
+ ErrNotFound = errors.New("not found") ) +type DelegatedAuthErrorHandler func(ctx context.Context, initiatingRequest, authRequest *Request, authResponse *Response, err error) (*Response, error) + +var _ error = &RequestDelegatedAuthError{} + +// RequestDelegatedAuthError Special error indicating the backend wants to delegate authentication elsewhere +type RequestDelegatedAuthError struct { + mountAccessor string + path string + data map[string]interface{} + errHandler DelegatedAuthErrorHandler +} + +func NewDelegatedAuthenticationRequest(mountAccessor, path string, data map[string]interface{}, errHandler DelegatedAuthErrorHandler) *RequestDelegatedAuthError { + return &RequestDelegatedAuthError{ + mountAccessor: mountAccessor, + path: path, + data: data, + errHandler: errHandler, + } +} + +func (d *RequestDelegatedAuthError) Error() string { + return "authentication delegation requested" +} + +func (d *RequestDelegatedAuthError) MountAccessor() string { + return d.mountAccessor +} + +func (d *RequestDelegatedAuthError) Path() string { + return d.path +} + +func (d *RequestDelegatedAuthError) Data() map[string]interface{} { + return d.data +} + +func (d *RequestDelegatedAuthError) AuthErrorHandler() DelegatedAuthErrorHandler { + return d.errHandler +} + type HTTPCodedError interface { Error() string Code() int diff --git a/sdk/logical/event.pb.go b/sdk/logical/event.pb.go index 22e908d91a34..1db6d46dc94d 100644 --- a/sdk/logical/event.pb.go +++ b/sdk/logical/event.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/logical/event.proto package logical @@ -332,7 +332,7 @@ func file_sdk_logical_event_proto_rawDescGZIP() []byte { } var file_sdk_logical_event_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_sdk_logical_event_proto_goTypes = []interface{}{ +var file_sdk_logical_event_proto_goTypes = []any{ (*EventPluginInfo)(nil), // 0: logical.EventPluginInfo (*EventData)(nil), // 1: logical.EventData (*EventReceived)(nil), // 2: logical.EventReceived @@ -355,7 +355,7 @@ func file_sdk_logical_event_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_logical_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_event_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*EventPluginInfo); i { case 0: return &v.state @@ -367,7 +367,7 @@ func file_sdk_logical_event_proto_init() { return nil } } - file_sdk_logical_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_event_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*EventData); i { case 0: return &v.state @@ -379,7 +379,7 @@ func file_sdk_logical_event_proto_init() { return nil } } - file_sdk_logical_event_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_event_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*EventReceived); i { case 0: return &v.state diff --git a/sdk/logical/event.proto b/sdk/logical/event.proto index 6e36e5e70f9a..8892412f5f5e 100644 --- a/sdk/logical/event.proto +++ b/sdk/logical/event.proto @@ -3,52 +3,52 @@ syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/sdk/logical"; - package logical; import "google/protobuf/struct.proto"; +option go_package = "github.com/hashicorp/vault/sdk/logical"; + // EventPluginInfo contains data related to the plugin that generated an 
event.
 message EventPluginInfo {
-    // The type of plugin this event originated from, i.e., "auth" or "secrets.
-    string mount_class = 1;
-    // Unique ID of the mount entry, e.g., "kv_957bb7d8"
-    string mount_accessor = 2;
-    // Mount path of the plugin this event originated from, e.g., "secret/"
-    string mount_path = 3;
-    // Plugin name that this event originated from, e.g., "kv"
-    string plugin = 4;
-    // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin"
-    string plugin_version = 5;
-    // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty.
-    string version = 6;
+  // The type of plugin this event originated from, i.e., "auth" or "secrets".
+  string mount_class = 1;
+  // Unique ID of the mount entry, e.g., "kv_957bb7d8"
+  string mount_accessor = 2;
+  // Mount path of the plugin this event originated from, e.g., "secret/"
+  string mount_path = 3;
+  // Plugin name that this event originated from, e.g., "kv"
+  string plugin = 4;
+  // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin"
+  string plugin_version = 5;
+  // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty.
+  string version = 6;
 }
 
 // EventData contains event data in a CloudEvents container.
 message EventData {
-    // ID identifies the event. It is required. The combination of
-    // CloudEvents Source (i.e., Vault cluster) + ID must be unique.
-    // Events with the same Source + ID can be assumed to be duplicates
-    // by consumers.
-    // Be careful when setting this manually that the ID contains enough
-    // entropy to be unique, or possibly that it is idempotent, such
-    // as a hash of other fields with sufficient uniqueness.
-    string id = 1;
-    // Arbitrary non-secret data. Optional.
-    google.protobuf.Struct metadata = 2;
-    // Any IDs that the event relates to, i.e., UUIDs, paths.
-    repeated string entity_ids = 3;
-    // Human-readable note.
-    string note = 4;
+  // ID identifies the event. It is required. The combination of
+  // CloudEvents Source (i.e., Vault cluster) + ID must be unique.
+  // Events with the same Source + ID can be assumed to be duplicates
+  // by consumers.
+  // Be careful when setting this manually that the ID contains enough
+  // entropy to be unique, or possibly that it is idempotent, such
+  // as a hash of other fields with sufficient uniqueness.
+  string id = 1;
+  // Arbitrary non-secret data. Optional.
+  google.protobuf.Struct metadata = 2;
+  // Any IDs that the event relates to, i.e., UUIDs, paths.
+  repeated string entity_ids = 3;
+  // Human-readable note.
+  string note = 4;
 }
 
 // EventReceived is used to consume events and includes additional metadata regarding
 // the event type and plugin information.
 message EventReceived {
-    EventData event = 1;
-    // namespace path
-    string namespace = 2;
-    string event_type = 3;
-    EventPluginInfo plugin_info = 4;
+  EventData event = 1;
+  // namespace path
+  string namespace = 2;
+  string event_type = 3;
+  EventPluginInfo plugin_info = 4;
 }
diff --git a/sdk/logical/events.go b/sdk/logical/events.go
index cbd3f7369024..5bd9717f7ff1 100644
--- a/sdk/logical/events.go
+++ b/sdk/logical/events.go
@@ -7,6 +7,24 @@ import (
 	"context"
 
 	"github.com/hashicorp/go-uuid"
+	"google.golang.org/protobuf/types/known/structpb"
+)
+
+// common event metadata keys
+const (
+	// EventMetadataDataPath is used in event metadata to show the API path that can be used to fetch any underlying
+	// data. For example, the KV plugin would set this to `data/mysecret`. The event system will automatically prepend
The event system will automatically prepend + // the plugin mount to this path, if present, so it would become `secret/data/mysecret`, for example. + // If this is an auth plugin event, this will additionally be prepended with `auth/`. + EventMetadataDataPath = "data_path" + // EventMetadataOperation is used in event metadata to express what operation was performed that generated the + // event, e.g., `read` or `write`. + EventMetadataOperation = "operation" + // EventMetadataModified is used in event metadata when the event attests that the underlying data has been modified + // and might need to be re-fetched (at the EventMetadataDataPath). + EventMetadataModified = "modified" + + extraMetadataArgument = "EXTRA_VALUE_AT_END" ) // ID is an alias to GetId() for CloudEvents compatibility. @@ -30,5 +48,58 @@ type EventType string // EventSender sends events to the common event bus. type EventSender interface { - Send(ctx context.Context, eventType EventType, event *EventData) error + SendEvent(ctx context.Context, eventType EventType, event *EventData) error +} + +// SendEvent is a convenience method for plugins to send events to an EventSender, converting the +// metadataPairs to the EventData structure. +func SendEvent(ctx context.Context, sender EventSender, eventType string, metadataPairs ...string) error { + ev, err := NewEvent() + if err != nil { + return err + } + ev.Metadata = &structpb.Struct{Fields: make(map[string]*structpb.Value, (len(metadataPairs)+1)/2)} + for i := 0; i < len(metadataPairs)-1; i += 2 { + ev.Metadata.Fields[metadataPairs[i]] = structpb.NewStringValue(metadataPairs[i+1]) + } + if len(metadataPairs)%2 != 0 { + ev.Metadata.Fields[extraMetadataArgument] = structpb.NewStringValue(metadataPairs[len(metadataPairs)-1]) + } + return sender.SendEvent(ctx, EventType(eventType), ev) +} + +// EventReceivedBexpr is used for evaluating boolean expressions with go-bexpr. +type EventReceivedBexpr struct { + EventType string `bexpr:"event_type"` + Operation string `bexpr:"operation"` + SourcePluginMount string `bexpr:"source_plugin_mount"` + DataPath string `bexpr:"data_path"` + Namespace string `bexpr:"namespace"` +} + +// BexprDatum returns a copy of EventReceived formatted for use in evaluating go-bexpr boolean expressions. +func (x *EventReceived) BexprDatum() any { + operation := "" + dataPath := "" + + if x.Event != nil { + if x.Event.Metadata != nil { + operationValue := x.Event.Metadata.Fields[EventMetadataOperation] + if operationValue != nil { + operation = operationValue.GetStringValue() + } + dataPathValue := x.Event.Metadata.Fields[EventMetadataDataPath] + if dataPathValue != nil { + dataPath = dataPathValue.GetStringValue() + } + } + } + + return &EventReceivedBexpr{ + EventType: x.EventType, + Operation: operation, + SourcePluginMount: x.PluginInfo.MountPath, + DataPath: dataPath, + Namespace: x.Namespace, + } }
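// Editor's note (illustrative sketch, not part of this diff): a minimal example
// of the SendEvent helper above, paired with the MockEventSender introduced in
// events_mock.go just below. The event type "kv-v1/write" and the metadata
// values are made-up examples; only API from this diff is used.
func sendEventSketch(ctx context.Context) error {
	sender := NewMockEventSender()
	if err := SendEvent(ctx, sender, "kv-v1/write",
		EventMetadataDataPath, "data/mysecret", // alternating key/value metadata pairs
		EventMetadataOperation, "write",
		EventMetadataModified, "true",
	); err != nil {
		return err
	}
	// sender.Events now holds one MockEvent whose Type is "kv-v1/write".
	return nil
}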
diff --git a/sdk/logical/events_mock.go b/sdk/logical/events_mock.go new file mode 100644 index 000000000000..72741163e98a --- /dev/null +++ b/sdk/logical/events_mock.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "sync" +) + +// MockEventSender is a simple implementation of logical.EventSender that simply stores whatever events it receives, +// meant to be used in testing. It is thread-safe. +type MockEventSender struct { + sync.Mutex + Events []MockEvent + Stopped bool +} + +// MockEvent is a container for an event type + event pair. +type MockEvent struct { + Type EventType + Event *EventData +} + +// SendEvent implements the logical.EventSender interface. +func (m *MockEventSender) SendEvent(_ context.Context, eventType EventType, event *EventData) error { + m.Lock() + defer m.Unlock() + if !m.Stopped { + m.Events = append(m.Events, MockEvent{Type: eventType, Event: event}) + } + return nil +} + +func (m *MockEventSender) Stop() { + m.Lock() + defer m.Unlock() + m.Stopped = true +} + +var _ EventSender = (*MockEventSender)(nil) + +// NewMockEventSender returns a new MockEventSender ready to be used. +func NewMockEventSender() *MockEventSender { + return &MockEventSender{} +} diff --git a/sdk/logical/events_test.go b/sdk/logical/events_test.go new file mode 100644 index 000000000000..a018b0d312a9 --- /dev/null +++ b/sdk/logical/events_test.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +type fakeSender struct { + captured *EventData +} + +func (f *fakeSender) SendEvent(ctx context.Context, eventType EventType, event *EventData) error { + f.captured = event + return nil +} + +// TestSendEventWithOddParametersAddsExtraMetadata tests that an extra parameter is added to the metadata +// with a special key to note that it was extra. +func TestSendEventWithOddParametersAddsExtraMetadata(t *testing.T) { + sender := &fakeSender{} + // 0 or 2 arguments are okay + err := SendEvent(context.Background(), sender, "foo") + if err != nil { + t.Fatal(err) + } + m := sender.captured.Metadata.AsMap() + assert.NotContains(t, m, extraMetadataArgument) + err = SendEvent(context.Background(), sender, "foo", "bar", "baz") + if err != nil { + t.Fatal(err) + } + m = sender.captured.Metadata.AsMap() + assert.NotContains(t, m, extraMetadataArgument) + + // 1 or 3 arguments should result in extraMetadataArgument in metadata + err = SendEvent(context.Background(), sender, "foo", "extra") + if err != nil { + t.Fatal(err) + } + m = sender.captured.Metadata.AsMap() + assert.Contains(t, m, extraMetadataArgument) + assert.Equal(t, "extra", m[extraMetadataArgument]) + + err = SendEvent(context.Background(), sender, "foo", "bar", "baz", "extra") + if err != nil { + t.Fatal(err) + } + m = sender.captured.Metadata.AsMap() + assert.Contains(t, m, extraMetadataArgument) + assert.Equal(t, "extra", m[extraMetadataArgument]) +} diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go index fedc5f5c202c..5f08ce168935 100644 --- a/sdk/logical/identity.pb.go +++ b/sdk/logical/identity.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/logical/identity.proto package logical @@ -592,7 +592,7 @@ func file_sdk_logical_identity_proto_rawDescGZIP() []byte { } var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_sdk_logical_identity_proto_goTypes = []interface{}{ +var file_sdk_logical_identity_proto_goTypes = []any{ (*Entity)(nil), // 0: logical.Entity (*Alias)(nil), // 1: logical.Alias (*Group)(nil), // 2: logical.Group @@ -627,7 +627,7 @@ func file_sdk_logical_identity_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Entity); i { case 0: return &v.state @@ -639,7 +639,7 @@ func file_sdk_logical_identity_proto_init() { return nil } } - file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Alias); i { case 0: return &v.state @@ -651,7 +651,7 @@ func file_sdk_logical_identity_proto_init() { return nil } } - file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Group); i { case 0: return &v.state @@ -663,7 +663,7 @@ func file_sdk_logical_identity_proto_init() { return nil } } - file_sdk_logical_identity_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_identity_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*MFAMethodID); i { case 0: return &v.state @@ -675,7 +675,7 @@ func file_sdk_logical_identity_proto_init() { return nil } } - file_sdk_logical_identity_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_identity_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*MFAConstraintAny); i { case 0: return &v.state @@ -687,7 +687,7 @@ func file_sdk_logical_identity_proto_init() { return nil } } - file_sdk_logical_identity_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_identity_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*MFARequirement); i { case 0: return &v.state diff --git a/sdk/logical/identity.proto b/sdk/logical/identity.proto index 4a1f3413750d..8bac5559011e 100644 --- a/sdk/logical/identity.proto +++ b/sdk/logical/identity.proto @@ -3,93 +3,93 @@ syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/sdk/logical"; - package logical; +option go_package = "github.com/hashicorp/vault/sdk/logical"; + message Entity { - // ID is the unique identifier for the entity - string ID = 1; + // ID is the unique identifier for the entity + string ID = 1; + + // Name is the human-friendly unique identifier for the entity + string name = 2; - // Name is the human-friendly unique identifier for the entity - string name = 2; + // Aliases contains the alias mappings for the given entity + repeated Alias aliases = 3; - // Aliases contains thhe alias mappings for the given entity - repeated Alias aliases = 3; + // Metadata represents the custom data tied to this entity + map<string, string> metadata = 4; - // Metadata represents the custom data tied to this entity - map<string, string> metadata = 4; - - // Disabled is true if the entity is disabled.
- bool disabled = 5; + // Disabled is true if the entity is disabled. + bool disabled = 5; - // NamespaceID is the identifier of the namespace to which this entity - // belongs to. - string namespace_id = 6; + // NamespaceID is the identifier of the namespace to which this entity + // belongs. + string namespace_id = 6; } message Alias { - // MountType is the backend mount's type to which this identity belongs - string mount_type = 1; - - // MountAccessor is the identifier of the mount entry to which this - // identity belongs - string mount_accessor = 2; - - // Name is the identifier of this identity in its authentication source - string name = 3; - - // Metadata represents the custom data tied to this alias. Fields added - // to it should have a low rate of change (or no change) because each - // change incurs a storage write, so quickly-changing fields can have - // a significant performance impact at scale. See the SDK's - // "aliasmetadata" package for a helper that eases and standardizes - // using this safely. - map<string, string> metadata = 4; - - // ID is the unique identifier for the alias - string ID = 5; - - // NamespaceID is the identifier of the namespace to which this alias - // belongs. - string namespace_id = 6; - - // Custom Metadata represents the custom data tied to this alias - map<string, string> custom_metadata = 7; - - // Local indicates if the alias only belongs to the cluster where it was - // created. If true, the alias will be stored in a location that are ignored - // by the performance replication subsystem. - bool local = 8; + // MountType is the backend mount's type to which this identity belongs + string mount_type = 1; + + // MountAccessor is the identifier of the mount entry to which this + // identity belongs + string mount_accessor = 2; + + // Name is the identifier of this identity in its authentication source + string name = 3; + + // Metadata represents the custom data tied to this alias. Fields added + // to it should have a low rate of change (or no change) because each + // change incurs a storage write, so quickly-changing fields can have + // a significant performance impact at scale. See the SDK's + // "aliasmetadata" package for a helper that eases and standardizes + // using this safely. + map<string, string> metadata = 4; + + // ID is the unique identifier for the alias + string ID = 5; + + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + string namespace_id = 6; + + // Custom Metadata represents the custom data tied to this alias + map<string, string> custom_metadata = 7; + + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that is ignored + // by the performance replication subsystem. + bool local = 8; } message Group { - // ID is the unique identifier for the group - string ID = 1; + // ID is the unique identifier for the group + string ID = 1; - // Name is the human-friendly unique identifier for the group - string name = 2; + // Name is the human-friendly unique identifier for the group + string name = 2; - // Metadata represents the custom data tied to this group - map<string, string> metadata = 3; + // Metadata represents the custom data tied to this group + map<string, string> metadata = 3; - // NamespaceID is the identifier of the namespace to which this group - // belongs to. - string namespace_id = 4; + // NamespaceID is the identifier of the namespace to which this group + // belongs.
+ string namespace_id = 4; } message MFAMethodID { - string type = 1; - string id = 2; - bool uses_passcode = 3; - string name = 4; + string type = 1; + string id = 2; + bool uses_passcode = 3; + string name = 4; } message MFAConstraintAny { - repeated MFAMethodID any = 1; + repeated MFAMethodID any = 1; } message MFARequirement { - string mfa_request_id = 1; - map<string, MFAConstraintAny> mfa_constraints = 2; + string mfa_request_id = 1; + map<string, MFAConstraintAny> mfa_constraints = 2; } diff --git a/sdk/logical/keyusage_enumer.go b/sdk/logical/keyusage_enumer.go new file mode 100644 index 000000000000..83998c4a2a57 --- /dev/null +++ b/sdk/logical/keyusage_enumer.go @@ -0,0 +1,55 @@ +// Code generated by "enumer -type=KeyUsage -trimprefix=KeyUsage -transform=snake"; DO NOT EDIT. + +package logical + +import ( + "fmt" +) + +const _KeyUsageName = "encryptdecryptsignverifywrapunwrapgenerate_random" + +var _KeyUsageIndex = [...]uint8{0, 7, 14, 18, 24, 28, 34, 49} + +func (i KeyUsage) String() string { + i -= 1 + if i < 0 || i >= KeyUsage(len(_KeyUsageIndex)-1) { + return fmt.Sprintf("KeyUsage(%d)", i+1) + } + return _KeyUsageName[_KeyUsageIndex[i]:_KeyUsageIndex[i+1]] +} + +var _KeyUsageValues = []KeyUsage{1, 2, 3, 4, 5, 6, 7} + +var _KeyUsageNameToValueMap = map[string]KeyUsage{ + _KeyUsageName[0:7]: 1, + _KeyUsageName[7:14]: 2, + _KeyUsageName[14:18]: 3, + _KeyUsageName[18:24]: 4, + _KeyUsageName[24:28]: 5, + _KeyUsageName[28:34]: 6, + _KeyUsageName[34:49]: 7, +} + +// KeyUsageString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func KeyUsageString(s string) (KeyUsage, error) { + if val, ok := _KeyUsageNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to KeyUsage values", s) +} + +// KeyUsageValues returns all values of the enum +func KeyUsageValues() []KeyUsage { + return _KeyUsageValues +} + +// IsAKeyUsage returns "true" if the value is listed in the enum definition. "false" otherwise +func (i KeyUsage) IsAKeyUsage() bool { + for _, v := range _KeyUsageValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/logical/logical.go b/sdk/logical/logical.go index 209904e2ff33..be527ba9e9c7 100644 --- a/sdk/logical/logical.go +++ b/sdk/logical/logical.go @@ -159,6 +159,21 @@ type Paths struct { // On standby nodes, like all storage write operations, this will trigger // an ErrReadOnly return. WriteForwardedStorage []string + + // Binary paths are those whose request bodies should not be assumed to + // be JSON encoded, and for which the backend will decode values for auditing. + Binary []string + + // Limited paths are storage paths that require special-cased request + // limiting. + // + // This was initially added to separate limiting of "write" requests + // (limits.WriteLimiter) from limiting for CPU-bound pki/issue requests + // (limits.SpecialPathLimiter). Other plugins might also choose to mark + // paths if they don't follow a typical resource usage pattern. + // + // For more details, consult limits/registry.go.
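// Editor's note (illustrative sketch, not part of this diff): how a backend
// might declare the Binary and Limited markers being added here (the Limited
// field declaration continues immediately below). The concrete path patterns
// are hypothetical; Unauthenticated is an existing Paths field shown for
// contrast.
func pathsSketch() *Paths {
	return &Paths{
		Unauthenticated: []string{"login"},   // existing field
		Binary:          []string{"raw/*"},   // bodies are not assumed to be JSON
		Limited:         []string{"issue/*"}, // special-cased request limiting
	}
}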
+ Limited []string } type Auditor interface { @@ -166,11 +181,6 @@ type Auditor interface { AuditResponse(ctx context.Context, input *LogInput) error } -// Externaler allows us to check if a backend is running externally (i.e., over GRPC) -type Externaler interface { - IsExternal() bool -} - type PluginVersion struct { Version string } diff --git a/sdk/logical/managed_key.go b/sdk/logical/managed_key.go index 04727f9d7f42..b7bfb2f13d78 100644 --- a/sdk/logical/managed_key.go +++ b/sdk/logical/managed_key.go @@ -11,6 +11,7 @@ import ( wrapping "github.com/hashicorp/go-kms-wrapping/v2" ) +//go:generate enumer -type=KeyUsage -trimprefix=KeyUsage -transform=snake type KeyUsage int const ( diff --git a/sdk/logical/plugin.pb.go b/sdk/logical/plugin.pb.go index 19b18d89e186..7b8fe8ce723a 100644 --- a/sdk/logical/plugin.pb.go +++ b/sdk/logical/plugin.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/logical/plugin.proto package logical @@ -123,7 +123,7 @@ func file_sdk_logical_plugin_proto_rawDescGZIP() []byte { } var file_sdk_logical_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_sdk_logical_plugin_proto_goTypes = []interface{}{ +var file_sdk_logical_plugin_proto_goTypes = []any{ (*PluginEnvironment)(nil), // 0: logical.PluginEnvironment } var file_sdk_logical_plugin_proto_depIdxs = []int32{ @@ -140,7 +140,7 @@ func file_sdk_logical_plugin_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*PluginEnvironment); i { case 0: return &v.state diff --git a/sdk/logical/plugin.proto b/sdk/logical/plugin.proto index 0eaa3c57c849..5e19274ee2cb 100644 --- a/sdk/logical/plugin.proto +++ b/sdk/logical/plugin.proto @@ -3,17 +3,17 @@ syntax = "proto3"; -option go_package = "github.com/hashicorp/vault/sdk/logical"; - package logical; +option go_package = "github.com/hashicorp/vault/sdk/logical"; + message PluginEnvironment { - // VaultVersion is the version of the Vault server - string vault_version = 1; - - // VaultVersionPrerelease is the prerelease information of the Vault server - string vault_version_prerelease = 2; - - // VaultVersionMetadata is the version metadata of the Vault server - string vault_version_metadata = 3; + // VaultVersion is the version of the Vault server + string vault_version = 1; + + // VaultVersionPrerelease is the prerelease information of the Vault server + string vault_version_prerelease = 2; + + // VaultVersionMetadata is the version metadata of the Vault server + string vault_version_metadata = 3; } diff --git a/sdk/logical/request.go b/sdk/logical/request.go index c50b7a031d0a..1c360d4dbc42 100644 --- a/sdk/logical/request.go +++ b/sdk/logical/request.go @@ -6,6 +6,7 @@ package logical import ( "context" "fmt" + "io" "net/http" "strings" "time" @@ -50,12 +51,14 @@ func (r *RequestWrapInfo) SentinelKeys() []string { } } +//go:generate enumer -type=ClientTokenSource -trimprefix=ClientTokenFrom -transform=snake type ClientTokenSource uint32 const ( NoClientToken ClientTokenSource = iota ClientTokenFromVaultHeader ClientTokenFromAuthzHeader + ClientTokenFromInternalAuth ) type WALState struct { @@ -156,6 +159,22 @@ type Request struct { // backends can be tied to the mount it belongs to. 
MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""` + // mountRunningVersion is used internally to propagate the semantic version + // of the mounted plugin as reported by its vault.MountEntry to audit logging + mountRunningVersion string + + // mountRunningSha256 is used internally to propagate the encoded sha256 + // of the mounted plugin as reported by its vault.MountEntry to audit logging + mountRunningSha256 string + + // mountIsExternalPlugin is used internally to propagate whether + // the backend of the mounted plugin is running externally (i.e., over GRPC) + // to audit logging + mountIsExternalPlugin bool + + // mountClass is used internally to propagate the mount class of the mounted plugin to audit logging + mountClass string + // WrapInfo contains requested response wrapping parameters WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""` @@ -176,6 +195,10 @@ type Request struct { // accessible. Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"` + // PathLimited indicates that the request path is marked for special-case + // request limiting. + PathLimited bool `json:"path_limited" structs:"path_limited" mapstructure:"path_limited"` + // MFACreds holds the parsed MFA information supplied over the API as part of // X-Vault-MFA header MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""` @@ -227,15 +250,41 @@ type Request struct { // InboundSSCToken is the token that arrives on an inbound request, supplied // by the vault user. InboundSSCToken string + + // When a request has been forwarded, contains information about the host the request was forwarded 'from' + ForwardedFrom string `json:"forwarded_from,omitempty"` + + // Name of the chroot namespace for the listener that the request was made against + ChrootNamespace string `json:"chroot_namespace,omitempty"` + + // RequestLimiterDisabled indicates whether the Request Limiter has been disabled for the request's context. + RequestLimiterDisabled bool `json:"request_limiter_disabled,omitempty"` } -// Clone returns a deep copy of the request by using copystructure +// Clone returns an almost-deep copy of the request. +// It also sets the unexported fields that were previously only accessible +// outside the package via receiver methods. +// NOTE: Request.Connection is NOT deep-copied, due to issues with the results +// of copystructure on serial numbers within the x509.Certificate objects. func (r *Request) Clone() (*Request, error) { cpy, err := copystructure.Copy(r) if err != nil { return nil, err } - return cpy.(*Request), nil + + req := cpy.(*Request) + + // Add the unexported values that were only retrievable via receivers. + // copystructure isn't able to do this, which is why we're doing it manually. + req.mountClass = r.MountClass() + req.mountRunningVersion = r.MountRunningVersion() + req.mountRunningSha256 = r.MountRunningSha256() + req.mountIsExternalPlugin = r.MountIsExternalPlugin() + // This needs to be overwritten as the internal connection state is not cloned properly; + // mainly the big.Int serial numbers within the x509.Certificate objects get mangled.
+	req.Connection = r.Connection + + return req, nil } // Get returns a data field and guards for nil Data @@ -283,6 +332,38 @@ func (r *Request) SentinelKeys() []string { } } +func (r *Request) MountRunningVersion() string { + return r.mountRunningVersion +} + +func (r *Request) SetMountRunningVersion(mountRunningVersion string) { + r.mountRunningVersion = mountRunningVersion +} + +func (r *Request) MountRunningSha256() string { + return r.mountRunningSha256 +} + +func (r *Request) SetMountRunningSha256(mountRunningSha256 string) { + r.mountRunningSha256 = mountRunningSha256 +} + +func (r *Request) MountIsExternalPlugin() bool { + return r.mountIsExternalPlugin +} + +func (r *Request) SetMountIsExternalPlugin(mountIsExternalPlugin bool) { + r.mountIsExternalPlugin = mountIsExternalPlugin +} + +func (r *Request) MountClass() string { + return r.mountClass +} + +func (r *Request) SetMountClass(mountClass string) { + r.mountClass = mountClass +} + func (r *Request) LastRemoteWAL() uint64 { return r.lastRemoteWAL } @@ -369,6 +450,7 @@ const ( HelpOperation = "help" AliasLookaheadOperation = "alias-lookahead" ResolveRoleOperation = "resolve-role" + HeaderOperation = "header" // The operations below are called globally, the path is less relevant. RevokeOperation Operation = "revoke" @@ -395,3 +477,132 @@ type CtxKeyInFlightRequestID struct{} func (c CtxKeyInFlightRequestID) String() string { return "in-flight-request-ID" } + +type CtxKeyInFlightRequestPriority struct{} + +func (c CtxKeyInFlightRequestPriority) String() string { + return "in-flight-request-priority" +} + +// CtxKeyInFlightTraceID is used for passing a trace ID through request +// forwarding. The CtxKeyInFlightRequestID created at the HTTP layer is +// propagated on through any forwarded requests using this key. +// +// Note that this applies to replication service RPCs (including +// ForwardingRequest from perf standbys or secondaries). The Forwarding RPC +// service may propagate the context but the handling on the active node runs +// back through the `http` package handler which builds a new context from HTTP +// request properties and creates a fresh request ID. Forwarding RPC is used +// primarily in Community Edition, but also in some special cases in Enterprise, +// such as when forwarding is forced by an HTTP header. +type CtxKeyInFlightTraceID struct{} + +func (c CtxKeyInFlightTraceID) String() string { + return "in-flight-trace-ID" +} + +type CtxKeyRequestRole struct{} + +func (c CtxKeyRequestRole) String() string { + return "request-role" +} +
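// Editor's note (illustrative sketch, not part of this diff): a small sketch
// for the Clone change earlier in this hunk, showing that the unexported mount
// fields survive the copy; the values are hypothetical.
func cloneSketch(r *Request) (*Request, error) {
	r.SetMountClass("secret")
	r.SetMountRunningVersion("v1.15.0")
	clone, err := r.Clone()
	if err != nil {
		return nil, err
	}
	// clone.MountClass() == "secret", and clone.Connection is the same
	// pointer as r.Connection rather than a deep copy.
	return clone, nil
}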
+// ctxKeyDisableReplicationStatusEndpoints is a custom type used as a key in +// context.Context to store the value `true` when the +// disable_replication_status_endpoints configuration parameter is set to true +// for the listener through which a request was received. +type ctxKeyDisableReplicationStatusEndpoints struct{} + +// String returns a string representation of the receiver type. +func (c ctxKeyDisableReplicationStatusEndpoints) String() string { + return "disable-replication-status-endpoints" +} + +// ContextDisableReplicationStatusEndpointsValue examines the provided +// context.Context for the disable replication status endpoints value and +// returns it as a bool value if it's found along with the ok return value set +// to true; otherwise the ok return value is false. +func ContextDisableReplicationStatusEndpointsValue(ctx context.Context) (value, ok bool) { + value, ok = ctx.Value(ctxKeyDisableReplicationStatusEndpoints{}).(bool) + + return +} + +// CreateContextDisableReplicationStatusEndpoints creates a new context.Context +// based on the provided parent that also includes the provided value for the +// ctxKeyDisableReplicationStatusEndpoints key. +func CreateContextDisableReplicationStatusEndpoints(parent context.Context, value bool) context.Context { + return context.WithValue(parent, ctxKeyDisableReplicationStatusEndpoints{}, value) +} + +// CtxKeyOriginalRequestPath is a custom type used as a key in context.Context +// to store the original request path. +type ctxKeyOriginalRequestPath struct{} + +// String returns a string representation of the receiver type. +func (c ctxKeyOriginalRequestPath) String() string { + return "original_request_path" +} + +// ContextOriginalRequestPathValue examines the provided context.Context for the +// original request path value and returns it as a string value if it's found +// along with the ok value set to true; otherwise the ok return value is false. +func ContextOriginalRequestPathValue(ctx context.Context) (value string, ok bool) { + value, ok = ctx.Value(ctxKeyOriginalRequestPath{}).(string) + + return +} + +// CreateContextOriginalRequestPath creates a new context.Context based on the +// provided parent that also includes the provided original request path value +// for the ctxKeyOriginalRequestPath key. +func CreateContextOriginalRequestPath(parent context.Context, value string) context.Context { + return context.WithValue(parent, ctxKeyOriginalRequestPath{}, value) +} + +type ctxKeyOriginalBody struct{} + +func ContextOriginalBodyValue(ctx context.Context) (io.ReadCloser, bool) { + value, ok := ctx.Value(ctxKeyOriginalBody{}).(io.ReadCloser) + return value, ok +} + +func CreateContextOriginalBody(parent context.Context, body io.ReadCloser) context.Context { + return context.WithValue(parent, ctxKeyOriginalBody{}, body) +} + +type CtxKeyDisableRequestLimiter struct{} + +func (c CtxKeyDisableRequestLimiter) String() string { + return "disable_request_limiter" +} + +// ctxKeyRedactionSettings is a custom type used as a key in context.Context to +// store the redaction settings for the listener that received the +// request. +type ctxKeyRedactionSettings struct{} + +// String returns a string representation of the receiver type. +func (c ctxKeyRedactionSettings) String() string { + return "redaction-settings" +} + +// CtxRedactionSettingsValue examines the provided context.Context for the +// redaction settings value and returns them as a tuple of bool values if they +// are found along with the ok return value set to true; otherwise the ok return +// value is false. +func CtxRedactionSettingsValue(ctx context.Context) (redactVersion, redactAddresses, redactClusterName, ok bool) { + value, ok := ctx.Value(ctxKeyRedactionSettings{}).([]bool) + if !ok { + return false, false, false, false + } + + return value[0], value[1], value[2], true +} + +// CreateContextRedactionSettings creates a new context.Context based on the +// provided parent that also includes the provided redaction settings values for +// the ctxKeyRedactionSettings key.
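// Editor's note (illustrative sketch, not part of this diff): a round trip
// through two of the listener context helpers above, using only functions
// defined in this file.
func listenerContextSketch(parent context.Context) {
	ctx := CreateContextDisableReplicationStatusEndpoints(parent, true)
	ctx = CreateContextRedactionSettings(ctx, true, false, true)

	disabled, ok := ContextDisableReplicationStatusEndpointsValue(ctx)
	// disabled == true, ok == true
	redactVersion, redactAddresses, redactClusterName, ok := CtxRedactionSettingsValue(ctx)
	// redactVersion == true, redactAddresses == false, redactClusterName == true
	_, _, _, _, _ = disabled, ok, redactVersion, redactAddresses, redactClusterName
}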
+func CreateContextRedactionSettings(parent context.Context, redactVersion, redactAddresses, redactClusterName bool) context.Context { + return context.WithValue(parent, ctxKeyRedactionSettings{}, []bool{redactVersion, redactAddresses, redactClusterName}) +} diff --git a/sdk/logical/request_test.go b/sdk/logical/request_test.go new file mode 100644 index 000000000000..69663be4e416 --- /dev/null +++ b/sdk/logical/request_test.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestContextDisableReplicationStatusEndpointsValue(t *testing.T) { + testcases := []struct { + name string + ctx context.Context + expectedValue bool + expectedOk bool + }{ + { + name: "without-value", + ctx: context.Background(), + expectedValue: false, + expectedOk: false, + }, + { + name: "with-nil", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, nil), + expectedValue: false, + expectedOk: false, + }, + { + name: "with-incompatible-value", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, "true"), + expectedValue: false, + expectedOk: false, + }, + { + name: "with-bool-true", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, true), + expectedValue: true, + expectedOk: true, + }, + { + name: "with-bool-false", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, false), + expectedValue: false, + expectedOk: true, + }, + } + + for _, testcase := range testcases { + value, ok := ContextDisableReplicationStatusEndpointsValue(testcase.ctx) + assert.Equal(t, testcase.expectedValue, value, testcase.name) + assert.Equal(t, testcase.expectedOk, ok, testcase.name) + } +} + +func TestCreateContextDisableReplicationStatusEndpoints(t *testing.T) { + ctx := CreateContextDisableReplicationStatusEndpoints(context.Background(), true) + + value := ctx.Value(ctxKeyDisableReplicationStatusEndpoints{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, bool(false), value) + assert.Equal(t, true, value.(bool)) + + ctx = CreateContextDisableReplicationStatusEndpoints(context.Background(), false) + + value = ctx.Value(ctxKeyDisableReplicationStatusEndpoints{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, bool(false), value) + assert.Equal(t, false, value.(bool)) +} + +func TestContextOriginalRequestPathValue(t *testing.T) { + testcases := []struct { + name string + ctx context.Context + expectedValue string + expectedOk bool + }{ + { + name: "without-value", + ctx: context.Background(), + expectedValue: "", + expectedOk: false, + }, + { + name: "with-nil", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, nil), + expectedValue: "", + expectedOk: false, + }, + { + name: "with-incompatible-value", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, 6666), + expectedValue: "", + expectedOk: false, + }, + { + name: "with-string-value", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, "test"), + expectedValue: "test", + expectedOk: true, + }, + { + name: "with-empty-string", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, ""), + expectedValue: "", + expectedOk: true, + }, + } + + for _, testcase := range testcases { + value, ok := ContextOriginalRequestPathValue(testcase.ctx) + assert.Equal(t, 
testcase.expectedValue, value, testcase.name) + assert.Equal(t, testcase.expectedOk, ok, testcase.name) + } +} + +func TestCreateContextOriginalRequestPath(t *testing.T) { + ctx := CreateContextOriginalRequestPath(context.Background(), "test") + + value := ctx.Value(ctxKeyOriginalRequestPath{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, string(""), value) + assert.Equal(t, "test", value.(string)) + + ctx = CreateContextOriginalRequestPath(context.Background(), "") + + value = ctx.Value(ctxKeyOriginalRequestPath{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, string(""), value) + assert.Equal(t, "", value.(string)) +} diff --git a/sdk/logical/response.go b/sdk/logical/response.go index 9ea5bf6c5727..721618c76c17 100644 --- a/sdk/logical/response.go +++ b/sdk/logical/response.go @@ -85,6 +85,10 @@ type Response struct { // Headers will contain the http headers from the plugin that it wishes to // have as part of the output Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"` + + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. + MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"` } // AddWarning adds a warning into the response's warning list diff --git a/sdk/logical/response_util.go b/sdk/logical/response_util.go index 42529eb78ef2..95d6a931f5a2 100644 --- a/sdk/logical/response_util.go +++ b/sdk/logical/response_util.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "net/http" + "strings" "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" @@ -20,7 +21,7 @@ import ( func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { if err == nil && (resp == nil || !resp.IsError()) { switch { - case req.Operation == ReadOperation: + case req.Operation == ReadOperation || req.Operation == HeaderOperation: if resp == nil { return http.StatusNotFound, nil } @@ -76,10 +77,21 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { var allErrors error var codedErr *ReplicationCodedError errwrap.Walk(err, func(inErr error) { + // The Walk function does not just traverse leaves; it also executes the + // callback function on the entire error first. So, if the error is + // of type multierror.Error, we skip storing the entire + // error to avoid adding duplicate errors when walking down + // the leaf errors + if _, ok := inErr.(*multierror.Error); ok { + return + } newErr, ok := inErr.(*ReplicationCodedError) if ok { codedErr = newErr } else { + // if the error is of type fmt.wrapError which is typically + // made by calling fmt.Errorf("...
%w", err), allErrors will + // contain duplicated error messages allErrors = multierror.Append(allErrors, inErr) } }) @@ -101,6 +113,8 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { // appropriate code if err != nil { switch { + case errwrap.Contains(err, consts.ErrOverloaded.Error()): + statusCode = http.StatusServiceUnavailable case errwrap.ContainsType(err, new(StatusBadRequest)): statusCode = http.StatusBadRequest case errwrap.Contains(err, ErrPermissionDenied.Error()): @@ -127,11 +141,18 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { statusCode = http.StatusBadRequest case errwrap.Contains(err, ErrInvalidCredentials.Error()): statusCode = http.StatusBadRequest + case errors.Is(err, ErrNotFound): + statusCode = http.StatusNotFound } } - if resp != nil && resp.IsError() { - err = fmt.Errorf("%s", resp.Data["error"].(string)) + if respErr := resp.Error(); respErr != nil { + err = fmt.Errorf("%s", respErr.Error()) + + // Don't let other error codes override the overloaded status code + if strings.Contains(respErr.Error(), consts.ErrOverloaded.Error()) { + statusCode = http.StatusServiceUnavailable + } } return statusCode, err @@ -148,6 +169,11 @@ func AdjustErrorStatusCode(status *int, err error) { } } + // Adjust status code when overloaded + if errwrap.Contains(err, consts.ErrOverloaded.Error()) { + *status = http.StatusServiceUnavailable + } + // Adjust status code when sealed if errwrap.Contains(err, consts.ErrSealed.Error()) { *status = http.StatusServiceUnavailable @@ -194,7 +220,7 @@ func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, er type ErrorAndDataResponse struct { Errors []string `json:"errors"` - Data interface{} `json:"data""` + Data interface{} `json:"data"` } resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)} if err != nil { diff --git a/sdk/logical/response_util_test.go b/sdk/logical/response_util_test.go index d430b961e8d2..317a49963ad3 100644 --- a/sdk/logical/response_util_test.go +++ b/sdk/logical/response_util_test.go @@ -7,6 +7,8 @@ import ( "errors" "strings" "testing" + + "github.com/hashicorp/vault/sdk/helper/consts" ) func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) { @@ -42,6 +44,14 @@ func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) { respErr: nil, expectedStatus: 404, }, + { + title: "Header not found", + req: &Request{ + Operation: HeaderOperation, + }, + respErr: nil, + expectedStatus: 404, + }, { title: "List with response and no keys", req: &Request{ @@ -75,6 +85,17 @@ func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) { expectedErr: errors.New("error due to wrong credentials"), expectedStatus: 400, }, + { + title: "Overloaded error", + respErr: consts.ErrOverloaded, + resp: &Response{ + Data: map[string]interface{}{ + "error": "overloaded, try again later", + }, + }, + expectedErr: consts.ErrOverloaded, + expectedStatus: 503, + }, } for _, tc := range testCases { diff --git a/sdk/logical/storage.go b/sdk/logical/storage.go index 16ba60b94875..886ad51be781 100644 --- a/sdk/logical/storage.go +++ b/sdk/logical/storage.go @@ -97,6 +97,40 @@ func ScanView(ctx context.Context, view ClearableView, cb func(path string)) err return nil } +// AbortableScanView is used to scan all the keys in a view iteratively, +// but will abort the scan if cb returns false +func AbortableScanView(ctx context.Context, view ClearableView, cb func(path string) (cont bool)) error { + frontier := []string{""} + for len(frontier) > 0 { + n := 
len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // List the contents + contents, err := view.List(ctx, current) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err) + } + + // Handle the contents in the directory + for _, c := range contents { + // Exit if the context has been canceled + if ctx.Err() != nil { + return ctx.Err() + } + fullPath := current + c + if strings.HasSuffix(c, "/") { + frontier = append(frontier, fullPath) + } else { + if !cb(fullPath) { + return nil + } + } + } + } + return nil +} + // CollectKeys is used to collect all the keys in a view func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) { return CollectKeysWithPrefix(ctx, view, "") diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go index 7301c752a162..cecbc261e14e 100644 --- a/sdk/logical/system_view.go +++ b/sdk/logical/system_view.go @@ -97,6 +97,9 @@ type SystemView interface { // write forwarding (WriteForwardedPaths). This value will be templated // in for the {{cluterId}} sentinel. ClusterID(ctx context.Context) (string, error) + + // GenerateIdentityToken returns an identity token for the requesting plugin. + GenerateIdentityToken(ctx context.Context, req *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) } type PasswordPolicy interface { @@ -104,30 +107,51 @@ type PasswordPolicy interface { Generate(context.Context, io.Reader) (string, error) } +type WellKnownSystemView interface { + // RequestWellKnownRedirect registers a redirect from .well-known/src + // to dest, where dest is a sub-path of the mount. An error + // is returned if that source path is already taken + RequestWellKnownRedirect(ctx context.Context, src, dest string) error + + // DeregisterWellKnownRedirect unregisters a specific redirect. Returns + // true if that redirect source was found + DeregisterWellKnownRedirect(ctx context.Context, src string) bool +} + type ExtendedSystemView interface { + WellKnownSystemView + Auditor() Auditor ForwardGenericRequest(context.Context, *Request) (*Response, error) + + // APILockShouldBlockRequest returns whether a namespace for the requested + // mount is locked and should be blocked + APILockShouldBlockRequest() (bool, error) + + // GetPinnedPluginVersion returns the pinned version for the given plugin, if any. 
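// Editor's note (illustrative sketch, not part of this diff): a small caller
// for the AbortableScanView helper added in storage.go above; it stops the
// storage scan at the first matching key. ClearableView is the SDK interface
// the helper already takes.
func findKeySketch(ctx context.Context, view ClearableView, want string) (bool, error) {
	found := false
	err := AbortableScanView(ctx, view, func(path string) bool {
		if path == want {
			found = true
			return false // returning false aborts the scan early
		}
		return true // keep scanning
	})
	return found, err
}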
+ GetPinnedPluginVersion(ctx context.Context, pluginType consts.PluginType, pluginName string) (*pluginutil.PinnedVersion, error) } type PasswordGenerator func() (password string, err error) type StaticSystemView struct { - DefaultLeaseTTLVal time.Duration - MaxLeaseTTLVal time.Duration - SudoPrivilegeVal bool - TaintedVal bool - CachingDisabledVal bool - Primary bool - EnableMlock bool - LocalMountVal bool - ReplicationStateVal consts.ReplicationState - EntityVal *Entity - GroupsVal []*Group - Features license.Features - PluginEnvironment *PluginEnvironment - PasswordPolicies map[string]PasswordGenerator - VersionString string - ClusterUUID string + DefaultLeaseTTLVal time.Duration + MaxLeaseTTLVal time.Duration + SudoPrivilegeVal bool + TaintedVal bool + CachingDisabledVal bool + Primary bool + EnableMlock bool + LocalMountVal bool + ReplicationStateVal consts.ReplicationState + EntityVal *Entity + GroupsVal []*Group + Features license.Features + PluginEnvironment *PluginEnvironment + PasswordPolicies map[string]PasswordGenerator + VersionString string + ClusterUUID string + APILockShouldBlockRequestVal bool } type noopAuditor struct{} @@ -253,3 +277,11 @@ func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { func (d StaticSystemView) ClusterID(ctx context.Context) (string, error) { return d.ClusterUUID, nil } + +func (d StaticSystemView) GenerateIdentityToken(_ context.Context, _ *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + return nil, errors.New("GenerateIdentityToken is not implemented in StaticSystemView") +} + +func (d StaticSystemView) APILockShouldBlockRequest() (bool, error) { + return d.APILockShouldBlockRequestVal, nil +} diff --git a/sdk/logical/testing.go b/sdk/logical/testing.go index a173c7c5f7b2..c507b1c3e35e 100644 --- a/sdk/logical/testing.go +++ b/sdk/logical/testing.go @@ -6,16 +6,15 @@ package logical import ( "context" "reflect" + "testing" "time" - testing "github.com/mitchellh/go-testing-interface" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" ) // TestRequest is a helper to create a purely in-memory Request struct. -func TestRequest(t testing.T, op Operation, path string) *Request { +func TestRequest(t testing.TB, op Operation, path string) *Request { return &Request{ Operation: op, Path: path, @@ -27,7 +26,7 @@ func TestRequest(t testing.T, op Operation, path string) *Request { // TestStorage is a helper that can be used from unit tests to verify // the behavior of a Storage impl. 
-func TestStorage(t testing.T, s Storage) { +func TestStorage(t testing.TB, s Storage) { keys, err := s.List(context.Background(), "") if err != nil { t.Fatalf("list error: %s", err) diff --git a/sdk/logical/token.go b/sdk/logical/token.go index a27a73a22dc2..12114548ee56 100644 --- a/sdk/logical/token.go +++ b/sdk/logical/token.go @@ -11,9 +11,10 @@ import ( "strings" "time" - sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/go-sockaddr" ) +//go:generate enumer -type=TokenType -trimprefix=TokenType -transform=kebab type TokenType uint8 const ( @@ -72,23 +73,6 @@ func (t *TokenType) UnmarshalJSON(b []byte) error { return nil } -func (t TokenType) String() string { - switch t { - case TokenTypeDefault: - return "default" - case TokenTypeService: - return "service" - case TokenTypeBatch: - return "batch" - case TokenTypeDefaultService: - return "default-service" - case TokenTypeDefaultBatch: - return "default-batch" - default: - panic("unreachable") - } -} - // TokenEntry is used to represent a given token type TokenEntry struct { Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""` diff --git a/sdk/logical/tokentype_enumer.go b/sdk/logical/tokentype_enumer.go new file mode 100644 index 000000000000..9b350a74d355 --- /dev/null +++ b/sdk/logical/tokentype_enumer.go @@ -0,0 +1,52 @@ +// Code generated by "enumer -type=TokenType -trimprefix=TokenType -transform=kebab"; DO NOT EDIT. + +package logical + +import ( + "fmt" +) + +const _TokenTypeName = "defaultservicebatchdefault-servicedefault-batch" + +var _TokenTypeIndex = [...]uint8{0, 7, 14, 19, 34, 47} + +func (i TokenType) String() string { + if i >= TokenType(len(_TokenTypeIndex)-1) { + return fmt.Sprintf("TokenType(%d)", i) + } + return _TokenTypeName[_TokenTypeIndex[i]:_TokenTypeIndex[i+1]] +} + +var _TokenTypeValues = []TokenType{0, 1, 2, 3, 4} + +var _TokenTypeNameToValueMap = map[string]TokenType{ + _TokenTypeName[0:7]: 0, + _TokenTypeName[7:14]: 1, + _TokenTypeName[14:19]: 2, + _TokenTypeName[19:34]: 3, + _TokenTypeName[34:47]: 4, +} + +// TokenTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func TokenTypeString(s string) (TokenType, error) { + if val, ok := _TokenTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to TokenType values", s) +} + +// TokenTypeValues returns all values of the enum +func TokenTypeValues() []TokenType { + return _TokenTypeValues +} + +// IsATokenType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i TokenType) IsATokenType() bool { + for _, v := range _TokenTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/logical/translate_response.go b/sdk/logical/translate_response.go index ef5ba5f22072..ca832ebd691b 100644 --- a/sdk/logical/translate_response.go +++ b/sdk/logical/translate_response.go @@ -16,9 +16,10 @@ import ( // values we don't. 
func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { httpResp := &HTTPResponse{ - Data: input.Data, - Warnings: input.Warnings, - Headers: input.Headers, + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + MountType: input.MountType, } if input.Secret != nil { @@ -52,9 +53,10 @@ func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { logicalResp := &Response{ - Data: input.Data, - Warnings: input.Warnings, - Headers: input.Headers, + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + MountType: input.MountType, } if input.LeaseID != "" { @@ -99,6 +101,7 @@ type HTTPResponse struct { Warnings []string `json:"warnings"` Headers map[string][]string `json:"-"` Auth *HTTPAuth `json:"auth"` + MountType string `json:"mount_type"` } type HTTPAuth struct { diff --git a/sdk/logical/version.pb.go b/sdk/logical/version.pb.go index 9962824cbb15..abb579096b27 100644 --- a/sdk/logical/version.pb.go +++ b/sdk/logical/version.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/logical/version.proto package logical @@ -141,7 +141,7 @@ func file_sdk_logical_version_proto_rawDescGZIP() []byte { } var file_sdk_logical_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_sdk_logical_version_proto_goTypes = []interface{}{ +var file_sdk_logical_version_proto_goTypes = []any{ (*Empty)(nil), // 0: logical.Empty (*VersionReply)(nil), // 1: logical.VersionReply } @@ -161,7 +161,7 @@ func file_sdk_logical_version_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state @@ -173,7 +173,7 @@ func file_sdk_logical_version_proto_init() { return nil } } - file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*VersionReply); i { case 0: return &v.state diff --git a/sdk/logical/version.proto b/sdk/logical/version.proto index 860ddc54e270..704e212056c4 100644 --- a/sdk/logical/version.proto +++ b/sdk/logical/version.proto @@ -17,4 +17,4 @@ message VersionReply { service PluginVersion { // Version returns version information for the plugin. rpc Version(Empty) returns (VersionReply); -} \ No newline at end of file +} diff --git a/sdk/logical/version_grpc.pb.go b/sdk/logical/version_grpc.pb.go index a69e97059978..9aa110fce98f 100644 --- a/sdk/logical/version_grpc.pb.go +++ b/sdk/logical/version_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc (unknown) +// source: sdk/logical/version.proto package logical @@ -11,12 +18,18 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. 
+const _ = grpc.SupportPackageIsVersion8 + +const ( + PluginVersion_Version_FullMethodName = "/logical.PluginVersion/Version" +) // PluginVersionClient is the client API for PluginVersion service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// PluginVersion is an optional RPC service implemented by plugins. type PluginVersionClient interface { // Version returns version information for the plugin. Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) @@ -31,8 +44,9 @@ func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient { } func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VersionReply) - err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...) + err := c.cc.Invoke(ctx, PluginVersion_Version_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -42,6 +56,8 @@ func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...gr // PluginVersionServer is the server API for PluginVersion service. // All implementations must embed UnimplementedPluginVersionServer // for forward compatibility +// +// PluginVersion is an optional RPC service implemented by plugins. type PluginVersionServer interface { // Version returns version information for the plugin. Version(context.Context, *Empty) (*VersionReply, error) @@ -78,7 +94,7 @@ func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/logical.PluginVersion/Version", + FullMethod: PluginVersion_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PluginVersionServer).Version(ctx, req.(*Empty)) diff --git a/sdk/physical/cache.go b/sdk/physical/cache.go index cc318a4c0eb5..3816609e2bcd 100644 --- a/sdk/physical/cache.go +++ b/sdk/physical/cache.go @@ -32,6 +32,16 @@ var cacheExceptionsPaths = []string{ "sys/expire/", "core/poison-pill", "core/raft/tls", + + // Add barrierSealConfigPath and recoverySealConfigPlaintextPath to the cache + // exceptions to avoid unseal errors. See VAULT-17227 + "core/seal-config", + "core/recovery-config", + + // We need to make sure the persisted license is read from storage + // to ensure that changes to the autoloaded license on the active node + // are observed on the perfStandby nodes + "core/autoloaded-license", } // CacheRefreshContext returns a context with an added value denoting if the @@ -76,6 +86,7 @@ var ( _ ToggleablePurgemonster = (*TransactionalCache)(nil) _ Backend = (*Cache)(nil) _ Transactional = (*TransactionalCache)(nil) + _ TransactionalLimits = (*TransactionalCache)(nil) ) // NewCache returns a physical cache of the given size. @@ -261,3 +272,14 @@ func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) return nil } + +// TransactionLimits implements physical.TransactionalLimits +func (c *TransactionalCache) TransactionLimits() (int, int) { + if tl, ok := c.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend.
+ return 0, 0 +} diff --git a/sdk/physical/cache_test.go b/sdk/physical/cache_test.go new file mode 100644 index 000000000000..7e9bf3232a04 --- /dev/null +++ b/sdk/physical/cache_test.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestTransactionalCache_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. + wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + logger := hclog.NewNullLogger() + + be := NewTransactionalCache(tt.be, 1024, logger, nil) + + // Call the TransactionLimits method + maxEntries, maxBytes := be.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/encoding.go b/sdk/physical/encoding.go index 49e00ae6ace5..af581207f9cb 100644 --- a/sdk/physical/encoding.go +++ b/sdk/physical/encoding.go @@ -98,6 +98,17 @@ func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []* return e.Transactional.Transaction(ctx, txns) } +// TransactionLimits implements physical.TransactionalLimits +func (e *TransactionalStorageEncoding) TransactionLimits() (int, int) { + if tl, ok := e.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend. + return 0, 0 +} + func (e *StorageEncoding) Purge(ctx context.Context) { if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok { purgeable.Purge(ctx) diff --git a/sdk/physical/encoding_test.go b/sdk/physical/encoding_test.go new file mode 100644 index 000000000000..e4d9cceaa417 --- /dev/null +++ b/sdk/physical/encoding_test.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTransactionalStorageEncoding_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. 
+ wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + be := NewStorageEncoding(tt.be).(TransactionalLimits) + + // Call the TransactionLimits method + maxEntries, maxBytes := be.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/error.go b/sdk/physical/error.go index 4af7b7d639fc..aa7418fd7893 100644 --- a/sdk/physical/error.go +++ b/sdk/physical/error.go @@ -111,3 +111,14 @@ func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*Tx } return e.Transactional.Transaction(ctx, txns) } + +// TransactionLimits implements physical.TransactionalLimits +func (e *TransactionalErrorInjector) TransactionLimits() (int, int) { + if tl, ok := e.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend. + return 0, 0 +} diff --git a/sdk/physical/error_test.go b/sdk/physical/error_test.go new file mode 100644 index 000000000000..779cd1bc1c98 --- /dev/null +++ b/sdk/physical/error_test.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestTransactionalErrorInjector_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. 
+ wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + logger := hclog.NewNullLogger() + + injector := NewTransactionalErrorInjector(tt.be, 0, logger) + + maxEntries, maxBytes := injector.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/file/file.go b/sdk/physical/file/file.go index d7ad9de3a2b0..ced5d9b93da8 100644 --- a/sdk/physical/file/file.go +++ b/sdk/physical/file/file.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" @@ -245,17 +244,21 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er // JSON encode the entry and write it fullPath := filepath.Join(path, key) - tempPath := fullPath + ".temp" - f, err := os.OpenFile( - tempPath, - os.O_CREATE|os.O_TRUNC|os.O_WRONLY, - 0o600) + f, err := os.CreateTemp(path, key) if err != nil { if f != nil { f.Close() } return err } + + if err = os.Chmod(f.Name(), 0o600); err != nil { + if f != nil { + f.Close() + } + return err + } + if f == nil { return errors.New("could not successfully get a file handle") } @@ -266,7 +269,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er }) f.Close() if encErr == nil { - err = os.Rename(tempPath, fullPath) + err = os.Rename(f.Name(), fullPath) if err != nil { return err } @@ -278,7 +281,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er // See if we ended up with a zero-byte file and if so delete it, might be a // case of disk being full but the file info is in metadata that is // reserved. 
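
The PutInternal refactor above swaps a predictable `<key>.temp` path for os.CreateTemp while keeping the overall write-to-temp-then-rename shape. Below is a minimal standalone sketch of that pattern, separate from the Vault code; `atomicWrite` is illustrative and its error handling is simplified (the real code also handles the zero-byte-file case shown next).

```go
package fileutil

import (
	"os"
	"path/filepath"
)

// atomicWrite writes data to dir/name via a temp file plus rename, so readers
// never observe a partially written file.
func atomicWrite(dir, name string, data []byte) error {
	f, err := os.CreateTemp(dir, name)
	if err != nil {
		return err
	}
	tmp := f.Name()
	// CreateTemp already creates the file with mode 0600, but an explicit
	// Chmod documents the intended permissions.
	if err := f.Chmod(0o600); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}
	// Rename is atomic on POSIX filesystems: readers see either the old
	// content or the new content, never a torn write.
	return os.Rename(tmp, filepath.Join(dir, name))
}
```
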
- fi, err := os.Stat(tempPath) + fi, err := os.Stat(f.Name()) if err != nil { return encErr } @@ -286,7 +289,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er return encErr } if fi.Size() == 0 { - os.Remove(tempPath) + os.Remove(f.Name()) } return encErr } diff --git a/sdk/physical/file/file_test.go b/sdk/physical/file/file_test.go index 14c33094dec4..7fc6398c8dff 100644 --- a/sdk/physical/file/file_test.go +++ b/sdk/physical/file/file_test.go @@ -240,3 +240,54 @@ func TestFileBackend(t *testing.T) { physical.ExerciseBackend_ListPrefix(t, b) } + +func TestFileBackendCreateTempKey(t *testing.T) { + dir := t.TempDir() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewFileBackend(map[string]string{ + "path": dir, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + temp := &physical.Entry{Key: "example.temp", Value: []byte("tempfoo")} + err = b.Put(context.Background(), temp) + if err != nil { + t.Fatalf("err: %v", err) + } + + nonTemp := &physical.Entry{Key: "example", Value: []byte("foobar")} + err = b.Put(context.Background(), nonTemp) + if err != nil { + t.Fatalf("err: %v", err) + } + + vals, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + if len(vals) != 2 || vals[0] == vals[1] { + t.Fatalf("bad: %v", vals) + } + for _, val := range vals { + if val != "example.temp" && val != "example" { + t.Fatalf("bad val: %v", val) + } + } + out, err := b.Get(context.Background(), "example") + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, nonTemp) { + t.Fatalf("bad: %v expected: %v", out, nonTemp) + } + out, err = b.Get(context.Background(), "example.temp") + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, temp) { + t.Fatalf("bad: %v expected: %v", out, temp) + } +} diff --git a/sdk/physical/inmem/inmem.go b/sdk/physical/inmem/inmem.go index e4fa1f69ba23..2a9198f6af98 100644 --- a/sdk/physical/inmem/inmem.go +++ b/sdk/physical/inmem/inmem.go @@ -12,20 +12,24 @@ import ( "strings" "sync" "sync/atomic" + "time" "github.com/armon/go-radix" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" + uberAtomic "go.uber.org/atomic" ) // Verify interfaces are satisfied var ( - _ physical.Backend = (*InmemBackend)(nil) - _ physical.HABackend = (*InmemHABackend)(nil) - _ physical.HABackend = (*TransactionalInmemHABackend)(nil) - _ physical.Lock = (*InmemLock)(nil) - _ physical.Transactional = (*TransactionalInmemBackend)(nil) - _ physical.Transactional = (*TransactionalInmemHABackend)(nil) + _ physical.Backend = (*InmemBackend)(nil) + _ physical.MountTableLimitingBackend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) + _ physical.TransactionalLimits = (*TransactionalInmemBackend)(nil) ) var ( @@ -51,10 +55,25 @@ type InmemBackend struct { failGetInTxn *uint32 logOps bool maxValueSize int + writeLatency time.Duration + + mountTablePaths map[string]struct{} } type TransactionalInmemBackend struct { InmemBackend + + // Using Uber atomic because our SemGrep rules don't like the old pointer + // trick we used above any more even though it's fine. 
The newer sync/atomic
+	// types are almost the same, but lack ways to initialize them cleanly in New*
+	// functions so sticking with what SemGrep likes for now.
+	maxBatchEntries *uberAtomic.Int32
+	maxBatchSize    *uberAtomic.Int32
+
+	largestBatchLen  *uberAtomic.Uint64
+	largestBatchSize *uberAtomic.Uint64
+
+	transactionCompleteCh chan *txnCommitRequest
 }
 
 // NewInmem constructs a new in-memory backend
@@ -109,9 +128,25 @@ func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.
 			logOps:       os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
 			maxValueSize: maxValueSize,
 		},
+
+		maxBatchEntries:  uberAtomic.NewInt32(64),
+		maxBatchSize:     uberAtomic.NewInt32(128 * 1024),
+		largestBatchLen:  uberAtomic.NewUint64(0),
+		largestBatchSize: uberAtomic.NewUint64(0),
 	}, nil
 }
 
+// SetWriteLatency adds a sleep to each Put/Delete operation (and each op in a
+// transaction for a TransactionalInmemBackend). It's not so much to simulate
+// real disk latency as to make the Go runtime schedule things more like a real
+// disk, where concurrent write operations are more likely to interleave as
+// each one blocks on disk IO. Set to 0 to disable again (the default).
+func (i *InmemBackend) SetWriteLatency(latency time.Duration) {
+	i.Lock()
+	defer i.Unlock()
+	i.writeLatency = latency
+}
+
 // Put is used to insert or update an entry
 func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error {
 	i.permitPool.Acquire()
@@ -142,6 +177,9 @@ func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) e
 	}
 
 	i.root.Insert(entry.Key, entry.Value)
+	if i.writeLatency > 0 {
+		time.Sleep(i.writeLatency)
+	}
 	return nil
 }
 
@@ -228,6 +266,9 @@ func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error {
 	}
 
 	i.root.Delete(key)
+	if i.writeLatency > 0 {
+		time.Sleep(i.writeLatency)
+	}
 	return nil
 }
 
@@ -294,6 +335,24 @@ func (i *InmemBackend) FailList(fail bool) {
 	atomic.StoreUint32(i.failList, val)
 }
 
+// RegisterMountTablePath implements physical.MountTableLimitingBackend
+func (i *InmemBackend) RegisterMountTablePath(path string) {
+	if i.mountTablePaths == nil {
+		i.mountTablePaths = make(map[string]struct{})
+	}
+	i.mountTablePaths[path] = struct{}{}
+}
+
+// GetMountTablePaths returns any paths registered as mount table or namespace
+// metadata paths. It's intended for testing.
+func (i *InmemBackend) GetMountTablePaths() []string {
+	var paths []string
+	for path := range i.mountTablePaths {
+		paths = append(paths, path)
+	}
+	return paths
+}
+
 // Transaction implements the transaction interface
 func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
 	t.permitPool.Acquire()
@@ -303,11 +362,76 @@ func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*phy
 	defer t.Unlock()
 
 	failGetInTxn := atomic.LoadUint32(t.failGetInTxn)
+	size := uint64(0)
 	for _, t := range txns {
+		// We use 2x key length to match the logic in WALBackend.persistWALs.
+		size += uint64(2*len(t.Entry.Key) + len(t.Entry.Value))
 		if t.Operation == physical.GetOperation && failGetInTxn != 0 {
 			return GetInTxnDisabledError
 		}
 	}
 
-	return physical.GenericTransactionHandler(ctx, t, txns)
+	if size > t.largestBatchSize.Load() {
+		t.largestBatchSize.Store(size)
+	}
+	if len(txns) > int(t.largestBatchLen.Load()) {
+		t.largestBatchLen.Store(uint64(len(txns)))
+	}
+
+	err := physical.GenericTransactionHandler(ctx, t, txns)
+
+	// If we have a transactionCompleteCh set, we block on it before returning.
+	if t.transactionCompleteCh != nil {
+		req := &txnCommitRequest{
+			txns: txns,
+			ch:   make(chan struct{}),
+		}
+		t.transactionCompleteCh <- req
+		<-req.ch
+	}
+	return err
+}
+
+func (t *TransactionalInmemBackend) SetMaxBatchEntries(entries int) {
+	t.maxBatchEntries.Store(int32(entries))
+}
+
+func (t *TransactionalInmemBackend) SetMaxBatchSize(entries int) {
+	t.maxBatchSize.Store(int32(entries))
+}
+
+func (t *TransactionalInmemBackend) TransactionLimits() (int, int) {
+	return int(t.maxBatchEntries.Load()), int(t.maxBatchSize.Load())
+}
+
+func (t *TransactionalInmemBackend) BatchStats() (maxEntries uint64, maxSize uint64) {
+	return t.largestBatchLen.Load(), t.largestBatchSize.Load()
+}
+
+// TxnCommitChan returns a channel that allows deterministic control of when
+// transactions are executed. Each time `Transaction` is called on the backend,
+// a txnCommitRequest is sent on the chan returned and then Transaction will
+// block until Commit is called on that request object. This allows tests to
+// deterministically wait until a persist is actually in progress, as well as
+// control when the persist completes. The returned chan is buffered with a
+// length of 5, which should be enough to ensure that test code doesn't deadlock
+// in normal operation since we typically only have one outstanding transaction
+// at a time.
+func (t *TransactionalInmemBackend) TxnCommitChan() <-chan *txnCommitRequest {
+	t.Lock()
+	defer t.Unlock()
+
+	ch := make(chan *txnCommitRequest, 5)
+	t.transactionCompleteCh = ch
+
+	return ch
+}
+
+type txnCommitRequest struct {
+	txns []*physical.TxnEntry
+	ch   chan struct{}
+}
+
+func (r *txnCommitRequest) Commit() {
+	close(r.ch)
+}
diff --git a/sdk/physical/latency.go b/sdk/physical/latency.go
index 82c4052d0204..56d045e30264 100644
--- a/sdk/physical/latency.go
+++ b/sdk/physical/latency.go
@@ -59,6 +59,9 @@ func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log
 }
 
 // NewTransactionalLatencyInjector creates a new transactional LatencyInjector.
+// jitter is the random percentage by which the latency is allowed to vary.
+// For example, if you specify latency = 50ms and jitter = 20, then for any
+// given operation, the latency will be 50ms +- 10ms (20% of 50), or between 40 and 60ms.
 func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
 	return &TransactionalLatencyInjector{
 		LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
@@ -114,3 +117,14 @@ func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*
 	l.addLatency()
 	return l.Transactional.Transaction(ctx, txns)
 }
+
+// TransactionLimits implements physical.TransactionalLimits
+func (l *TransactionalLatencyInjector) TransactionLimits() (int, int) {
+	if tl, ok := l.Transactional.(TransactionalLimits); ok {
+		return tl.TransactionLimits()
+	}
+	// We don't have any specific limits of our own so return zeros to signal that
+	// the caller should use whatever reasonable defaults it would if it used a
+	// non-TransactionalLimits backend.
+	return 0, 0
+}
diff --git a/sdk/physical/latency_test.go b/sdk/physical/latency_test.go
new file mode 100644
index 000000000000..2585a04e0f12
--- /dev/null
+++ b/sdk/physical/latency_test.go
@@ -0,0 +1,53 @@
+// Copyright (c) HashiCorp, Inc.
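
For context, here is a hypothetical test from another package using TxnCommitChan, assuming NewTransactionalInmem returns a physical.Backend backed by a *TransactionalInmemBackend as shown above; the test name and flow are illustrative, not part of this change.

```go
package inmem_test

import (
	"context"
	"testing"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/physical"
	"github.com/hashicorp/vault/sdk/physical/inmem"
)

func TestPausedTransaction(t *testing.T) {
	b, err := inmem.NewTransactionalInmem(nil, log.NewNullLogger())
	if err != nil {
		t.Fatal(err)
	}
	tb := b.(*inmem.TransactionalInmemBackend)

	// Every subsequent Transaction call sends a commit request on this
	// channel and blocks until Commit is called on it.
	commits := tb.TxnCommitChan()

	done := make(chan error, 1)
	go func() {
		done <- tb.Transaction(context.Background(), []*physical.TxnEntry{
			{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "foo", Value: []byte("bar")}},
		})
	}()

	// The transaction has been applied and is now paused before returning.
	req := <-commits
	req.Commit() // release it
	if err := <-done; err != nil {
		t.Fatal(err)
	}
}
```
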
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"testing"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/stretchr/testify/require"
+)
+
+func TestTransactionalLatencyInjector_TransactionLimits(t *testing.T) {
+	tc := []struct {
+		name        string
+		be          Backend
+		wantEntries int
+		wantSize    int
+	}{
+		{
+			name: "non-transactionlimits backend",
+			be:   &TestTransactionalNonLimitBackend{},
+
+			// Should return zeros to let the implementor choose defaults.
+			wantEntries: 0,
+			wantSize:    0,
+		},
+		{
+			name: "transactionlimits backend",
+			be: &TestTransactionalLimitBackend{
+				MaxEntries: 123,
+				MaxSize:    345,
+			},
+
+			// Should return underlying limits
+			wantEntries: 123,
+			wantSize:    345,
+		},
+	}
+
+	for _, tt := range tc {
+		t.Run(tt.name, func(t *testing.T) {
+			logger := hclog.NewNullLogger()
+
+			injector := NewTransactionalLatencyInjector(tt.be, 0, 0, logger)
+
+			maxEntries, maxBytes := injector.TransactionLimits()
+
+			require.Equal(t, tt.wantEntries, maxEntries)
+			require.Equal(t, tt.wantSize, maxBytes)
+		})
+	}
+}
diff --git a/sdk/physical/physical.go b/sdk/physical/physical.go
index c0e7d2ef8895..624e580f9a1f 100644
--- a/sdk/physical/physical.go
+++ b/sdk/physical/physical.go
@@ -60,6 +60,69 @@ type HABackend interface {
 	HAEnabled() bool
 }
 
+// FencingHABackend is an HABackend which provides the additional guarantee that
+// each Lock it returns from LockWith is also a FencingLock. A FencingLock
+// provides a mechanism to retrieve a fencing token that can be included by
+// future writes by the backend to ensure that it is still the current lock
+// holder at the time the write commits. Without this, timing might allow a lock
+// holder not to notice that it's no longer the active node for long enough for
+// it to write data to storage even while a new active node is writing, causing
+// corruption. For the Consul backend the fencing token is the session id, which
+// is submitted with a `check-session` operation on each write to ensure the
+// write only completes if the session is still holding the lock. For the raft
+// backend this isn't needed because our in-process raft library is unable to
+// write if it's not the leader anyway.
+//
+// If you implement this, Vault will call RegisterActiveNodeLock with the Lock
+// instance returned by LockWith after it successfully locks it. This keeps the
+// backend oblivious to the specific key we use for active node locks and allows
+// potential usage of locks for other purposes in the future.
+//
+// Note that all implementations must support writing to storage before
+// RegisterActiveNodeLock is called to support initialization of a new cluster.
+// They must also skip fencing writes if the write's Context contains a special
+// value. This is necessary to allow Vault to clear and re-initialise secondary
+// clusters even though there is already an active node with a specific lock
+// session, since we clear the cluster while Vault is sealed and clearing the
+// data might remove the lock in some storages (e.g. Consul). As noted above
+// it's not generally safe to allow unfenced writes after a lock so instead we
+// special case just a few types of writes that only happen rarely while the
+// cluster is sealed. See the IsUnfencedWrite helper function.
+type FencingHABackend interface {
+	HABackend
+
+	RegisterActiveNodeLock(l Lock) error
+}
+
+// unfencedWriteContextKeyType is a special type to identify context values to
+// disable fencing.
It's a separate type per the best-practice in Context.Value +// docs to avoid collisions even if the key might match. +type unfencedWriteContextKeyType string + +const ( + // unfencedWriteContextKey is the context key we pass the option to bypass + // fencing through to a FencingHABackend. Note that this is not an ideal use + // of context values and violates the "do not use it for optional arguments" + // guidance but has been agreed as a pragmatic option for this case rather + // than needing to specialize every physical.Backend to understand this + // option. + unfencedWriteContextKey unfencedWriteContextKeyType = "vault-disable-fencing" +) + +// UnfencedWriteCtx adds metadata to a ctx such that any writes performed +// directly on a FencingHABackend using that context will _not_ add a fencing +// token. +func UnfencedWriteCtx(ctx context.Context) context.Context { + return context.WithValue(ctx, unfencedWriteContextKey, true) +} + +// IsUnfencedWrite returns whether or not the context passed has the unfenced +// flag value set. +func IsUnfencedWrite(ctx context.Context) bool { + isUnfenced, ok := ctx.Value(unfencedWriteContextKey).(bool) + return ok && isUnfenced +} + // ToggleablePurgemonster is an interface for backends that can toggle on or // off special functionality and/or support purging. This is only used for the // cache, don't use it for other things. @@ -76,6 +139,16 @@ type RedirectDetect interface { DetectHostAddr() (string, error) } +// MountTableLimitingBackend is an optional interface a Backend can implement +// that allows it to support different entry size limits for mount-table-related +// paths. It will only be called in Vault Enterprise. +type MountTableLimitingBackend interface { + // RegisterMountTablePath informs the Backend that the given path represents + // part of the mount tables or related metadata. This allows the backend to + // apply different limits for this entry if configured to do so. 
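
A short sketch of how these helpers compose, separate from Vault's code: UnfencedWriteCtx marks a context, IsUnfencedWrite reads the mark, and a fencing backend's write path branches on it. `attachFenceIfNeeded` is hypothetical; only the two physical package functions are from this change.

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/physical"
)

// attachFenceIfNeeded sketches how a hypothetical FencingHABackend's write
// path might branch: attach the fencing token (e.g. Consul's check-session)
// unless the write was explicitly marked unfenced.
func attachFenceIfNeeded(ctx context.Context, attach func()) {
	if !physical.IsUnfencedWrite(ctx) {
		attach()
	}
}

func main() {
	ctx := context.Background()
	fmt.Println(physical.IsUnfencedWrite(ctx)) // false: writes are fenced by default

	// Mark a write as unfenced, e.g. while re-initialising a secondary
	// cluster before any active node lock exists.
	ctx = physical.UnfencedWriteCtx(ctx)
	fmt.Println(physical.IsUnfencedWrite(ctx)) // true
}
```
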
+ RegisterMountTablePath(path string) +} + type Lock interface { // Lock is used to acquire the given lock // The stopCh is optional and if closed should interrupt the lock @@ -86,7 +159,7 @@ type Lock interface { // Unlock is used to release the lock Unlock() error - // Returns the value of the lock and if it is held + // Returns the value of the lock and if it is held by _any_ node Value() (bool, string, error) } diff --git a/sdk/physical/testing.go b/sdk/physical/testing.go index 0c6a021d3d23..9b7d339284dd 100644 --- a/sdk/physical/testing.go +++ b/sdk/physical/testing.go @@ -9,13 +9,16 @@ import ( "sort" "testing" "time" + + "github.com/stretchr/testify/require" ) func ExerciseBackend(t testing.TB, b Backend) { t.Helper() + ctx := context.Background() // Should be empty - keys, err := b.List(context.Background(), "") + keys, err := b.List(ctx, "") if err != nil { t.Fatalf("initial list failed: %v", err) } @@ -24,13 +27,13 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete should work if it does not exist - err = b.Delete(context.Background(), "foo") + err = b.Delete(ctx, "foo") if err != nil { t.Fatalf("idempotent delete: %v", err) } // Get should not fail, but be nil - out, err := b.Get(context.Background(), "foo") + out, err := b.Get(ctx, "foo") if err != nil { t.Fatalf("initial get failed: %v", err) } @@ -40,13 +43,13 @@ func ExerciseBackend(t testing.TB, b Backend) { // Make an entry e := &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("put failed: %v", err) } // Get should work - out, err = b.Get(context.Background(), "foo") + out, err = b.Get(ctx, "foo") if err != nil { t.Fatalf("get failed: %v", err) } @@ -55,7 +58,7 @@ func ExerciseBackend(t testing.TB, b Backend) { } // List should not be empty - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list failed: %v", err) } @@ -64,13 +67,13 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete should work - err = b.Delete(context.Background(), "foo") + err = b.Delete(ctx, "foo") if err != nil { t.Fatalf("delete: %v", err) } // Should be empty - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list after delete: %v", err) } @@ -79,7 +82,7 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Get should fail - out, err = b.Get(context.Background(), "foo") + out, err = b.Get(ctx, "foo") if err != nil { t.Fatalf("get after delete: %v", err) } @@ -89,25 +92,25 @@ func ExerciseBackend(t testing.TB, b Backend) { // Multiple Puts should work; GH-189 e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("multi put 1 failed: %v", err) } e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("multi put 2 failed: %v", err) } // Make a nested entry e = &Entry{Key: "foo/bar", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("nested put failed: %v", err) } // Get should work - out, err = b.Get(context.Background(), "foo/bar") + out, err = b.Get(ctx, "foo/bar") if err != nil { t.Fatalf("get failed: %v", err) } @@ -115,7 +118,7 @@ func ExerciseBackend(t testing.TB, b Backend) { t.Errorf("bad: %v expected: %v", out, e) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list multi 
failed: %v", err) } @@ -125,13 +128,13 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete with children should work - err = b.Delete(context.Background(), "foo") + err = b.Delete(ctx, "foo") if err != nil { t.Fatalf("delete after multi: %v", err) } // Get should return the child - out, err = b.Get(context.Background(), "foo/bar") + out, err = b.Get(ctx, "foo/bar") if err != nil { t.Fatalf("get after multi delete: %v", err) } @@ -141,17 +144,17 @@ func ExerciseBackend(t testing.TB, b Backend) { // Removal of nested secret should not leave artifacts e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("deep nest: %v", err) } - err = b.Delete(context.Background(), "foo/nested1/nested2/nested3") + err = b.Delete(ctx, "foo/nested1/nested2/nested3") if err != nil { t.Fatalf("failed to remove deep nest: %v", err) } - keys, err = b.List(context.Background(), "foo/") + keys, err = b.List(ctx, "foo/") if err != nil { t.Fatalf("err: %v", err) } @@ -161,18 +164,18 @@ func ExerciseBackend(t testing.TB, b Backend) { // Make a second nested entry to test prefix removal e = &Entry{Key: "foo/zip", Value: []byte("zap")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("failed to create second nested: %v", err) } // Delete should not remove the prefix - err = b.Delete(context.Background(), "foo/bar") + err = b.Delete(ctx, "foo/bar") if err != nil { t.Fatalf("failed to delete nested prefix: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list nested prefix: %v", err) } @@ -181,12 +184,12 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete should remove the prefix - err = b.Delete(context.Background(), "foo/zip") + err = b.Delete(ctx, "foo/zip") if err != nil { t.Fatalf("failed to delete second prefix: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("listing after second delete failed: %v", err) } @@ -196,29 +199,29 @@ func ExerciseBackend(t testing.TB, b Backend) { // When the root path is empty, adding and removing deep nested values should not break listing e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("deep nest: %v", err) } e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("deep nest: %v", err) } - err = b.Delete(context.Background(), "foo/nested1/nested2/value2") + err = b.Delete(ctx, "foo/nested1/nested2/value2") if err != nil { t.Fatalf("failed to remove deep nest: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("listing of root failed after deletion: %v", err) } if len(keys) == 0 { t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys) - keys, err = b.List(context.Background(), "foo/nested1") + keys, err = b.List(ctx, "foo/nested1") if err != nil { t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err) } @@ -229,12 +232,12 @@ func ExerciseBackend(t testing.TB, b Backend) { } // cleanup left over listing bug test value - err = b.Delete(context.Background(), "foo/nested1/nested2/value1") + err = b.Delete(ctx, "foo/nested1/nested2/value1") if err != nil { t.Fatalf("failed to 
remove deep nest: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("listing of root failed after delete of deep nest: %v", err) } @@ -245,32 +248,33 @@ func ExerciseBackend(t testing.TB, b Backend) { func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { t.Helper() + ctx := context.Background() e1 := &Entry{Key: "foo", Value: []byte("test")} e2 := &Entry{Key: "foo/bar", Value: []byte("test")} e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} defer func() { - b.Delete(context.Background(), "foo") - b.Delete(context.Background(), "foo/bar") - b.Delete(context.Background(), "foo/bar/baz") + _ = b.Delete(ctx, "foo") + _ = b.Delete(ctx, "foo/bar") + _ = b.Delete(ctx, "foo/bar/baz") }() - err := b.Put(context.Background(), e1) + err := b.Put(ctx, e1) if err != nil { t.Fatalf("failed to put entry 1: %v", err) } - err = b.Put(context.Background(), e2) + err = b.Put(ctx, e2) if err != nil { t.Fatalf("failed to put entry 2: %v", err) } - err = b.Put(context.Background(), e3) + err = b.Put(ctx, e3) if err != nil { t.Fatalf("failed to put entry 3: %v", err) } // Scan the root - keys, err := b.List(context.Background(), "") + keys, err := b.List(ctx, "") if err != nil { t.Fatalf("list root: %v", err) } @@ -280,7 +284,7 @@ func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { } // Scan foo/ - keys, err = b.List(context.Background(), "foo/") + keys, err = b.List(ctx, "foo/") if err != nil { t.Fatalf("list level 1: %v", err) } @@ -290,7 +294,7 @@ func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { } // Scan foo/bar/ - keys, err = b.List(context.Background(), "foo/bar/") + keys, err = b.List(ctx, "foo/bar/") if err != nil { t.Fatalf("list level 2: %v", err) } @@ -330,12 +334,25 @@ func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { t.Errorf("expected value bar: %v", err) } + // Check if it's fencing that we can register the lock + if fba, ok := b.(FencingHABackend); ok { + require.NoError(t, fba.RegisterActiveNodeLock(lock)) + } + // Second acquisition should fail lock2, err := b2.LockWith("foo", "baz") if err != nil { t.Fatalf("lock 2: %v", err) } + // Checking the lock from b2 should discover that the lock is held since held + // implies only that there is _some_ leader not that b2 is leader (this was + // not clear before so we make it explicit with this assertion). 
+	held2, val2, err := lock2.Value()
+	require.NoError(t, err)
+	require.Equal(t, "bar", val2)
+	require.True(t, held2)
+
 	// Cancel attempt in 50 msec
 	stopCh := make(chan struct{})
 	time.AfterFunc(50*time.Millisecond, func() {
@@ -363,6 +380,11 @@ func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) {
 		t.Errorf("should get leaderCh")
 	}
 
+	// If the backend supports fencing, check that we can register the lock
+	if fba2, ok := b2.(FencingHABackend); ok {
+		require.NoError(t, fba2.RegisterActiveNodeLock(lock))
+	}
+
 	// Check the value
 	held, val, err = lock2.Value()
 	if err != nil {
@@ -381,6 +403,8 @@ func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) {
 
 func ExerciseTransactionalBackend(t testing.TB, b Backend) {
 	t.Helper()
+	ctx := context.Background()
+
 	tb, ok := b.(Transactional)
 	if !ok {
 		t.Fatal("Not a transactional backend")
@@ -388,11 +412,11 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) {
 
 	txns := SetupTestingTransactions(t, b)
 
-	if err := tb.Transaction(context.Background(), txns); err != nil {
+	if err := tb.Transaction(ctx, txns); err != nil {
 		t.Fatal(err)
 	}
 
-	keys, err := b.List(context.Background(), "")
+	keys, err := b.List(ctx, "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -405,7 +429,7 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) {
 		t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
 	}
 
-	entry, err := b.Get(context.Background(), "foo")
+	entry, err := b.Get(ctx, "foo")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -419,7 +443,7 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) {
 		t.Fatal("updates did not apply correctly")
 	}
 
-	entry, err = b.Get(context.Background(), "zip")
+	entry, err = b.Get(ctx, "zip")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -436,25 +460,27 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) {
 
 func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry {
 	t.Helper()
+	ctx := context.Background()
+
 	// Add a few keys so that we test rollback with deletion
-	if err := b.Put(context.Background(), &Entry{
+	if err := b.Put(ctx, &Entry{
 		Key:   "foo",
 		Value: []byte("bar"),
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := b.Put(context.Background(), &Entry{
+	if err := b.Put(ctx, &Entry{
 		Key:   "zip",
 		Value: []byte("zap"),
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := b.Put(context.Background(), &Entry{
+	if err := b.Put(ctx, &Entry{
 		Key: "deleteme",
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := b.Put(context.Background(), &Entry{
+	if err := b.Put(ctx, &Entry{
 		Key: "deleteme2",
 	}); err != nil {
 		t.Fatal(err)
@@ -498,3 +524,43 @@ func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry {
 
 	return txns
 }
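
The reusable test backends that follow pair with the TransactionalLimits interface defined in transactions.go below. As a sketch of the consumer side, this hypothetical `splitBatch` shows how a caller might honor those limits, falling back to the documented historic defaults when a backend returns zeros; it is illustrative, not Vault's replication code.

```go
package physicalutil

import "github.com/hashicorp/vault/sdk/physical"

const (
	defaultMaxEntries = 63         // historic Consul-derived default
	defaultMaxSize    = 128 * 1024 // 128kb
)

// splitBatch chunks txns so that each chunk stays within the backend's
// transaction limits, measuring size as key length plus value length per
// the TransactionLimits docs.
func splitBatch(b physical.TransactionalBackend, txns []*physical.TxnEntry) [][]*physical.TxnEntry {
	maxEntries, maxSize := defaultMaxEntries, defaultMaxSize
	if tl, ok := b.(physical.TransactionalLimits); ok {
		e, s := tl.TransactionLimits()
		if e > 0 {
			maxEntries = e
		}
		if s > 0 {
			maxSize = s
		}
	}

	var out [][]*physical.TxnEntry
	var cur []*physical.TxnEntry
	size := 0
	for _, txn := range txns {
		opSize := len(txn.Entry.Key) + len(txn.Entry.Value)
		// Start a new chunk when adding this op would exceed either limit.
		// An op that alone exceeds maxSize still gets its own chunk.
		if len(cur) > 0 && (len(cur)+1 > maxEntries || size+opSize > maxSize) {
			out = append(out, cur)
			cur, size = nil, 0
		}
		cur = append(cur, txn)
		size += opSize
	}
	if len(cur) > 0 {
		out = append(out, cur)
	}
	return out
}
```
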
+
+// Several tests across packages need to exercise logic against a few
+// variations of transactional backends, so define some reusable ones here
+// that are suitable for testing limits support.
+
+type TestTransactionalNonLimitBackend struct{}
+
+var _ Transactional = (*TestTransactionalNonLimitBackend)(nil)
+
+func (b *TestTransactionalNonLimitBackend) Put(ctx context.Context, entry *Entry) error {
+	return nil
+}
+
+func (b *TestTransactionalNonLimitBackend) Get(ctx context.Context, key string) (*Entry, error) {
+	return nil, nil
+}
+
+func (b *TestTransactionalNonLimitBackend) Delete(ctx context.Context, key string) error {
+	return nil
+}
+
+func (b *TestTransactionalNonLimitBackend) List(ctx context.Context, prefix string) ([]string, error) {
+	return nil, nil
+}
+
+func (b *TestTransactionalNonLimitBackend) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	return nil
+}
+
+type TestTransactionalLimitBackend struct {
+	TestTransactionalNonLimitBackend
+
+	MaxEntries, MaxSize int
+}
+
+var _ TransactionalLimits = (*TestTransactionalLimitBackend)(nil)
+
+func (b *TestTransactionalLimitBackend) TransactionLimits() (int, int) {
+	return b.MaxEntries, b.MaxSize
+}
diff --git a/sdk/physical/transactions.go b/sdk/physical/transactions.go
index 8d4e33321e2c..de91689ffc57 100644
--- a/sdk/physical/transactions.go
+++ b/sdk/physical/transactions.go
@@ -34,6 +34,35 @@ type TransactionalBackend interface {
 	Transactional
 }
 
+// TransactionalLimits SHOULD be implemented by all TransactionalBackend
+// implementations. It is separate for backwards compatibility reasons since
+// this is in a public SDK module. If a TransactionalBackend does not implement
+// this, the historic default limits of 63 entries and 128kb (based on Consul's
+// limits) are used by replication internals when encoding batches of
+// transactions.
+type TransactionalLimits interface {
+	TransactionalBackend
+
+	// TransactionLimits must return the limits of how large each transaction may
+	// be. The limits returned indicate how many individual operation entries are
+	// supported in total and an overall size limit on the contents of each
+	// transaction if applicable. Vault will deduct any meta-operations it needs
+	// to add from the maxEntries given. maxSize will be compared against the sum
+	// of the key and value sizes for all operations in a transaction. The backend
+	// should provide a reasonable margin of safety for any overhead it may have
+	// while encoding; for example, Consul's encoded transaction in JSON must fit
+	// in the configured max transaction size, so it must leave adequate room for
+	// JSON encoding overhead on top of the raw key and value sizes.
+	//
+	// If zero is returned for either value, the replication internals will use
+	// historic reasonable defaults. This allows middleware implementations such
+	// as cache layers to either pass through to the underlying backend if it
+	// implements this interface, or to return zeros to indicate that the
+	// implementer should apply whatever defaults it would use if the middleware
+	// were not present.
+	TransactionLimits() (maxEntries int, maxSize int)
+}
+
 type PseudoTransactional interface {
 	// An internal function should do no locking or permit pool acquisition.
// Depending on the backend and if it natively supports transactions, these diff --git a/sdk/plugin/backend.go b/sdk/plugin/backend.go index 2da1378eaa91..7ee591657ceb 100644 --- a/sdk/plugin/backend.go +++ b/sdk/plugin/backend.go @@ -7,13 +7,12 @@ import ( "context" "sync/atomic" - "google.golang.org/grpc" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" ) var ( diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go index a343356d19d8..4e92ad13ec58 100644 --- a/sdk/plugin/grpc_backend_client.go +++ b/sdk/plugin/grpc_backend_client.go @@ -25,7 +25,7 @@ var ( ) // Validate backendGRPCPluginClient satisfies the logical.Backend interface -var _ logical.Backend = &backendGRPCPluginClient{} +var _ logical.Backend = (*backendGRPCPluginClient)(nil) // backendPluginClient implements logical.Backend and is the // go-plugin client. @@ -183,17 +183,21 @@ func (b *backendGRPCPluginClient) Cleanup(ctx context.Context) { defer close(quitCh) defer cancel() - b.client.Cleanup(ctx, &pb.Empty{}) - - // This will block until Setup has run the function to create a new server - // in b.server. If we stop here before it has a chance to actually start - // listening, when it starts listening it will immediately error out and - // exit, which is fine. Overall this ensures that we do not miss stopping - // the server if it ends up being created after Cleanup is called. - <-b.cleanupCh + // Only wait on graceful cleanup if we can establish communication with the + // plugin, otherwise b.cleanupCh may never get closed. + if _, err := b.client.Cleanup(ctx, &pb.Empty{}); status.Code(err) != codes.Unavailable { + // This will block until Setup has run the function to create a new server + // in b.server. If we stop here before it has a chance to actually start + // listening, when it starts listening it will immediately error out and + // exit, which is fine. Overall this ensures that we do not miss stopping + // the server if it ends up being created after Cleanup is called. 
+ select { + case <-b.cleanupCh: + } + } server := b.server.Load() - if server != nil { - server.(*grpc.Server).GracefulStop() + if grpcServer, ok := server.(*grpc.Server); ok && grpcServer != nil { + grpcServer.GracefulStop() } } diff --git a/sdk/plugin/grpc_backend_server.go b/sdk/plugin/grpc_backend_server.go index 3356e463c8ad..b537d41bb550 100644 --- a/sdk/plugin/grpc_backend_server.go +++ b/sdk/plugin/grpc_backend_server.go @@ -196,6 +196,8 @@ func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Emp LocalStorage: paths.LocalStorage, SealWrapStorage: paths.SealWrapStorage, WriteForwardedStorage: paths.WriteForwardedStorage, + Binary: paths.Binary, + Limited: paths.Limited, }, }, nil } diff --git a/sdk/plugin/grpc_backend_test.go b/sdk/plugin/grpc_backend_test.go index 01a6ea609f7d..880f09930feb 100644 --- a/sdk/plugin/grpc_backend_test.go +++ b/sdk/plugin/grpc_backend_test.go @@ -177,7 +177,7 @@ func testGRPCBackend(t *testing.T) (logical.Backend, func()) { }), }, } - client, _ := gplugin.TestPluginGRPCConn(t, pluginMap) + client, _ := gplugin.TestPluginGRPCConn(t, false, pluginMap) cleanup := func() { client.Close() } diff --git a/sdk/plugin/grpc_events.go b/sdk/plugin/grpc_events.go index 05d788c66cd0..3a4d50cc9387 100644 --- a/sdk/plugin/grpc_events.go +++ b/sdk/plugin/grpc_events.go @@ -23,7 +23,7 @@ type GRPCEventsClient struct { var _ logical.EventSender = (*GRPCEventsClient)(nil) -func (s *GRPCEventsClient) Send(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { +func (s *GRPCEventsClient) SendEvent(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { _, err := s.client.SendEvent(ctx, &pb.SendEventRequest{ EventType: string(eventType), Event: event, @@ -41,7 +41,7 @@ func (s *GRPCEventsServer) SendEvent(ctx context.Context, req *pb.SendEventReque return &pb.Empty{}, nil } - err := s.impl.Send(ctx, logical.EventType(req.EventType), req.Event) + err := s.impl.SendEvent(ctx, logical.EventType(req.EventType), req.Event) if err != nil { return nil, err } diff --git a/sdk/plugin/grpc_storage.go b/sdk/plugin/grpc_storage.go index 5c2f0de3f4f0..638f4b0dd7a0 100644 --- a/sdk/plugin/grpc_storage.go +++ b/sdk/plugin/grpc_storage.go @@ -7,10 +7,9 @@ import ( "context" "errors" - "google.golang.org/grpc" - "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" ) var errMissingStorage = errors.New("missing storage implementation: this method should not be called during plugin Setup, but only during and after Initialize") diff --git a/sdk/plugin/grpc_system.go b/sdk/plugin/grpc_system.go index bf4537bd58f7..d907e60eac6f 100644 --- a/sdk/plugin/grpc_system.go +++ b/sdk/plugin/grpc_system.go @@ -211,6 +211,21 @@ func (s gRPCSystemViewClient) ClusterID(ctx context.Context) (string, error) { return reply.ClusterID, nil } +func (s *gRPCSystemViewClient) GenerateIdentityToken(ctx context.Context, req *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + resp, err := s.client.GenerateIdentityToken(ctx, &pb.GenerateIdentityTokenRequest{ + Audience: req.Audience, + TTL: int64(req.TTL.Seconds()), + }) + if err != nil { + return nil, err + } + + return &pluginutil.IdentityTokenResponse{ + Token: pluginutil.IdentityToken(resp.Token), + TTL: time.Duration(resp.TTL) * time.Second, + }, nil +} + type gRPCSystemViewServer struct { pb.UnimplementedSystemViewServer @@ -394,3 +409,23 @@ func (s *gRPCSystemViewServer) ClusterInfo(ctx 
context.Context, _ *pb.Empty) (*p ClusterID: clusterId, }, nil } + +func (s *gRPCSystemViewServer) GenerateIdentityToken(ctx context.Context, req *pb.GenerateIdentityTokenRequest) (*pb.GenerateIdentityTokenResponse, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + + res, err := s.impl.GenerateIdentityToken(ctx, &pluginutil.IdentityTokenRequest{ + Audience: req.GetAudience(), + TTL: time.Duration(req.GetTTL()) * time.Second, + }) + if err != nil { + return &pb.GenerateIdentityTokenResponse{}, status.Errorf(codes.Internal, + err.Error()) + } + + return &pb.GenerateIdentityTokenResponse{ + Token: res.Token.Token(), + TTL: int64(res.TTL.Seconds()), + }, nil +} diff --git a/sdk/plugin/mock/backend.go b/sdk/plugin/mock/backend.go index 9b3aa2c851e2..b34191b938a3 100644 --- a/sdk/plugin/mock/backend.go +++ b/sdk/plugin/mock/backend.go @@ -5,13 +5,19 @@ package mock import ( "context" + "fmt" "os" + "testing" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) -const MockPluginVersionEnv = "TESTING_MOCK_VAULT_PLUGIN_VERSION" +const ( + MockPluginVersionEnv = "TESTING_MOCK_VAULT_PLUGIN_VERSION" + MockPluginDefaultInternalValue = "bar" +) // New returns a new backend as an interface. This func // is only necessary for builtin backend plugins. @@ -53,6 +59,7 @@ func Backend() *backend { pathInternal(&b), pathSpecial(&b), pathRaw(&b), + pathEnv(&b), }, ), PathsSpecial: &logical.Paths{ @@ -64,7 +71,7 @@ func Backend() *backend { Invalidate: b.invalidate, BackendType: logical.TypeLogical, } - b.internal = "bar" + b.internal = MockPluginDefaultInternalValue b.RunningVersion = "v0.0.0+mock" if version := os.Getenv(MockPluginVersionEnv); version != "" { b.RunningVersion = version @@ -75,7 +82,7 @@ func Backend() *backend { type backend struct { *framework.Backend - // internal is used to test invalidate + // internal is used to test invalidate and reloads. internal string } @@ -85,3 +92,39 @@ func (b *backend) invalidate(ctx context.Context, key string) { b.internal = "" } } + +// WriteInternalValue is a helper to set an in-memory value in the plugin, +// allowing tests to later assert that the plugin either has or hasn't been +// restarted. +func WriteInternalValue(t *testing.T, client *api.Client, mountPath, value string) { + t.Helper() + resp, err := client.Logical().Write(fmt.Sprintf("%s/internal", mountPath), map[string]interface{}{ + "value": value, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } +} + +// ExpectInternalValue checks the internal in-memory value. +func ExpectInternalValue(t *testing.T, client *api.Client, mountPath, expected string) { + t.Helper() + expectInternalValue(t, client, mountPath, expected) +} + +func expectInternalValue(t *testing.T, client *api.Client, mountPath, expected string) { + t.Helper() + resp, err := client.Logical().Read(fmt.Sprintf("%s/internal", mountPath)) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("bad: response should not be nil") + } + if resp.Data["value"].(string) != expected { + t.Fatalf("expected %q but got %q", expected, resp.Data["value"].(string)) + } +} diff --git a/sdk/plugin/mock/path_env.go b/sdk/plugin/mock/path_env.go new file mode 100644 index 000000000000..18b4b71ccc32 --- /dev/null +++ b/sdk/plugin/mock/path_env.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. 
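
A hypothetical helper built on the two exported mock helpers above, illustrating the restart-detection idea: a sentinel written into the plugin's in-memory state survives a plugin that kept running, while a restarted plugin re-initializes `internal` to MockPluginDefaultInternalValue. `assertPluginRestarted` and `op` are illustrative names, not part of this change.

```go
package mocktest

import (
	"testing"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/plugin/mock"
)

// assertPluginRestarted writes a sentinel into the mock plugin's in-memory
// state, runs the operation under test, then asserts the state reset to the
// default, which only happens if the plugin process was restarted and
// Backend() ran again.
func assertPluginRestarted(t *testing.T, client *api.Client, mountPath string, op func()) {
	t.Helper()
	mock.WriteInternalValue(t, client, mountPath, "sentinel")
	op()
	mock.ExpectInternalValue(t, client, mountPath, mock.MockPluginDefaultInternalValue)
}
```
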
+// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "context" + "os" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathEnv is used to interrogate plugin env vars. +func pathEnv(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "env/" + framework.GenericNameRegex("key"), + Fields: map[string]*framework.FieldSchema{ + "key": { + Type: framework.TypeString, + Required: true, + Description: "The name of the environment variable to read.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathEnvRead, + }, + } +} + +func (b *backend) pathEnvRead(_ context.Context, _ *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "key": os.Getenv(data.Get("key").(string)), + }, + }, nil +} diff --git a/sdk/plugin/mock/path_errors.go b/sdk/plugin/mock/path_errors.go index f5e5b124fcb2..9c765c67ad32 100644 --- a/sdk/plugin/mock/path_errors.go +++ b/sdk/plugin/mock/path_errors.go @@ -36,7 +36,6 @@ func errorPaths(b *backend) []*framework.Path { "err_type": {Type: framework.TypeInt}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.CreateOperation: b.pathErrorRPCRead, logical.UpdateOperation: b.pathErrorRPCRead, }, }, diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go index 82bbae2fd230..4c28e80b1378 100644 --- a/sdk/plugin/pb/backend.pb.go +++ b/sdk/plugin/pb/backend.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc (unknown) // source: sdk/plugin/pb/backend.proto package pb @@ -208,6 +208,14 @@ type Paths struct { // // See extended note in /sdk/logical/logical.go. WriteForwardedStorage []string `protobuf:"bytes,5,rep,name=write_forwarded_storage,json=writeForwardedStorage,proto3" json:"write_forwarded_storage,omitempty"` + // Binary are paths whose request bodies are binary, not JSON + // + // See note in /sdk/logical/logical.go. + Binary []string `protobuf:"bytes,6,rep,name=binary,proto3" json:"binary,omitempty"` + // Limited paths are storage paths that require special-case request limiting. + // + // See note in /sdk/logical/logical.go. + Limited []string `protobuf:"bytes,7,rep,name=limited,proto3" json:"limited,omitempty"` } func (x *Paths) Reset() { @@ -277,6 +285,20 @@ func (x *Paths) GetWriteForwardedStorage() []string { return nil } +func (x *Paths) GetBinary() []string { + if x != nil { + return x.Binary + } + return nil +} + +func (x *Paths) GetLimited() []string { + if x != nil { + return x.Limited + } + return nil +} + type Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1139,6 +1161,9 @@ type Response struct { // be used in the audit broker to ensure we are auditing only the allowed // headers. Headers map[string]*Header `protobuf:"bytes,7,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. 
+ MountType string `protobuf:"bytes,8,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` } func (x *Response) Reset() { @@ -1222,6 +1247,13 @@ func (x *Response) GetHeaders() map[string]*Header { return nil } +func (x *Response) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + type ResponseWrapInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3214,6 +3246,116 @@ func (x *ClusterInfoReply) GetErr() string { return "" } +type GenerateIdentityTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Audience string `protobuf:"bytes,1,opt,name=audience,proto3" json:"audience,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *GenerateIdentityTokenRequest) Reset() { + *x = GenerateIdentityTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateIdentityTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateIdentityTokenRequest) ProtoMessage() {} + +func (x *GenerateIdentityTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateIdentityTokenRequest.ProtoReflect.Descriptor instead. +func (*GenerateIdentityTokenRequest) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} +} + +func (x *GenerateIdentityTokenRequest) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +func (x *GenerateIdentityTokenRequest) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +type GenerateIdentityTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *GenerateIdentityTokenResponse) Reset() { + *x = GenerateIdentityTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateIdentityTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateIdentityTokenResponse) ProtoMessage() {} + +func (x *GenerateIdentityTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateIdentityTokenResponse.ProtoReflect.Descriptor instead. 
+func (*GenerateIdentityTokenResponse) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} +} + +func (x *GenerateIdentityTokenResponse) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *GenerateIdentityTokenResponse) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + type Connection struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3231,7 +3373,7 @@ type Connection struct { func (x *Connection) Reset() { *x = Connection{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3244,7 +3386,7 @@ func (x *Connection) String() string { func (*Connection) ProtoMessage() {} func (x *Connection) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3257,7 +3399,7 @@ func (x *Connection) ProtoReflect() protoreflect.Message { // Deprecated: Use Connection.ProtoReflect.Descriptor instead. func (*Connection) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} } func (x *Connection) GetRemoteAddr() string { @@ -3303,7 +3445,7 @@ type ConnectionState struct { func (x *ConnectionState) Reset() { *x = ConnectionState{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3316,7 +3458,7 @@ func (x *ConnectionState) String() string { func (*ConnectionState) ProtoMessage() {} func (x *ConnectionState) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3329,7 +3471,7 @@ func (x *ConnectionState) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectionState.ProtoReflect.Descriptor instead. func (*ConnectionState) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{50} } func (x *ConnectionState) GetVersion() uint32 { @@ -3427,7 +3569,7 @@ type Certificate struct { func (x *Certificate) Reset() { *x = Certificate{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3440,7 +3582,7 @@ func (x *Certificate) String() string { func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3453,7 +3595,7 @@ func (x *Certificate) ProtoReflect() protoreflect.Message { // Deprecated: Use Certificate.ProtoReflect.Descriptor instead. 
func (*Certificate) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{51} } func (x *Certificate) GetAsn1Data() []byte { @@ -3474,7 +3616,7 @@ type CertificateChain struct { func (x *CertificateChain) Reset() { *x = CertificateChain{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3487,7 +3629,7 @@ func (x *CertificateChain) String() string { func (*CertificateChain) ProtoMessage() {} func (x *CertificateChain) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3500,7 +3642,7 @@ func (x *CertificateChain) ProtoReflect() protoreflect.Message { // Deprecated: Use CertificateChain.ProtoReflect.Descriptor instead. func (*CertificateChain) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{50} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{52} } func (x *CertificateChain) GetCertificates() []*Certificate { @@ -3522,7 +3664,7 @@ type SendEventRequest struct { func (x *SendEventRequest) Reset() { *x = SendEventRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3535,7 +3677,7 @@ func (x *SendEventRequest) String() string { func (*SendEventRequest) ProtoMessage() {} func (x *SendEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3548,7 +3690,7 @@ func (x *SendEventRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SendEventRequest.ProtoReflect.Descriptor instead. 
func (*SendEventRequest) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{51} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{53} } func (x *SendEventRequest) GetEventType() string { @@ -3585,7 +3727,7 @@ var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x22, 0xce, 0x01, 0x0a, 0x05, 0x50, 0x61, + 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, @@ -3598,519 +3740,539 @@ var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ 0x67, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x77, 0x72, 0x69, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, - 0x64, 0x65, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0xbf, 0x06, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, - 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x32, - 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x09, 0x52, 
0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, - 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, - 0x75, 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, - 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x05, 0x0a, - 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, - 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 
0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x12, 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, - 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, - 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, + 0x64, 0x65, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, + 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x22, 0xbf, 0x06, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, + 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, + 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, + 0x12, 0x32, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 
0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, + 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, + 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, + 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, + 0x05, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x6e, 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x18, 0x0e, 0x20, 0x03, 
0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, - 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, - 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x1d, 0x0a, - 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, - 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, - 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, - 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, 0x6d, - 0x55, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, - 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 
0x72, 0x6f, 0x6c, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0f, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, - 0x6f, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, - 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, - 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 
0x65, 0x6e, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x61, - 0x78, 0x54, 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x35, - 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x49, 0x64, 0x22, 0xc8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, - 0x61, 0x75, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xc8, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, - 0x70, 
0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, 0x72, - 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, - 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, - 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, - 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, - 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, - 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x60, 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x72, 
0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x41, 0x72, 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, 0x65, - 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, - 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, - 0x60, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x73, 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, + 0x0a, 0x11, 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 
0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, + 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, + 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, + 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, + 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, + 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, + 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, + 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 
0x74, 0x69, 0x74, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, + 0x77, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, + 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x64, 0x22, 0xe7, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, + 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x64, 
0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, + 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xc8, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, + 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 
0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, + 0x4c, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, + 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, + 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, - 0x0a, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, 0x65, - 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, 0x6b, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, - 0x72, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x0c, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, - 0x70, 0x22, 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, - 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, 0x10, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, - 0x6b, 0x65, 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, - 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, - 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x22, 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, 0x0a, - 0x12, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x74, 0x22, 0x60, 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, + 0x65, 0x72, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 
0x69, 0x7a, + 0x65, 0x41, 0x72, 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, + 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x1f, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, + 0x22, 0x60, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, + 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, + 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, + 0x41, 0x72, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x54, 0x54, 
0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, 0x0a, - 0x14, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x22, 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x22, 0x4e, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, - 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, - 0x54, 0x54, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x10, - 0x0a, 0x03, 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, 0x54, - 0x22, 0x5c, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, - 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, - 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, - 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2d, - 0x0a, 0x11, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, 0x0a, - 0x0f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, - 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x65, 0x72, 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, - 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 
0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, - 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, - 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, - 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x66, 0x0a, 0x10, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, - 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, - 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 
0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, - 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, - 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, - 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x10, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, - 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x1b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x6c, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, - 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 0x31, 0x44, 0x61, 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 
0x65, 0x70, + 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, + 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, + 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, + 0x61, 0x70, 0x22, 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, + 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, + 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, + 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x22, 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, + 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x03, 0x54, 0x54, 0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x70, 
0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, + 0x0a, 0x14, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x22, 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x22, 0x4e, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x54, 0x54, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, + 0x10, 0x0a, 0x03, 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, + 0x54, 0x22, 0x5c, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, + 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x2d, 0x0a, 0x11, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, + 0x0a, 0x0f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x65, 0x72, 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, + 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, + 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x12, 0x10, 
0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x45, 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x66, 0x0a, 0x10, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x4c, 0x0a, 0x1c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x22, 0x47, 0x0a, 0x1d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, + 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, + 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x5f, 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, + 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x10, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 
0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x32, 0xa5, 0x03, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, - 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, - 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, - 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x52, 0x0e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, + 0x12, 0x42, 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, + 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, + 0x6c, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 0x31, + 0x44, 0x61, 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x10, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x28, 
0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x32, 0xa5, 0x03, 0x0a, 0x07, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, + 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, + 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, + 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, - 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, - 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, - 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, - 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, - 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x62, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, - 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, - 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, - 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, - 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, - 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, - 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 
0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x32, 0xe1, 0x05, 0x0a, 0x0a, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, - 0x77, 0x12, 0x2a, 0x0a, 0x0f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, + 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x1d, + 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, + 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x31, + 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, + 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, + 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x31, + 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, + 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xbf, 0x06, 0x0a, 0x0a, 0x53, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, 0x77, 0x12, 
0x2a, 0x0a, 0x0f, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, + 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x0b, 0x4d, 0x61, 0x78, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, - 0x0b, 0x4d, 0x61, 0x78, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, - 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, - 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, - 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x62, - 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, - 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, - 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, - 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, - 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0f, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, + 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 
0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, 0x0a, + 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, + 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, 0x62, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, - 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, - 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0x36, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x2c, 0x0a, 0x09, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2a, - 0x5a, 0x28, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, - 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, + 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, + 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x09, 0x2e, + 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x5c, + 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x62, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x36, 0x0a, 0x06, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4125,8 +4287,8 @@ func file_sdk_plugin_pb_backend_proto_rawDescGZIP() []byte { return file_sdk_plugin_pb_backend_proto_rawDescData } -var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 58) -var file_sdk_plugin_pb_backend_proto_goTypes = 
[]interface{}{ +var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_sdk_plugin_pb_backend_proto_goTypes = []any{ (*Empty)(nil), // 0: pb.Empty (*Header)(nil), // 1: pb.Header (*ProtoError)(nil), // 2: pb.ProtoError @@ -4174,43 +4336,45 @@ var file_sdk_plugin_pb_backend_proto_goTypes = []interface{}{ (*GeneratePasswordFromPolicyRequest)(nil), // 44: pb.GeneratePasswordFromPolicyRequest (*GeneratePasswordFromPolicyReply)(nil), // 45: pb.GeneratePasswordFromPolicyReply (*ClusterInfoReply)(nil), // 46: pb.ClusterInfoReply - (*Connection)(nil), // 47: pb.Connection - (*ConnectionState)(nil), // 48: pb.ConnectionState - (*Certificate)(nil), // 49: pb.Certificate - (*CertificateChain)(nil), // 50: pb.CertificateChain - (*SendEventRequest)(nil), // 51: pb.SendEventRequest - nil, // 52: pb.Request.HeadersEntry - nil, // 53: pb.Auth.MetadataEntry - nil, // 54: pb.TokenEntry.MetaEntry - nil, // 55: pb.TokenEntry.InternalMetaEntry - nil, // 56: pb.Response.HeadersEntry - nil, // 57: pb.SetupArgs.ConfigEntry - (*logical.Alias)(nil), // 58: logical.Alias - (*timestamppb.Timestamp)(nil), // 59: google.protobuf.Timestamp - (*logical.Entity)(nil), // 60: logical.Entity - (*logical.Group)(nil), // 61: logical.Group - (*logical.PluginEnvironment)(nil), // 62: logical.PluginEnvironment - (*logical.EventData)(nil), // 63: logical.EventData + (*GenerateIdentityTokenRequest)(nil), // 47: pb.GenerateIdentityTokenRequest + (*GenerateIdentityTokenResponse)(nil), // 48: pb.GenerateIdentityTokenResponse + (*Connection)(nil), // 49: pb.Connection + (*ConnectionState)(nil), // 50: pb.ConnectionState + (*Certificate)(nil), // 51: pb.Certificate + (*CertificateChain)(nil), // 52: pb.CertificateChain + (*SendEventRequest)(nil), // 53: pb.SendEventRequest + nil, // 54: pb.Request.HeadersEntry + nil, // 55: pb.Auth.MetadataEntry + nil, // 56: pb.TokenEntry.MetaEntry + nil, // 57: pb.TokenEntry.InternalMetaEntry + nil, // 58: pb.Response.HeadersEntry + nil, // 59: pb.SetupArgs.ConfigEntry + (*logical.Alias)(nil), // 60: logical.Alias + (*timestamppb.Timestamp)(nil), // 61: google.protobuf.Timestamp + (*logical.Entity)(nil), // 62: logical.Entity + (*logical.Group)(nil), // 63: logical.Group + (*logical.PluginEnvironment)(nil), // 64: logical.PluginEnvironment + (*logical.EventData)(nil), // 65: logical.EventData } var file_sdk_plugin_pb_backend_proto_depIdxs = []int32{ 8, // 0: pb.Request.secret:type_name -> pb.Secret 5, // 1: pb.Request.auth:type_name -> pb.Auth - 52, // 2: pb.Request.headers:type_name -> pb.Request.HeadersEntry + 54, // 2: pb.Request.headers:type_name -> pb.Request.HeadersEntry 11, // 3: pb.Request.wrap_info:type_name -> pb.RequestWrapInfo - 47, // 4: pb.Request.connection:type_name -> pb.Connection + 49, // 4: pb.Request.connection:type_name -> pb.Connection 7, // 5: pb.Auth.lease_options:type_name -> pb.LeaseOptions - 53, // 6: pb.Auth.metadata:type_name -> pb.Auth.MetadataEntry - 58, // 7: pb.Auth.alias:type_name -> logical.Alias - 58, // 8: pb.Auth.group_aliases:type_name -> logical.Alias - 54, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry - 55, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry - 59, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp + 55, // 6: pb.Auth.metadata:type_name -> pb.Auth.MetadataEntry + 60, // 7: pb.Auth.alias:type_name -> logical.Alias + 60, // 8: pb.Auth.group_aliases:type_name -> logical.Alias + 56, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry +
57, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry + 61, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp 7, // 12: pb.Secret.lease_options:type_name -> pb.LeaseOptions 8, // 13: pb.Response.secret:type_name -> pb.Secret 5, // 14: pb.Response.auth:type_name -> pb.Auth 10, // 15: pb.Response.wrap_info:type_name -> pb.ResponseWrapInfo - 56, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry - 59, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp + 58, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry + 61, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp 4, // 18: pb.HandleRequestArgs.request:type_name -> pb.Request 9, // 19: pb.HandleRequestReply.response:type_name -> pb.Response 2, // 20: pb.HandleRequestReply.err:type_name -> pb.ProtoError @@ -4218,18 +4382,18 @@ var file_sdk_plugin_pb_backend_proto_depIdxs = []int32{ 3, // 22: pb.SpecialPathsReply.paths:type_name -> pb.Paths 4, // 23: pb.HandleExistenceCheckArgs.request:type_name -> pb.Request 2, // 24: pb.HandleExistenceCheckReply.err:type_name -> pb.ProtoError - 57, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry + 59, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry 23, // 26: pb.StorageGetReply.entry:type_name -> pb.StorageEntry 23, // 27: pb.StoragePutArgs.entry:type_name -> pb.StorageEntry 10, // 28: pb.ResponseWrapDataReply.wrap_info:type_name -> pb.ResponseWrapInfo - 60, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity - 61, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group - 62, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment - 48, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState - 50, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain - 50, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain - 49, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate - 63, // 36: pb.SendEventRequest.event:type_name -> logical.EventData + 62, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity + 63, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group + 64, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment + 50, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState + 52, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain + 52, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain + 51, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate + 65, // 36: pb.SendEventRequest.event:type_name -> logical.EventData 1, // 37: pb.Request.HeadersEntry.value:type_name -> pb.Header 1, // 38: pb.Response.HeadersEntry.value:type_name -> pb.Header 12, // 39: pb.Backend.HandleRequest:input_type -> pb.HandleRequestArgs @@ -4257,35 +4421,37 @@ var file_sdk_plugin_pb_backend_proto_depIdxs = []int32{ 40, // 61: pb.SystemView.GroupsForEntity:input_type -> pb.EntityInfoArgs 44, // 62: pb.SystemView.GeneratePasswordFromPolicy:input_type -> pb.GeneratePasswordFromPolicyRequest 0, // 63: pb.SystemView.ClusterInfo:input_type -> pb.Empty - 51, // 64: pb.Events.SendEvent:input_type -> pb.SendEventRequest - 13, // 65: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply - 16, // 66: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply - 18, // 67: pb.Backend.HandleExistenceCheck:output_type ->
pb.HandleExistenceCheckReply - 0, // 68: pb.Backend.Cleanup:output_type -> pb.Empty - 0, // 69: pb.Backend.InvalidateKey:output_type -> pb.Empty - 20, // 70: pb.Backend.Setup:output_type -> pb.SetupReply - 15, // 71: pb.Backend.Initialize:output_type -> pb.InitializeReply - 21, // 72: pb.Backend.Type:output_type -> pb.TypeReply - 25, // 73: pb.Storage.List:output_type -> pb.StorageListReply - 27, // 74: pb.Storage.Get:output_type -> pb.StorageGetReply - 29, // 75: pb.Storage.Put:output_type -> pb.StoragePutReply - 31, // 76: pb.Storage.Delete:output_type -> pb.StorageDeleteReply - 32, // 77: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply - 32, // 78: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply - 33, // 79: pb.SystemView.Tainted:output_type -> pb.TaintedReply - 34, // 80: pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply - 35, // 81: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply - 37, // 82: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply - 38, // 83: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply - 39, // 84: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply - 41, // 85: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply - 43, // 86: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply - 42, // 87: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply - 45, // 88: pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply - 46, // 89: pb.SystemView.ClusterInfo:output_type -> pb.ClusterInfoReply - 0, // 90: pb.Events.SendEvent:output_type -> pb.Empty - 65, // [65:91] is the sub-list for method output_type - 39, // [39:65] is the sub-list for method input_type + 47, // 64: pb.SystemView.GenerateIdentityToken:input_type -> pb.GenerateIdentityTokenRequest + 53, // 65: pb.Events.SendEvent:input_type -> pb.SendEventRequest + 13, // 66: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply + 16, // 67: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply + 18, // 68: pb.Backend.HandleExistenceCheck:output_type -> pb.HandleExistenceCheckReply + 0, // 69: pb.Backend.Cleanup:output_type -> pb.Empty + 0, // 70: pb.Backend.InvalidateKey:output_type -> pb.Empty + 20, // 71: pb.Backend.Setup:output_type -> pb.SetupReply + 15, // 72: pb.Backend.Initialize:output_type -> pb.InitializeReply + 21, // 73: pb.Backend.Type:output_type -> pb.TypeReply + 25, // 74: pb.Storage.List:output_type -> pb.StorageListReply + 27, // 75: pb.Storage.Get:output_type -> pb.StorageGetReply + 29, // 76: pb.Storage.Put:output_type -> pb.StoragePutReply + 31, // 77: pb.Storage.Delete:output_type -> pb.StorageDeleteReply + 32, // 78: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply + 32, // 79: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply + 33, // 80: pb.SystemView.Tainted:output_type -> pb.TaintedReply + 34, // 81: pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply + 35, // 82: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply + 37, // 83: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply + 38, // 84: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply + 39, // 85: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply + 41, // 86: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply + 43, // 87: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply + 42, // 88: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply + 45, // 89: 
pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply + 46, // 90: pb.SystemView.ClusterInfo:output_type -> pb.ClusterInfoReply + 48, // 91: pb.SystemView.GenerateIdentityToken:output_type -> pb.GenerateIdentityTokenResponse + 0, // 92: pb.Events.SendEvent:output_type -> pb.Empty + 66, // [66:93] is the sub-list for method output_type + 39, // [39:66] is the sub-list for method input_type 39, // [39:39] is the sub-list for extension type_name 39, // [39:39] is the sub-list for extension extendee 0, // [0:39] is the sub-list for field type_name @@ -4297,7 +4463,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_sdk_plugin_pb_backend_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state @@ -4309,7 +4475,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Header); i { case 0: return &v.state @@ -4321,7 +4487,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ProtoError); i { case 0: return &v.state @@ -4333,7 +4499,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Paths); i { case 0: return &v.state @@ -4345,7 +4511,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Request); i { case 0: return &v.state @@ -4357,7 +4523,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Auth); i { case 0: return &v.state @@ -4369,7 +4535,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*TokenEntry); i { case 0: return &v.state @@ -4381,7 +4547,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*LeaseOptions); i { case 0: return &v.state @@ -4393,7 +4559,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Secret); i { case 0: return &v.state @@ -4405,7 +4571,7 @@ func 
file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*Response); i { case 0: return &v.state @@ -4417,7 +4583,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ResponseWrapInfo); i { case 0: return &v.state @@ -4429,7 +4595,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*RequestWrapInfo); i { case 0: return &v.state @@ -4441,7 +4607,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*HandleRequestArgs); i { case 0: return &v.state @@ -4453,7 +4619,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*HandleRequestReply); i { case 0: return &v.state @@ -4465,7 +4631,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*InitializeArgs); i { case 0: return &v.state @@ -4477,7 +4643,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*InitializeReply); i { case 0: return &v.state @@ -4489,7 +4655,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*SpecialPathsReply); i { case 0: return &v.state @@ -4501,7 +4667,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*HandleExistenceCheckArgs); i { case 0: return &v.state @@ -4513,7 +4679,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*HandleExistenceCheckReply); i { case 0: return &v.state @@ -4525,7 +4691,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[19].Exporter = func(v any, i int) 
any { switch v := v.(*SetupArgs); i { case 0: return &v.state @@ -4537,7 +4703,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*SetupReply); i { case 0: return &v.state @@ -4549,7 +4715,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*TypeReply); i { case 0: return &v.state @@ -4561,7 +4727,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*InvalidateKeyArgs); i { case 0: return &v.state @@ -4573,7 +4739,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*StorageEntry); i { case 0: return &v.state @@ -4585,7 +4751,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*StorageListArgs); i { case 0: return &v.state @@ -4597,7 +4763,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*StorageListReply); i { case 0: return &v.state @@ -4609,7 +4775,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*StorageGetArgs); i { case 0: return &v.state @@ -4621,7 +4787,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*StorageGetReply); i { case 0: return &v.state @@ -4633,7 +4799,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*StoragePutArgs); i { case 0: return &v.state @@ -4645,7 +4811,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*StoragePutReply); i { case 0: return &v.state @@ -4657,7 +4823,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + 
file_sdk_plugin_pb_backend_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*StorageDeleteArgs); i { case 0: return &v.state @@ -4669,7 +4835,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*StorageDeleteReply); i { case 0: return &v.state @@ -4681,7 +4847,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*TTLReply); i { case 0: return &v.state @@ -4693,7 +4859,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[33].Exporter = func(v any, i int) any { switch v := v.(*TaintedReply); i { case 0: return &v.state @@ -4705,7 +4871,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*CachingDisabledReply); i { case 0: return &v.state @@ -4717,7 +4883,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*ReplicationStateReply); i { case 0: return &v.state @@ -4729,7 +4895,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*ResponseWrapDataArgs); i { case 0: return &v.state @@ -4741,7 +4907,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[37].Exporter = func(v any, i int) any { switch v := v.(*ResponseWrapDataReply); i { case 0: return &v.state @@ -4753,7 +4919,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[38].Exporter = func(v any, i int) any { switch v := v.(*MlockEnabledReply); i { case 0: return &v.state @@ -4765,7 +4931,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*LocalMountReply); i { case 0: return &v.state @@ -4777,7 +4943,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*EntityInfoArgs); i { case 0: return &v.state @@ -4789,7 +4955,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - 
file_sdk_plugin_pb_backend_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*EntityInfoReply); i { case 0: return &v.state @@ -4801,7 +4967,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*GroupsForEntityReply); i { case 0: return &v.state @@ -4813,7 +4979,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*PluginEnvReply); i { case 0: return &v.state @@ -4825,7 +4991,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*GeneratePasswordFromPolicyRequest); i { case 0: return &v.state @@ -4837,7 +5003,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*GeneratePasswordFromPolicyReply); i { case 0: return &v.state @@ -4849,7 +5015,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*ClusterInfoReply); i { case 0: return &v.state @@ -4861,7 +5027,31 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[47].Exporter = func(v any, i int) any { + switch v := v.(*GenerateIdentityTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[48].Exporter = func(v any, i int) any { + switch v := v.(*GenerateIdentityTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*Connection); i { case 0: return &v.state @@ -4873,7 +5063,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[50].Exporter = func(v any, i int) any { switch v := v.(*ConnectionState); i { case 0: return &v.state @@ -4885,7 +5075,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[51].Exporter = func(v any, i int) any { switch v := v.(*Certificate); i { case 0: return &v.state @@ -4897,7 +5087,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - 
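The long run of exporter rewrites above is purely mechanical: since Go 1.18, `any` is a predeclared alias for `interface{}`, so the old and new signatures denote the exact same function type and the change has no runtime effect. A minimal sketch of the equivalence (the function names here are illustrative, not from the generated file):

```go
package main

import "fmt"

// The universe block declares `type any = interface{}`. Because this is an
// alias rather than a distinct type, these two exporters have the exact
// same function type.
func oldExporter(v interface{}, i int) interface{} { return v }
func newExporter(v any, i int) any                 { return v }

func main() {
	// Assigning the old-style function to a new-style variable compiles,
	// demonstrating the two signatures are interchangeable.
	var f func(any, int) any = oldExporter
	fmt.Println(f("state", 0), newExporter("state", 0))
}
```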
file_sdk_plugin_pb_backend_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[52].Exporter = func(v any, i int) any { switch v := v.(*CertificateChain); i { case 0: return &v.state @@ -4909,7 +5099,7 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } - file_sdk_plugin_pb_backend_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_sdk_plugin_pb_backend_proto_msgTypes[53].Exporter = func(v any, i int) any { switch v := v.(*SendEventRequest); i { case 0: return &v.state @@ -4928,7 +5118,7 @@ func file_sdk_plugin_pb_backend_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sdk_plugin_pb_backend_proto_rawDesc, NumEnums: 0, - NumMessages: 58, + NumMessages: 60, NumExtensions: 0, NumServices: 4, }, diff --git a/sdk/plugin/pb/backend.proto b/sdk/plugin/pb/backend.proto index ded407788a72..cf0f47d8dbe7 100644 --- a/sdk/plugin/pb/backend.proto +++ b/sdk/plugin/pb/backend.proto @@ -4,685 +4,711 @@ syntax = "proto3"; package pb; -option go_package = "github.com/hashicorp/vault/sdk/plugin/pb"; - import "google/protobuf/timestamp.proto"; import "sdk/logical/event.proto"; import "sdk/logical/identity.proto"; import "sdk/logical/plugin.proto"; +option go_package = "github.com/hashicorp/vault/sdk/plugin/pb"; + message Empty {} message Header { - repeated string header = 1; + repeated string header = 1; } message ProtoError { - // Error type can be one of: - // ErrTypeUnknown uint32 = iota - // ErrTypeUserError - // ErrTypeInternalError - // ErrTypeCodedError - // ErrTypeStatusBadRequest - // ErrTypeUnsupportedOperation - // ErrTypeUnsupportedPath - // ErrTypeInvalidRequest - // ErrTypePermissionDenied - // ErrTypeMultiAuthzPending - // ErrTypeUnrecoverable - uint32 err_type = 1; - string err_msg = 2; - int64 err_code = 3; + // Error type can be one of: + // ErrTypeUnknown uint32 = iota + // ErrTypeUserError + // ErrTypeInternalError + // ErrTypeCodedError + // ErrTypeStatusBadRequest + // ErrTypeUnsupportedOperation + // ErrTypeUnsupportedPath + // ErrTypeInvalidRequest + // ErrTypePermissionDenied + // ErrTypeMultiAuthzPending + // ErrTypeUnrecoverable + uint32 err_type = 1; + string err_msg = 2; + int64 err_code = 3; } // Paths is the structure of special paths that is used for SpecialPaths. message Paths { - // Root are the paths that require a root token to access - repeated string root = 1; + // Root are the paths that require a root token to access + repeated string root = 1; - // Unauthenticated are the paths that can be accessed without any auth. - repeated string unauthenticated = 2; + // Unauthenticated are the paths that can be accessed without any auth. + repeated string unauthenticated = 2; - // LocalStorage are paths (prefixes) that are local to this instance; this - // indicates that these paths should not be replicated - repeated string local_storage = 3; + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + repeated string local_storage = 3; - // SealWrapStorage are storage paths that, when using a capable seal, - // should be seal wrapped with extra encryption. It is exact matching - // unless it ends with '/' in which case it will be treated as a prefix. - repeated string seal_wrap_storage = 4; + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. 
It is exact matching - // unless it ends with '/' in which case it will be treated as a prefix. - repeated string seal_wrap_storage = 4; + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. + repeated string seal_wrap_storage = 4; - // WriteForwardedStorage are storage paths that, when running on a PR - // Secondary cluster, cause a GRPC call up to the PR Primary cluster's - // active node to handle storage.Put(...) and storage.Delete(...) events. - // - // See extended note in /sdk/logical/logical.go. - repeated string write_forwarded_storage = 5; + // WriteForwardedStorage are storage paths that, when running on a PR + // Secondary cluster, cause a GRPC call up to the PR Primary cluster's + // active node to handle storage.Put(...) and storage.Delete(...) events. + // + // See extended note in /sdk/logical/logical.go. + repeated string write_forwarded_storage = 5; + + // Binary are paths whose request bodies are binary, not JSON + // + // See note in /sdk/logical/logical.go. + repeated string binary = 6; + + // Limited paths are storage paths that require special-case request limiting. + // + // See note in /sdk/logical/logical.go. + repeated string limited = 7; } message Request { - // Id is the uuid associated with each request - string id = 1; + // Id is the uuid associated with each request + string id = 1; - // If set, the name given to the replication secondary where this request - // originated - string ReplicationCluster = 2; + // If set, the name given to the replication secondary where this request - // originated + // originated + string ReplicationCluster = 2; - // Operation is the requested operation type - string operation = 3; + // Operation is the requested operation type + string operation = 3; - // Path is the part of the request path not consumed by the - // routing. As an example, if the original request path is "prod/aws/foo" - // and the AWS logical backend is mounted at "prod/aws/", then the - // final path is "foo" since the mount prefix is trimmed. - string path = 4; + // Path is the part of the request path not consumed by the + // routing. As an example, if the original request path is "prod/aws/foo" + // and the AWS logical backend is mounted at "prod/aws/", then the + // final path is "foo" since the mount prefix is trimmed. + string path = 4; - // Request data is a JSON object that must have keys with string type. - string data = 5; + // Request data is a JSON object that must have keys with string type. + string data = 5; - // Secret will be non-nil only for Revoke and Renew operations - // to represent the secret that was returned prior. - Secret secret = 6; + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. + Secret secret = 6; - // Auth will be non-nil only for Renew operations - // to represent the auth that was returned prior. - Auth auth = 7; + // Auth will be non-nil only for Renew operations + // to represent the auth that was returned prior. + Auth auth = 7; - // Headers will contain the http headers from the request. This value will - // be used in the audit broker to ensure we are auditing only the allowed - // headers. - map<string, Header> headers = 8; + // Headers will contain the http headers from the request. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + map<string, Header> headers = 8; - // ClientToken is provided to the core so that the identity - // can be verified and ACLs applied. This value is passed - // through to the logical backends but after being salted and - // hashed.
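The two fields added to Paths above, `binary` and `limited`, give plugins two new classes of special paths alongside the existing root/unauthenticated/storage ones. A hedged sketch of populating the corresponding generated Go struct, assuming protoc-gen-go's usual field-name mapping; the path values themselves are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/plugin/pb"
)

func main() {
	// Only the field names come from the generated code; the values are
	// hypothetical examples of each special-path class.
	paths := &pb.Paths{
		Root:            []string{"config"},       // requires a root token
		Unauthenticated: []string{"login"},        // reachable without auth
		Binary:          []string{"import"},       // request body is raw bytes, not JSON
		Limited:         []string{"write-heavy/"}, // subject to special-case request limiting
	}
	fmt.Println(paths.GetBinary(), paths.GetLimited())
}
```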
- string client_token = 9; + // ClientToken is provided to the core so that the identity + // can be verified and ACLs applied. This value is passed + // through to the logical backends but after being salted and + // hashed. + string client_token = 9; - // ClientTokenAccessor is provided to the core so that the it can get - // logged as part of request audit logging. - string client_token_accessor = 10; + // ClientTokenAccessor is provided to the core so that it can get + // logged as part of request audit logging. + string client_token_accessor = 10; - // DisplayName is provided to the logical backend to help associate - // dynamic secrets with the source entity. This is not a sensitive - // name, but is useful for operators. - string display_name = 11; + // DisplayName is provided to the logical backend to help associate + // dynamic secrets with the source entity. This is not a sensitive + // name, but is useful for operators. + string display_name = 11; - // MountPoint is provided so that a logical backend can generate - // paths relative to itself. The `Path` is effectively the client - // request path with the MountPoint trimmed off. - string mount_point = 12; + // MountPoint is provided so that a logical backend can generate + // paths relative to itself. The `Path` is effectively the client + // request path with the MountPoint trimmed off. + string mount_point = 12; - // MountType is provided so that a logical backend can make decisions - // based on the specific mount type (e.g., if a mount type has different - // aliases, generating different defaults depending on the alias) - string mount_type = 13; + // MountType is provided so that a logical backend can make decisions + // based on the specific mount type (e.g., if a mount type has different + // aliases, generating different defaults depending on the alias) + string mount_type = 13; - // MountAccessor is provided so that identities returned by the authentication - // backends can be tied to the mount it belongs to. - string mount_accessor = 14; + // MountAccessor is provided so that identities returned by the authentication + // backends can be tied to the mount it belongs to. + string mount_accessor = 14; - // WrapInfo contains requested response wrapping parameters - RequestWrapInfo wrap_info = 15; + // WrapInfo contains requested response wrapping parameters + RequestWrapInfo wrap_info = 15; - // ClientTokenRemainingUses represents the allowed number of uses left on the - // token supplied - int64 client_token_remaining_uses = 16; + // ClientTokenRemainingUses represents the allowed number of uses left on the + // token supplied + int64 client_token_remaining_uses = 16; - // EntityID is the identity of the caller extracted out of the token used - // to make this request - string entity_id = 17; + // EntityID is the identity of the caller extracted out of the token used + // to make this request + string entity_id = 17; - // PolicyOverride indicates that the requestor wishes to override - // soft-mandatory Sentinel policies - bool policy_override = 18; + // PolicyOverride indicates that the requestor wishes to override + // soft-mandatory Sentinel policies + bool policy_override = 18; - // Whether the request is unauthenticated, as in, had no client token - // attached. 
Useful in some situations where the client token is not made - // accessible. - bool unauthenticated = 19; + // Whether the request is unauthenticated, as in, had no client token + // attached. Useful in some situations where the client token is not made + // accessible. + bool unauthenticated = 19; - // Connection will be non-nil only for credential providers to - // inspect the connection information and potentially use it for - // authentication/protection. - Connection connection = 20; + // Connection will be non-nil only for credential providers to + // inspect the connection information and potentially use it for + // authentication/protection. + Connection connection = 20; } message Auth { - LeaseOptions lease_options = 1; - - // InternalData is a JSON object that is stored with the auth struct. - // This will be sent back during a Renew/Revoke for storing internal data - // used for those operations. - string internal_data = 2; - - // DisplayName is a non-security sensitive identifier that is - // applicable to this Auth. It is used for logging and prefixing - // of dynamic secrets. For example, DisplayName may be "armon" for - // the github credential backend. If the client token is used to - // generate a SQL credential, the user may be "github-armon-uuid". - // This is to help identify the source without using audit tables. - string display_name = 3; - - // Policies is the list of policies that the authenticated user - // is associated with. - repeated string policies = 4; - - // Metadata is used to attach arbitrary string-type metadata to - // an authenticated user. This metadata will be outputted into the - // audit log. - map<string, string> metadata = 5; - - // ClientToken is the token that is generated for the authentication. - // This will be filled in by Vault core when an auth structure is - // returned. Setting this manually will have no effect. - string client_token = 6; - - // Accessor is the identifier for the ClientToken. This can be used - // to perform management functionalities (especially revocation) when - // ClientToken in the audit logs are obfuscated. Accessor can be used - // to revoke a ClientToken and to lookup the capabilities of the ClientToken, - // both without actually knowing the ClientToken. - string accessor = 7; - - // Period indicates that the token generated using this Auth object - // should never expire. The token should be renewed within the duration - // specified by this period. - int64 period = 8; - - // Number of allowed uses of the issued token - int64 num_uses = 9; - - // EntityID is the identifier of the entity in identity store to which the - // identity of the authenticating client belongs to. - string entity_id = 10; - - // Alias is the information about the authenticated client returned by - // the auth backend - logical.Alias alias = 11; - - // GroupAliases are the informational mappings of external groups which an - // authenticated user belongs to. This is used to check if there are - // mappings groups for the group aliases in identity store. For all the - // matching groups, the entity ID of the user will be added. - repeated logical.Alias group_aliases = 12; - - // If set, restricts usage of the certificates to client IPs falling within - // the range of the specified CIDR(s). - repeated string bound_cidrs = 13; - - // TokenPolicies and IdentityPolicies break down the list in Policies to - // help determine where a policy was sourced - repeated string token_policies = 14; - repeated string identity_policies = 15; - - // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum - // TTL is a hard limit and cannot be exceeded, also counts for periodic tokens.
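Note how Request (and, nearby, Auth and Secret) transports structured data as JSON-encoded strings (`data`, `internal_data`) rather than as proto maps or structs. A sketch of what that implies for a caller, assuming a plain encoding/json round-trip; the request values are invented:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/vault/sdk/plugin/pb"
)

func main() {
	// The proto comment says data "must have keys with string type",
	// i.e. a JSON object; marshal a map and store the resulting text.
	body, err := json.Marshal(map[string]any{"name": "web", "ttl": "1h"})
	if err != nil {
		panic(err)
	}
	req := &pb.Request{
		Id:        "req-1", // invented; in practice a UUID
		Operation: "update",
		Path:      "roles/web", // path with the mount prefix already trimmed
		Data:      string(body),
	}
	fmt.Println(req.GetData())
}
```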
- int64 explicit_max_ttl = 16; - - // TokenType is the type of token being requested - uint32 token_type = 17; - - // Whether the default policy should be added automatically by core - bool no_default_policy = 18; + LeaseOptions lease_options = 1; + + // InternalData is a JSON object that is stored with the auth struct. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + string internal_data = 2; + + // DisplayName is a non-security sensitive identifier that is + // applicable to this Auth. It is used for logging and prefixing + // of dynamic secrets. For example, DisplayName may be "armon" for + // the github credential backend. If the client token is used to + // generate a SQL credential, the user may be "github-armon-uuid". + // This is to help identify the source without using audit tables. + string display_name = 3; + + // Policies is the list of policies that the authenticated user + // is associated with. + repeated string policies = 4; + + // Metadata is used to attach arbitrary string-type metadata to + // an authenticated user. This metadata will be outputted into the + // audit log. + map<string, string> metadata = 5; + + // ClientToken is the token that is generated for the authentication. + // This will be filled in by Vault core when an auth structure is + // returned. Setting this manually will have no effect. + string client_token = 6; + + // Accessor is the identifier for the ClientToken. This can be used + // to perform management functionalities (especially revocation) when + // ClientToken in the audit logs are obfuscated. Accessor can be used + // to revoke a ClientToken and to lookup the capabilities of the ClientToken, + // both without actually knowing the ClientToken. + string accessor = 7; + + // Period indicates that the token generated using this Auth object + // should never expire. The token should be renewed within the duration + // specified by this period. + int64 period = 8; + + // Number of allowed uses of the issued token + int64 num_uses = 9; + + // EntityID is the identifier of the entity in identity store to which the + // identity of the authenticating client belongs. + string entity_id = 10; + + // Alias is the information about the authenticated client returned by + // the auth backend + logical.Alias alias = 11; + + // GroupAliases are the informational mappings of external groups which an + // authenticated user belongs to. This is used to check if there are + // mapped groups for the group aliases in the identity store. For all the + // matching groups, the entity ID of the user will be added. + repeated logical.Alias group_aliases = 12; + + // If set, restricts usage of the certificates to client IPs falling within + // the range of the specified CIDR(s). + repeated string bound_cidrs = 13; + + // TokenPolicies and IdentityPolicies break down the list in Policies to + // help determine where a policy was sourced + repeated string token_policies = 14; + repeated string identity_policies = 15; + + // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum + // TTL is a hard limit and cannot be exceeded; this also applies to periodic tokens.
+ int64 explicit_max_ttl = 16; + + // TokenType is the type of token being requested + uint32 token_type = 17; + + // Whether the default policy should be added automatically by core + bool no_default_policy = 18; } message TokenEntry { - string id = 1; - string accessor = 2; - string parent = 3; - repeated string policies = 4; - string path = 5; - map<string, string> meta = 6; - string display_name = 7; - int64 num_uses = 8; - int64 creation_time = 9; - int64 ttl = 10; - int64 explicit_max_ttl = 11; - string role = 12; - int64 period = 13; - string entity_id = 14; - repeated string bound_cidrs = 15; - string namespace_id = 16; - string cubbyhole_id = 17; - uint32 type = 18; - map<string, string> internal_meta = 19; - string inline_policy = 20; - bool no_identity_policies = 21; - string external_id = 22; + string id = 1; + string accessor = 2; + string parent = 3; + repeated string policies = 4; + string path = 5; + map<string, string> meta = 6; + string display_name = 7; + int64 num_uses = 8; + int64 creation_time = 9; + int64 ttl = 10; + int64 explicit_max_ttl = 11; + string role = 12; + int64 period = 13; + string entity_id = 14; + repeated string bound_cidrs = 15; + string namespace_id = 16; + string cubbyhole_id = 17; + uint32 type = 18; + map<string, string> internal_meta = 19; + string inline_policy = 20; + bool no_identity_policies = 21; + string external_id = 22; } message LeaseOptions { - int64 TTL = 1; + int64 TTL = 1; - bool renewable = 2; + bool renewable = 2; - int64 increment = 3; + int64 increment = 3; - google.protobuf.Timestamp issue_time = 4; + google.protobuf.Timestamp issue_time = 4; - int64 MaxTTL = 5; + int64 MaxTTL = 5; } message Secret { - LeaseOptions lease_options = 1; + LeaseOptions lease_options = 1; - // InternalData is a JSON object that is stored with the secret. - // This will be sent back during a Renew/Revoke for storing internal data - // used for those operations. - string internal_data = 2; + // InternalData is a JSON object that is stored with the secret. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + string internal_data = 2; - // LeaseID is the ID returned to the user to manage this secret. - // This is generated by Vault core. Any set value will be ignored. - // For requests, this will always be blank. - string lease_id = 3; + // LeaseID is the ID returned to the user to manage this secret. + // This is generated by Vault core. Any set value will be ignored. + // For requests, this will always be blank. + string lease_id = 3; } message Response { - // Secret, if not nil, denotes that this response represents a secret. - Secret secret = 1; + // Secret, if not nil, denotes that this response represents a secret. + Secret secret = 1; + + // Auth, if not nil, contains the authentication information for + // this response. This is only checked and means something for + // credential backends. + Auth auth = 2; - // Auth, if not nil, contains the authentication information for - // this response. This is only checked and means something for - // credential backends. - Auth auth = 2; + // Response data is a JSON object that must have string keys. For + // secrets, this data is sent down to the user as-is. To store internal + // data that you don't want the user to see, store it in + // Secret.InternalData. 
+ string data = 3; - // Response data is a JSON object that must have string keys. For - // secrets, this data is sent down to the user as-is. To store internal - // data that you don't want the user to see, store it in - // Secret.InternalData. - string data = 3; + // Redirect is an HTTP URL to redirect to for further authentication. + // This is only valid for credential backends. This will be blanked + // for any logical backend and ignored. + string redirect = 4; - // Redirect is an HTTP URL to redirect to for further authentication. - // This is only valid for credential backends. This will be blanked - // for any logical backend and ignored. - string redirect = 4; + // Warnings allow operations or backends to return warnings in response + // to user actions without failing the action outright. + repeated string warnings = 5; - // Warnings allow operations or backends to return warnings in response - // to user actions without failing the action outright. - repeated string warnings = 5; + // Information for wrapping the response in a cubbyhole + ResponseWrapInfo wrap_info = 6; - // Information for wrapping the response in a cubbyhole - ResponseWrapInfo wrap_info = 6; + // Headers will contain the http headers from the response. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + map<string, Header> headers = 7; - // Headers will contain the http headers from the response. This value will - // be used in the audit broker to ensure we are auditing only the allowed - // headers. - map<string, Header> headers = 7; + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. + string mount_type = 8; } message ResponseWrapInfo { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - int64 TTL = 1; + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + int64 TTL = 1; - // The token containing the wrapped response - string token = 2; + // The token containing the wrapped response + string token = 2; - // The token accessor for the wrapped response token - string accessor = 3; + // The token accessor for the wrapped response token + string accessor = 3; - // The creation time. This can be used with the TTL to figure out an - // expected expiration. - google.protobuf.Timestamp creation_time = 4; + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + google.protobuf.Timestamp creation_time = 4; - // If the contained response is the output of a token creation call, the - // created token's accessor will be accessible here - string wrapped_accessor = 5; + // If the contained response is the output of a token creation call, the + // created token's accessor will be accessible here + string wrapped_accessor = 5; - // WrappedEntityID is the entity identifier of the caller who initiated the - // wrapping request - string wrapped_entity_id = 6; + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + string wrapped_entity_id = 6; - // The format to use. This doesn't get returned, it's only internal. - string format = 7; + // The format to use. This doesn't get returned, it's only internal. + string format = 7; - // CreationPath is the original request path that was used to create - // the wrapped response. - string creation_path = 8; + // CreationPath is the original request path that was used to create + // the wrapped response. 
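The creation_time comment above spells out the intended arithmetic: the expected expiration is CreationTime plus TTL. A small sketch, assuming the int64 TTL is a number of seconds, as with the SDK's other TTL fields:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/plugin/pb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Invented values; the TTL unit (seconds) is an assumption.
	wi := &pb.ResponseWrapInfo{
		TTL:          300,
		CreationTime: timestamppb.Now(),
	}
	expiry := wi.GetCreationTime().AsTime().Add(time.Duration(wi.TTL) * time.Second)
	fmt.Println("wrapping token expires at:", expiry)
}
```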
+ string creation_path = 8; - // Controls seal wrapping behavior downstream for specific use cases - bool seal_wrap = 9; + // Controls seal wrapping behavior downstream for specific use cases + bool seal_wrap = 9; } message RequestWrapInfo { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - int64 TTL = 1; + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + int64 TTL = 1; - // The format to use for the wrapped response; if not specified it's a bare - // token - string format = 2; + // The format to use for the wrapped response; if not specified it's a bare + // token + string format = 2; - // A flag to conforming backends that data for a given request should be - // seal wrapped - bool seal_wrap = 3; + // A flag to conforming backends that data for a given request should be + // seal wrapped + bool seal_wrap = 3; } // HandleRequestArgs is the args for HandleRequest method. message HandleRequestArgs { - uint32 storage_id = 1; - Request request = 2; + uint32 storage_id = 1; + Request request = 2; } // HandleRequestReply is the reply for HandleRequest method. message HandleRequestReply { - Response response = 1; - ProtoError err = 2; + Response response = 1; + ProtoError err = 2; } // InitializeArgs is the args for Initialize method. -message InitializeArgs { -} +message InitializeArgs {} // InitializeReply is the reply for Initialize method. message InitializeReply { - ProtoError err = 1; + ProtoError err = 1; } // SpecialPathsReply is the reply for SpecialPaths method. message SpecialPathsReply { - Paths paths = 1; + Paths paths = 1; } // HandleExistenceCheckArgs is the args for HandleExistenceCheck method. message HandleExistenceCheckArgs { - uint32 storage_id = 1; - Request request = 2; + uint32 storage_id = 1; + Request request = 2; } // HandleExistenceCheckReply is the reply for HandleExistenceCheck method. message HandleExistenceCheckReply { - bool check_found = 1; - bool exists = 2; - ProtoError err = 3; + bool check_found = 1; + bool exists = 2; + ProtoError err = 3; } // SetupArgs is the args for Setup method. message SetupArgs { - uint32 broker_id = 1; - map<string, string> Config = 2; - string backendUUID = 3; + uint32 broker_id = 1; + map<string, string> Config = 2; + string backendUUID = 3; } // SetupReply is the reply for Setup method. message SetupReply { - string err = 1; + string err = 1; } // TypeReply is the reply for the Type method. message TypeReply { - uint32 type = 1; + uint32 type = 1; } message InvalidateKeyArgs { - string key = 1; + string key = 1; } // Backend is the interface that plugins must satisfy. The plugin should // implement the server for this service. Requests will first run the // HandleExistenceCheck rpc then run the HandleRequests rpc. service Backend { - // HandleRequest is used to handle a request and generate a response. - // The plugins must check the operation type and handle appropriately. - rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply); - - // SpecialPaths is a list of paths that are special in some way. - // See PathType for the types of special paths. The key is the type - // of the special path, and the value is a list of paths for this type. - // This is not a regular expression but is an exact match. If the path - // ends in '*' then it is a prefix-based match. The '*' can only appear - // at the end.
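The SpecialPaths comment above states the matching rule precisely: entries are compared verbatim unless they end in '*', in which case everything before the '*' is treated as a prefix. A direct transcription of that rule as a helper (the function name is ours, not the SDK's):

```go
package main

import (
	"fmt"
	"strings"
)

// specialPathMatches implements the documented rule: exact match, except
// that a trailing '*' turns the entry into a prefix match.
func specialPathMatches(entry, path string) bool {
	if strings.HasSuffix(entry, "*") {
		return strings.HasPrefix(path, strings.TrimSuffix(entry, "*"))
	}
	return entry == path
}

func main() {
	fmt.Println(specialPathMatches("login", "login"))       // true: exact match
	fmt.Println(specialPathMatches("creds/*", "creds/web")) // true: prefix match
	fmt.Println(specialPathMatches("creds/*", "roles/web")) // false
}
```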
- rpc SpecialPaths(Empty) returns (SpecialPathsReply); - - // HandleExistenceCheck is used to handle a request and generate a response - // indicating whether the given path exists or not; this is used to - // understand whether the request must have a Create or Update capability - // ACL applied. The first bool indicates whether an existence check - // function was found for the backend; the second indicates whether, if an - // existence check function was found, the item exists or not. - rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply); - - // Cleanup is invoked during an unmount of a backend to allow it to - // handle any cleanup like connection closing or releasing of file handles. - // Cleanup is called right before Vault closes the plugin process. - rpc Cleanup(Empty) returns (Empty); - - // InvalidateKey may be invoked when an object is modified that belongs - // to the backend. The backend can use this to clear any caches or reset - // internal state as needed. - rpc InvalidateKey(InvalidateKeyArgs) returns (Empty); - - // Setup is used to set up the backend based on the provided backend - // configuration. The plugin's setup implementation should use the provided - // broker_id to create a connection back to Vault for use with the Storage - // and SystemView clients. - rpc Setup(SetupArgs) returns (SetupReply); - - // Initialize is invoked just after mounting a backend to allow it to - // handle any initialization tasks that need to be performed. - rpc Initialize(InitializeArgs) returns (InitializeReply); - - // Type returns the BackendType for the particular backend - rpc Type(Empty) returns (TypeReply); + // HandleRequest is used to handle a request and generate a response. + // The plugins must check the operation type and handle appropriately. + rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply); + + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + rpc SpecialPaths(Empty) returns (SpecialPathsReply); + + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply); + + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + // Cleanup is called right before Vault closes the plugin process. + rpc Cleanup(Empty) returns (Empty); + + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + rpc InvalidateKey(InvalidateKeyArgs) returns (Empty); + + // Setup is used to set up the backend based on the provided backend + // configuration. 
The plugin's setup implementation should use the provided + // broker_id to create a connection back to Vault for use with the Storage + // and SystemView clients. + rpc Setup(SetupArgs) returns (SetupReply); + + // Initialize is invoked just after mounting a backend to allow it to + // handle any initialization tasks that need to be performed. + rpc Initialize(InitializeArgs) returns (InitializeReply); + + // Type returns the BackendType for the particular backend + rpc Type(Empty) returns (TypeReply); } message StorageEntry { - string key = 1; - bytes value = 2; - bool seal_wrap = 3; + string key = 1; + bytes value = 2; + bool seal_wrap = 3; } message StorageListArgs { - string prefix = 1; + string prefix = 1; } message StorageListReply { - repeated string keys = 1; - string err = 2; + repeated string keys = 1; + string err = 2; } message StorageGetArgs { - string key = 1; + string key = 1; } message StorageGetReply { - StorageEntry entry = 1; - string err = 2; + StorageEntry entry = 1; + string err = 2; } message StoragePutArgs { - StorageEntry entry = 1; + StorageEntry entry = 1; } message StoragePutReply { - string err = 1; + string err = 1; } message StorageDeleteArgs { - string key = 1; + string key = 1; } message StorageDeleteReply { - string err = 1; + string err = 1; } // Storage is the way that plugins are able to read/write data. Plugins should // implement the client for this service. service Storage { - rpc List(StorageListArgs) returns (StorageListReply); - rpc Get(StorageGetArgs) returns (StorageGetReply); - rpc Put(StoragePutArgs) returns (StoragePutReply); - rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply); + rpc List(StorageListArgs) returns (StorageListReply); + rpc Get(StorageGetArgs) returns (StorageGetReply); + rpc Put(StoragePutArgs) returns (StoragePutReply); + rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply); } message TTLReply { - int64 TTL = 1; + int64 TTL = 1; } message TaintedReply { - bool tainted = 1; + bool tainted = 1; } message CachingDisabledReply { - bool disabled = 1; + bool disabled = 1; } message ReplicationStateReply { - int32 state = 1; + int32 state = 1; } message ResponseWrapDataArgs { - string data = 1; - int64 TTL = 2; - bool JWT = 3; + string data = 1; + int64 TTL = 2; + bool JWT = 3; } message ResponseWrapDataReply { - ResponseWrapInfo wrap_info = 1; - string err = 2; + ResponseWrapInfo wrap_info = 1; + string err = 2; } message MlockEnabledReply { - bool enabled = 1; + bool enabled = 1; } message LocalMountReply { - bool local = 1; + bool local = 1; } message EntityInfoArgs { - string entity_id = 1; + string entity_id = 1; } message EntityInfoReply { - logical.Entity entity = 1; - string err = 2; + logical.Entity entity = 1; + string err = 2; } message GroupsForEntityReply { - repeated logical.Group groups = 1; - string err = 2; + repeated logical.Group groups = 1; + string err = 2; } message PluginEnvReply { - logical.PluginEnvironment plugin_environment = 1; - string err = 2; + logical.PluginEnvironment plugin_environment = 1; + string err = 2; } message GeneratePasswordFromPolicyRequest { - string policy_name = 1; + string policy_name = 1; } message GeneratePasswordFromPolicyReply { - string password = 1; + string password = 1; } message ClusterInfoReply { - string cluster_name = 1; - string cluster_id = 2; - string err = 3; + string cluster_name = 1; + string cluster_id = 2; + string err = 3; +} + +message GenerateIdentityTokenRequest { + string audience = 1; + int64 ttl = 2; +} + +message GenerateIdentityTokenResponse { 
+ string token = 1; + int64 ttl = 2; } // SystemView exposes system configuration information in a safe way for plugins // to consume. Plugins should implement the client for this service. service SystemView { - // DefaultLeaseTTL returns the default lease TTL set in Vault configuration - rpc DefaultLeaseTTL(Empty) returns (TTLReply); + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + rpc DefaultLeaseTTL(Empty) returns (TTLReply); + + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + rpc MaxLeaseTTL(Empty) returns (TTLReply); - // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend - // authors should take care not to issue credentials that last longer than - // this value, as Vault will revoke them - rpc MaxLeaseTTL(Empty) returns (TTLReply); + // Tainted returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + rpc Tainted(Empty) returns (TaintedReply); - // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the - // process of being unmounted. This should only be used in special - // circumstances; a primary use-case is as a guard in revocation functions. - // If revocation of a backend's leases fails it can keep the unmounting - // process from being successful. If the reason for this failure is not - // relevant when the mount is tainted (for instance, saving a CRL to disk - // when the stored CRL will be removed during the unmounting process - // anyways), we can ignore the errors to allow unmounting to complete. - rpc Tainted(Empty) returns (TaintedReply); + // CachingDisabled returns true if caching is disabled. If true, no caches + // should be used, despite known slowdowns. + rpc CachingDisabled(Empty) returns (CachingDisabledReply); - // CachingDisabled returns true if caching is disabled. If true, no caches - // should be used, despite known slowdowns. - rpc CachingDisabled(Empty) returns (CachingDisabledReply); + // ReplicationState indicates the state of cluster replication + rpc ReplicationState(Empty) returns (ReplicationStateReply); - // ReplicationState indicates the state of cluster replication - rpc ReplicationState(Empty) returns (ReplicationStateReply); + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply); - // ResponseWrapData wraps the given data in a cubbyhole and returns the - // token used to unwrap. - rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply); + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + rpc MlockEnabled(Empty) returns (MlockEnabledReply); - // MlockEnabled returns the configuration setting for enabling mlock on - // plugins.
- rpc MlockEnabled(Empty) returns (MlockEnabledReply); + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + rpc LocalMount(Empty) returns (LocalMountReply); - // LocalMount, when run from a system view attached to a request, indicates - // whether the request is affecting a local mount or not - rpc LocalMount(Empty) returns (LocalMountReply); + // EntityInfo returns the basic entity information for the given entity id + rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply); - // EntityInfo returns the basic entity information for the given entity id - rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply); + // PluginEnv returns Vault environment information used by plugins + rpc PluginEnv(Empty) returns (PluginEnvReply); - // PluginEnv returns Vault environment information used by plugins - rpc PluginEnv(Empty) returns (PluginEnvReply); + // GroupsForEntity returns the group membership information for the given + // entity id + rpc GroupsForEntity(EntityInfoArgs) returns (GroupsForEntityReply); - // GroupsForEntity returns the group membership information for the given - // entity id - rpc GroupsForEntity(EntityInfoArgs) returns (GroupsForEntityReply); + // GeneratePasswordFromPolicy generates a password from an existing password policy + rpc GeneratePasswordFromPolicy(GeneratePasswordFromPolicyRequest) returns (GeneratePasswordFromPolicyReply); - // GeneratePasswordFromPolicy generates a password from an existing password policy - rpc GeneratePasswordFromPolicy(GeneratePasswordFromPolicyRequest) returns (GeneratePasswordFromPolicyReply); + // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. + rpc ClusterInfo(Empty) returns (ClusterInfoReply); - // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. - rpc ClusterInfo(Empty) returns (ClusterInfoReply); + // GenerateIdentityToken returns an identity token for the requesting plugin. + rpc GenerateIdentityToken(GenerateIdentityTokenRequest) returns (GenerateIdentityTokenResponse); } message Connection { - // RemoteAddr is the network address that sent the request. - string remote_addr = 1; + // RemoteAddr is the network address that sent the request. + string remote_addr = 1; - // RemotePort is the network port that sent the request. - int32 remote_port = 3; + // RemotePort is the network port that sent the request. 
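Stepping back to the new SystemView RPC declared just above: GenerateIdentityToken pairs an audience-scoped request with a token-plus-TTL response. A hedged client-side sketch against the generated stubs; connection setup is elided and the audience and TTL values are invented (the TTL unit being seconds is an assumption):

```go
package plugindemo

import (
	"context"

	"github.com/hashicorp/vault/sdk/plugin/pb"
	"google.golang.org/grpc"
)

// fetchIdentityToken calls the new SystemView RPC over an established
// plugin connection back to Vault.
func fetchIdentityToken(ctx context.Context, conn *grpc.ClientConn) (string, error) {
	sv := pb.NewSystemViewClient(conn)
	resp, err := sv.GenerateIdentityToken(ctx, &pb.GenerateIdentityTokenRequest{
		Audience: "https://workload.example", // intended consumer of the token
		Ttl:      3600,
	})
	if err != nil {
		return "", err
	}
	return resp.GetToken(), nil
}
```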
- int32 remote_port = 3; + // RemotePort is the network port that sent the request. + int32 remote_port = 3; - // ConnectionState is the marshalled tls.ConnectionState from the original - // request - ConnectionState connection_state = 2; + // ConnectionState is the marshalled tls.ConnectionState from the original + // request + ConnectionState connection_state = 2; } message ConnectionState { - uint32 version = 1; - bool handshake_complete = 2; - bool did_resume = 3; - uint32 cipher_suite = 4; - string negotiated_protocol = 5; - bool negotiated_protocol_is_mutual = 6; - string server_name = 7; - CertificateChain peer_certificates = 8; + uint32 version = 1; + bool handshake_complete = 2; + bool did_resume = 3; + uint32 cipher_suite = 4; + string negotiated_protocol = 5; + bool negotiated_protocol_is_mutual = 6; + string server_name = 7; + CertificateChain peer_certificates = 8; - repeated CertificateChain verified_chains = 9; - repeated bytes signed_certificate_timestamps = 10; + repeated CertificateChain verified_chains = 9; + repeated bytes signed_certificate_timestamps = 10; - bytes ocsp_response = 11; - bytes tls_unique = 12; + bytes ocsp_response = 11; + bytes tls_unique = 12; } message Certificate { - bytes asn1_data = 1; + bytes asn1_data = 1; } message CertificateChain { - repeated Certificate certificates = 1; + repeated Certificate certificates = 1; } message SendEventRequest { - string event_type = 1; - logical.EventData event = 2; + string event_type = 1; + logical.EventData event = 2; } service Events { - rpc SendEvent(SendEventRequest) returns (Empty); -} \ No newline at end of file + rpc SendEvent(SendEventRequest) returns (Empty); +} diff --git a/sdk/plugin/pb/backend_grpc.pb.go b/sdk/plugin/pb/backend_grpc.pb.go index a8f3107e0407..322e723f1560 100644 --- a/sdk/plugin/pb/backend_grpc.pb.go +++ b/sdk/plugin/pb/backend_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.4.0 +// - protoc (unknown) +// source: sdk/plugin/pb/backend.proto package pb @@ -11,12 +18,27 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + Backend_HandleRequest_FullMethodName = "/pb.Backend/HandleRequest" + Backend_SpecialPaths_FullMethodName = "/pb.Backend/SpecialPaths" + Backend_HandleExistenceCheck_FullMethodName = "/pb.Backend/HandleExistenceCheck" + Backend_Cleanup_FullMethodName = "/pb.Backend/Cleanup" + Backend_InvalidateKey_FullMethodName = "/pb.Backend/InvalidateKey" + Backend_Setup_FullMethodName = "/pb.Backend/Setup" + Backend_Initialize_FullMethodName = "/pb.Backend/Initialize" + Backend_Type_FullMethodName = "/pb.Backend/Type" +) // BackendClient is the client API for Backend service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Backend is the interface that plugins must satisfy. The plugin should +// implement the server for this service. Requests will first run the +// HandleExistenceCheck rpc then run the HandleRequests rpc. type BackendClient interface { // HandleRequest is used to handle a request and generate a response. // The plugins must check the operation type and handle appropriately. 
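The *_FullMethodName constants introduced above replace the string literals that were previously inlined at every Invoke call and handler site. One practical benefit is that other code can match methods without retyping the route string; a hedged interceptor sketch (the interceptor itself is invented, only the constant comes from the generated file):

```go
package plugindemo

import (
	"context"

	"github.com/hashicorp/vault/sdk/plugin/pb"
	"google.golang.org/grpc"
)

// loggingInterceptor singles out the HandleRequest hot path by comparing
// against the generated constant instead of "/pb.Backend/HandleRequest".
func loggingInterceptor(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if info.FullMethod == pb.Backend_HandleRequest_FullMethodName {
		// e.g. attach extra logging or metrics for this method here
	}
	return handler(ctx, req)
}
```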
@@ -64,8 +86,9 @@ func NewBackendClient(cc grpc.ClientConnInterface) BackendClient { } func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HandleRequestReply) - err := c.cc.Invoke(ctx, "/pb.Backend/HandleRequest", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_HandleRequest_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -73,8 +96,9 @@ func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs } func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SpecialPathsReply) - err := c.cc.Invoke(ctx, "/pb.Backend/SpecialPaths", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_SpecialPaths_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -82,8 +106,9 @@ func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grp } func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HandleExistenceCheckReply) - err := c.cc.Invoke(ctx, "/pb.Backend/HandleExistenceCheck", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_HandleExistenceCheck_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -91,8 +116,9 @@ func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExis } func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/pb.Backend/Cleanup", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Cleanup_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -100,8 +126,9 @@ func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.Cal } func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Empty) - err := c.cc.Invoke(ctx, "/pb.Backend/InvalidateKey", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_InvalidateKey_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -109,8 +136,9 @@ func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs } func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetupReply) - err := c.cc.Invoke(ctx, "/pb.Backend/Setup", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Setup_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -118,8 +146,9 @@ func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.C } func (c *backendClient) Initialize(ctx context.Context, in *InitializeArgs, opts ...grpc.CallOption) (*InitializeReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InitializeReply) - err := c.cc.Invoke(ctx, "/pb.Backend/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Initialize_FullMethodName, in, out, cOpts...) 
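Each client method above now builds `cOpts` by prepending grpc.StaticMethod() to the caller-supplied options; the ordering means caller options come last and can override it. StaticMethod marks the call as targeting a method name known at init time, which, for example, lets stats handlers treat the name as a safe low-cardinality label. The generated pattern, isolated into a helper for clarity (the helper is ours, not generated; it assumes gRPC-Go v1.62.0 or later, as the diff itself requires):

```go
package plugindemo

import (
	"context"

	"google.golang.org/grpc"
)

// invoke mirrors the generated call sites: StaticMethod first, then the
// caller's options, then a plain ClientConn.Invoke.
func invoke(ctx context.Context, cc *grpc.ClientConn, method string,
	in, out interface{}, opts ...grpc.CallOption) error {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	return cc.Invoke(ctx, method, in, out, cOpts...)
}
```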
if err != nil { return nil, err } @@ -127,8 +156,9 @@ func (c *backendClient) Initialize(ctx context.Context, in *InitializeArgs, opts } func (c *backendClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TypeReply) - err := c.cc.Invoke(ctx, "/pb.Backend/Type", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Type_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -138,6 +168,10 @@ func (c *backendClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOp // BackendServer is the server API for Backend service. // All implementations must embed UnimplementedBackendServer // for forward compatibility +// +// Backend is the interface that plugins must satisfy. The plugin should +// implement the server for this service. Requests will first run the +// HandleExistenceCheck rpc then run the HandleRequests rpc. type BackendServer interface { // HandleRequest is used to handle a request and generate a response. // The plugins must check the operation type and handle appropriately. @@ -228,7 +262,7 @@ func _Backend_HandleRequest_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/HandleRequest", + FullMethod: Backend_HandleRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).HandleRequest(ctx, req.(*HandleRequestArgs)) @@ -246,7 +280,7 @@ func _Backend_SpecialPaths_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/SpecialPaths", + FullMethod: Backend_SpecialPaths_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).SpecialPaths(ctx, req.(*Empty)) @@ -264,7 +298,7 @@ func _Backend_HandleExistenceCheck_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/HandleExistenceCheck", + FullMethod: Backend_HandleExistenceCheck_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).HandleExistenceCheck(ctx, req.(*HandleExistenceCheckArgs)) @@ -282,7 +316,7 @@ func _Backend_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Cleanup", + FullMethod: Backend_Cleanup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Cleanup(ctx, req.(*Empty)) @@ -300,7 +334,7 @@ func _Backend_InvalidateKey_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/InvalidateKey", + FullMethod: Backend_InvalidateKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).InvalidateKey(ctx, req.(*InvalidateKeyArgs)) @@ -318,7 +352,7 @@ func _Backend_Setup_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Setup", + FullMethod: Backend_Setup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Setup(ctx, req.(*SetupArgs)) @@ -336,7 +370,7 @@ func _Backend_Initialize_Handler(srv interface{}, ctx context.Context, dec func( } info 
:= &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Initialize", + FullMethod: Backend_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Initialize(ctx, req.(*InitializeArgs)) @@ -354,7 +388,7 @@ func _Backend_Type_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Type", + FullMethod: Backend_Type_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Type(ctx, req.(*Empty)) @@ -406,9 +440,19 @@ var Backend_ServiceDesc = grpc.ServiceDesc{ Metadata: "sdk/plugin/pb/backend.proto", } +const ( + Storage_List_FullMethodName = "/pb.Storage/List" + Storage_Get_FullMethodName = "/pb.Storage/Get" + Storage_Put_FullMethodName = "/pb.Storage/Put" + Storage_Delete_FullMethodName = "/pb.Storage/Delete" +) + // StorageClient is the client API for Storage service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Storage is the way that plugins are able to read/write data. Plugins should +// implement the client for this service. type StorageClient interface { List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) @@ -425,8 +469,9 @@ func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { } func (c *storageClient) List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StorageListReply) - err := c.cc.Invoke(ctx, "/pb.Storage/List", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_List_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -434,8 +479,9 @@ func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grp } func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StorageGetReply) - err := c.cc.Invoke(ctx, "/pb.Storage/Get", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_Get_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -443,8 +489,9 @@ func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grp } func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StoragePutReply) - err := c.cc.Invoke(ctx, "/pb.Storage/Put", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_Put_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -452,8 +499,9 @@ func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grp } func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StorageDeleteReply) - err := c.cc.Invoke(ctx, "/pb.Storage/Delete", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_Delete_FullMethodName, in, out, cOpts...)
if err != nil { return nil, err } @@ -463,6 +511,9 @@ func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts // StorageServer is the server API for Storage service. // All implementations must embed UnimplementedStorageServer // for forward compatibility +// +// Storage is the way that plugins are able to read/write data. Plugins should +// implement the client for this service. type StorageServer interface { List(context.Context, *StorageListArgs) (*StorageListReply, error) Get(context.Context, *StorageGetArgs) (*StorageGetReply, error) @@ -510,7 +561,7 @@ func _Storage_List_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/List", + FullMethod: Storage_List_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).List(ctx, req.(*StorageListArgs)) @@ -528,7 +579,7 @@ func _Storage_Get_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/Get", + FullMethod: Storage_Get_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Get(ctx, req.(*StorageGetArgs)) @@ -546,7 +597,7 @@ func _Storage_Put_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/Put", + FullMethod: Storage_Put_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Put(ctx, req.(*StoragePutArgs)) @@ -564,7 +615,7 @@ func _Storage_Delete_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/Delete", + FullMethod: Storage_Delete_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Delete(ctx, req.(*StorageDeleteArgs)) @@ -600,9 +651,29 @@ var Storage_ServiceDesc = grpc.ServiceDesc{ Metadata: "sdk/plugin/pb/backend.proto", } +const ( + SystemView_DefaultLeaseTTL_FullMethodName = "/pb.SystemView/DefaultLeaseTTL" + SystemView_MaxLeaseTTL_FullMethodName = "/pb.SystemView/MaxLeaseTTL" + SystemView_Tainted_FullMethodName = "/pb.SystemView/Tainted" + SystemView_CachingDisabled_FullMethodName = "/pb.SystemView/CachingDisabled" + SystemView_ReplicationState_FullMethodName = "/pb.SystemView/ReplicationState" + SystemView_ResponseWrapData_FullMethodName = "/pb.SystemView/ResponseWrapData" + SystemView_MlockEnabled_FullMethodName = "/pb.SystemView/MlockEnabled" + SystemView_LocalMount_FullMethodName = "/pb.SystemView/LocalMount" + SystemView_EntityInfo_FullMethodName = "/pb.SystemView/EntityInfo" + SystemView_PluginEnv_FullMethodName = "/pb.SystemView/PluginEnv" + SystemView_GroupsForEntity_FullMethodName = "/pb.SystemView/GroupsForEntity" + SystemView_GeneratePasswordFromPolicy_FullMethodName = "/pb.SystemView/GeneratePasswordFromPolicy" + SystemView_ClusterInfo_FullMethodName = "/pb.SystemView/ClusterInfo" + SystemView_GenerateIdentityToken_FullMethodName = "/pb.SystemView/GenerateIdentityToken" +) + // SystemViewClient is the client API for SystemView service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// SystemView exposes system configuration information in a safe way for plugins +// to consume.
Plugins should implement the client for this service. type SystemViewClient interface { // DefaultLeaseTTL returns the default lease TTL set in Vault configuration DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) @@ -644,6 +715,8 @@ type SystemViewClient interface { GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) + // GenerateIdentityToken returns an identity token for the requesting plugin. + GenerateIdentityToken(ctx context.Context, in *GenerateIdentityTokenRequest, opts ...grpc.CallOption) (*GenerateIdentityTokenResponse, error) } type systemViewClient struct { @@ -655,8 +728,9 @@ func NewSystemViewClient(cc grpc.ClientConnInterface) SystemViewClient { } func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TTLReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/DefaultLeaseTTL", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_DefaultLeaseTTL_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -664,8 +738,9 @@ func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts } func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TTLReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/MaxLeaseTTL", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_MaxLeaseTTL_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -673,8 +748,9 @@ func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...g } func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(TaintedReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/Tainted", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_Tainted_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -682,8 +758,9 @@ func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc. } func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CachingDisabledReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/CachingDisabled", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_CachingDisabled_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -691,8 +768,9 @@ func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts } func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReplicationStateReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/ReplicationState", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_ReplicationState_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -700,8 +778,9 @@ func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts } func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ResponseWrapDataReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/ResponseWrapData", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_ResponseWrapData_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -709,8 +788,9 @@ func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWra } func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(MlockEnabledReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/MlockEnabled", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_MlockEnabled_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -718,8 +798,9 @@ func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ... } func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LocalMountReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/LocalMount", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_LocalMount_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -727,8 +808,9 @@ func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...gr } func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(EntityInfoReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/EntityInfo", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_EntityInfo_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -736,8 +818,9 @@ func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, o } func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(PluginEnvReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/PluginEnv", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_PluginEnv_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -745,8 +828,9 @@ func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grp } func (c *systemViewClient) GroupsForEntity(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*GroupsForEntityReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GroupsForEntityReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/GroupsForEntity", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_GroupsForEntity_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -754,8 +838,9 @@ func (c *systemViewClient) GroupsForEntity(ctx context.Context, in *EntityInfoAr } func (c *systemViewClient) GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(GeneratePasswordFromPolicyReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/GeneratePasswordFromPolicy", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_GeneratePasswordFromPolicy_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -763,8 +848,19 @@ func (c *systemViewClient) GeneratePasswordFromPolicy(ctx context.Context, in *G } func (c *systemViewClient) ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ClusterInfoReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/ClusterInfo", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_ClusterInfo_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) GenerateIdentityToken(ctx context.Context, in *GenerateIdentityTokenRequest, opts ...grpc.CallOption) (*GenerateIdentityTokenResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GenerateIdentityTokenResponse) + err := c.cc.Invoke(ctx, SystemView_GenerateIdentityToken_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -774,6 +870,9 @@ func (c *systemViewClient) ClusterInfo(ctx context.Context, in *Empty, opts ...g // SystemViewServer is the server API for SystemView service. // All implementations must embed UnimplementedSystemViewServer // for forward compatibility +// +// SystemView exposes system configuration information in a safe way for plugins +// to consume. Plugins should implement the client for this service. type SystemViewServer interface { // DefaultLeaseTTL returns the default lease TTL set in Vault configuration DefaultLeaseTTL(context.Context, *Empty) (*TTLReply, error) @@ -815,6 +914,8 @@ type SystemViewServer interface { GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) + // GenerateIdentityToken returns an identity token for the requesting plugin. + GenerateIdentityToken(context.Context, *GenerateIdentityTokenRequest) (*GenerateIdentityTokenResponse, error) mustEmbedUnimplementedSystemViewServer() } @@ -861,6 +962,9 @@ func (UnimplementedSystemViewServer) GeneratePasswordFromPolicy(context.Context, func (UnimplementedSystemViewServer) ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) { return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") } +func (UnimplementedSystemViewServer) GenerateIdentityToken(context.Context, *GenerateIdentityTokenRequest) (*GenerateIdentityTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateIdentityToken not implemented") +} func (UnimplementedSystemViewServer) mustEmbedUnimplementedSystemViewServer() {} // UnsafeSystemViewServer may be embedded to opt out of forward compatibility for this service. 
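The regenerated stubs above follow the current protoc-gen-go-grpc convention: every RPC path that used to be an inline string literal (for example "/pb.SystemView/ClusterInfo") is now an exported `*_FullMethodName` constant, and each generated client method prepends `grpc.StaticMethod()` to the per-call options, which marks the method name as a compile-time constant that stats handlers can safely use as a telemetry label. Below is a minimal sketch of how code outside the generated file might lean on those constants; the interceptor, the stand-in constant, and the target address are illustrative, not part of this diff:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Stand-in for the generated pb.SystemView_ClusterInfo_FullMethodName constant.
const clusterInfoFullMethod = "/pb.SystemView/ClusterInfo"

// loggingInterceptor matches one RPC by its full method name. Comparing against
// the generated constant (rather than a hand-typed string) means a renamed
// proto method shows up as a compile error instead of a silent mismatch.
func loggingInterceptor(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	if method == clusterInfoFullMethod {
		log.Printf("outbound call: %s", method)
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	// "localhost:8200" is a placeholder target for the sketch.
	conn, err := grpc.NewClient("localhost:8200",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(loggingInterceptor))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```

The `cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)` pattern repeated in every client method above is what lets caller-supplied options compose with the injected one.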
@@ -884,7 +988,7 @@ func _SystemView_DefaultLeaseTTL_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/DefaultLeaseTTL", + FullMethod: SystemView_DefaultLeaseTTL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).DefaultLeaseTTL(ctx, req.(*Empty)) @@ -902,7 +1006,7 @@ func _SystemView_MaxLeaseTTL_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/MaxLeaseTTL", + FullMethod: SystemView_MaxLeaseTTL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).MaxLeaseTTL(ctx, req.(*Empty)) @@ -920,7 +1024,7 @@ func _SystemView_Tainted_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/Tainted", + FullMethod: SystemView_Tainted_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).Tainted(ctx, req.(*Empty)) @@ -938,7 +1042,7 @@ func _SystemView_CachingDisabled_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/CachingDisabled", + FullMethod: SystemView_CachingDisabled_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).CachingDisabled(ctx, req.(*Empty)) @@ -956,7 +1060,7 @@ func _SystemView_ReplicationState_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/ReplicationState", + FullMethod: SystemView_ReplicationState_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).ReplicationState(ctx, req.(*Empty)) @@ -974,7 +1078,7 @@ func _SystemView_ResponseWrapData_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/ResponseWrapData", + FullMethod: SystemView_ResponseWrapData_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).ResponseWrapData(ctx, req.(*ResponseWrapDataArgs)) @@ -992,7 +1096,7 @@ func _SystemView_MlockEnabled_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/MlockEnabled", + FullMethod: SystemView_MlockEnabled_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).MlockEnabled(ctx, req.(*Empty)) @@ -1010,7 +1114,7 @@ func _SystemView_LocalMount_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/LocalMount", + FullMethod: SystemView_LocalMount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).LocalMount(ctx, req.(*Empty)) @@ -1028,7 +1132,7 @@ func _SystemView_EntityInfo_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/EntityInfo", + FullMethod: SystemView_EntityInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).EntityInfo(ctx, req.(*EntityInfoArgs)) @@ -1046,7 +1150,7 @@ func 
_SystemView_PluginEnv_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/PluginEnv", + FullMethod: SystemView_PluginEnv_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).PluginEnv(ctx, req.(*Empty)) @@ -1064,7 +1168,7 @@ func _SystemView_GroupsForEntity_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/GroupsForEntity", + FullMethod: SystemView_GroupsForEntity_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).GroupsForEntity(ctx, req.(*EntityInfoArgs)) @@ -1082,7 +1186,7 @@ func _SystemView_GeneratePasswordFromPolicy_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/GeneratePasswordFromPolicy", + FullMethod: SystemView_GeneratePasswordFromPolicy_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).GeneratePasswordFromPolicy(ctx, req.(*GeneratePasswordFromPolicyRequest)) @@ -1100,7 +1204,7 @@ func _SystemView_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/ClusterInfo", + FullMethod: SystemView_ClusterInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).ClusterInfo(ctx, req.(*Empty)) @@ -1108,6 +1212,24 @@ func _SystemView_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _SystemView_GenerateIdentityToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateIdentityTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).GenerateIdentityToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SystemView_GenerateIdentityToken_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).GenerateIdentityToken(ctx, req.(*GenerateIdentityTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + // SystemView_ServiceDesc is the grpc.ServiceDesc for SystemView service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1167,11 +1289,19 @@ var SystemView_ServiceDesc = grpc.ServiceDesc{ MethodName: "ClusterInfo", Handler: _SystemView_ClusterInfo_Handler, }, + { + MethodName: "GenerateIdentityToken", + Handler: _SystemView_GenerateIdentityToken_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "sdk/plugin/pb/backend.proto", } +const ( + Events_SendEvent_FullMethodName = "/pb.Events/SendEvent" +) + // EventsClient is the client API for Events service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -1188,8 +1318,9 @@ func NewEventsClient(cc grpc.ClientConnInterface) EventsClient { } func (c *eventsClient) SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(Empty) - err := c.cc.Invoke(ctx, "/pb.Events/SendEvent", in, out, opts...) + err := c.cc.Invoke(ctx, Events_SendEvent_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -1234,7 +1365,7 @@ func _Events_SendEvent_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Events/SendEvent", + FullMethod: Events_SendEvent_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(EventsServer).SendEvent(ctx, req.(*SendEventRequest)) diff --git a/sdk/plugin/pb/translation.go b/sdk/plugin/pb/translation.go index 92ca9af24285..5a349b5b6672 100644 --- a/sdk/plugin/pb/translation.go +++ b/sdk/plugin/pb/translation.go @@ -404,13 +404,14 @@ func ProtoResponseToLogicalResponse(r *Response) (*logical.Response, error) { } return &logical.Response{ - Secret: secret, - Auth: auth, - Data: data, - Redirect: r.Redirect, - Warnings: r.Warnings, - WrapInfo: wrapInfo, - Headers: headers, + Secret: secret, + Auth: auth, + Data: data, + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + MountType: r.MountType, }, nil } @@ -491,13 +492,14 @@ func LogicalResponseToProtoResponse(r *logical.Response) (*Response, error) { } return &Response{ - Secret: secret, - Auth: auth, - Data: string(buf[:]), - Redirect: r.Redirect, - Warnings: r.Warnings, - WrapInfo: wrapInfo, - Headers: headers, + Secret: secret, + Auth: auth, + Data: string(buf[:]), + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + MountType: r.MountType, }, nil } diff --git a/sdk/plugin/pb/translation_test.go b/sdk/plugin/pb/translation_test.go index 30979257acc1..4386ae59b21b 100644 --- a/sdk/plugin/pb/translation_test.go +++ b/sdk/plugin/pb/translation_test.go @@ -265,6 +265,7 @@ func TestTranslation_Response(t *testing.T) { CreationPath: "test/foo", SealWrap: true, }, + MountType: "mountType", }, } diff --git a/sdk/plugin/plugin.go b/sdk/plugin/plugin.go index c8848fb57114..ec58417ec7d2 100644 --- a/sdk/plugin/plugin.go +++ b/sdk/plugin/plugin.go @@ -154,14 +154,4 @@ func (b *BackendPluginClient) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -func (b *BackendPluginClient) IsExternal() bool { - if externaler, ok := b.Backend.(logical.Externaler); ok { - return externaler.IsExternal() - } - return true // default to true since this is only used for GRPC plugins -} - -var ( - _ logical.PluginVersioner = (*BackendPluginClient)(nil) - _ logical.Externaler = (*BackendPluginClient)(nil) -) +var _ logical.PluginVersioner = (*BackendPluginClient)(nil) diff --git a/sdk/plugin/plugin_v5.go b/sdk/plugin/plugin_v5.go index d87996c7455d..cc2d1383775d 100644 --- a/sdk/plugin/plugin_v5.go +++ b/sdk/plugin/plugin_v5.go @@ -55,10 +55,7 @@ func (b *BackendPluginClientV5) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -var ( - _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) - _ logical.Externaler = (*BackendPluginClientV5)(nil) -) +var _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) // NewBackendV5 will return an instance of an RPC-based client implementation of // the backend for external plugins, or a concrete implementation of the diff --git a/sdk/plugin/serve.go b/sdk/plugin/serve.go index 9ad2b820bb7e..2fe4af2686ad 100644 --- a/sdk/plugin/serve.go +++ b/sdk/plugin/serve.go @@ -8,12 +8,11 @@ import ( "math" "os" - "google.golang.org/grpc" - log 
"github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/grpc" ) // BackendPluginName is the name of the plugin that can be @@ -97,7 +96,7 @@ func ServeMultiplex(opts *ServeOpts) error { logger := opts.Logger if logger == nil { logger = log.New(&log.LoggerOptions{ - Level: log.Info, + Level: log.Trace, Output: os.Stderr, JSONFormat: true, }) diff --git a/sdk/plugin/storage_test.go b/sdk/plugin/storage_test.go index 61a5deec6720..f36a6221abe5 100644 --- a/sdk/plugin/storage_test.go +++ b/sdk/plugin/storage_test.go @@ -7,11 +7,10 @@ import ( "context" "testing" - "google.golang.org/grpc" - "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" ) func TestStorage_GRPC_ReturnsErrIfStorageNil(t *testing.T) { diff --git a/serviceregistration/consul/consul_service_registration.go b/serviceregistration/consul/consul_service_registration.go index a49ab4ff63f7..f3830121aebd 100644 --- a/serviceregistration/consul/consul_service_registration.go +++ b/serviceregistration/consul/consul_service_registration.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul import ( "context" + "encoding/json" "errors" "fmt" "math/rand" @@ -51,7 +52,7 @@ const ( // reconcileTimeout is how often Vault should query Consul to detect // and fix any state drift. - reconcileTimeout = 60 * time.Second + DefaultReconcileTimeout = 60 * time.Second // metaExternalSource is a metadata value for external-source that can be // used by the Consul UI. @@ -64,16 +65,20 @@ var hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]* // Vault to Consul. type serviceRegistration struct { Client *api.Client + config *api.Config logger log.Logger serviceLock sync.RWMutex + registeredServiceID string redirectHost string redirectPort int64 serviceName string serviceTags []string + serviceMeta map[string]string serviceAddress *string disableRegistration bool checkTimeout time.Duration + reconcileTimeout time.Duration notifyActiveCh chan struct{} notifySealedCh chan struct{} @@ -88,90 +93,13 @@ type serviceRegistration struct { // NewConsulServiceRegistration constructs a Consul-based ServiceRegistration. 
func NewServiceRegistration(conf map[string]string, logger log.Logger, state sr.State) (sr.ServiceRegistration, error) { - // Allow admins to disable consul integration - disableReg, ok := conf["disable_registration"] - var disableRegistration bool - if ok && disableReg != "" { - b, err := parseutil.ParseBool(disableReg) - if err != nil { - return nil, fmt.Errorf("failed parsing disable_registration parameter: %w", err) - } - disableRegistration = b - } - if logger.IsDebug() { - logger.Debug("config disable_registration set", "disable_registration", disableRegistration) - } - - // Get the service name to advertise in Consul - service, ok := conf["service"] - if !ok { - service = DefaultServiceName - } - if !hostnameRegex.MatchString(service) { - return nil, errors.New("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes") - } - if logger.IsDebug() { - logger.Debug("config service set", "service", service) - } - - // Get the additional tags to attach to the registered service name - tags := conf["service_tags"] - if logger.IsDebug() { - logger.Debug("config service_tags set", "service_tags", tags) - } - - // Get the service-specific address to override the use of the HA redirect address - var serviceAddr *string - serviceAddrStr, ok := conf["service_address"] - if ok { - serviceAddr = &serviceAddrStr - } - if logger.IsDebug() { - logger.Debug("config service_address set", "service_address", serviceAddrStr) - } - - checkTimeout := defaultCheckTimeout - checkTimeoutStr, ok := conf["check_timeout"] - if ok { - d, err := parseutil.ParseDurationSecond(checkTimeoutStr) - if err != nil { - return nil, err - } - - min, _ := durationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor) - if min < checkMinBuffer { - return nil, fmt.Errorf("consul check_timeout must be greater than %v", min) - } - - checkTimeout = d - if logger.IsDebug() { - logger.Debug("config check_timeout set", "check_timeout", d) - } - } - - // Configure the client - consulConf := api.DefaultConfig() - // Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore - consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount - - SetupSecureTLS(context.Background(), consulConf, conf, logger, false) - - consulConf.HttpClient = &http.Client{Transport: consulConf.Transport} - client, err := api.NewClient(consulConf) - if err != nil { - return nil, fmt.Errorf("client setup failed: %w", err) + if logger == nil { + return nil, errors.New("logger is required") } // Setup the backend c := &serviceRegistration{ - Client: client, - - logger: logger, - serviceName: service, - serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","), - serviceAddress: serviceAddr, - checkTimeout: checkTimeout, - disableRegistration: disableRegistration, + logger: logger, notifyActiveCh: make(chan struct{}), notifySealedCh: make(chan struct{}), @@ -183,7 +111,11 @@ func NewServiceRegistration(conf map[string]string, logger log.Logger, state sr. 
isPerfStandby: atomicB.NewBool(state.IsPerformanceStandby), isInitialized: atomicB.NewBool(state.IsInitialized), } - return c, nil + + c.serviceLock.Lock() + defer c.serviceLock.Unlock() + err := c.merge(conf) + return c, err } func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string]string, logger log.Logger, isDiagnose bool) error { @@ -266,6 +198,125 @@ func (c *serviceRegistration) Run(shutdownCh <-chan struct{}, wait *sync.WaitGro return nil } +func (c *serviceRegistration) merge(conf map[string]string) error { + // Allow admins to disable consul integration + disableReg, ok := conf["disable_registration"] + var disableRegistration bool + if ok && disableReg != "" { + b, err := parseutil.ParseBool(disableReg) + if err != nil { + return fmt.Errorf("failed parsing disable_registration parameter: %w", err) + } + disableRegistration = b + } + if c.logger.IsDebug() { + c.logger.Debug("config disable_registration set", "disable_registration", disableRegistration) + } + + // Get the service name to advertise in Consul + service, ok := conf["service"] + if !ok { + service = DefaultServiceName + } + if !hostnameRegex.MatchString(service) { + return errors.New("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes") + } + if c.logger.IsDebug() { + c.logger.Debug("config service set", "service", service) + } + + // Get the additional tags to attach to the registered service name + tags := conf["service_tags"] + if c.logger.IsDebug() { + c.logger.Debug("config service_tags set", "service_tags", tags) + } + + // Get user-defined meta tags to attach to the registered service name + metaTags := map[string]string{} + if metaTagsJSON, ok := conf["service_meta"]; ok { + if err := json.Unmarshal([]byte(metaTagsJSON), &metaTags); err != nil { + return errors.New("service meta must be a dictionary of string keys and values") + } + } + metaTags["external-source"] = metaExternalSource + if c.logger.IsDebug() { + c.logger.Debug("config service_meta set", "service_meta", metaTags) + } + + // Get the service-specific address to override the use of the HA redirect address + var serviceAddr *string + serviceAddrStr, ok := conf["service_address"] + if ok { + serviceAddr = &serviceAddrStr + } + if c.logger.IsDebug() { + c.logger.Debug("config service_address set", "service_address", serviceAddrStr) + } + + checkTimeout := defaultCheckTimeout + checkTimeoutStr, ok := conf["check_timeout"] + if ok { + d, err := parseutil.ParseDurationSecond(checkTimeoutStr) + if err != nil { + return err + } + + min, _ := durationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor) + if min < checkMinBuffer { + return fmt.Errorf("consul check_timeout must be greater than %v", min) + } + + checkTimeout = d + if c.logger.IsDebug() { + c.logger.Debug("config check_timeout set", "check_timeout", d) + } + } + + reconcileTimeout := DefaultReconcileTimeout + reconcileTimeoutStr, ok := conf["reconcile_timeout"] + if ok { + d, err := parseutil.ParseDurationSecond(reconcileTimeoutStr) + if err != nil { + return err + } + + min, _ := durationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor) + if min < checkMinBuffer { + return fmt.Errorf("consul reconcile_timeout must be greater than %v", min) + } + + reconcileTimeout = d + if c.logger.IsDebug() { + c.logger.Debug("config reconcile_timeout set", "reconcile_timeout", d) + } + } + + // Configure the client + consulConf := api.DefaultConfig() + // Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
+ consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount + + SetupSecureTLS(context.Background(), consulConf, conf, c.logger, false) + + consulConf.HttpClient = &http.Client{Transport: consulConf.Transport} + client, err := api.NewClient(consulConf) + if err != nil { + return fmt.Errorf("client setup failed: %w", err) + } + + c.Client = client + c.config = consulConf + c.serviceName = service + c.serviceTags = strutil.ParseDedupAndSortStrings(tags, ",") + c.serviceMeta = metaTags + c.serviceAddress = serviceAddr + c.checkTimeout = checkTimeout + c.disableRegistration = disableRegistration + c.reconcileTimeout = reconcileTimeout + + return nil +} + func (c *serviceRegistration) NotifyActiveStateChange(isActive bool) error { c.isActive.Store(isActive) select { @@ -312,12 +363,31 @@ func (c *serviceRegistration) NotifyInitializedStateChange(isInitialized bool) e default: // NOTE: If this occurs Vault's initialized status could be out of // sync with Consul until checkTimer expires. - c.logger.Warn("concurrent initalize state change notify dropped") + c.logger.Warn("concurrent initialize state change notify dropped") } return nil } +func (c *serviceRegistration) NotifyConfigurationReload(conf *map[string]string) error { + c.serviceLock.Lock() + defer c.serviceLock.Unlock() + if conf == nil { + if c.logger.IsDebug() { + c.logger.Debug("registration is now empty, deregistering service from consul") + } + c.disableRegistration = true + err := c.deregisterService() + c.Client = nil + return err + } else { + if c.logger.IsDebug() { + c.logger.Debug("service registration configuration received, merging with existing configuration") + } + return c.merge(*conf) + } +} + func (c *serviceRegistration) checkDuration() time.Duration { return durationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor) } @@ -359,7 +429,6 @@ func (c *serviceRegistration) runEventDemuxer(waitGroup *sync.WaitGroup, shutdow // and end of a handler's life (or after a handler wakes up from // sleeping during a back-off/retry).
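The `NotifyConfigurationReload` method added above encodes its contract in the pointer argument: a nil map means the `service_registration` stanza disappeared from the reloaded config, so the service is deregistered and registration disabled, while a non-nil map is re-merged into the live registration under the write lock. Here is a hedged caller-side sketch against the `ServiceRegistration` interface extended later in this diff; the helper name and the reload framing are illustrative, not Vault core's actual wiring:

```go
package example

import (
	sr "github.com/hashicorp/vault/serviceregistration"
)

// applyReloadedConfig is a hypothetical helper showing the nil-vs-non-nil
// contract of NotifyConfigurationReload.
func applyReloadedConfig(reg sr.ServiceRegistration, conf map[string]string, stanzaPresent bool) error {
	if !stanzaPresent {
		// Stanza removed: deregister from Consul and disable registration.
		return reg.NotifyConfigurationReload(nil)
	}
	// Stanza present: merge the new settings (service name, tags, meta,
	// timeouts, client credentials) into the running registration.
	return reg.NotifyConfigurationReload(&conf)
}
```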
var shutdown atomicB.Bool - var registeredServiceID string checkLock := new(int32) serviceRegLock := new(int32) @@ -379,16 +448,19 @@ func (c *serviceRegistration) runEventDemuxer(waitGroup *sync.WaitGroup, shutdow checkTimer.Reset(0) case <-reconcileTimer.C: // Unconditionally rearm the reconcileTimer - reconcileTimer.Reset(reconcileTimeout - randomStagger(reconcileTimeout/checkJitterFactor)) + c.serviceLock.RLock() + reconcileTimer.Reset(c.reconcileTimeout - randomStagger(c.reconcileTimeout/checkJitterFactor)) + disableRegistration := c.disableRegistration + c.serviceLock.RUnlock() // Abort if service discovery is disabled or a // reconcile handler is already active - if !c.disableRegistration && atomic.CompareAndSwapInt32(serviceRegLock, 0, 1) { + if !disableRegistration && atomic.CompareAndSwapInt32(serviceRegLock, 0, 1) { // Enter handler with serviceRegLock held go func() { defer atomic.CompareAndSwapInt32(serviceRegLock, 1, 0) for !shutdown.Load() { - serviceID, err := c.reconcileConsul(registeredServiceID) + serviceID, err := c.reconcileConsul() if err != nil { if c.logger.IsWarn() { c.logger.Warn("reconcile unable to talk with Consul backend", "error", err) @@ -398,28 +470,38 @@ func (c *serviceRegistration) runEventDemuxer(waitGroup *sync.WaitGroup, shutdow } c.serviceLock.Lock() - defer c.serviceLock.Unlock() + c.registeredServiceID = serviceID + c.serviceLock.Unlock() - registeredServiceID = serviceID return } }() } case <-checkTimer.C: checkTimer.Reset(c.checkDuration()) + c.serviceLock.RLock() + disableRegistration := c.disableRegistration + c.serviceLock.RUnlock() + // Abort if service discovery is disabled or a // reconcile handler is active - if !c.disableRegistration && atomic.CompareAndSwapInt32(checkLock, 0, 1) { + if !disableRegistration && atomic.CompareAndSwapInt32(checkLock, 0, 1) { // Enter handler with checkLock held go func() { defer atomic.CompareAndSwapInt32(checkLock, 1, 0) for !shutdown.Load() { - if err := c.runCheck(c.isSealed.Load()); err != nil { - if c.logger.IsWarn() { - c.logger.Warn("check unable to talk with Consul backend", "error", err) + c.serviceLock.RLock() + registeredServiceID := c.registeredServiceID + c.serviceLock.RUnlock() + + if registeredServiceID != "" { + if err := c.runCheck(c.isSealed.Load()); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("check unable to talk with Consul backend", "error", err) + } + time.Sleep(consulRetryInterval) + continue } - time.Sleep(consulRetryInterval) - continue } return } @@ -431,13 +513,23 @@ func (c *serviceRegistration) runEventDemuxer(waitGroup *sync.WaitGroup, shutdow } } - c.serviceLock.RLock() - defer c.serviceLock.RUnlock() - if err := c.Client.Agent().ServiceDeregister(registeredServiceID); err != nil { - if c.logger.IsWarn() { - c.logger.Warn("service deregistration failed", "error", err) + c.serviceLock.Lock() + defer c.serviceLock.Unlock() + c.deregisterService() +} + +func (c *serviceRegistration) deregisterService() error { + if c.registeredServiceID != "" { + if err := c.Client.Agent().ServiceDeregister(c.registeredServiceID); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("service deregistration failed", "error", err) + } + return err } + c.registeredServiceID = "" } + + return nil } // checkID returns the ID used for a Consul Check. Assume at least a read @@ -454,10 +546,12 @@ func (c *serviceRegistration) serviceID() string { // reconcileConsul queries the state of Vault Core and Consul and fixes up // Consul's state according to what's in Vault. 
reconcileConsul is called -// without any locks held and can be run concurrently, therefore no changes +// with a read lock and can be run concurrently, therefore no changes // to serviceRegistration can be made in this method (i.e. wtb const receiver for // compiler enforced safety). -func (c *serviceRegistration) reconcileConsul(registeredServiceID string) (serviceID string, err error) { +func (c *serviceRegistration) reconcileConsul() (serviceID string, err error) { + c.serviceLock.RLock() + defer c.serviceLock.RUnlock() agent := c.Client.Agent() catalog := c.Client.Catalog() @@ -479,7 +573,7 @@ func (c *serviceRegistration) reconcileConsul(registeredServiceID string) (servi var reregister bool switch { - case currentVaultService == nil, registeredServiceID == "": + case currentVaultService == nil, c.registeredServiceID == "": reregister = true default: switch { @@ -507,12 +601,10 @@ func (c *serviceRegistration) reconcileConsul(registeredServiceID string) (servi ID: serviceID, Name: c.serviceName, Tags: tags, + Meta: c.serviceMeta, Port: int(c.redirectPort), Address: serviceAddress, EnableTagOverride: false, - Meta: map[string]string{ - "external-source": metaExternalSource, - }, } checkStatus := api.HealthCritical diff --git a/serviceregistration/consul/consul_service_registration_test.go b/serviceregistration/consul/consul_service_registration_test.go index 0ced651e0242..c728e721c1c0 100644 --- a/serviceregistration/consul/consul_service_registration_test.go +++ b/serviceregistration/consul/consul_service_registration_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package consul @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/vault/sdk/physical/inmem" sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" ) type consulConf map[string]string @@ -62,6 +63,17 @@ func TestConsul_ServiceRegistration(t *testing.T) { t.Fatal(err) } + // update the agent's ACL token so that we can successfully deregister the + // service later in the test + _, err = client.Agent().UpdateAgentACLToken(config.Token, nil) + if err != nil { + t.Fatal(err) + } + _, err = client.Agent().UpdateDefaultACLToken(config.Token, nil) + if err != nil { + t.Fatal(err) + } + // waitForServices waits for the services in the Consul catalog to // reach an expected value, returning the delta if that doesn't happen in time. 
waitForServices := func(t *testing.T, expected map[string][]string) map[string][]string { @@ -91,10 +103,13 @@ func TestConsul_ServiceRegistration(t *testing.T) { // Create a ServiceRegistration that points to our consul instance logger := logging.NewVaultLogger(log.Trace) - sd, err := NewServiceRegistration(map[string]string{ + srConfig := map[string]string{ "address": config.Address(), "token": config.Token, - }, logger, sr.State{}) + // decrease reconcile timeout to make test run faster + "reconcile_timeout": "1s", + } + sd, err := NewServiceRegistration(srConfig, logger, sr.State{}) if err != nil { t.Fatal(err) } @@ -146,6 +161,58 @@ func TestConsul_ServiceRegistration(t *testing.T) { "consul": {}, "vault": {"active", "initialized"}, }) + + // change the token and trigger reload + if sd.(*serviceRegistration).config.Token == "" { + t.Fatal("expected service registration token to not be '' before configuration reload") + } + + srConfigWithoutToken := make(map[string]string) + for k, v := range srConfig { + srConfigWithoutToken[k] = v + } + srConfigWithoutToken["token"] = "" + err = sd.NotifyConfigurationReload(&srConfigWithoutToken) + if err != nil { + t.Fatal(err) + } + + if sd.(*serviceRegistration).config.Token != "" { + t.Fatal("expected service registration token to be '' after configuration reload") + } + + // reconfigure the configuration back to its original state and verify vault is registered + err = sd.NotifyConfigurationReload(&srConfig) + if err != nil { + t.Fatal(err) + } + + waitForServices(t, map[string][]string{ + "consul": {}, + "vault": {"active", "initialized"}, + }) + + // send 'nil' configuration to verify that the service is deregistered + err = sd.NotifyConfigurationReload(nil) + if err != nil { + t.Fatal(err) + } + + waitForServices(t, map[string][]string{ + "consul": {}, + }) + + // reconfigure the configuration back to its original state and verify vault + // is re-registered + err = sd.NotifyConfigurationReload(&srConfig) + if err != nil { + t.Fatal(err) + } + + waitForServices(t, map[string][]string{ + "consul": {}, + "vault": {"active", "initialized"}, + }) } func TestConsul_ServiceAddress(t *testing.T) { @@ -425,6 +492,66 @@ func TestConsul_serviceTags(t *testing.T) { } } +// TestConsul_ServiceMeta tests whether consul service meta registration works +func TestConsul_ServiceMeta(t *testing.T) { + tests := []struct { + conf map[string]string + pass bool + expect map[string]string + }{ + { + conf: map[string]string{}, + pass: true, + expect: map[string]string{"external-source": "vault"}, + }, + { + conf: map[string]string{"service_meta": "true"}, + pass: false, + expect: map[string]string{"external-source": "vault"}, + }, + { + conf: map[string]string{"service_meta": "{\"key\":\"value\"}"}, + pass: true, + expect: map[string]string{"key": "value", "external-source": "vault"}, + }, + { + conf: map[string]string{"service_meta": "{\"external-source\":\"something-else\"}"}, + pass: true, + expect: map[string]string{"external-source": "vault"}, + }, + } + + for _, test := range tests { + logger := logging.NewVaultLogger(log.Debug) + + shutdownCh := make(chan struct{}) + defer func() { + close(shutdownCh) + }() + sr, err := NewServiceRegistration(test.conf, logger, sr.State{}) + if !test.pass { + if err == nil { + t.Fatal("Expected Consul to fail with error") + } + continue + } + + if err != nil && test.pass { + t.Fatalf("Expected Consul to initialize: %v", err) + } + + c, ok := sr.(*serviceRegistration) + if !ok { + t.Fatalf("Expected serviceRegistration") + } + 
+ if !reflect.DeepEqual(c.serviceMeta, test.expect) { + t.Fatalf("Did not produce expected meta: wanted: %v, got %v", test.expect, c.serviceMeta) + } + + } +} + func TestConsul_setRedirectAddr(t *testing.T) { tests := []struct { addr string @@ -563,3 +690,44 @@ func TestConsul_serviceID(t *testing.T) { } } } + +// TestConsul_NewServiceRegistration_serviceTags ensures that we do not modify +// the case of any 'service_tags' set by the config. +// We do expect tags to be sorted in lexicographic order (A-Z). +func TestConsul_NewServiceRegistration_serviceTags(t *testing.T) { + tests := map[string]struct { + Tags string + ExpectedTags []string + }{ + "lowercase": { + Tags: "foo,bar", + ExpectedTags: []string{"bar", "foo"}, + }, + "uppercase": { + Tags: "FOO,BAR", + ExpectedTags: []string{"BAR", "FOO"}, + }, + "PascalCase": { + Tags: "FooBar, Feedface", + ExpectedTags: []string{"Feedface", "FooBar"}, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + cfg := map[string]string{"service_tags": tc.Tags} + logger := logging.NewVaultLogger(log.Trace) + be, err := NewServiceRegistration(cfg, logger, sr.State{}) + require.NoError(t, err) + require.NotNil(t, be) + c, ok := be.(*serviceRegistration) + require.True(t, ok) + require.NotNil(t, c) + require.Equal(t, tc.ExpectedTags, c.serviceTags) + }) + } +} diff --git a/serviceregistration/kubernetes/client/client.go b/serviceregistration/kubernetes/client/client.go index 96d195272453..0fbef7267b0b 100644 --- a/serviceregistration/kubernetes/client/client.go +++ b/serviceregistration/kubernetes/client/client.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package client diff --git a/serviceregistration/kubernetes/client/client_test.go b/serviceregistration/kubernetes/client/client_test.go index de11dad37e23..a02749e39155 100644 --- a/serviceregistration/kubernetes/client/client_test.go +++ b/serviceregistration/kubernetes/client/client_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package client diff --git a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go index 7060a063e2d7..3110facc8098 100644 --- a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go +++ b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package main diff --git a/serviceregistration/kubernetes/client/config.go b/serviceregistration/kubernetes/client/config.go index be98240e2195..e8210964826c 100644 --- a/serviceregistration/kubernetes/client/config.go +++ b/serviceregistration/kubernetes/client/config.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package client diff --git a/serviceregistration/kubernetes/retry_handler.go b/serviceregistration/kubernetes/retry_handler.go index 46ac18eafddd..3a4397c3b70b 100644 --- a/serviceregistration/kubernetes/retry_handler.go +++ b/serviceregistration/kubernetes/retry_handler.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kubernetes diff --git a/serviceregistration/kubernetes/retry_handler_test.go b/serviceregistration/kubernetes/retry_handler_test.go index 0dd61113923e..19f1c5b31ce6 100644 --- a/serviceregistration/kubernetes/retry_handler_test.go +++ b/serviceregistration/kubernetes/retry_handler_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kubernetes diff --git a/serviceregistration/kubernetes/service_registration.go b/serviceregistration/kubernetes/service_registration.go index f377cbb9874e..8b52023b001d 100644 --- a/serviceregistration/kubernetes/service_registration.go +++ b/serviceregistration/kubernetes/service_registration.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kubernetes @@ -106,6 +106,10 @@ func (r *serviceRegistration) NotifyInitializedStateChange(isInitialized bool) e return nil } +func (c *serviceRegistration) NotifyConfigurationReload(conf *map[string]string) error { + return nil +} + func getRequiredField(logger hclog.Logger, config map[string]string, envVar, configParam string) (string, error) { value := "" switch { diff --git a/serviceregistration/kubernetes/service_registration_test.go b/serviceregistration/kubernetes/service_registration_test.go index a6a93c9ceeee..808807d1a9dc 100644 --- a/serviceregistration/kubernetes/service_registration_test.go +++ b/serviceregistration/kubernetes/service_registration_test.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package kubernetes diff --git a/serviceregistration/kubernetes/testing/testserver.go b/serviceregistration/kubernetes/testing/testserver.go index 6ceb94018625..225431fc1fae 100644 --- a/serviceregistration/kubernetes/testing/testserver.go +++ b/serviceregistration/kubernetes/testing/testserver.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package testing diff --git a/serviceregistration/service_registration.go b/serviceregistration/service_registration.go index 79f5b20d1883..394892e84768 100644 --- a/serviceregistration/service_registration.go +++ b/serviceregistration/service_registration.go @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package serviceregistration @@ -96,4 +96,14 @@ type ServiceRegistration interface { // the implementation's responsibility to retry updating state // in the face of errors. NotifyInitializedStateChange(isInitialized bool) error + + // NotifyConfigurationReload is used by Core to notify that the Vault + // configuration has been reloaded. + // If errors are returned, Vault only logs a warning, so it is + // the implementation's responsibility to retry updating state + // in the face of errors. + // + // If the passed in conf is nil, it is assumed that the service registration + // configuration no longer exists and should be deregistered.
+ NotifyConfigurationReload(conf *map[string]string) error } diff --git a/shamir/.copywrite.hcl b/shamir/.copywrite.hcl new file mode 100644 index 000000000000..c4b09f33640c --- /dev/null +++ b/shamir/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/shamir/LICENSE b/shamir/LICENSE new file mode 100644 index 000000000000..f4f97ee5853a --- /dev/null +++ b/shamir/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/tools/codechecker/main.go b/tools/codechecker/main.go new file mode 100644 index 000000000000..e9c761c369cd --- /dev/null +++ b/tools/codechecker/main.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package main + +import ( + "github.com/hashicorp/vault/tools/codechecker/pkg/godoctests" + "github.com/hashicorp/vault/tools/codechecker/pkg/gonilnilfunctions" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + multichecker.Main(gonilnilfunctions.Analyzer, godoctests.Analyzer) +} diff --git a/tools/codechecker/pkg/godoctests/analyzer.go b/tools/codechecker/pkg/godoctests/analyzer.go new file mode 100644 index 000000000000..98b17a111be9 --- /dev/null +++ b/tools/codechecker/pkg/godoctests/analyzer.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package godoctests + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "godoctests", + Doc: "Verifies that every go test has a go doc", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return + } + + // starts with 'Test' + if !strings.HasPrefix(funcDecl.Name.Name, "Test") { + return + } + + // has one parameter + params := funcDecl.Type.Params.List + if len(params) != 1 { + return + } + + // parameter is a pointer + firstParamType, ok := params[0].Type.(*ast.StarExpr) + if !ok { + return + } + + selector, ok := firstParamType.X.(*ast.SelectorExpr) + if !ok { + return + } + + // the pointer comes from package 'testing' + selectorIdent, ok := selector.X.(*ast.Ident) + if !ok { + return + } + if selectorIdent.Name != "testing" { + return + } + + // the pointer has type 'T' + if selector.Sel == nil || selector.Sel.Name != "T" { + return + } + + // then there must be a godoc + if funcDecl.Doc == nil { + pass.Reportf(node.Pos(), "Test %s is missing a go doc", + funcDecl.Name.Name) + } else if !strings.HasPrefix(funcDecl.Doc.Text(), funcDecl.Name.Name) { + pass.Reportf(node.Pos(), "Test %s must have a go doc beginning with the function name", + funcDecl.Name.Name) + } + }) + return nil, nil +} diff --git a/tools/codechecker/pkg/godoctests/analyzer_test.go b/tools/codechecker/pkg/godoctests/analyzer_test.go new file mode 100644 index 000000000000..65bf6af1ddcd --- /dev/null +++ b/tools/codechecker/pkg/godoctests/analyzer_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package godoctests + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against +// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected +// not to report anything. +func TestAnalyzer(t *testing.T) { + f, err := os.Getwd() + if err != nil { + t.Fatal("failed to get working directory", err) + } + analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") +} diff --git a/tools/codechecker/pkg/godoctests/testdata/funcs.go b/tools/codechecker/pkg/godoctests/testdata/funcs.go new file mode 100644 index 000000000000..ddaf56bfd2a4 --- /dev/null +++ b/tools/codechecker/pkg/godoctests/testdata/funcs.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package testdata + +import "testing" + +// Test_GoDocOK is a test that has a go doc +func Test_GoDocOK(t *testing.T) {} + +func Test_NoGoDocFails(t *testing.T) {} // want "Test Test_NoGoDocFails is missing a go doc" + +// This test does not have a go doc beginning with the function name +func Test_BadGoDocFails(t *testing.T) {} // want "Test Test_BadGoDocFails must have a go doc beginning with the function name" + +func test_TestHelperNoGoDocOK(t *testing.T) {} + +func Test_DifferentSignatureNoGoDocOK() {} + +func Test_DifferentSignature2NoGoDocOK(t *testing.T, a int) {} diff --git a/tools/codechecker/pkg/gonilnilfunctions/analyzer.go b/tools/codechecker/pkg/gonilnilfunctions/analyzer.go new file mode 100644 index 000000000000..fbb32dcf1f0e --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/analyzer.go @@ -0,0 +1,171 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package gonilnilfunctions + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "gonilnilfunctions", + Doc: "Verifies that every go function with error as one of its two return types cannot return nil, nil", + Run: run, + ResultType: reflect.TypeOf((interface{})(nil)), + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +// getNestedReturnStatements searches the AST for return statements, and returns +// them in a tail-call optimized list. +func getNestedReturnStatements(s ast.Stmt, returns []*ast.ReturnStmt) []*ast.ReturnStmt { + switch s := s.(type) { + case *ast.BlockStmt: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.List { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.BranchStmt: + return returns + case *ast.ForStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.IfStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.LabeledStmt: + return getNestedReturnStatements(s.Stmt, returns) + case *ast.RangeStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.ReturnStmt: + return append(returns, s) + case *ast.SwitchStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.SelectStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.TypeSwitchStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.CommClause: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.Body { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.CaseClause: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.Body { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.ExprStmt: + return returns + } + return returns +} + +// run runs the analysis, failing for functions whose signatures contain two results including one error +// (e.g. 
(something, error)), that contain multiple nil returns +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return + } + + // If the function has the "Ignore" godoc comment, skip it + if strings.Contains(funcDecl.Doc.Text(), "ignore-nil-nil-function-check") { + return + } + + // The function returns something + if funcDecl == nil || funcDecl.Type == nil || funcDecl.Type.Results == nil { + return + } + + // The function has more than 1 return value + results := funcDecl.Type.Results.List + if len(results) < 2 { + return + } + + // isError is a helper function to check if a Field is of error type + isError := func(field *ast.Field) bool { + if named, ok := pass.TypesInfo.TypeOf(field.Type).(*types.Named); ok { + namedObject := named.Obj() + return namedObject != nil && namedObject.Pkg() == nil && namedObject.Name() == "error" + } + return false + } + + // one of the return values is error + var errorFound bool + for _, result := range results { + if isError(result) { + errorFound = true + break + } + } + + if !errorFound { + return + } + + // Since these statements might be e.g. blocks with + // other statements inside, we need to get the return statements + // from inside them, first. + statements := funcDecl.Body.List + + returnStatements := make([]*ast.ReturnStmt, 0) + for _, statement := range statements { + returnStatements = append(returnStatements, getNestedReturnStatements(statement, make([]*ast.ReturnStmt, 0))...) + } + + for _, returnStatement := range returnStatements { + numResultsNil := 0 + results := returnStatement.Results + + // We only want two-arg functions (something, nil) + // We can remove this block in the future if we change our mind + if len(results) != 2 { + continue + } + + for _, result := range results { + // nil is an ident + ident, isIdent := result.(*ast.Ident) + if isIdent { + if ident.Name == "nil" { + // We found one nil in the return list + numResultsNil++ + } + } + } + // We found N nils, and our function returns N results, so this fails the check + if numResultsNil == len(results) { + // All the return values are nil, so we fail the report + pass.Reportf(node.Pos(), "Function %s can return an error, and has a statement that returns only nils", + funcDecl.Name.Name) + + // We break out of the loop of checking return statements, so that we don't repeat ourselves + break + } + } + }) + + var success interface{} + return success, nil +} diff --git a/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go b/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go new file mode 100644 index 000000000000..4cfac4af4825 --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package gonilnilfunctions + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against +// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected +// not to report anything. 
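+// For example, a testdata function carries its expectation inline (this one is
+// taken verbatim from funcs.go below); analysistest then checks that the
+// analyzer reports a diagnostic matching the quoted pattern on that line:
+//
+//	func AnyErrorNilNil() (any, error) { // want "Function AnyErrorNilNil can return an error, and has a statement that returns only nils"
+//		return nil, nil
+//	}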
+func TestAnalyzer(t *testing.T) { + f, err := os.Getwd() + if err != nil { + t.Fatal("failed to get working directory", err) + } + analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") +} diff --git a/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go b/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go new file mode 100644 index 000000000000..73f3ee9f589b --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package testdata + +func ReturnReturnOkay() (any, error) { + var i interface{} + return i, nil +} + +func OneGoodOneBad() (any, error) { // want "Function OneGoodOneBad can return an error, and has a statement that returns only nils" + var i interface{} + if true { + return i, nil + } + return nil, nil +} + +func OneBadOneGood() (any, error) { // want "Function OneBadOneGood can return an error, and has a statement that returns only nils" + var i interface{} + if true { + return nil, nil + } + return i, nil +} + +func EmptyFunc() {} + +func TwoNilNils() (any, error) { // want "Function TwoNilNils can return an error, and has a statement that returns only nils" + if true { + return nil, nil + } + return nil, nil +} + +// ThreeResults should not fail, as while it returns nil, nil, nil, it has three results, not two. +func ThreeResults() (any, any, error) { + return nil, nil, nil +} + +func TwoArgsNoError() (any, any) { + return nil, nil +} + +func NestedReturn() (any, error) { // want "Function NestedReturn can return an error, and has a statement that returns only nils" + { + { + { + return nil, nil + } + } + } +} + +func NestedForReturn() (any, error) { // want "Function NestedForReturn can return an error, and has a statement that returns only nils" + for { + for i := 0; i < 100; i++ { + { + return nil, nil + } + } + } +} + +func AnyErrorNilNil() (any, error) { // want "Function AnyErrorNilNil can return an error, and has a statement that returns only nils" + return nil, nil +} + +// Skipped should be skipped because of the following line: +// ignore-nil-nil-function-check +func Skipped() (any, error) { + return nil, nil +} diff --git a/tools/godoctests/main.go b/tools/godoctests/main.go deleted file mode 100644 index caa6ca0b9370..000000000000 --- a/tools/godoctests/main.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package main - -import ( - "github.com/hashicorp/vault/tools/godoctests/pkg/analyzer" - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(analyzer.Analyzer) -} diff --git a/tools/godoctests/pkg/analyzer/analyzer.go b/tools/godoctests/pkg/analyzer/analyzer.go deleted file mode 100644 index 38ed37d933a6..000000000000 --- a/tools/godoctests/pkg/analyzer/analyzer.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package analyzer - -import ( - "go/ast" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" -) - -var Analyzer = &analysis.Analyzer{ - Name: "godoctests", - Doc: "Verifies that every go test has a go doc", - Run: run, - Requires: []*analysis.Analyzer{inspect.Analyzer}, -} - -func run(pass *analysis.Pass) (interface{}, error) { - inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - nodeFilter := []ast.Node{ - (*ast.FuncDecl)(nil), - } - - inspector.Preorder(nodeFilter, func(node ast.Node) { - funcDecl, ok := node.(*ast.FuncDecl) - if !ok { - return - } - - // starts with 'Test' - if !strings.HasPrefix(funcDecl.Name.Name, "Test") { - return - } - - // has one parameter - params := funcDecl.Type.Params.List - if len(params) != 1 { - return - } - - // parameter is a pointer - firstParamType, ok := params[0].Type.(*ast.StarExpr) - if !ok { - return - } - - selector, ok := firstParamType.X.(*ast.SelectorExpr) - if !ok { - return - } - - // the pointer comes from package 'testing' - selectorIdent, ok := selector.X.(*ast.Ident) - if !ok { - return - } - if selectorIdent.Name != "testing" { - return - } - - // the pointer has type 'T' - if selector.Sel == nil || selector.Sel.Name != "T" { - return - } - - // then there must be a godoc - if funcDecl.Doc == nil { - pass.Reportf(node.Pos(), "Test %s is missing a go doc", - funcDecl.Name.Name) - } else if !strings.HasPrefix(funcDecl.Doc.Text(), funcDecl.Name.Name) { - pass.Reportf(node.Pos(), "Test %s must have a go doc beginning with the function name", - funcDecl.Name.Name) - } - }) - return nil, nil -} diff --git a/tools/godoctests/pkg/analyzer/analyzer_test.go b/tools/godoctests/pkg/analyzer/analyzer_test.go deleted file mode 100644 index df1bfafd4632..000000000000 --- a/tools/godoctests/pkg/analyzer/analyzer_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package analyzer - -import ( - "os" - "path/filepath" - "testing" - - "golang.org/x/tools/go/analysis/analysistest" -) - -// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against -// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected -// not to report anything. -func TestAnalyzer(t *testing.T) { - f, err := os.Getwd() - if err != nil { - t.Fatal("failed to get working directory", err) - } - analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") -} diff --git a/tools/godoctests/pkg/analyzer/testdata/funcs.go b/tools/godoctests/pkg/analyzer/testdata/funcs.go deleted file mode 100644 index e9d5fead6744..000000000000 --- a/tools/godoctests/pkg/analyzer/testdata/funcs.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package testdata - -import "testing" - -// Test_GoDocOK is a test that has a go doc -func Test_GoDocOK(t *testing.T) {} - -func Test_NoGoDocFails(t *testing.T) {} // want "Test Test_NoGoDocFails is missing a go doc" - -// This test does not have a go doc beginning with the function name -func Test_BadGoDocFails(t *testing.T) {} // want "Test Test_BadGoDocFails must have a go doc beginning with the function name" - -func test_TestHelperNoGoDocOK(t *testing.T) {} - -func Test_DifferentSignatureNoGoDocOK() {} - -func Test_DifferentSignature2NoGoDocOK(t *testing.T, a int) {} diff --git a/tools/semgrep/ci/atomic.yml b/tools/semgrep/ci/atomic.yml index 1d6b2a9da799..97bad1ba858d 100644 --- a/tools/semgrep/ci/atomic.yml +++ b/tools/semgrep/ci/atomic.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: atomics-64bit-safety diff --git a/tools/semgrep/ci/bad-multierror-append.yml b/tools/semgrep/ci/bad-multierror-append.yml index bebb20013f39..166d0564ba70 100644 --- a/tools/semgrep/ci/bad-multierror-append.yml +++ b/tools/semgrep/ci/bad-multierror-append.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: bad-multierror-append diff --git a/tools/semgrep/ci/bad-nil-guard.yml b/tools/semgrep/ci/bad-nil-guard.yml index 70003690f72f..01e51d298312 100644 --- a/tools/semgrep/ci/bad-nil-guard.yml +++ b/tools/semgrep/ci/bad-nil-guard.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: bad-nil-guard diff --git a/tools/semgrep/ci/error-shadowing.yml b/tools/semgrep/ci/error-shadowing.yml index 43ea1a3eb9fd..490a6667f5fe 100644 --- a/tools/semgrep/ci/error-shadowing.yml +++ b/tools/semgrep/ci/error-shadowing.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: error-shadow-check-types diff --git a/tools/semgrep/ci/fmt-printf.yml b/tools/semgrep/ci/fmt-printf.yml index fc6e824446a0..47e298ebf001 100644 --- a/tools/semgrep/ci/fmt-printf.yml +++ b/tools/semgrep/ci/fmt-printf.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: fmt.Printf diff --git a/tools/semgrep/ci/hashsum.yml b/tools/semgrep/ci/hashsum.yml index 82765a12a262..8ef8ca7f0de7 100644 --- a/tools/semgrep/ci/hashsum.yml +++ b/tools/semgrep/ci/hashsum.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: hash-sum-without-write diff --git a/tools/semgrep/ci/hmac-bytes.yml b/tools/semgrep/ci/hmac-bytes.yml index d8da277064a2..e5ce32ef2d6b 100644 --- a/tools/semgrep/ci/hmac-bytes.yml +++ b/tools/semgrep/ci/hmac-bytes.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: use-hmac-equal diff --git a/tools/semgrep/ci/hmac-hash.yml b/tools/semgrep/ci/hmac-hash.yml index 2b03883c4a51..76e1e9e726fc 100644 --- a/tools/semgrep/ci/hmac-hash.yml +++ b/tools/semgrep/ci/hmac-hash.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: hmac-needs-new diff --git a/tools/semgrep/lock-not-unlocked-on-return.yml b/tools/semgrep/ci/lock-not-unlocked-on-return.yml similarity index 97% rename from tools/semgrep/lock-not-unlocked-on-return.yml rename to tools/semgrep/ci/lock-not-unlocked-on-return.yml index 6482b7194599..958d8dfc17e5 100644 --- a/tools/semgrep/lock-not-unlocked-on-return.yml +++ b/tools/semgrep/ci/lock-not-unlocked-on-return.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: lock_not_unlocked @@ -89,6 +89,10 @@ rules: ... return ... } + # Another lock object that returns an error + - pattern-not: | + $ERR = $LOCK.Lock() + ... # deferred unlock with release function - pattern-not: | $LOCK.Lock() @@ -293,4 +297,4 @@ rules: if $COND { ... return ... - } \ No newline at end of file + } diff --git a/tools/semgrep/ci/logger-format-string.yml b/tools/semgrep/ci/logger-format-string.yml index 14cb6cd4276c..136c4eb1486c 100644 --- a/tools/semgrep/ci/logger-format-string.yml +++ b/tools/semgrep/ci/logger-format-string.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: logger-used-with-format-string diff --git a/tools/semgrep/ci/loop-time-after.yml b/tools/semgrep/ci/loop-time-after.yml index e3a5183a1fd6..4104c9568e03 100644 --- a/tools/semgrep/ci/loop-time-after.yml +++ b/tools/semgrep/ci/loop-time-after.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: loop-time-after diff --git a/tools/semgrep/ci/loopclosure.yml b/tools/semgrep/ci/loopclosure.yml index 88ab134c5dff..ce358e951b0b 100644 --- a/tools/semgrep/ci/loopclosure.yml +++ b/tools/semgrep/ci/loopclosure.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: loopclosure diff --git a/tools/semgrep/ci/no-nil-check.yml b/tools/semgrep/ci/no-nil-check.yml index 0b1f1ce37205..a470f35c87fb 100644 --- a/tools/semgrep/ci/no-nil-check.yml +++ b/tools/semgrep/ci/no-nil-check.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: nil-check-logical-storage @@ -99,6 +99,13 @@ rules: ... } ... + - pattern-not: | + $VAR, $ERR = NamespaceByID(...) + ... + if !a.includeInResponse(..., $VAR) { + ... + } + ... message: missed nil check languages: - go diff --git a/tools/semgrep/ci/oddifsequence.yml b/tools/semgrep/ci/oddifsequence.yml index 77b71b6a2a63..8e4e12d143da 100644 --- a/tools/semgrep/ci/oddifsequence.yml +++ b/tools/semgrep/ci/oddifsequence.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: odd-sequence-ifs diff --git a/tools/semgrep/ci/return-nil-error.yml b/tools/semgrep/ci/return-nil-error.yml index a91e4eaecd6d..18a8bdff86a1 100644 --- a/tools/semgrep/ci/return-nil-error.yml +++ b/tools/semgrep/ci/return-nil-error.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: return-nil diff --git a/tools/semgrep/ci/return-nil.yml b/tools/semgrep/ci/return-nil.yml index 2a6447cef710..66fd310aa692 100644 --- a/tools/semgrep/ci/return-nil.yml +++ b/tools/semgrep/ci/return-nil.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: hc-return-nil diff --git a/tools/semgrep/ci/time-parse-duration.yml b/tools/semgrep/ci/time-parse-duration.yml new file mode 100644 index 000000000000..3c1746d69f0f --- /dev/null +++ b/tools/semgrep/ci/time-parse-duration.yml @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +rules: + - id: time-parse-duration + patterns: + - pattern: time.ParseDuration + message: "Usage of time.ParseDuration. Use parseutil.ParseDurationSeconds, instead!" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/wrongerrcall.yml b/tools/semgrep/ci/wrongerrcall.yml index 315e26d5f2d0..8de9627b45a5 100644 --- a/tools/semgrep/ci/wrongerrcall.yml +++ b/tools/semgrep/ci/wrongerrcall.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: maybe-wrong-err diff --git a/tools/semgrep/ci/wronglock.yml b/tools/semgrep/ci/wronglock.yml index 126a5446a4fd..5b408b6a0fe4 100644 --- a/tools/semgrep/ci/wronglock.yml +++ b/tools/semgrep/ci/wronglock.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: wrong-lock-unlock diff --git a/tools/semgrep/hostport.yml b/tools/semgrep/hostport.yml index 28613ecd9036..f687ae707f29 100644 --- a/tools/semgrep/hostport.yml +++ b/tools/semgrep/hostport.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 # https://github.com/golang/go/issues/28308, from @stapelberg rules: diff --git a/tools/semgrep/joinpath.yml b/tools/semgrep/joinpath.yml index ec27127bfa03..bc58134f7ee3 100644 --- a/tools/semgrep/joinpath.yml +++ b/tools/semgrep/joinpath.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: use-strings-join-path diff --git a/tools/semgrep/logger-sprintf.yml b/tools/semgrep/logger-sprintf.yml index 7d2f48bcdc3e..a478f085b16a 100644 --- a/tools/semgrep/logger-sprintf.yml +++ b/tools/semgrep/logger-sprintf.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: logger-used-with-sprintf diff --git a/tools/semgrep/paths-with-callbacks-and-operations.yml b/tools/semgrep/paths-with-callbacks-and-operations.yml index e29cbab65baf..33b710012004 100644 --- a/tools/semgrep/paths-with-callbacks-and-operations.yml +++ b/tools/semgrep/paths-with-callbacks-and-operations.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: path-has-both-callbacks-and-operations diff --git a/tools/semgrep/paths-with-callbacks.yml b/tools/semgrep/paths-with-callbacks.yml index 9049a1d370a3..d04a85ea3d76 100644 --- a/tools/semgrep/paths-with-callbacks.yml +++ b/tools/semgrep/paths-with-callbacks.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: uses-path-callbacks diff --git a/tools/semgrep/physical-storage.yml b/tools/semgrep/physical-storage.yml index e7e978cc75a9..660a7c179438 100644 --- a/tools/semgrep/physical-storage.yml +++ b/tools/semgrep/physical-storage.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: physical-storage-bypass-encryption diff --git a/tools/semgrep/replication-has-state.yml b/tools/semgrep/replication-has-state.yml index 7868e328087e..d97bff493691 100644 --- a/tools/semgrep/replication-has-state.yml +++ b/tools/semgrep/replication-has-state.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: replication-state-should-use-IsPerfSecondary diff --git a/tools/semgrep/self-equals.yml b/tools/semgrep/self-equals.yml index ae7c1ff8c93c..1e43b253f8b9 100644 --- a/tools/semgrep/self-equals.yml +++ b/tools/semgrep/self-equals.yml @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 +# SPDX-License-Identifier: BUSL-1.1 rules: - id: self-equals diff --git a/tools/stubmaker/main.go b/tools/stubmaker/main.go new file mode 100644 index 000000000000..e2984420537b --- /dev/null +++ b/tools/stubmaker/main.go @@ -0,0 +1,332 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package main + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "os" + "path/filepath" + "strings" + + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/hashicorp/go-hclog" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/imports" +) + +var logger hclog.Logger + +func fatal(err error) { + logger.Error("fatal error", "error", err) + os.Exit(1) +} + +type generator struct { + file *ast.File + fset *token.FileSet +} + +func main() { + logger = hclog.New(&hclog.LoggerOptions{ + Name: "stubmaker", + Level: hclog.Trace, + }) + + // Setup git, both so we can determine if we're running on enterprise, and + // so we can make sure we don't clobber a non-transient file. + repo, err := git.PlainOpenWithOptions(".", &git.PlainOpenOptions{ + DetectDotGit: true, + }) + if err != nil { + if err.Error() != "repository does not exist" { + fatal(err) + } + repo = nil + } + + var wt *git.Worktree + if repo != nil { + wt, err = repo.Worktree() + if err != nil { + fatal(err) + } + if !isEnterprise(wt) { + return + } + } + + // Read the file and figure out if we need to do anything. + inputFile := os.Getenv("GOFILE") + if !strings.HasSuffix(inputFile, "_stubs_oss.go") { + fatal(fmt.Errorf("stubmaker should only be invoked from files ending in _stubs_oss.go")) + } + + baseFilename := strings.TrimSuffix(inputFile, "_stubs_oss.go") + outputFile := baseFilename + "_stubs_ent.go" + b, err := os.ReadFile(inputFile) + if err != nil { + fatal(err) + } + + inputParsed, err := parseFile(b) + if err != nil { + fatal(err) + } + needed, existing, err := inputParsed.areStubsNeeded() + if err != nil { + fatal(err) + } + if !needed { + return + } + + // We'd like to write the file, but first make sure that we're not going + // to blow away anyone's work or overwrite a file already in git. 
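+ // (Descriptive note: the checks below resolve HEAD, its commit object, and
+ // the worktree status; inGit combines them to decide whether outputFile is
+ // already tracked in the worktree, the index, or the committed tree.)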
+ if repo != nil { + head, err := repo.Head() + if err != nil { + fatal(err) + } + obj, err := repo.Object(plumbing.AnyObject, head.Hash()) + if err != nil { + fatal(err) + } + + st, err := wt.Status() + if err != nil { + fatal(err) + } + + tracked, err := inGit(wt, st, obj, outputFile) + if err != nil { + fatal(err) + } + if tracked { + fatal(fmt.Errorf("output file %s exists in git, not overwriting", outputFile)) + } + } + + // Now we can finally write the file + output, err := os.Create(outputFile + ".tmp") + if err != nil { + fatal(err) + } + err = inputParsed.writeStubs(output, existing) + if err != nil { + // If we don't end up writing to the file, delete it. + os.Remove(outputFile + ".tmp") + } else { + os.Rename(outputFile+".tmp", outputFile) + } + if err != nil { + fatal(err) + } +} + +func (g *generator) writeStubs(output *os.File, existingFuncs map[string]struct{}) error { + // delete all functions/methods that are already defined + g.modifyAST(existingFuncs) + + // write the updated code to buf + buf := new(bytes.Buffer) + err := format.Node(buf, g.fset, g.file) + if err != nil { + return err + } + + // remove any unneeded imports + res, err := imports.Process("", buf.Bytes(), &imports.Options{ + Fragment: true, + AllErrors: false, + Comments: true, + FormatOnly: false, + }) + if err != nil { + return err + } + + // add the code generation line and update the build tags + outputLines, err := fixGeneratedComments(res) + if err != nil { + return err + } + _, err = output.WriteString(strings.Join(outputLines, "\n") + "\n") + return err +} + +func fixGeneratedComments(b []byte) ([]string, error) { + warning := "// Code generated by tools/stubmaker; DO NOT EDIT." + goGenerate := "//go:generate go run github.com/hashicorp/vault/tools/stubmaker" + + scanner := bufio.NewScanner(bytes.NewBuffer(b)) + var outputLines []string + for scanner.Scan() { + line := scanner.Text() + switch { + case strings.Contains(line, "//go:build ") && strings.Contains(line, "!enterprise"): + outputLines = append(outputLines, warning, "") + line = strings.ReplaceAll(line, "!enterprise", "enterprise") + case line == goGenerate: + continue + } + outputLines = append(outputLines, line) + } + return outputLines, scanner.Err() +} + +func inGit(wt *git.Worktree, st git.Status, obj object.Object, path string) (bool, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return false, fmt.Errorf("path %s can't be made absolute: %w", path, err) + } + relPath, err := filepath.Rel(wt.Filesystem.Root(), absPath) + if err != nil { + return false, fmt.Errorf("path %s can't be made relative: %w", absPath, err) + } + + fst := st.File(relPath) + if fst.Worktree != git.Untracked || fst.Staging != git.Untracked { + return true, nil + } + + curwd, err := os.Getwd() + if err != nil { + return false, err + } + + blob, err := resolve(obj, relPath) + if err != nil && !strings.Contains(err.Error(), "file not found") { + return false, fmt.Errorf("error resolving path %s from %s: %w", relPath, curwd, err) + } + + return blob != nil, nil +} + +func isEnterprise(wt *git.Worktree) bool { + st, err := wt.Filesystem.Stat("enthelpers") + onOss := errors.Is(err, os.ErrNotExist) + onEnt := st != nil + + switch { + case onOss && !onEnt: + case !onOss && onEnt: + default: + fatal(err) + } + return onEnt +} + +// resolve blob at given path from obj. obj can be a commit, tag, tree, or blob. 
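+// It recurses through the object graph: a commit resolves to its tree, a tag
+// to its target object, a tree to the blob at path, and a blob returns itself;
+// any other object type yields object.ErrUnsupportedObject.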
+func resolve(obj object.Object, path string) (*object.Blob, error) { + switch o := obj.(type) { + case *object.Commit: + t, err := o.Tree() + if err != nil { + return nil, err + } + return resolve(t, path) + case *object.Tag: + target, err := o.Object() + if err != nil { + return nil, err + } + return resolve(target, path) + case *object.Tree: + file, err := o.File(path) + if err != nil { + return nil, err + } + return &file.Blob, nil + case *object.Blob: + return o, nil + default: + return nil, object.ErrUnsupportedObject + } +} + +// areStubsNeeded checks if all functions and methods defined in the stub file +// are present in the package +func (g *generator) areStubsNeeded() (needed bool, existingStubs map[string]struct{}, err error) { + pkg, err := parsePackage(".", []string{"enterprise"}) + if err != nil { + return false, nil, err + } + + stubFunctions := make(map[string]struct{}) + for _, d := range g.file.Decls { + dFunc, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + stubFunctions[dFunc.Name.Name] = struct{}{} + + } + found := make(map[string]struct{}) + for name, val := range pkg.TypesInfo.Defs { + if val == nil { + continue + } + _, ok := val.Type().(*types.Signature) + if !ok { + continue + } + if _, ok := stubFunctions[name.Name]; ok { + found[name.Name] = struct{}{} + } + } + + return len(found) != len(stubFunctions), found, nil +} + +func (g *generator) modifyAST(exists map[string]struct{}) { + astutil.Apply(g.file, nil, func(c *astutil.Cursor) bool { + switch x := c.Node().(type) { + case *ast.FuncDecl: + if _, ok := exists[x.Name.Name]; ok { + c.Delete() + } + } + + return true + }) +} + +func parsePackage(name string, tags []string) (*packages.Package, error) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + } + pkgs, err := packages.Load(cfg, name) + if err != nil { + return nil, fmt.Errorf("error parsing package %s: %v", name, err) + } + if len(pkgs) != 1 { + return nil, fmt.Errorf("error: %d packages found", len(pkgs)) + } + return pkgs[0], nil +} + +func parseFile(buffer []byte) (*generator, error) { + fs := token.NewFileSet() + f, err := parser.ParseFile(fs, "", buffer, parser.AllErrors|parser.ParseComments) + if err != nil { + return nil, err + } + return &generator{ + file: f, + fset: fs, + }, nil +} diff --git a/tools/tools.go b/tools/tools.go index 59a6d962529d..066198c07145 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -1,40 +1,16 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 //go:build tools -// This file ensures tool dependencies are kept in sync. This is the -// recommended way of doing this according to -// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module -// To install the following tools at the version used by this repo run: -// $ make bootstrap -// or +// This file is here for backwards compat only. You can now use make instead of go generate to +// install tools. 
+ +// You can replace // $ go generate -tags tools tools/tools.go +// with +// $ make tools package tools -//go:generate go install golang.org/x/tools/cmd/goimports -//go:generate go install github.com/client9/misspell/cmd/misspell -//go:generate go install mvdan.cc/gofumpt -//go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go -//go:generate go install google.golang.org/grpc/cmd/protoc-gen-go-grpc -//go:generate go install github.com/favadi/protoc-go-inject-tag -//go:generate go install github.com/golangci/revgrep/cmd/revgrep -//go:generate go install gotest.tools/gotestsum -import ( - _ "golang.org/x/tools/cmd/goimports" - - _ "github.com/client9/misspell/cmd/misspell" - - _ "mvdan.cc/gofumpt" - - _ "google.golang.org/protobuf/cmd/protoc-gen-go" - - _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" - - _ "github.com/favadi/protoc-go-inject-tag" - - _ "github.com/golangci/revgrep/cmd/revgrep" - - _ "gotest.tools/gotestsum" -) +//go:generate ./tools.sh install-tools diff --git a/tools/tools.sh b/tools/tools.sh new file mode 100755 index 000000000000..8a8dffa7dddb --- /dev/null +++ b/tools/tools.sh @@ -0,0 +1,161 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -euo pipefail + +# Determine the root directory of the repository +repo_root() { + git rev-parse --show-toplevel +} + +# Install an external Go tool. +go_install() { + if go install "$1"; then + echo "--> $1 ✔" + else + echo "--> $1 ✖" + return 1 + fi +} + +# Check for a tool binary in the path. +check_tool() { + if builtin type -P "$2" &> /dev/null; then + echo "--> $2 ✔" + else + echo "--> $2 ✖" + echo "Could not find required $1 tool $2. Run 'make tools-$1' to install it." 1>&2 + return 1 + fi +} + +# Install external tools. +install_external() { + local tools + # If you update this please update check_external below as well as our external tools + # install action .github/actions/install-external-tools/action.yml + # + tools=( + honnef.co/go/tools/cmd/staticcheck@latest + github.com/bufbuild/buf/cmd/buf@v1.25.0 + github.com/favadi/protoc-go-inject-tag@latest + github.com/golangci/misspell/cmd/misspell@latest + github.com/golangci/revgrep/cmd/revgrep@latest + github.com/loggerhead/enumer@latest + github.com/rinchsan/gosimports/cmd/gosimports@latest + golang.org/x/tools/cmd/goimports@latest + google.golang.org/protobuf/cmd/protoc-gen-go@latest + google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4.0 + gotest.tools/gotestsum@latest + mvdan.cc/gofumpt@latest + mvdan.cc/sh/v3/cmd/shfmt@latest + ) + + echo "==> Installing external tools..." + for tool in "${tools[@]}"; do + go_install "$tool" + done +} + +# Check that all tools are installed +check_external() { + # Ensure that all external tools are available. In CI we'll prefer installing pre-built external + # tools for speed instead of go install so that we don't require downloading Go modules and + # compiling tools from scratch in every CI job. + # See .github/actions/install-external-tools.yml for that workflow. + local tools + tools=( + buf + enumer + gofumpt + goimports + gosimports + gotestsum + misspell + protoc-gen-go + protoc-gen-go-grpc + protoc-go-inject-tag + revgrep + shfmt + staticcheck + ) + + echo "==> Checking for external tools..." + for tool in "${tools[@]}"; do + check_tool external "$tool" + done +} + +# Install internal tools. +install_internal() { + local tools + # If you update this please update check tools below. 
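+ # Internal tools live at ./tools/<name> in this repo; the loop below builds
+ # each one with 'go install ./tools/<name>'.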
+ tools=( + codechecker + stubmaker + ) + + echo "==> Installing internal tools..." + pushd "$(repo_root)" &> /dev/null + for tool in "${tools[@]}"; do + go_install ./tools/"$tool" + done + popd &> /dev/null +} + +# Check that all internal tools are installed +check_internal() { + # Ensure that all required internal tools are available. + local tools + tools=( + codechecker + stubmaker + ) + + echo "==> Checking for internal tools..." + for tool in "${tools[@]}"; do + check_tool internal "$tool" + done +} + +# Install tools. +install() { + install_internal + install_external +} + +# Check tools. +check() { + check_internal + check_external +} + +main() { + case $1 in + install-external) + install_external + ;; + install-internal) + install_internal + ;; + check-external) + check_external + ;; + check-internal) + check_internal + ;; + install) + install + ;; + check) + check + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/ui/.copywrite.hcl b/ui/.copywrite.hcl new file mode 100644 index 000000000000..935bc238e476 --- /dev/null +++ b/ui/.copywrite.hcl @@ -0,0 +1,33 @@ +# (OPTIONAL) Overrides the copywrite config schema version +# Default: 1 +schema_version = 1 + +project { + # (OPTIONAL) SPDX-compatible license identifier + # Leave blank if you don't wish to license the project + # Default: "MPL-2.0" + license = "BUSL-1.1" + + # (OPTIONAL) Represents the copyright holder used in all statements + # Default: HashiCorp, Inc. + # copyright_holder = "" + + # (OPTIONAL) Represents the year that the project initially began + # Default: + # copyright_year = 0 + + # (OPTIONAL) A list of globs that should not have copyright or license headers. + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + # Default: [] + header_ignore = [ + "node_modules/**" + ] + + # (OPTIONAL) Links to an upstream repo for determining repo relationships + # This is for special cases and should not normally be set. + # Default: "" + # upstream = "hashicorp/" +} + + diff --git a/ui/.ember-cli b/ui/.ember-cli index f6e59871ff52..fcd9114b1235 100644 --- a/ui/.ember-cli +++ b/ui/.ember-cli @@ -9,8 +9,8 @@ "output-path": "../http/web_ui", /** - Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript - rather than JavaScript by default, when a TypeScript version of a given blueprint is available. + Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript + rather than JavaScript by default, when a TypeScript version of a given blueprint is available. */ "isTypeScriptProject": false } diff --git a/ui/.eslintignore b/ui/.eslintignore index c352da9c77be..4730dfa0735f 100644 --- a/ui/.eslintignore +++ b/ui/.eslintignore @@ -1,6 +1,5 @@ # unconventional js /blueprints/*/files/ -/vendor/ # compiled output /dist/ @@ -15,7 +14,6 @@ /coverage/ !.* .*/ -.eslintcache # ember-try /.node_modules.ember-try/ diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js index bb4548319848..1c0220a535d4 100644 --- a/ui/.eslintrc.js +++ b/ui/.eslintrc.js @@ -1,6 +1,6 @@ /** * Copyright (c) HashiCorp, Inc.
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ /* eslint-disable no-undef */ @@ -8,13 +8,14 @@ 'use strict'; module.exports = { - parser: 'babel-eslint', + parser: '@babel/eslint-parser', root: true, parserOptions: { - ecmaVersion: 2018, + ecmaVersion: 'latest', sourceType: 'module', - ecmaFeatures: { - legacyDecorators: true, + requireConfigFile: false, + babelOptions: { + plugins: [['@babel/plugin-proposal-decorators', { decoratorsBeforeExport: true }]], }, }, plugins: ['ember'], @@ -45,6 +46,7 @@ module.exports = { files: [ './.eslintrc.js', './.prettierrc.js', + './.stylelintrc.js', './.template-lintrc.js', './ember-cli-build.js', './testem.js', @@ -60,18 +62,15 @@ module.exports = { browser: false, node: true, }, - plugins: ['node'], - extends: ['plugin:node/recommended'], - rules: { - // this can be removed once the following is fixed - // https://github.com/mysticatea/eslint-plugin-node/issues/77 - 'node/no-unpublished-require': 'off', - }, + extends: ['plugin:n/recommended'], }, { // test files files: ['tests/**/*-test.{js,ts}'], extends: ['plugin:qunit/recommended'], + rules: { + 'qunit/require-expect': 'off', + }, }, { files: ['**/*.ts'], diff --git a/ui/.github/workflows/ci.yml b/ui/.github/workflows/ci.yml deleted file mode 100644 index ba4e3f71006a..000000000000 --- a/ui/.github/workflows/ci.yml +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -name: CI - -on: - push: - branches: - - main - - master - pull_request: {} - -concurrency: - group: ci-${{ github.head_ref || github.ref }} - cancel-in-progress: true - -jobs: - lint: - name: "Lint" - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Install Node - uses: actions/setup-node@v3 - with: - node-version: 12.x - cache: yarn - - name: Install Dependencies - run: yarn install --frozen-lockfile - - name: Lint - run: yarn lint - - test: - name: "Test" - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Install Node - uses: actions/setup-node@v3 - with: - node-version: 12.x - cache: yarn - - name: Install Dependencies - run: yarn install --frozen-lockfile - - name: Run Tests - run: yarn test diff --git a/ui/.gitignore b/ui/.gitignore index 70da8c051de3..639f011c1dcb 100644 --- a/ui/.gitignore +++ b/ui/.gitignore @@ -1,27 +1,24 @@ -# See https://help.github.com/ignore-files/ for more about ignoring files. 
- # compiled output /dist/ -/tmp/ +/declarations/ # dependencies -/bower_components/ /node_modules/ # misc /.sass-cache +/.env* +/.pnp* /.eslintcache -/connect.lock /coverage/ -/libpeerconnection.log /npm-debug.log* /testem.log /yarn-error.log package-lock.json +/docs/components/* # ember-try /.node_modules.ember-try/ -/bower.json.ember-try /npm-shrinkwrap.json.ember-try /package.json.ember-try /package-lock.json.ember-try @@ -29,3 +26,16 @@ package-lock.json # broccoli-debug /DEBUG/ + +# yarn +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions + +# copywrite tool used in pre-commit hook +.copywrite + diff --git a/ui/.nvmrc b/ui/.nvmrc deleted file mode 100644 index 958b5a36e1fa..000000000000 --- a/ui/.nvmrc +++ /dev/null @@ -1 +0,0 @@ -v14 diff --git a/ui/.prettierignore b/ui/.prettierignore index 4178fd571e68..9385391f21b7 100644 --- a/ui/.prettierignore +++ b/ui/.prettierignore @@ -1,25 +1,13 @@ # unconventional js /blueprints/*/files/ -/vendor/ # compiled output /dist/ -/tmp/ - -# dependencies -/bower_components/ -/node_modules/ # misc /coverage/ !.* -.eslintcache -.lint-todo/ +.*/ # ember-try /.node_modules.ember-try/ -/bower.json.ember-try -/npm-shrinkwrap.json.ember-try -/package.json.ember-try -/package-lock.json.ember-try -/yarn.lock.ember-try diff --git a/ui/.prettierrc.js b/ui/.prettierrc.js index 8c776351a454..691cad3025e1 100644 --- a/ui/.prettierrc.js +++ b/ui/.prettierrc.js @@ -1,6 +1,6 @@ /** * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ 'use strict'; @@ -17,5 +17,11 @@ module.exports = { printWidth: 125, }, }, + { + files: '*.{js,ts}', + options: { + singleQuote: true, + }, + }, ], }; diff --git a/ui/.stylelintignore b/ui/.stylelintignore new file mode 100644 index 000000000000..a0cf71cbd183 --- /dev/null +++ b/ui/.stylelintignore @@ -0,0 +1,8 @@ +# unconventional files +/blueprints/*/files/ + +# compiled output +/dist/ + +# addons +/.node_modules.ember-try/ diff --git a/ui/.stylelintrc.js b/ui/.stylelintrc.js new file mode 100644 index 000000000000..4409325c3e75 --- /dev/null +++ b/ui/.stylelintrc.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +'use strict'; + +module.exports = { + extends: ['stylelint-config-standard', 'stylelint-prettier/recommended'], +}; diff --git a/ui/.template-lintrc.js b/ui/.template-lintrc.js index 3540521890a8..3936c7dd4769 100644 --- a/ui/.template-lintrc.js +++ b/ui/.template-lintrc.js @@ -1,59 +1,32 @@ /** * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ -'use strict'; - -const fs = require('fs'); -let testOverrides = {}; -try { - // ember-template-lint no longer exports anything so we cannot access the rule definitions conventionally - // read file, convert to json string and parse - const toJSON = (str) => { - return JSON.parse( - str - .slice(str.indexOf(':') + 2) // get rid of export statement - .slice(0, -(str.length - str.lastIndexOf(','))) // remove trailing brackets from export - .replace(/:.*,/g, `: ${false},`) // convert values to false - .replace(/,([^,]*)$/, '$1') // remove last comma - .replace(/'/g, '"') // convert to double quotes - .replace(/(\w[^"].*[^"]):/g, '"$1":') // wrap quotes around single word keys - .trim() - ); - }; - const recommended = toJSON( - fs.readFileSync('node_modules/ember-template-lint/lib/config/recommended.js').toString() - ); - const stylistic = toJSON( - fs.readFileSync('node_modules/ember-template-lint/lib/config/stylistic.js').toString() - ); - testOverrides = { - ...recommended, - ...stylistic, - prettier: false, - }; -} catch (error) { - console.log(error); // eslint-disable-line -} - module.exports = { plugins: ['ember-template-lint-plugin-prettier'], + extends: ['recommended', 'ember-template-lint-plugin-prettier:recommended'], + rules: { 'no-action': 'off', 'no-implicit-this': { allow: ['supported-auth-backends'], }, 'require-input-label': 'off', + 'no-array-prototype-extensions': 'off', + // from bump to ember-template-lint@6.0.0 + 'no-builtin-form-components': 'off', + 'no-at-ember-render-modifiers': 'off', + 'no-unnecessary-curly-strings': 'off', + 'no-unnecessary-curly-parens': 'off', }, - ignore: ['lib/story-md', 'tests/**'], - // ember language server vscode extension does not currently respect the ignore field - // override all rules manually as workaround to align with cli overrides: [ { files: ['**/*-test.js'], - rules: testOverrides, + rules: { + prettier: false, + }, }, ], }; diff --git a/ui/.watchmanconfig b/ui/.watchmanconfig index e7834e3e4f39..f9c3d8f84fba 100644 --- a/ui/.watchmanconfig +++ b/ui/.watchmanconfig @@ -1,3 +1,3 @@ { - "ignore_dirs": ["tmp", "dist"] + "ignore_dirs": ["dist"] } diff --git a/ui/.yarn/patches/lodash.template-npm-4.5.0-5272df3039.patch b/ui/.yarn/patches/lodash.template-npm-4.5.0-5272df3039.patch new file mode 100644 index 000000000000..920eeb901f3f --- /dev/null +++ b/ui/.yarn/patches/lodash.template-npm-4.5.0-5272df3039.patch @@ -0,0 +1,45 @@ +diff --git a/index.js b/index.js +index f051141e362679e1cc12f3dca924d8f6e7f5459b..63815c4c53412263de74fd4d779cfd198be87c8e 100644 +--- a/index.js ++++ b/index.js +@@ -17,6 +17,9 @@ var HOT_COUNT = 800, + var INFINITY = 1 / 0, + MAX_SAFE_INTEGER = 9007199254740991; + ++/** Error message constants. */ ++var INVALID_TEMPL_VAR_ERROR_TEXT = 'Invalid `variable` option passed into `_.template`'; ++ + /** `Object#toString` result references. */ + var argsTag = '[object Arguments]', + arrayTag = '[object Array]', +@@ -1343,6 +1346,18 @@ function keysIn(object) { + return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object); + } + ++/** ++ * Used to validate the `validate` option in `_.template` variable. 
++ * ++ * Forbids characters which could potentially change the meaning of the function argument definition: ++ * - "()," (modification of function parameters) ++ * - "=" (default value) ++ * - "[]{}" (destructuring of function parameters) ++ * - "/" (beginning of a comment) ++ * - whitespace ++ */ ++var reForbiddenIdentifierChars = /[()=,{}\[\]\/\s]/; ++ + /** + * Creates a compiled template function that can interpolate data properties + * in "interpolate" delimiters, HTML-escape interpolated data properties in +@@ -1522,6 +1537,11 @@ function template(string, options, guard) { + if (!variable) { + source = 'with (obj) {\n' + source + '\n}\n'; + } ++ // Throw an error if a forbidden character was found in `variable`, to prevent ++ // potential command injection attacks. ++ else if (reForbiddenIdentifierChars.test(variable)) { ++ throw new Error(INVALID_TEMPL_VAR_ERROR_TEXT); ++ } + // Cleanup code by stripping empty strings. + source = (isEvaluating ? source.replace(reEmptyStringLeading, '') : source) + .replace(reEmptyStringMiddle, '$1') diff --git a/ui/.yarn/releases/yarn-1.19.1.js b/ui/.yarn/releases/yarn-1.19.1.js deleted file mode 100755 index f848ec523aa8..000000000000 --- a/ui/.yarn/releases/yarn-1.19.1.js +++ /dev/null @@ -1,147221 +0,0 @@ -#!/usr/bin/env node -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 - */ - -module.exports = -/******/ (function(modules) { // webpackBootstrap -/******/ // The module cache -/******/ var installedModules = {}; -/******/ -/******/ // The require function -/******/ function __webpack_require__(moduleId) { -/******/ -/******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) { -/******/ return installedModules[moduleId].exports; -/******/ } -/******/ // Create a new module (and put it into the cache) -/******/ var module = installedModules[moduleId] = { -/******/ i: moduleId, -/******/ l: false, -/******/ exports: {} -/******/ }; -/******/ -/******/ // Execute the module function -/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); -/******/ -/******/ // Flag the module as loaded -/******/ module.l = true; -/******/ -/******/ // Return the exports of the module -/******/ return module.exports; -/******/ } -/******/ -/******/ -/******/ // expose the modules object (__webpack_modules__) -/******/ __webpack_require__.m = modules; -/******/ -/******/ // expose the module cache -/******/ __webpack_require__.c = installedModules; -/******/ -/******/ // identity function for calling harmony imports with the correct context -/******/ __webpack_require__.i = function(value) { return value; }; -/******/ -/******/ // define getter function for harmony exports -/******/ __webpack_require__.d = function(exports, name, getter) { -/******/ if(!__webpack_require__.o(exports, name)) { -/******/ Object.defineProperty(exports, name, { -/******/ configurable: false, -/******/ enumerable: true, -/******/ get: getter -/******/ }); -/******/ } -/******/ }; -/******/ -/******/ // getDefaultExport function for compatibility with non-harmony modules -/******/ __webpack_require__.n = function(module) { -/******/ var getter = module && module.__esModule ? 
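(Aside on the `lodash.template` patch above, while the deletion of the vendored `yarn-1.19.1.js` bundle continues below: the `variable` option is spliced verbatim into the compiled template function's parameter list, so before the patch a crafted value could inject arbitrary code at compile time, the vector behind CVE-2021-23337. The patch now rejects any `variable` containing characters that could alter the parameter definition. A minimal sketch of both sides, an illustrative snippet rather than part of this diff, assuming the patched `lodash.template` package is installed:)

```js
// Illustrative only; run against the patched lodash.template package.
const template = require('lodash.template');

// Legitimate use of `variable`: name the data object instead of wrapping the
// template body in `with (obj) { ... }`.
const greet = template('Hello <%= data.name %>!', { variable: 'data' });
console.log(greet({ name: 'Vault' })); // "Hello Vault!"

// Before the patch, the option value was pasted into `function(<variable>)`,
// so a value like the one below could close the parameter list and smuggle in
// a statement that runs as soon as the template is compiled. The patched
// reForbiddenIdentifierChars check throws instead.
try {
  template('x', { variable: '){console.log("injected")}((' });
} catch (err) {
  console.log(err.message); // "Invalid `variable` option passed into `_.template`"
}
```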
-/******/ function getDefault() { return module['default']; } : -/******/ function getModuleExports() { return module; }; -/******/ __webpack_require__.d(getter, 'a', getter); -/******/ return getter; -/******/ }; -/******/ -/******/ // Object.prototype.hasOwnProperty.call -/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; -/******/ -/******/ // __webpack_public_path__ -/******/ __webpack_require__.p = ""; -/******/ -/******/ // Load entry module and return exports -/******/ return __webpack_require__(__webpack_require__.s = 549); -/******/ }) -/************************************************************************/ -/******/ ([ -/* 0 */ -/***/ (function(module, exports) { - -module.exports = require("path"); - -/***/ }), -/* 1 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (immutable) */ __webpack_exports__["a"] = __extends; -/* unused harmony export __assign */ -/* unused harmony export __rest */ -/* unused harmony export __decorate */ -/* unused harmony export __param */ -/* unused harmony export __metadata */ -/* unused harmony export __awaiter */ -/* unused harmony export __generator */ -/* unused harmony export __exportStar */ -/* unused harmony export __values */ -/* unused harmony export __read */ -/* unused harmony export __spread */ -/* unused harmony export __await */ -/* unused harmony export __asyncGenerator */ -/* unused harmony export __asyncDelegator */ -/* unused harmony export __asyncValues */ -/* unused harmony export __makeTemplateObject */ -/* unused harmony export __importStar */ -/* unused harmony export __importDefault */ -/*! ***************************************************************************** -Copyright (c) Microsoft Corporation. All rights reserved. -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. You may obtain a copy of the -License at http://www.apache.org/licenses/LICENSE-2.0 - -THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED -WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -MERCHANTABLITY OR NON-INFRINGEMENT. - -See the Apache Version 2.0 License for specific language governing permissions -and limitations under the License. -***************************************************************************** */ -/* global Reflect, Promise */ - -var extendStatics = function(d, b) { - extendStatics = Object.setPrototypeOf || - ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || - function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; - return extendStatics(d, b); -}; - -function __extends(d, b) { - extendStatics(d, b); - function __() { this.constructor = d; } - d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __()); -} - -var __assign = function() { - __assign = Object.assign || function __assign(t) { - for (var s, i = 1, n = arguments.length; i < n; i++) { - s = arguments[i]; - for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; - } - return t; - } - return __assign.apply(this, arguments); -} - -function __rest(s, e) { - var t = {}; - for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0) - t[p] = s[p]; - if (s != null && typeof Object.getOwnPropertySymbols === "function") - for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) if (e.indexOf(p[i]) < 0) - t[p[i]] = s[p[i]]; - return t; -} - -function __decorate(decorators, target, key, desc) { - var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; - if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); - else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; - return c > 3 && r && Object.defineProperty(target, key, r), r; -} - -function __param(paramIndex, decorator) { - return function (target, key) { decorator(target, key, paramIndex); } -} - -function __metadata(metadataKey, metadataValue) { - if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(metadataKey, metadataValue); -} - -function __awaiter(thisArg, _arguments, P, generator) { - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -} - -function __generator(thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; - return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (_) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? 
op[1] : void 0, done: true }; - } -} - -function __exportStar(m, exports) { - for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; -} - -function __values(o) { - var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; - if (m) return m.call(o); - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; -} - -function __read(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } - catch (error) { e = { error: error }; } - finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } - finally { if (e) throw e.error; } - } - return ar; -} - -function __spread() { - for (var ar = [], i = 0; i < arguments.length; i++) - ar = ar.concat(__read(arguments[i])); - return ar; -} - -function __await(v) { - return this instanceof __await ? (this.v = v, this) : new __await(v); -} - -function __asyncGenerator(thisArg, _arguments, generator) { - if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); - var g = generator.apply(thisArg, _arguments || []), i, q = []; - return i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i; - function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; } - function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } } - function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); } - function fulfill(value) { resume("next", value); } - function reject(value) { resume("throw", value); } - function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); } -} - -function __asyncDelegator(o) { - var i, p; - return i = {}, verb("next"), verb("throw", function (e) { throw e; }), verb("return"), i[Symbol.iterator] = function () { return this; }, i; - function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === "return" } : f ? f(v) : v; } : f; } -} - -function __asyncValues(o) { - if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); - var m = o[Symbol.asyncIterator], i; - return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i); - function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; } - function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); } -} - -function __makeTemplateObject(cooked, raw) { - if (Object.defineProperty) { Object.defineProperty(cooked, "raw", { value: raw }); } else { cooked.raw = raw; } - return cooked; -}; - -function __importStar(mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result.default = mod; - return result; -} - -function __importDefault(mod) { - return (mod && mod.__esModule) ? 
mod : { default: mod }; -} - - -/***/ }), -/* 2 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -exports.__esModule = true; - -var _promise = __webpack_require__(227); - -var _promise2 = _interopRequireDefault(_promise); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -exports.default = function (fn) { - return function () { - var gen = fn.apply(this, arguments); - return new _promise2.default(function (resolve, reject) { - function step(key, arg) { - try { - var info = gen[key](arg); - var value = info.value; - } catch (error) { - reject(error); - return; - } - - if (info.done) { - resolve(value); - } else { - return _promise2.default.resolve(value).then(function (value) { - step("next", value); - }, function (err) { - step("throw", err); - }); - } - } - - return step("next"); - }); - }; -}; - -/***/ }), -/* 3 */ -/***/ (function(module, exports) { - -module.exports = require("util"); - -/***/ }), -/* 4 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.getFirstSuitableFolder = exports.readFirstAvailableStream = exports.makeTempDir = exports.hardlinksWork = exports.writeFilePreservingEol = exports.getFileSizeOnDisk = exports.walk = exports.symlink = exports.find = exports.readJsonAndFile = exports.readJson = exports.readFileAny = exports.hardlinkBulk = exports.copyBulk = exports.unlink = exports.glob = exports.link = exports.chmod = exports.lstat = exports.exists = exports.mkdirp = exports.stat = exports.access = exports.rename = exports.readdir = exports.realpath = exports.readlink = exports.writeFile = exports.open = exports.readFileBuffer = exports.lockQueue = exports.constants = undefined; - -var _asyncToGenerator2; - -function _load_asyncToGenerator() { - return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); -} - -let buildActionsForCopy = (() => { - var _ref = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, events, possibleExtraneous, reporter) { - - // - let build = (() => { - var _ref5 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - const src = data.src, - dest = data.dest, - type = data.type; - - const onFresh = data.onFresh || noop; - const onDone = data.onDone || noop; - - // TODO https://github.com/yarnpkg/yarn/issues/3751 - // related to bundled dependencies handling - if (files.has(dest.toLowerCase())) { - reporter.verbose(`The case-insensitive file ${dest} shouldn't be copied twice in one bulk copy`); - } else { - files.add(dest.toLowerCase()); - } - - if (type === 'symlink') { - yield mkdirp((_path || _load_path()).default.dirname(dest)); - onFresh(); - actions.symlink.push({ - dest, - linkname: src - }); - onDone(); - return; - } - - if (events.ignoreBasenames.indexOf((_path || _load_path()).default.basename(src)) >= 0) { - // ignored file - return; - } - - const srcStat = yield lstat(src); - let srcFiles; - - if (srcStat.isDirectory()) { - srcFiles = yield readdir(src); - } - - let destStat; - try { - // try accessing the destination - destStat = yield lstat(dest); - } catch (e) { - // proceed if destination doesn't exist, otherwise error - if (e.code !== 'ENOENT') { - throw e; - } - } - - // if destination exists - if (destStat) { - const bothSymlinks = srcStat.isSymbolicLink() && destStat.isSymbolicLink(); - const bothFolders = srcStat.isDirectory() && destStat.isDirectory(); - const 
bothFiles = srcStat.isFile() && destStat.isFile(); - - // EINVAL access errors sometimes happen which shouldn't because node shouldn't be giving - // us modes that aren't valid. investigate this, it's generally safe to proceed. - - /* if (srcStat.mode !== destStat.mode) { - try { - await access(dest, srcStat.mode); - } catch (err) {} - } */ - - if (bothFiles && artifactFiles.has(dest)) { - // this file gets changed during build, likely by a custom install script. Don't bother checking it. - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipArtifact', src)); - return; - } - - if (bothFiles && srcStat.size === destStat.size && (0, (_fsNormalized || _load_fsNormalized()).fileDatesEqual)(srcStat.mtime, destStat.mtime)) { - // we can safely assume this is the same file - onDone(); - reporter.verbose(reporter.lang('verboseFileSkip', src, dest, srcStat.size, +srcStat.mtime)); - return; - } - - if (bothSymlinks) { - const srcReallink = yield readlink(src); - if (srcReallink === (yield readlink(dest))) { - // if both symlinks are the same then we can continue on - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipSymlink', src, dest, srcReallink)); - return; - } - } - - if (bothFolders) { - // mark files that aren't in this folder as possibly extraneous - const destFiles = yield readdir(dest); - invariant(srcFiles, 'src files not initialised'); - - for (var _iterator4 = destFiles, _isArray4 = Array.isArray(_iterator4), _i4 = 0, _iterator4 = _isArray4 ? _iterator4 : _iterator4[Symbol.iterator]();;) { - var _ref6; - - if (_isArray4) { - if (_i4 >= _iterator4.length) break; - _ref6 = _iterator4[_i4++]; - } else { - _i4 = _iterator4.next(); - if (_i4.done) break; - _ref6 = _i4.value; - } - - const file = _ref6; - - if (srcFiles.indexOf(file) < 0) { - const loc = (_path || _load_path()).default.join(dest, file); - possibleExtraneous.add(loc); - - if ((yield lstat(loc)).isDirectory()) { - for (var _iterator5 = yield readdir(loc), _isArray5 = Array.isArray(_iterator5), _i5 = 0, _iterator5 = _isArray5 ? _iterator5 : _iterator5[Symbol.iterator]();;) { - var _ref7; - - if (_isArray5) { - if (_i5 >= _iterator5.length) break; - _ref7 = _iterator5[_i5++]; - } else { - _i5 = _iterator5.next(); - if (_i5.done) break; - _ref7 = _i5.value; - } - - const file = _ref7; - - possibleExtraneous.add((_path || _load_path()).default.join(loc, file)); - } - } - } - } - } - } - - if (destStat && destStat.isSymbolicLink()) { - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dest); - destStat = null; - } - - if (srcStat.isSymbolicLink()) { - onFresh(); - const linkname = yield readlink(src); - actions.symlink.push({ - dest, - linkname - }); - onDone(); - } else if (srcStat.isDirectory()) { - if (!destStat) { - reporter.verbose(reporter.lang('verboseFileFolder', dest)); - yield mkdirp(dest); - } - - const destParts = dest.split((_path || _load_path()).default.sep); - while (destParts.length) { - files.add(destParts.join((_path || _load_path()).default.sep).toLowerCase()); - destParts.pop(); - } - - // push all files to queue - invariant(srcFiles, 'src files not initialised'); - let remaining = srcFiles.length; - if (!remaining) { - onDone(); - } - for (var _iterator6 = srcFiles, _isArray6 = Array.isArray(_iterator6), _i6 = 0, _iterator6 = _isArray6 ? 
_iterator6 : _iterator6[Symbol.iterator]();;) { - var _ref8; - - if (_isArray6) { - if (_i6 >= _iterator6.length) break; - _ref8 = _iterator6[_i6++]; - } else { - _i6 = _iterator6.next(); - if (_i6.done) break; - _ref8 = _i6.value; - } - - const file = _ref8; - - queue.push({ - dest: (_path || _load_path()).default.join(dest, file), - onFresh, - onDone: function (_onDone) { - function onDone() { - return _onDone.apply(this, arguments); - } - - onDone.toString = function () { - return _onDone.toString(); - }; - - return onDone; - }(function () { - if (--remaining === 0) { - onDone(); - } - }), - src: (_path || _load_path()).default.join(src, file) - }); - } - } else if (srcStat.isFile()) { - onFresh(); - actions.file.push({ - src, - dest, - atime: srcStat.atime, - mtime: srcStat.mtime, - mode: srcStat.mode - }); - onDone(); - } else { - throw new Error(`unsure how to copy this: ${src}`); - } - }); - - return function build(_x5) { - return _ref5.apply(this, arguments); - }; - })(); - - const artifactFiles = new Set(events.artifactFiles || []); - const files = new Set(); - - // initialise events - for (var _iterator = queue, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { - var _ref2; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref2 = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref2 = _i.value; - } - - const item = _ref2; - - const onDone = item.onDone; - item.onDone = function () { - events.onProgress(item.dest); - if (onDone) { - onDone(); - } - }; - } - events.onStart(queue.length); - - // start building actions - const actions = { - file: [], - symlink: [], - link: [] - }; - - // custom concurrency logic as we're always executing stacks of CONCURRENT_QUEUE_ITEMS queue items - // at a time due to the requirement to push items onto the queue - while (queue.length) { - const items = queue.splice(0, CONCURRENT_QUEUE_ITEMS); - yield Promise.all(items.map(build)); - } - - // simulate the existence of some files to prevent considering them extraneous - for (var _iterator2 = artifactFiles, _isArray2 = Array.isArray(_iterator2), _i2 = 0, _iterator2 = _isArray2 ? _iterator2 : _iterator2[Symbol.iterator]();;) { - var _ref3; - - if (_isArray2) { - if (_i2 >= _iterator2.length) break; - _ref3 = _iterator2[_i2++]; - } else { - _i2 = _iterator2.next(); - if (_i2.done) break; - _ref3 = _i2.value; - } - - const file = _ref3; - - if (possibleExtraneous.has(file)) { - reporter.verbose(reporter.lang('verboseFilePhantomExtraneous', file)); - possibleExtraneous.delete(file); - } - } - - for (var _iterator3 = possibleExtraneous, _isArray3 = Array.isArray(_iterator3), _i3 = 0, _iterator3 = _isArray3 ? 
_iterator3 : _iterator3[Symbol.iterator]();;) { - var _ref4; - - if (_isArray3) { - if (_i3 >= _iterator3.length) break; - _ref4 = _iterator3[_i3++]; - } else { - _i3 = _iterator3.next(); - if (_i3.done) break; - _ref4 = _i3.value; - } - - const loc = _ref4; - - if (files.has(loc.toLowerCase())) { - possibleExtraneous.delete(loc); - } - } - - return actions; - }); - - return function buildActionsForCopy(_x, _x2, _x3, _x4) { - return _ref.apply(this, arguments); - }; -})(); - -let buildActionsForHardlink = (() => { - var _ref9 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, events, possibleExtraneous, reporter) { - - // - let build = (() => { - var _ref13 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - const src = data.src, - dest = data.dest; - - const onFresh = data.onFresh || noop; - const onDone = data.onDone || noop; - if (files.has(dest.toLowerCase())) { - // Fixes issue https://github.com/yarnpkg/yarn/issues/2734 - // When bulk hardlinking we have A -> B structure that we want to hardlink to A1 -> B1, - // package-linker passes that modules A1 and B1 need to be hardlinked, - // the recursive linking algorithm of A1 ends up scheduling files in B1 to be linked twice which will case - // an exception. - onDone(); - return; - } - files.add(dest.toLowerCase()); - - if (events.ignoreBasenames.indexOf((_path || _load_path()).default.basename(src)) >= 0) { - // ignored file - return; - } - - const srcStat = yield lstat(src); - let srcFiles; - - if (srcStat.isDirectory()) { - srcFiles = yield readdir(src); - } - - const destExists = yield exists(dest); - if (destExists) { - const destStat = yield lstat(dest); - - const bothSymlinks = srcStat.isSymbolicLink() && destStat.isSymbolicLink(); - const bothFolders = srcStat.isDirectory() && destStat.isDirectory(); - const bothFiles = srcStat.isFile() && destStat.isFile(); - - if (srcStat.mode !== destStat.mode) { - try { - yield access(dest, srcStat.mode); - } catch (err) { - // EINVAL access errors sometimes happen which shouldn't because node shouldn't be giving - // us modes that aren't valid. investigate this, it's generally safe to proceed. - reporter.verbose(err); - } - } - - if (bothFiles && artifactFiles.has(dest)) { - // this file gets changed during build, likely by a custom install script. Don't bother checking it. - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipArtifact', src)); - return; - } - - // correct hardlink - if (bothFiles && srcStat.ino !== null && srcStat.ino === destStat.ino) { - onDone(); - reporter.verbose(reporter.lang('verboseFileSkip', src, dest, srcStat.ino)); - return; - } - - if (bothSymlinks) { - const srcReallink = yield readlink(src); - if (srcReallink === (yield readlink(dest))) { - // if both symlinks are the same then we can continue on - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipSymlink', src, dest, srcReallink)); - return; - } - } - - if (bothFolders) { - // mark files that aren't in this folder as possibly extraneous - const destFiles = yield readdir(dest); - invariant(srcFiles, 'src files not initialised'); - - for (var _iterator10 = destFiles, _isArray10 = Array.isArray(_iterator10), _i10 = 0, _iterator10 = _isArray10 ? 
_iterator10 : _iterator10[Symbol.iterator]();;) { - var _ref14; - - if (_isArray10) { - if (_i10 >= _iterator10.length) break; - _ref14 = _iterator10[_i10++]; - } else { - _i10 = _iterator10.next(); - if (_i10.done) break; - _ref14 = _i10.value; - } - - const file = _ref14; - - if (srcFiles.indexOf(file) < 0) { - const loc = (_path || _load_path()).default.join(dest, file); - possibleExtraneous.add(loc); - - if ((yield lstat(loc)).isDirectory()) { - for (var _iterator11 = yield readdir(loc), _isArray11 = Array.isArray(_iterator11), _i11 = 0, _iterator11 = _isArray11 ? _iterator11 : _iterator11[Symbol.iterator]();;) { - var _ref15; - - if (_isArray11) { - if (_i11 >= _iterator11.length) break; - _ref15 = _iterator11[_i11++]; - } else { - _i11 = _iterator11.next(); - if (_i11.done) break; - _ref15 = _i11.value; - } - - const file = _ref15; - - possibleExtraneous.add((_path || _load_path()).default.join(loc, file)); - } - } - } - } - } - } - - if (srcStat.isSymbolicLink()) { - onFresh(); - const linkname = yield readlink(src); - actions.symlink.push({ - dest, - linkname - }); - onDone(); - } else if (srcStat.isDirectory()) { - reporter.verbose(reporter.lang('verboseFileFolder', dest)); - yield mkdirp(dest); - - const destParts = dest.split((_path || _load_path()).default.sep); - while (destParts.length) { - files.add(destParts.join((_path || _load_path()).default.sep).toLowerCase()); - destParts.pop(); - } - - // push all files to queue - invariant(srcFiles, 'src files not initialised'); - let remaining = srcFiles.length; - if (!remaining) { - onDone(); - } - for (var _iterator12 = srcFiles, _isArray12 = Array.isArray(_iterator12), _i12 = 0, _iterator12 = _isArray12 ? _iterator12 : _iterator12[Symbol.iterator]();;) { - var _ref16; - - if (_isArray12) { - if (_i12 >= _iterator12.length) break; - _ref16 = _iterator12[_i12++]; - } else { - _i12 = _iterator12.next(); - if (_i12.done) break; - _ref16 = _i12.value; - } - - const file = _ref16; - - queue.push({ - onFresh, - src: (_path || _load_path()).default.join(src, file), - dest: (_path || _load_path()).default.join(dest, file), - onDone: function (_onDone2) { - function onDone() { - return _onDone2.apply(this, arguments); - } - - onDone.toString = function () { - return _onDone2.toString(); - }; - - return onDone; - }(function () { - if (--remaining === 0) { - onDone(); - } - }) - }); - } - } else if (srcStat.isFile()) { - onFresh(); - actions.link.push({ - src, - dest, - removeDest: destExists - }); - onDone(); - } else { - throw new Error(`unsure how to copy this: ${src}`); - } - }); - - return function build(_x10) { - return _ref13.apply(this, arguments); - }; - })(); - - const artifactFiles = new Set(events.artifactFiles || []); - const files = new Set(); - - // initialise events - for (var _iterator7 = queue, _isArray7 = Array.isArray(_iterator7), _i7 = 0, _iterator7 = _isArray7 ? 
_iterator7 : _iterator7[Symbol.iterator]();;) { - var _ref10; - - if (_isArray7) { - if (_i7 >= _iterator7.length) break; - _ref10 = _iterator7[_i7++]; - } else { - _i7 = _iterator7.next(); - if (_i7.done) break; - _ref10 = _i7.value; - } - - const item = _ref10; - - const onDone = item.onDone || noop; - item.onDone = function () { - events.onProgress(item.dest); - onDone(); - }; - } - events.onStart(queue.length); - - // start building actions - const actions = { - file: [], - symlink: [], - link: [] - }; - - // custom concurrency logic as we're always executing stacks of CONCURRENT_QUEUE_ITEMS queue items - // at a time due to the requirement to push items onto the queue - while (queue.length) { - const items = queue.splice(0, CONCURRENT_QUEUE_ITEMS); - yield Promise.all(items.map(build)); - } - - // simulate the existence of some files to prevent considering them extraneous - for (var _iterator8 = artifactFiles, _isArray8 = Array.isArray(_iterator8), _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) { - var _ref11; - - if (_isArray8) { - if (_i8 >= _iterator8.length) break; - _ref11 = _iterator8[_i8++]; - } else { - _i8 = _iterator8.next(); - if (_i8.done) break; - _ref11 = _i8.value; - } - - const file = _ref11; - - if (possibleExtraneous.has(file)) { - reporter.verbose(reporter.lang('verboseFilePhantomExtraneous', file)); - possibleExtraneous.delete(file); - } - } - - for (var _iterator9 = possibleExtraneous, _isArray9 = Array.isArray(_iterator9), _i9 = 0, _iterator9 = _isArray9 ? _iterator9 : _iterator9[Symbol.iterator]();;) { - var _ref12; - - if (_isArray9) { - if (_i9 >= _iterator9.length) break; - _ref12 = _iterator9[_i9++]; - } else { - _i9 = _iterator9.next(); - if (_i9.done) break; - _ref12 = _i9.value; - } - - const loc = _ref12; - - if (files.has(loc.toLowerCase())) { - possibleExtraneous.delete(loc); - } - } - - return actions; - }); - - return function buildActionsForHardlink(_x6, _x7, _x8, _x9) { - return _ref9.apply(this, arguments); - }; -})(); - -let copyBulk = exports.copyBulk = (() => { - var _ref17 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, reporter, _events) { - const events = { - onStart: _events && _events.onStart || noop, - onProgress: _events && _events.onProgress || noop, - possibleExtraneous: _events ? 
_events.possibleExtraneous : new Set(), - ignoreBasenames: _events && _events.ignoreBasenames || [], - artifactFiles: _events && _events.artifactFiles || [] - }; - - const actions = yield buildActionsForCopy(queue, events, events.possibleExtraneous, reporter); - events.onStart(actions.file.length + actions.symlink.length + actions.link.length); - - const fileActions = actions.file; - - const currentlyWriting = new Map(); - - yield (_promise || _load_promise()).queue(fileActions, (() => { - var _ref18 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - let writePromise; - while (writePromise = currentlyWriting.get(data.dest)) { - yield writePromise; - } - - reporter.verbose(reporter.lang('verboseFileCopy', data.src, data.dest)); - const copier = (0, (_fsNormalized || _load_fsNormalized()).copyFile)(data, function () { - return currentlyWriting.delete(data.dest); - }); - currentlyWriting.set(data.dest, copier); - events.onProgress(data.dest); - return copier; - }); - - return function (_x14) { - return _ref18.apply(this, arguments); - }; - })(), CONCURRENT_QUEUE_ITEMS); - - // we need to copy symlinks last as they could reference files we were copying - const symlinkActions = actions.symlink; - yield (_promise || _load_promise()).queue(symlinkActions, function (data) { - const linkname = (_path || _load_path()).default.resolve((_path || _load_path()).default.dirname(data.dest), data.linkname); - reporter.verbose(reporter.lang('verboseFileSymlink', data.dest, linkname)); - return symlink(linkname, data.dest); - }); - }); - - return function copyBulk(_x11, _x12, _x13) { - return _ref17.apply(this, arguments); - }; -})(); - -let hardlinkBulk = exports.hardlinkBulk = (() => { - var _ref19 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, reporter, _events) { - const events = { - onStart: _events && _events.onStart || noop, - onProgress: _events && _events.onProgress || noop, - possibleExtraneous: _events ? 
_events.possibleExtraneous : new Set(), - artifactFiles: _events && _events.artifactFiles || [], - ignoreBasenames: [] - }; - - const actions = yield buildActionsForHardlink(queue, events, events.possibleExtraneous, reporter); - events.onStart(actions.file.length + actions.symlink.length + actions.link.length); - - const fileActions = actions.link; - - yield (_promise || _load_promise()).queue(fileActions, (() => { - var _ref20 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - reporter.verbose(reporter.lang('verboseFileLink', data.src, data.dest)); - if (data.removeDest) { - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(data.dest); - } - yield link(data.src, data.dest); - }); - - return function (_x18) { - return _ref20.apply(this, arguments); - }; - })(), CONCURRENT_QUEUE_ITEMS); - - // we need to copy symlinks last as they could reference files we were copying - const symlinkActions = actions.symlink; - yield (_promise || _load_promise()).queue(symlinkActions, function (data) { - const linkname = (_path || _load_path()).default.resolve((_path || _load_path()).default.dirname(data.dest), data.linkname); - reporter.verbose(reporter.lang('verboseFileSymlink', data.dest, linkname)); - return symlink(linkname, data.dest); - }); - }); - - return function hardlinkBulk(_x15, _x16, _x17) { - return _ref19.apply(this, arguments); - }; -})(); - -let readFileAny = exports.readFileAny = (() => { - var _ref21 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (files) { - for (var _iterator13 = files, _isArray13 = Array.isArray(_iterator13), _i13 = 0, _iterator13 = _isArray13 ? _iterator13 : _iterator13[Symbol.iterator]();;) { - var _ref22; - - if (_isArray13) { - if (_i13 >= _iterator13.length) break; - _ref22 = _iterator13[_i13++]; - } else { - _i13 = _iterator13.next(); - if (_i13.done) break; - _ref22 = _i13.value; - } - - const file = _ref22; - - if (yield exists(file)) { - return readFile(file); - } - } - return null; - }); - - return function readFileAny(_x19) { - return _ref21.apply(this, arguments); - }; -})(); - -let readJson = exports.readJson = (() => { - var _ref23 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { - return (yield readJsonAndFile(loc)).object; - }); - - return function readJson(_x20) { - return _ref23.apply(this, arguments); - }; -})(); - -let readJsonAndFile = exports.readJsonAndFile = (() => { - var _ref24 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { - const file = yield readFile(loc); - try { - return { - object: (0, (_map || _load_map()).default)(JSON.parse(stripBOM(file))), - content: file - }; - } catch (err) { - err.message = `${loc}: ${err.message}`; - throw err; - } - }); - - return function readJsonAndFile(_x21) { - return _ref24.apply(this, arguments); - }; -})(); - -let find = exports.find = (() => { - var _ref25 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (filename, dir) { - const parts = dir.split((_path || _load_path()).default.sep); - - while (parts.length) { - const loc = parts.concat(filename).join((_path || _load_path()).default.sep); - - if (yield exists(loc)) { - return loc; - } else { - parts.pop(); - } - } - - return false; - }); - - return function find(_x22, _x23) { - return _ref25.apply(this, arguments); - }; -})(); - -let symlink = exports.symlink = (() => { - var _ref26 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (src, dest) { - if (process.platform 
!== 'win32') { - // use relative paths otherwise which will be retained if the directory is moved - src = (_path || _load_path()).default.relative((_path || _load_path()).default.dirname(dest), src); - // When path.relative returns an empty string for the current directory, we should instead use - // '.', which is a valid fs.symlink target. - src = src || '.'; - } - - try { - const stats = yield lstat(dest); - if (stats.isSymbolicLink()) { - const resolved = dest; - if (resolved === src) { - return; - } - } - } catch (err) { - if (err.code !== 'ENOENT') { - throw err; - } - } - - // We use rimraf for unlink which never throws an ENOENT on missing target - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dest); - - if (process.platform === 'win32') { - // use directory junctions if possible on win32, this requires absolute paths - yield fsSymlink(src, dest, 'junction'); - } else { - yield fsSymlink(src, dest); - } - }); - - return function symlink(_x24, _x25) { - return _ref26.apply(this, arguments); - }; -})(); - -let walk = exports.walk = (() => { - var _ref27 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (dir, relativeDir, ignoreBasenames = new Set()) { - let files = []; - - let filenames = yield readdir(dir); - if (ignoreBasenames.size) { - filenames = filenames.filter(function (name) { - return !ignoreBasenames.has(name); - }); - } - - for (var _iterator14 = filenames, _isArray14 = Array.isArray(_iterator14), _i14 = 0, _iterator14 = _isArray14 ? _iterator14 : _iterator14[Symbol.iterator]();;) { - var _ref28; - - if (_isArray14) { - if (_i14 >= _iterator14.length) break; - _ref28 = _iterator14[_i14++]; - } else { - _i14 = _iterator14.next(); - if (_i14.done) break; - _ref28 = _i14.value; - } - - const name = _ref28; - - const relative = relativeDir ? 
(_path || _load_path()).default.join(relativeDir, name) : name; - const loc = (_path || _load_path()).default.join(dir, name); - const stat = yield lstat(loc); - - files.push({ - relative, - basename: name, - absolute: loc, - mtime: +stat.mtime - }); - - if (stat.isDirectory()) { - files = files.concat((yield walk(loc, relative, ignoreBasenames))); - } - } - - return files; - }); - - return function walk(_x26, _x27) { - return _ref27.apply(this, arguments); - }; -})(); - -let getFileSizeOnDisk = exports.getFileSizeOnDisk = (() => { - var _ref29 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { - const stat = yield lstat(loc); - const size = stat.size, - blockSize = stat.blksize; - - - return Math.ceil(size / blockSize) * blockSize; - }); - - return function getFileSizeOnDisk(_x28) { - return _ref29.apply(this, arguments); - }; -})(); - -let getEolFromFile = (() => { - var _ref30 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (path) { - if (!(yield exists(path))) { - return undefined; - } - - const buffer = yield readFileBuffer(path); - - for (let i = 0; i < buffer.length; ++i) { - if (buffer[i] === cr) { - return '\r\n'; - } - if (buffer[i] === lf) { - return '\n'; - } - } - return undefined; - }); - - return function getEolFromFile(_x29) { - return _ref30.apply(this, arguments); - }; -})(); - -let writeFilePreservingEol = exports.writeFilePreservingEol = (() => { - var _ref31 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (path, data) { - const eol = (yield getEolFromFile(path)) || (_os || _load_os()).default.EOL; - if (eol !== '\n') { - data = data.replace(/\n/g, eol); - } - yield writeFile(path, data); - }); - - return function writeFilePreservingEol(_x30, _x31) { - return _ref31.apply(this, arguments); - }; -})(); - -let hardlinksWork = exports.hardlinksWork = (() => { - var _ref32 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (dir) { - const filename = 'test-file' + Math.random(); - const file = (_path || _load_path()).default.join(dir, filename); - const fileLink = (_path || _load_path()).default.join(dir, filename + '-link'); - try { - yield writeFile(file, 'test'); - yield link(file, fileLink); - } catch (err) { - return false; - } finally { - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(file); - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(fileLink); - } - return true; - }); - - return function hardlinksWork(_x32) { - return _ref32.apply(this, arguments); - }; -})(); - -// not a strict polyfill for Node's fs.mkdtemp - - -let makeTempDir = exports.makeTempDir = (() => { - var _ref33 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (prefix) { - const dir = (_path || _load_path()).default.join((_os || _load_os()).default.tmpdir(), `yarn-${prefix || ''}-${Date.now()}-${Math.random()}`); - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dir); - yield mkdirp(dir); - return dir; - }); - - return function makeTempDir(_x33) { - return _ref33.apply(this, arguments); - }; -})(); - -let readFirstAvailableStream = exports.readFirstAvailableStream = (() => { - var _ref34 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (paths) { - for (var _iterator15 = paths, _isArray15 = Array.isArray(_iterator15), _i15 = 0, _iterator15 = _isArray15 ? 
_iterator15 : _iterator15[Symbol.iterator]();;) { - var _ref35; - - if (_isArray15) { - if (_i15 >= _iterator15.length) break; - _ref35 = _iterator15[_i15++]; - } else { - _i15 = _iterator15.next(); - if (_i15.done) break; - _ref35 = _i15.value; - } - - const path = _ref35; - - try { - const fd = yield open(path, 'r'); - return (_fs || _load_fs()).default.createReadStream(path, { fd }); - } catch (err) { - // Try the next one - } - } - return null; - }); - - return function readFirstAvailableStream(_x34) { - return _ref34.apply(this, arguments); - }; -})(); - -let getFirstSuitableFolder = exports.getFirstSuitableFolder = (() => { - var _ref36 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (paths, mode = constants.W_OK | constants.X_OK) { - const result = { - skipped: [], - folder: null - }; - - for (var _iterator16 = paths, _isArray16 = Array.isArray(_iterator16), _i16 = 0, _iterator16 = _isArray16 ? _iterator16 : _iterator16[Symbol.iterator]();;) { - var _ref37; - - if (_isArray16) { - if (_i16 >= _iterator16.length) break; - _ref37 = _iterator16[_i16++]; - } else { - _i16 = _iterator16.next(); - if (_i16.done) break; - _ref37 = _i16.value; - } - - const folder = _ref37; - - try { - yield mkdirp(folder); - yield access(folder, mode); - - result.folder = folder; - - return result; - } catch (error) { - result.skipped.push({ - error, - folder - }); - } - } - return result; - }); - - return function getFirstSuitableFolder(_x35) { - return _ref36.apply(this, arguments); - }; -})(); - -exports.copy = copy; -exports.readFile = readFile; -exports.readFileRaw = readFileRaw; -exports.normalizeOS = normalizeOS; - -var _fs; - -function _load_fs() { - return _fs = _interopRequireDefault(__webpack_require__(5)); -} - -var _glob; - -function _load_glob() { - return _glob = _interopRequireDefault(__webpack_require__(99)); -} - -var _os; - -function _load_os() { - return _os = _interopRequireDefault(__webpack_require__(49)); -} - -var _path; - -function _load_path() { - return _path = _interopRequireDefault(__webpack_require__(0)); -} - -var _blockingQueue; - -function _load_blockingQueue() { - return _blockingQueue = _interopRequireDefault(__webpack_require__(110)); -} - -var _promise; - -function _load_promise() { - return _promise = _interopRequireWildcard(__webpack_require__(50)); -} - -var _promise2; - -function _load_promise2() { - return _promise2 = __webpack_require__(50); -} - -var _map; - -function _load_map() { - return _map = _interopRequireDefault(__webpack_require__(29)); -} - -var _fsNormalized; - -function _load_fsNormalized() { - return _fsNormalized = __webpack_require__(218); -} - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const constants = exports.constants = typeof (_fs || _load_fs()).default.constants !== 'undefined' ? 
(_fs || _load_fs()).default.constants : { - R_OK: (_fs || _load_fs()).default.R_OK, - W_OK: (_fs || _load_fs()).default.W_OK, - X_OK: (_fs || _load_fs()).default.X_OK -}; - -const lockQueue = exports.lockQueue = new (_blockingQueue || _load_blockingQueue()).default('fs lock'); - -const readFileBuffer = exports.readFileBuffer = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readFile); -const open = exports.open = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.open); -const writeFile = exports.writeFile = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.writeFile); -const readlink = exports.readlink = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readlink); -const realpath = exports.realpath = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.realpath); -const readdir = exports.readdir = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readdir); -const rename = exports.rename = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.rename); -const access = exports.access = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.access); -const stat = exports.stat = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.stat); -const mkdirp = exports.mkdirp = (0, (_promise2 || _load_promise2()).promisify)(__webpack_require__(145)); -const exists = exports.exists = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.exists, true); -const lstat = exports.lstat = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.lstat); -const chmod = exports.chmod = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.chmod); -const link = exports.link = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.link); -const glob = exports.glob = (0, (_promise2 || _load_promise2()).promisify)((_glob || _load_glob()).default); -exports.unlink = (_fsNormalized || _load_fsNormalized()).unlink; - -// fs.copyFile uses the native file copying instructions on the system, performing much better -// than any JS-based solution and consumes fewer resources. Repeated testing to fine tune the -// concurrency level revealed 128 as the sweet spot on a quad-core, 16 CPU Intel system with SSD. - -const CONCURRENT_QUEUE_ITEMS = (_fs || _load_fs()).default.copyFile ? 
128 : 4; - -const fsSymlink = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.symlink); -const invariant = __webpack_require__(9); -const stripBOM = __webpack_require__(160); - -const noop = () => {}; - -function copy(src, dest, reporter) { - return copyBulk([{ src, dest }], reporter); -} - -function _readFile(loc, encoding) { - return new Promise((resolve, reject) => { - (_fs || _load_fs()).default.readFile(loc, encoding, function (err, content) { - if (err) { - reject(err); - } else { - resolve(content); - } - }); - }); -} - -function readFile(loc) { - return _readFile(loc, 'utf8').then(normalizeOS); -} - -function readFileRaw(loc) { - return _readFile(loc, 'binary'); -} - -function normalizeOS(body) { - return body.replace(/\r\n/g, '\n'); -} - -const cr = '\r'.charCodeAt(0); -const lf = '\n'.charCodeAt(0); - -/***/ }), -/* 5 */ -/***/ (function(module, exports) { - -module.exports = require("fs"); - -/***/ }), -/* 6 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -class MessageError extends Error { - constructor(msg, code) { - super(msg); - this.code = code; - } - -} - -exports.MessageError = MessageError; -class ProcessSpawnError extends MessageError { - constructor(msg, code, process) { - super(msg, code); - this.process = process; - } - -} - -exports.ProcessSpawnError = ProcessSpawnError; -class SecurityError extends MessageError {} - -exports.SecurityError = SecurityError; -class ProcessTermError extends MessageError {} - -exports.ProcessTermError = ProcessTermError; -class ResponseError extends Error { - constructor(msg, responseCode) { - super(msg); - this.responseCode = responseCode; - } - -} - -exports.ResponseError = ResponseError; -class OneTimePasswordError extends Error {} -exports.OneTimePasswordError = OneTimePasswordError; - -/***/ }), -/* 7 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subscriber; }); -/* unused harmony export SafeSubscriber */ -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_isFunction__ = __webpack_require__(154); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__Observer__ = __webpack_require__(420); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__Subscription__ = __webpack_require__(25); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__internal_symbol_rxSubscriber__ = __webpack_require__(321); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__config__ = __webpack_require__(185); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__util_hostReportError__ = __webpack_require__(323); -/** PURE_IMPORTS_START tslib,_util_isFunction,_Observer,_Subscription,_internal_symbol_rxSubscriber,_config,_util_hostReportError PURE_IMPORTS_END */ - - - - - - - -var Subscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](Subscriber, _super); - function Subscriber(destinationOrNext, error, complete) { - var _this = _super.call(this) || this; - _this.syncErrorValue = null; - _this.syncErrorThrown = false; - _this.syncErrorThrowable = false; - _this.isStopped = false; - _this._parentSubscription = null; - switch (arguments.length) { - case 0: - _this.destination = __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]; - break; - 
case 1: - if (!destinationOrNext) { - _this.destination = __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]; - break; - } - if (typeof destinationOrNext === 'object') { - if (destinationOrNext instanceof Subscriber) { - _this.syncErrorThrowable = destinationOrNext.syncErrorThrowable; - _this.destination = destinationOrNext; - destinationOrNext.add(_this); - } - else { - _this.syncErrorThrowable = true; - _this.destination = new SafeSubscriber(_this, destinationOrNext); - } - break; - } - default: - _this.syncErrorThrowable = true; - _this.destination = new SafeSubscriber(_this, destinationOrNext, error, complete); - break; - } - return _this; - } - Subscriber.prototype[__WEBPACK_IMPORTED_MODULE_4__internal_symbol_rxSubscriber__["a" /* rxSubscriber */]] = function () { return this; }; - Subscriber.create = function (next, error, complete) { - var subscriber = new Subscriber(next, error, complete); - subscriber.syncErrorThrowable = false; - return subscriber; - }; - Subscriber.prototype.next = function (value) { - if (!this.isStopped) { - this._next(value); - } - }; - Subscriber.prototype.error = function (err) { - if (!this.isStopped) { - this.isStopped = true; - this._error(err); - } - }; - Subscriber.prototype.complete = function () { - if (!this.isStopped) { - this.isStopped = true; - this._complete(); - } - }; - Subscriber.prototype.unsubscribe = function () { - if (this.closed) { - return; - } - this.isStopped = true; - _super.prototype.unsubscribe.call(this); - }; - Subscriber.prototype._next = function (value) { - this.destination.next(value); - }; - Subscriber.prototype._error = function (err) { - this.destination.error(err); - this.unsubscribe(); - }; - Subscriber.prototype._complete = function () { - this.destination.complete(); - this.unsubscribe(); - }; - Subscriber.prototype._unsubscribeAndRecycle = function () { - var _a = this, _parent = _a._parent, _parents = _a._parents; - this._parent = null; - this._parents = null; - this.unsubscribe(); - this.closed = false; - this.isStopped = false; - this._parent = _parent; - this._parents = _parents; - this._parentSubscription = null; - return this; - }; - return Subscriber; -}(__WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */])); - -var SafeSubscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](SafeSubscriber, _super); - function SafeSubscriber(_parentSubscriber, observerOrNext, error, complete) { - var _this = _super.call(this) || this; - _this._parentSubscriber = _parentSubscriber; - var next; - var context = _this; - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isFunction__["a" /* isFunction */])(observerOrNext)) { - next = observerOrNext; - } - else if (observerOrNext) { - next = observerOrNext.next; - error = observerOrNext.error; - complete = observerOrNext.complete; - if (observerOrNext !== __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]) { - context = Object.create(observerOrNext); - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isFunction__["a" /* isFunction */])(context.unsubscribe)) { - _this.add(context.unsubscribe.bind(context)); - } - context.unsubscribe = _this.unsubscribe.bind(_this); - } - } - _this._context = context; - _this._next = next; - _this._error = error; - _this._complete = complete; - return _this; - } - SafeSubscriber.prototype.next = function (value) { - if (!this.isStopped && this._next) { - var _parentSubscriber = this._parentSubscriber; - if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* 
config */].useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { - this.__tryOrUnsub(this._next, value); - } - else if (this.__tryOrSetError(_parentSubscriber, this._next, value)) { - this.unsubscribe(); - } - } - }; - SafeSubscriber.prototype.error = function (err) { - if (!this.isStopped) { - var _parentSubscriber = this._parentSubscriber; - var useDeprecatedSynchronousErrorHandling = __WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling; - if (this._error) { - if (!useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { - this.__tryOrUnsub(this._error, err); - this.unsubscribe(); - } - else { - this.__tryOrSetError(_parentSubscriber, this._error, err); - this.unsubscribe(); - } - } - else if (!_parentSubscriber.syncErrorThrowable) { - this.unsubscribe(); - if (useDeprecatedSynchronousErrorHandling) { - throw err; - } - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - } - else { - if (useDeprecatedSynchronousErrorHandling) { - _parentSubscriber.syncErrorValue = err; - _parentSubscriber.syncErrorThrown = true; - } - else { - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - } - this.unsubscribe(); - } - } - }; - SafeSubscriber.prototype.complete = function () { - var _this = this; - if (!this.isStopped) { - var _parentSubscriber = this._parentSubscriber; - if (this._complete) { - var wrappedComplete = function () { return _this._complete.call(_this._context); }; - if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { - this.__tryOrUnsub(wrappedComplete); - this.unsubscribe(); - } - else { - this.__tryOrSetError(_parentSubscriber, wrappedComplete); - this.unsubscribe(); - } - } - else { - this.unsubscribe(); - } - } - }; - SafeSubscriber.prototype.__tryOrUnsub = function (fn, value) { - try { - fn.call(this._context, value); - } - catch (err) { - this.unsubscribe(); - if (__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - throw err; - } - else { - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - } - } - }; - SafeSubscriber.prototype.__tryOrSetError = function (parent, fn, value) { - if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - throw new Error('bad call'); - } - try { - fn.call(this._context, value); - } - catch (err) { - if (__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - parent.syncErrorValue = err; - parent.syncErrorThrown = true; - return true; - } - else { - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - return true; - } - } - return false; - }; - SafeSubscriber.prototype._unsubscribe = function () { - var _parentSubscriber = this._parentSubscriber; - this._context = null; - this._parentSubscriber = null; - _parentSubscriber.unsubscribe(); - }; - return SafeSubscriber; -}(Subscriber)); - -//# sourceMappingURL=Subscriber.js.map - - -/***/ }), -/* 8 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.getPathKey = getPathKey; -const os = __webpack_require__(49); -const path = __webpack_require__(0); -const 
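// Sketch of the observer-wrapping behavior implemented by Subscriber /
// SafeSubscriber above, shown through the public rxjs API (assumes an
// rxjs v6-style install; the bundled copy is the internal implementation).
const { Observable } = require('rxjs');

const source = new Observable(subscriber => {
  subscriber.next(1);
  subscriber.next(2);
  subscriber.complete();
});

// A partial observer object is wrapped in a SafeSubscriber internally;
// callbacks that are missing (here: error) are simply skipped.
source.subscribe({
  next: value => console.log('next', value),
  complete: () => console.log('done'),
});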
userHome = __webpack_require__(67).default; - -var _require = __webpack_require__(225); - -const getCacheDir = _require.getCacheDir, - getConfigDir = _require.getConfigDir, - getDataDir = _require.getDataDir; - -const isWebpackBundle = __webpack_require__(278); - -const DEPENDENCY_TYPES = exports.DEPENDENCY_TYPES = ['devDependencies', 'dependencies', 'optionalDependencies', 'peerDependencies']; -const OWNED_DEPENDENCY_TYPES = exports.OWNED_DEPENDENCY_TYPES = ['devDependencies', 'dependencies', 'optionalDependencies']; - -const RESOLUTIONS = exports.RESOLUTIONS = 'resolutions'; -const MANIFEST_FIELDS = exports.MANIFEST_FIELDS = [RESOLUTIONS, ...DEPENDENCY_TYPES]; - -const SUPPORTED_NODE_VERSIONS = exports.SUPPORTED_NODE_VERSIONS = '^4.8.0 || ^5.7.0 || ^6.2.2 || >=8.0.0'; - -const YARN_REGISTRY = exports.YARN_REGISTRY = 'https://registry.yarnpkg.com'; -const NPM_REGISTRY_RE = exports.NPM_REGISTRY_RE = /https?:\/\/registry\.npmjs\.org/g; - -const YARN_DOCS = exports.YARN_DOCS = 'https://yarnpkg.com/en/docs/cli/'; -const YARN_INSTALLER_SH = exports.YARN_INSTALLER_SH = 'https://yarnpkg.com/install.sh'; -const YARN_INSTALLER_MSI = exports.YARN_INSTALLER_MSI = 'https://yarnpkg.com/latest.msi'; - -const SELF_UPDATE_VERSION_URL = exports.SELF_UPDATE_VERSION_URL = 'https://yarnpkg.com/latest-version'; - -// cache version, bump whenever we make backwards incompatible changes -const CACHE_VERSION = exports.CACHE_VERSION = 6; - -// lockfile version, bump whenever we make backwards incompatible changes -const LOCKFILE_VERSION = exports.LOCKFILE_VERSION = 1; - -// max amount of network requests to perform concurrently -const NETWORK_CONCURRENCY = exports.NETWORK_CONCURRENCY = 8; - -// HTTP timeout used when downloading packages -const NETWORK_TIMEOUT = exports.NETWORK_TIMEOUT = 30 * 1000; // in milliseconds - -// max amount of child processes to execute concurrently -const CHILD_CONCURRENCY = exports.CHILD_CONCURRENCY = 5; - -const REQUIRED_PACKAGE_KEYS = exports.REQUIRED_PACKAGE_KEYS = ['name', 'version', '_uid']; - -function getPreferredCacheDirectories() { - const preferredCacheDirectories = [getCacheDir()]; - - if (process.getuid) { - // $FlowFixMe: process.getuid exists, dammit - preferredCacheDirectories.push(path.join(os.tmpdir(), `.yarn-cache-${process.getuid()}`)); - } - - preferredCacheDirectories.push(path.join(os.tmpdir(), `.yarn-cache`)); - - return preferredCacheDirectories; -} - -const PREFERRED_MODULE_CACHE_DIRECTORIES = exports.PREFERRED_MODULE_CACHE_DIRECTORIES = getPreferredCacheDirectories(); -const CONFIG_DIRECTORY = exports.CONFIG_DIRECTORY = getConfigDir(); -const DATA_DIRECTORY = exports.DATA_DIRECTORY = getDataDir(); -const LINK_REGISTRY_DIRECTORY = exports.LINK_REGISTRY_DIRECTORY = path.join(DATA_DIRECTORY, 'link'); -const GLOBAL_MODULE_DIRECTORY = exports.GLOBAL_MODULE_DIRECTORY = path.join(DATA_DIRECTORY, 'global'); - -const NODE_BIN_PATH = exports.NODE_BIN_PATH = process.execPath; -const YARN_BIN_PATH = exports.YARN_BIN_PATH = getYarnBinPath(); - -// Webpack needs to be configured with node.__dirname/__filename = false -function getYarnBinPath() { - if (isWebpackBundle) { - return __filename; - } else { - return path.join(__dirname, '..', 'bin', 'yarn.js'); - } -} - -const NODE_MODULES_FOLDER = exports.NODE_MODULES_FOLDER = 'node_modules'; -const NODE_PACKAGE_JSON = exports.NODE_PACKAGE_JSON = 'package.json'; - -const PNP_FILENAME = exports.PNP_FILENAME = '.pnp.js'; - -const POSIX_GLOBAL_PREFIX = exports.POSIX_GLOBAL_PREFIX = `${process.env.DESTDIR || ''}/usr/local`; -const 
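// Standalone sketch of the cache-directory fallback order computed by
// getPreferredCacheDirectories above: the platform cache dir first, then a
// uid-scoped tmp dir on POSIX, then a shared tmp dir. platformCacheDir
// stands in for the getCacheDir() helper (an assumption for illustration).
const os = require('os');
const path = require('path');

function preferredCacheDirectories(platformCacheDir) {
  const dirs = [platformCacheDir];
  if (process.getuid) {
    // POSIX only: scope the tmp-dir cache to the current user.
    dirs.push(path.join(os.tmpdir(), `.yarn-cache-${process.getuid()}`));
  }
  dirs.push(path.join(os.tmpdir(), '.yarn-cache'));
  return dirs;
}

console.log(preferredCacheDirectories('/tmp/example-cache'));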
FALLBACK_GLOBAL_PREFIX = exports.FALLBACK_GLOBAL_PREFIX = path.join(userHome, '.yarn'); - -const META_FOLDER = exports.META_FOLDER = '.yarn-meta'; -const INTEGRITY_FILENAME = exports.INTEGRITY_FILENAME = '.yarn-integrity'; -const LOCKFILE_FILENAME = exports.LOCKFILE_FILENAME = 'yarn.lock'; -const METADATA_FILENAME = exports.METADATA_FILENAME = '.yarn-metadata.json'; -const TARBALL_FILENAME = exports.TARBALL_FILENAME = '.yarn-tarball.tgz'; -const CLEAN_FILENAME = exports.CLEAN_FILENAME = '.yarnclean'; - -const NPM_LOCK_FILENAME = exports.NPM_LOCK_FILENAME = 'package-lock.json'; -const NPM_SHRINKWRAP_FILENAME = exports.NPM_SHRINKWRAP_FILENAME = 'npm-shrinkwrap.json'; - -const DEFAULT_INDENT = exports.DEFAULT_INDENT = ' '; -const SINGLE_INSTANCE_PORT = exports.SINGLE_INSTANCE_PORT = 31997; -const SINGLE_INSTANCE_FILENAME = exports.SINGLE_INSTANCE_FILENAME = '.yarn-single-instance'; - -const ENV_PATH_KEY = exports.ENV_PATH_KEY = getPathKey(process.platform, process.env); - -function getPathKey(platform, env) { - let pathKey = 'PATH'; - - // windows calls its path "Path" usually, but this is not guaranteed. - if (platform === 'win32') { - pathKey = 'Path'; - - for (const key in env) { - if (key.toLowerCase() === 'path') { - pathKey = key; - } - } - } - - return pathKey; -} - -const VERSION_COLOR_SCHEME = exports.VERSION_COLOR_SCHEME = { - major: 'red', - premajor: 'red', - minor: 'yellow', - preminor: 'yellow', - patch: 'green', - prepatch: 'green', - prerelease: 'red', - unchanged: 'white', - unknown: 'red' -}; - -/***/ }), -/* 9 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; -/** - * Copyright (c) 2013-present, Facebook, Inc. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - - - -/** - * Use invariant() to assert state which your program assumes to be true. - * - * Provide sprintf-style format (only %s is supported) and arguments - * to provide information about what broke and what you were - * expecting. - * - * The invariant message will be stripped in production, but the invariant - * will remain to ensure logic does not differ in production. - */ - -var NODE_ENV = process.env.NODE_ENV; - -var invariant = function(condition, format, a, b, c, d, e, f) { - if (NODE_ENV !== 'production') { - if (format === undefined) { - throw new Error('invariant requires an error message argument'); - } - } - - if (!condition) { - var error; - if (format === undefined) { - error = new Error( - 'Minified exception occurred; use the non-minified dev environment ' + - 'for the full error message and additional helpful warnings.' 
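// Restatement of the getPathKey logic defined above as a standalone sketch:
// on Windows the PATH variable's casing is not guaranteed ("Path" is common
// but not certain), so the environment is scanned case-insensitively.
function getPathKeySketch(platform, env) {
  let pathKey = 'PATH';
  if (platform === 'win32') {
    pathKey = 'Path';
    for (const key in env) {
      // Any key spelled "path" in any casing is picked up.
      if (key.toLowerCase() === 'path') {
        pathKey = key;
      }
    }
  }
  return pathKey;
}

console.log(getPathKeySketch('win32', { PaTh: 'C:\\Windows' })); // 'PaTh'
console.log(getPathKeySketch('linux', process.env));             // 'PATH'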
- ); - } else { - var args = [a, b, c, d, e, f]; - var argIndex = 0; - error = new Error( - format.replace(/%s/g, function() { return args[argIndex++]; }) - ); - error.name = 'Invariant Violation'; - } - - error.framesToPop = 1; // we don't care about invariant's own frame - throw error; - } -}; - -module.exports = invariant; - - -/***/ }), -/* 10 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -var YAMLException = __webpack_require__(54); - -var TYPE_CONSTRUCTOR_OPTIONS = [ - 'kind', - 'resolve', - 'construct', - 'instanceOf', - 'predicate', - 'represent', - 'defaultStyle', - 'styleAliases' -]; - -var YAML_NODE_KINDS = [ - 'scalar', - 'sequence', - 'mapping' -]; - -function compileStyleAliases(map) { - var result = {}; - - if (map !== null) { - Object.keys(map).forEach(function (style) { - map[style].forEach(function (alias) { - result[String(alias)] = style; - }); - }); - } - - return result; -} - -function Type(tag, options) { - options = options || {}; - - Object.keys(options).forEach(function (name) { - if (TYPE_CONSTRUCTOR_OPTIONS.indexOf(name) === -1) { - throw new YAMLException('Unknown option "' + name + '" is met in definition of "' + tag + '" YAML type.'); - } - }); - - // TODO: Add tag format check. - this.tag = tag; - this.kind = options['kind'] || null; - this.resolve = options['resolve'] || function () { return true; }; - this.construct = options['construct'] || function (data) { return data; }; - this.instanceOf = options['instanceOf'] || null; - this.predicate = options['predicate'] || null; - this.represent = options['represent'] || null; - this.defaultStyle = options['defaultStyle'] || null; - this.styleAliases = compileStyleAliases(options['styleAliases'] || null); - - if (YAML_NODE_KINDS.indexOf(this.kind) === -1) { - throw new YAMLException('Unknown kind "' + this.kind + '" is specified for "' + tag + '" YAML type.'); - } -} - -module.exports = Type; - - -/***/ }), -/* 11 */ -/***/ (function(module, exports) { - -module.exports = require("crypto"); - -/***/ }), -/* 12 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Observable; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__util_canReportError__ = __webpack_require__(322); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_toSubscriber__ = __webpack_require__(932); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__internal_symbol_observable__ = __webpack_require__(117); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__util_pipe__ = __webpack_require__(324); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__config__ = __webpack_require__(185); -/** PURE_IMPORTS_START _util_canReportError,_util_toSubscriber,_internal_symbol_observable,_util_pipe,_config PURE_IMPORTS_END */ - - - - - -var Observable = /*@__PURE__*/ (function () { - function Observable(subscribe) { - this._isScalar = false; - if (subscribe) { - this._subscribe = subscribe; - } - } - Observable.prototype.lift = function (operator) { - var observable = new Observable(); - observable.source = this; - observable.operator = operator; - return observable; - }; - Observable.prototype.subscribe = function (observerOrNext, error, complete) { - var operator = this.operator; - var sink = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_toSubscriber__["a" /* toSubscriber */])(observerOrNext, error, complete); - if (operator) { - operator.call(sink, 
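// Usage sketch for the invariant helper above: assert state the program
// relies on, with a sprintf-style message where only %s is interpolated
// (assumes the npm 'invariant' package, which this bundled module mirrors).
const invariant = require('invariant');

function setPort(port) {
  invariant(Number.isInteger(port) && port > 0,
    'expected a positive integer port, got %s', port);
  return port;
}

setPort(8080);  // passes silently
// setPort(-1); // throws "Invariant Violation: expected a positive integer port, got -1"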
this.source); - } - else { - sink.add(this.source || (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling && !sink.syncErrorThrowable) ? - this._subscribe(sink) : - this._trySubscribe(sink)); - } - if (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - if (sink.syncErrorThrowable) { - sink.syncErrorThrowable = false; - if (sink.syncErrorThrown) { - throw sink.syncErrorValue; - } - } - } - return sink; - }; - Observable.prototype._trySubscribe = function (sink) { - try { - return this._subscribe(sink); - } - catch (err) { - if (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - sink.syncErrorThrown = true; - sink.syncErrorValue = err; - } - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_0__util_canReportError__["a" /* canReportError */])(sink)) { - sink.error(err); - } - else { - console.warn(err); - } - } - }; - Observable.prototype.forEach = function (next, promiseCtor) { - var _this = this; - promiseCtor = getPromiseCtor(promiseCtor); - return new promiseCtor(function (resolve, reject) { - var subscription; - subscription = _this.subscribe(function (value) { - try { - next(value); - } - catch (err) { - reject(err); - if (subscription) { - subscription.unsubscribe(); - } - } - }, reject, resolve); - }); - }; - Observable.prototype._subscribe = function (subscriber) { - var source = this.source; - return source && source.subscribe(subscriber); - }; - Observable.prototype[__WEBPACK_IMPORTED_MODULE_2__internal_symbol_observable__["a" /* observable */]] = function () { - return this; - }; - Observable.prototype.pipe = function () { - var operations = []; - for (var _i = 0; _i < arguments.length; _i++) { - operations[_i] = arguments[_i]; - } - if (operations.length === 0) { - return this; - } - return __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_pipe__["b" /* pipeFromArray */])(operations)(this); - }; - Observable.prototype.toPromise = function (promiseCtor) { - var _this = this; - promiseCtor = getPromiseCtor(promiseCtor); - return new promiseCtor(function (resolve, reject) { - var value; - _this.subscribe(function (x) { return value = x; }, function (err) { return reject(err); }, function () { return resolve(value); }); - }); - }; - Observable.create = function (subscribe) { - return new Observable(subscribe); - }; - return Observable; -}()); - -function getPromiseCtor(promiseCtor) { - if (!promiseCtor) { - promiseCtor = __WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].Promise || Promise; - } - if (!promiseCtor) { - throw new Error('no Promise impl found'); - } - return promiseCtor; -} -//# sourceMappingURL=Observable.js.map - - -/***/ }), -/* 13 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return OuterSubscriber; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__Subscriber__ = __webpack_require__(7); -/** PURE_IMPORTS_START tslib,_Subscriber PURE_IMPORTS_END */ - - -var OuterSubscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](OuterSubscriber, _super); - function OuterSubscriber() { - return _super !== null && _super.apply(this, arguments) || this; - } - OuterSubscriber.prototype.notifyNext = function (outerValue, innerValue, outerIndex, innerIndex, 
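// Sketch of the pipe/toPromise behavior defined on Observable above, via
// the public rxjs API (assumes rxjs v6 with the 'of' creator and the map
// operator; the bundled copy is the internal equivalent).
const { of } = require('rxjs');
const { map } = require('rxjs/operators');

of(1, 2, 3)
  .pipe(map(x => x * 10))
  .toPromise()                      // resolves with the last emitted value
  .then(last => console.log(last)); // 30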
innerSub) { - this.destination.next(innerValue); - }; - OuterSubscriber.prototype.notifyError = function (error, innerSub) { - this.destination.error(error); - }; - OuterSubscriber.prototype.notifyComplete = function (innerSub) { - this.destination.complete(); - }; - return OuterSubscriber; -}(__WEBPACK_IMPORTED_MODULE_1__Subscriber__["a" /* Subscriber */])); - -//# sourceMappingURL=OuterSubscriber.js.map - - -/***/ }), -/* 14 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (immutable) */ __webpack_exports__["a"] = subscribeToResult; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__InnerSubscriber__ = __webpack_require__(84); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__subscribeTo__ = __webpack_require__(446); -/** PURE_IMPORTS_START _InnerSubscriber,_subscribeTo PURE_IMPORTS_END */ - - -function subscribeToResult(outerSubscriber, result, outerValue, outerIndex, destination) { - if (destination === void 0) { - destination = new __WEBPACK_IMPORTED_MODULE_0__InnerSubscriber__["a" /* InnerSubscriber */](outerSubscriber, outerValue, outerIndex); - } - if (destination.closed) { - return; - } - return __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__subscribeTo__["a" /* subscribeTo */])(result)(destination); -} -//# sourceMappingURL=subscribeToResult.js.map - - -/***/ }), -/* 15 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; -/* eslint-disable node/no-deprecated-api */ - - - -var buffer = __webpack_require__(64) -var Buffer = buffer.Buffer - -var safer = {} - -var key - -for (key in buffer) { - if (!buffer.hasOwnProperty(key)) continue - if (key === 'SlowBuffer' || key === 'Buffer') continue - safer[key] = buffer[key] -} - -var Safer = safer.Buffer = {} -for (key in Buffer) { - if (!Buffer.hasOwnProperty(key)) continue - if (key === 'allocUnsafe' || key === 'allocUnsafeSlow') continue - Safer[key] = Buffer[key] -} - -safer.Buffer.prototype = Buffer.prototype - -if (!Safer.from || Safer.from === Uint8Array.from) { - Safer.from = function (value, encodingOrOffset, length) { - if (typeof value === 'number') { - throw new TypeError('The "value" argument must not be of type number. Received type ' + typeof value) - } - if (value && typeof value.length === 'undefined') { - throw new TypeError('The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type ' + typeof value) - } - return Buffer(value, encodingOrOffset, length) - } -} - -if (!Safer.alloc) { - Safer.alloc = function (size, fill, encoding) { - if (typeof size !== 'number') { - throw new TypeError('The "size" argument must be of type number. 
Received type ' + typeof size) - } - if (size < 0 || size >= 2 * (1 << 30)) { - throw new RangeError('The value "' + size + '" is invalid for option "size"') - } - var buf = Buffer(size) - if (!fill || fill.length === 0) { - buf.fill(0) - } else if (typeof encoding === 'string') { - buf.fill(fill, encoding) - } else { - buf.fill(fill) - } - return buf - } -} - -if (!safer.kStringMaxLength) { - try { - safer.kStringMaxLength = process.binding('buffer').kStringMaxLength - } catch (e) { - // we can't determine kStringMaxLength in environments where process.binding - // is unsupported, so let's not set it - } -} - -if (!safer.constants) { - safer.constants = { - MAX_LENGTH: safer.kMaxLength - } - if (safer.kStringMaxLength) { - safer.constants.MAX_STRING_LENGTH = safer.kStringMaxLength - } -} - -module.exports = safer - - -/***/ }), -/* 16 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright (c) 2012, Mark Cavage. All rights reserved. -// Copyright 2015 Joyent, Inc. - -var assert = __webpack_require__(28); -var Stream = __webpack_require__(23).Stream; -var util = __webpack_require__(3); - - -///--- Globals - -/* JSSTYLED */ -var UUID_REGEXP = /^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$/; - - -///--- Internal - -function _capitalize(str) { - return (str.charAt(0).toUpperCase() + str.slice(1)); -} - -function _toss(name, expected, oper, arg, actual) { - throw new assert.AssertionError({ - message: util.format('%s (%s) is required', name, expected), - actual: (actual === undefined) ? typeof (arg) : actual(arg), - expected: expected, - operator: oper || '===', - stackStartFunction: _toss.caller - }); -} - -function _getClass(arg) { - return (Object.prototype.toString.call(arg).slice(8, -1)); -} - -function noop() { - // Why even bother with asserts? 
-} - - -///--- Exports - -var types = { - bool: { - check: function (arg) { return typeof (arg) === 'boolean'; } - }, - func: { - check: function (arg) { return typeof (arg) === 'function'; } - }, - string: { - check: function (arg) { return typeof (arg) === 'string'; } - }, - object: { - check: function (arg) { - return typeof (arg) === 'object' && arg !== null; - } - }, - number: { - check: function (arg) { - return typeof (arg) === 'number' && !isNaN(arg); - } - }, - finite: { - check: function (arg) { - return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg); - } - }, - buffer: { - check: function (arg) { return Buffer.isBuffer(arg); }, - operator: 'Buffer.isBuffer' - }, - array: { - check: function (arg) { return Array.isArray(arg); }, - operator: 'Array.isArray' - }, - stream: { - check: function (arg) { return arg instanceof Stream; }, - operator: 'instanceof', - actual: _getClass - }, - date: { - check: function (arg) { return arg instanceof Date; }, - operator: 'instanceof', - actual: _getClass - }, - regexp: { - check: function (arg) { return arg instanceof RegExp; }, - operator: 'instanceof', - actual: _getClass - }, - uuid: { - check: function (arg) { - return typeof (arg) === 'string' && UUID_REGEXP.test(arg); - }, - operator: 'isUUID' - } -}; - -function _setExports(ndebug) { - var keys = Object.keys(types); - var out; - - /* re-export standard assert */ - if (process.env.NODE_NDEBUG) { - out = noop; - } else { - out = function (arg, msg) { - if (!arg) { - _toss(msg, 'true', arg); - } - }; - } - - /* standard checks */ - keys.forEach(function (k) { - if (ndebug) { - out[k] = noop; - return; - } - var type = types[k]; - out[k] = function (arg, msg) { - if (!type.check(arg)) { - _toss(msg, k, type.operator, arg, type.actual); - } - }; - }); - - /* optional checks */ - keys.forEach(function (k) { - var name = 'optional' + _capitalize(k); - if (ndebug) { - out[name] = noop; - return; - } - var type = types[k]; - out[name] = function (arg, msg) { - if (arg === undefined || arg === null) { - return; - } - if (!type.check(arg)) { - _toss(msg, k, type.operator, arg, type.actual); - } - }; - }); - - /* arrayOf checks */ - keys.forEach(function (k) { - var name = 'arrayOf' + _capitalize(k); - if (ndebug) { - out[name] = noop; - return; - } - var type = types[k]; - var expected = '[' + k + ']'; - out[name] = function (arg, msg) { - if (!Array.isArray(arg)) { - _toss(msg, expected, type.operator, arg, type.actual); - } - var i; - for (i = 0; i < arg.length; i++) { - if (!type.check(arg[i])) { - _toss(msg, expected, type.operator, arg, type.actual); - } - } - }; - }); - - /* optionalArrayOf checks */ - keys.forEach(function (k) { - var name = 'optionalArrayOf' + _capitalize(k); - if (ndebug) { - out[name] = noop; - return; - } - var type = types[k]; - var expected = '[' + k + ']'; - out[name] = function (arg, msg) { - if (arg === undefined || arg === null) { - return; - } - if (!Array.isArray(arg)) { - _toss(msg, expected, type.operator, arg, type.actual); - } - var i; - for (i = 0; i < arg.length; i++) { - if (!type.check(arg[i])) { - _toss(msg, expected, type.operator, arg, type.actual); - } - } - }; - }); - - /* re-export built-in assertions */ - Object.keys(assert).forEach(function (k) { - if (k === 'AssertionError') { - out[k] = assert[k]; - return; - } - if (ndebug) { - out[k] = noop; - return; - } - out[k] = assert[k]; - }); - - /* export ourselves (for unit tests _only_) */ - out._setExports = _setExports; - - return out; -} - -module.exports = 
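// Usage sketch for the type checks built by _setExports above (assumes the
// npm 'assert-plus' package, which this bundled module corresponds to).
// Each base type also gets optional*, arrayOf*, and optionalArrayOf* forms;
// setting NODE_NDEBUG turns them all into no-ops.
const assert = require('assert-plus');

function connect(opts) {
  assert.object(opts, 'opts');
  assert.string(opts.host, 'opts.host');
  assert.optionalNumber(opts.port, 'opts.port');
  assert.arrayOfString(opts.tags || [], 'opts.tags');
}

connect({ host: 'localhost', port: 8200, tags: ['primary'] });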
_setExports(process.env.NODE_NDEBUG); - - -/***/ }), -/* 17 */ -/***/ (function(module, exports) { - -// https://github.com/zloirock/core-js/issues/86#issuecomment-115759028 -var global = module.exports = typeof window != 'undefined' && window.Math == Math - ? window : typeof self != 'undefined' && self.Math == Math ? self - // eslint-disable-next-line no-new-func - : Function('return this')(); -if (typeof __g == 'number') __g = global; // eslint-disable-line no-undef - - -/***/ }), -/* 18 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.sortAlpha = sortAlpha; -exports.sortOptionsByFlags = sortOptionsByFlags; -exports.entries = entries; -exports.removePrefix = removePrefix; -exports.removeSuffix = removeSuffix; -exports.addSuffix = addSuffix; -exports.hyphenate = hyphenate; -exports.camelCase = camelCase; -exports.compareSortedArrays = compareSortedArrays; -exports.sleep = sleep; -const _camelCase = __webpack_require__(230); - -function sortAlpha(a, b) { - // sort alphabetically in a deterministic way - const shortLen = Math.min(a.length, b.length); - for (let i = 0; i < shortLen; i++) { - const aChar = a.charCodeAt(i); - const bChar = b.charCodeAt(i); - if (aChar !== bChar) { - return aChar - bChar; - } - } - return a.length - b.length; -} - -function sortOptionsByFlags(a, b) { - const aOpt = a.flags.replace(/-/g, ''); - const bOpt = b.flags.replace(/-/g, ''); - return sortAlpha(aOpt, bOpt); -} - -function entries(obj) { - const entries = []; - if (obj) { - for (const key in obj) { - entries.push([key, obj[key]]); - } - } - return entries; -} - -function removePrefix(pattern, prefix) { - if (pattern.startsWith(prefix)) { - pattern = pattern.slice(prefix.length); - } - - return pattern; -} - -function removeSuffix(pattern, suffix) { - if (pattern.endsWith(suffix)) { - return pattern.slice(0, -suffix.length); - } - - return pattern; -} - -function addSuffix(pattern, suffix) { - if (!pattern.endsWith(suffix)) { - return pattern + suffix; - } - - return pattern; -} - -function hyphenate(str) { - return str.replace(/[A-Z]/g, match => { - return '-' + match.charAt(0).toLowerCase(); - }); -} - -function camelCase(str) { - if (/[A-Z]/.test(str)) { - return null; - } else { - return _camelCase(str); - } -} - -function compareSortedArrays(array1, array2) { - if (array1.length !== array2.length) { - return false; - } - for (let i = 0, len = array1.length; i < len; i++) { - if (array1[i] !== array2[i]) { - return false; - } - } - return true; -} - -function sleep(ms) { - return new Promise(resolve => { - setTimeout(resolve, ms); - }); -} - -/***/ }), -/* 19 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.stringify = exports.parse = undefined; - -var _asyncToGenerator2; - -function _load_asyncToGenerator() { - return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); -} - -var _parse; - -function _load_parse() { - return _parse = __webpack_require__(105); -} - -Object.defineProperty(exports, 'parse', { - enumerable: true, - get: function get() { - return _interopRequireDefault(_parse || _load_parse()).default; - } -}); - -var _stringify; - -function _load_stringify() { - return _stringify = __webpack_require__(199); -} - -Object.defineProperty(exports, 'stringify', { - enumerable: true, - get: function get() { - return _interopRequireDefault(_stringify || 
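// Standalone sketch of the deterministic sort used by sortAlpha above:
// compare by raw char codes and then by length, so the ordering never
// depends on the current locale (unlike String.prototype.localeCompare).
function sortAlphaSketch(a, b) {
  const shortLen = Math.min(a.length, b.length);
  for (let i = 0; i < shortLen; i++) {
    const diff = a.charCodeAt(i) - b.charCodeAt(i);
    if (diff !== 0) {
      return diff;
    }
  }
  return a.length - b.length;
}

console.log(['b', 'a10', 'a2', 'a'].sort(sortAlphaSketch)); // [ 'a', 'a10', 'a2', 'b' ]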
_load_stringify()).default; - } -}); -exports.implodeEntry = implodeEntry; -exports.explodeEntry = explodeEntry; - -var _misc; - -function _load_misc() { - return _misc = __webpack_require__(18); -} - -var _normalizePattern; - -function _load_normalizePattern() { - return _normalizePattern = __webpack_require__(37); -} - -var _parse2; - -function _load_parse2() { - return _parse2 = _interopRequireDefault(__webpack_require__(105)); -} - -var _constants; - -function _load_constants() { - return _constants = __webpack_require__(8); -} - -var _fs; - -function _load_fs() { - return _fs = _interopRequireWildcard(__webpack_require__(4)); -} - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const invariant = __webpack_require__(9); - -const path = __webpack_require__(0); -const ssri = __webpack_require__(65); - -function getName(pattern) { - return (0, (_normalizePattern || _load_normalizePattern()).normalizePattern)(pattern).name; -} - -function blankObjectUndefined(obj) { - return obj && Object.keys(obj).length ? obj : undefined; -} - -function keyForRemote(remote) { - return remote.resolved || (remote.reference && remote.hash ? `${remote.reference}#${remote.hash}` : null); -} - -function serializeIntegrity(integrity) { - // We need this because `Integrity.toString()` does not use sorting to ensure a stable string output - // See https://git.io/vx2Hy - return integrity.toString().split(' ').sort().join(' '); -} - -function implodeEntry(pattern, obj) { - const inferredName = getName(pattern); - const integrity = obj.integrity ? serializeIntegrity(obj.integrity) : ''; - const imploded = { - name: inferredName === obj.name ? undefined : obj.name, - version: obj.version, - uid: obj.uid === obj.version ? undefined : obj.uid, - resolved: obj.resolved, - registry: obj.registry === 'npm' ? 
undefined : obj.registry, - dependencies: blankObjectUndefined(obj.dependencies), - optionalDependencies: blankObjectUndefined(obj.optionalDependencies), - permissions: blankObjectUndefined(obj.permissions), - prebuiltVariants: blankObjectUndefined(obj.prebuiltVariants) - }; - if (integrity) { - imploded.integrity = integrity; - } - return imploded; -} - -function explodeEntry(pattern, obj) { - obj.optionalDependencies = obj.optionalDependencies || {}; - obj.dependencies = obj.dependencies || {}; - obj.uid = obj.uid || obj.version; - obj.permissions = obj.permissions || {}; - obj.registry = obj.registry || 'npm'; - obj.name = obj.name || getName(pattern); - const integrity = obj.integrity; - if (integrity && integrity.isIntegrity) { - obj.integrity = ssri.parse(integrity); - } - return obj; -} - -class Lockfile { - constructor({ cache, source, parseResultType } = {}) { - this.source = source || ''; - this.cache = cache; - this.parseResultType = parseResultType; - } - - // source string if the `cache` was parsed - - - // if true, we're parsing an old yarn file and need to update integrity fields - hasEntriesExistWithoutIntegrity() { - if (!this.cache) { - return false; - } - - for (const key in this.cache) { - // $FlowFixMe - `this.cache` is clearly defined at this point - if (!/^.*@(file:|http)/.test(key) && this.cache[key] && !this.cache[key].integrity) { - return true; - } - } - - return false; - } - - static fromDirectory(dir, reporter) { - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // read the manifest in this directory - const lockfileLoc = path.join(dir, (_constants || _load_constants()).LOCKFILE_FILENAME); - - let lockfile; - let rawLockfile = ''; - let parseResult; - - if (yield (_fs || _load_fs()).exists(lockfileLoc)) { - rawLockfile = yield (_fs || _load_fs()).readFile(lockfileLoc); - parseResult = (0, (_parse2 || _load_parse2()).default)(rawLockfile, lockfileLoc); - - if (reporter) { - if (parseResult.type === 'merge') { - reporter.info(reporter.lang('lockfileMerged')); - } else if (parseResult.type === 'conflict') { - reporter.warn(reporter.lang('lockfileConflict')); - } - } - - lockfile = parseResult.object; - } else if (reporter) { - reporter.info(reporter.lang('noLockfileFound')); - } - - if (lockfile && lockfile.__metadata) { - const lockfilev2 = lockfile; - lockfile = {}; - } - - return new Lockfile({ cache: lockfile, source: rawLockfile, parseResultType: parseResult && parseResult.type }); - })(); - } - - getLocked(pattern) { - const cache = this.cache; - if (!cache) { - return undefined; - } - - const shrunk = pattern in cache && cache[pattern]; - - if (typeof shrunk === 'string') { - return this.getLocked(shrunk); - } else if (shrunk) { - explodeEntry(pattern, shrunk); - return shrunk; - } - - return undefined; - } - - removePattern(pattern) { - const cache = this.cache; - if (!cache) { - return; - } - delete cache[pattern]; - } - - getLockfile(patterns) { - const lockfile = {}; - const seen = new Map(); - - // order by name so that lockfile manifest is assigned to the first dependency with this manifest - // the others that have the same remoteKey will just refer to the first - // ordering allows for consistency in lockfile when it is serialized - const sortedPatternsKeys = Object.keys(patterns).sort((_misc || _load_misc()).sortAlpha); - - for (var _iterator = sortedPatternsKeys, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? 
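// Reduced sketch of the implode step above: fields that can be re-derived
// when the lockfile is read back are set to undefined so they are omitted
// from the serialized output. The name inference here is deliberately naive
// (the real normalizePattern also handles scoped packages and URLs).
function implodeEntrySketch(pattern, obj) {
  const inferredName = pattern.slice(0, pattern.lastIndexOf('@'));
  return {
    name: obj.name === inferredName ? undefined : obj.name,
    version: obj.version,
    uid: obj.uid === obj.version ? undefined : obj.uid,
    registry: obj.registry === 'npm' ? undefined : obj.registry,
    resolved: obj.resolved,
  };
}

console.log(implodeEntrySketch('left-pad@^1.0.0', {
  name: 'left-pad',
  version: '1.3.0',
  uid: '1.3.0',
  registry: 'npm',
  resolved: 'https://registry.yarnpkg.com/left-pad/-/left-pad-1.3.0.tgz',
}));
// => only version and resolved survive; name/uid/registry become undefined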
_iterator : _iterator[Symbol.iterator]();;) { - var _ref; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref = _i.value; - } - - const pattern = _ref; - - const pkg = patterns[pattern]; - const remote = pkg._remote, - ref = pkg._reference; - - invariant(ref, 'Package is missing a reference'); - invariant(remote, 'Package is missing a remote'); - - const remoteKey = keyForRemote(remote); - const seenPattern = remoteKey && seen.get(remoteKey); - if (seenPattern) { - // no point in duplicating it - lockfile[pattern] = seenPattern; - - // if we're relying on our name being inferred and two of the patterns have - // different inferred names then we need to set it - if (!seenPattern.name && getName(pattern) !== pkg.name) { - seenPattern.name = pkg.name; - } - continue; - } - const obj = implodeEntry(pattern, { - name: pkg.name, - version: pkg.version, - uid: pkg._uid, - resolved: remote.resolved, - integrity: remote.integrity, - registry: remote.registry, - dependencies: pkg.dependencies, - peerDependencies: pkg.peerDependencies, - optionalDependencies: pkg.optionalDependencies, - permissions: ref.permissions, - prebuiltVariants: pkg.prebuiltVariants - }); - - lockfile[pattern] = obj; - - if (remoteKey) { - seen.set(remoteKey, obj); - } - } - - return lockfile; - } -} -exports.default = Lockfile; - -/***/ }), -/* 20 */ -/***/ (function(module, exports, __webpack_require__) { - -var store = __webpack_require__(133)('wks'); -var uid = __webpack_require__(137); -var Symbol = __webpack_require__(17).Symbol; -var USE_SYMBOL = typeof Symbol == 'function'; - -var $exports = module.exports = function (name) { - return store[name] || (store[name] = - USE_SYMBOL && Symbol[name] || (USE_SYMBOL ? Symbol : uid)('Symbol.' + name)); -}; - -$exports.store = store; - - -/***/ }), -/* 21 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -exports.__esModule = true; - -var _assign = __webpack_require__(591); - -var _assign2 = _interopRequireDefault(_assign); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -exports.default = _assign2.default || function (target) { - for (var i = 1; i < arguments.length; i++) { - var source = arguments[i]; - - for (var key in source) { - if (Object.prototype.hasOwnProperty.call(source, key)) { - target[key] = source[key]; - } - } - } - - return target; -}; - -/***/ }), -/* 22 */ -/***/ (function(module, exports) { - -exports = module.exports = SemVer; - -// The debug function is excluded entirely from the minified version. -/* nomin */ var debug; -/* nomin */ if (typeof process === 'object' && - /* nomin */ process.env && - /* nomin */ process.env.NODE_DEBUG && - /* nomin */ /\bsemver\b/i.test(process.env.NODE_DEBUG)) - /* nomin */ debug = function() { - /* nomin */ var args = Array.prototype.slice.call(arguments, 0); - /* nomin */ args.unshift('SEMVER'); - /* nomin */ console.log.apply(console, args); - /* nomin */ }; -/* nomin */ else - /* nomin */ debug = function() {}; - -// Note: this is the semver.org version of the spec that it implements -// Not necessarily the package version of this code. -exports.SEMVER_SPEC_VERSION = '2.0.0'; - -var MAX_LENGTH = 256; -var MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991; - -// Max safe segment length for coercion. 
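// Sketch of the regex-table pattern used throughout the semver code below:
// source strings are composed by index (src), then compiled once into
// parallel RegExp objects (re). The names here are illustrative stand-ins.
var src = [];
var re = [];
var R = 0;

var NUM = R++;
src[NUM] = '0|[1-9]\\d*';
var MAIN = R++;
src[MAIN] = '(' + src[NUM] + ')\\.(' + src[NUM] + ')\\.(' + src[NUM] + ')';

for (var i = 0; i < R; i++) {
  re[i] = new RegExp(src[i]);
}
console.log(re[MAIN].test('1.2.3')); // true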
-var MAX_SAFE_COMPONENT_LENGTH = 16; - -// The actual regexps go on exports.re -var re = exports.re = []; -var src = exports.src = []; -var R = 0; - -// The following Regular Expressions can be used for tokenizing, -// validating, and parsing SemVer version strings. - -// ## Numeric Identifier -// A single `0`, or a non-zero digit followed by zero or more digits. - -var NUMERICIDENTIFIER = R++; -src[NUMERICIDENTIFIER] = '0|[1-9]\\d*'; -var NUMERICIDENTIFIERLOOSE = R++; -src[NUMERICIDENTIFIERLOOSE] = '[0-9]+'; - - -// ## Non-numeric Identifier -// Zero or more digits, followed by a letter or hyphen, and then zero or -// more letters, digits, or hyphens. - -var NONNUMERICIDENTIFIER = R++; -src[NONNUMERICIDENTIFIER] = '\\d*[a-zA-Z-][a-zA-Z0-9-]*'; - - -// ## Main Version -// Three dot-separated numeric identifiers. - -var MAINVERSION = R++; -src[MAINVERSION] = '(' + src[NUMERICIDENTIFIER] + ')\\.' + - '(' + src[NUMERICIDENTIFIER] + ')\\.' + - '(' + src[NUMERICIDENTIFIER] + ')'; - -var MAINVERSIONLOOSE = R++; -src[MAINVERSIONLOOSE] = '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' + - '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' + - '(' + src[NUMERICIDENTIFIERLOOSE] + ')'; - -// ## Pre-release Version Identifier -// A numeric identifier, or a non-numeric identifier. - -var PRERELEASEIDENTIFIER = R++; -src[PRERELEASEIDENTIFIER] = '(?:' + src[NUMERICIDENTIFIER] + - '|' + src[NONNUMERICIDENTIFIER] + ')'; - -var PRERELEASEIDENTIFIERLOOSE = R++; -src[PRERELEASEIDENTIFIERLOOSE] = '(?:' + src[NUMERICIDENTIFIERLOOSE] + - '|' + src[NONNUMERICIDENTIFIER] + ')'; - - -// ## Pre-release Version -// Hyphen, followed by one or more dot-separated pre-release version -// identifiers. - -var PRERELEASE = R++; -src[PRERELEASE] = '(?:-(' + src[PRERELEASEIDENTIFIER] + - '(?:\\.' + src[PRERELEASEIDENTIFIER] + ')*))'; - -var PRERELEASELOOSE = R++; -src[PRERELEASELOOSE] = '(?:-?(' + src[PRERELEASEIDENTIFIERLOOSE] + - '(?:\\.' + src[PRERELEASEIDENTIFIERLOOSE] + ')*))'; - -// ## Build Metadata Identifier -// Any combination of digits, letters, or hyphens. - -var BUILDIDENTIFIER = R++; -src[BUILDIDENTIFIER] = '[0-9A-Za-z-]+'; - -// ## Build Metadata -// Plus sign, followed by one or more period-separated build metadata -// identifiers. - -var BUILD = R++; -src[BUILD] = '(?:\\+(' + src[BUILDIDENTIFIER] + - '(?:\\.' + src[BUILDIDENTIFIER] + ')*))'; - - -// ## Full Version String -// A main version, followed optionally by a pre-release version and -// build metadata. - -// Note that the only major, minor, patch, and pre-release sections of -// the version string are capturing groups. The build metadata is not a -// capturing group, because it should not ever be used in version -// comparison. - -var FULL = R++; -var FULLPLAIN = 'v?' + src[MAINVERSION] + - src[PRERELEASE] + '?' + - src[BUILD] + '?'; - -src[FULL] = '^' + FULLPLAIN + '$'; - -// like full, but allows v1.2.3 and =1.2.3, which people do sometimes. -// also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty -// common in the npm registry. -var LOOSEPLAIN = '[v=\\s]*' + src[MAINVERSIONLOOSE] + - src[PRERELEASELOOSE] + '?' + - src[BUILD] + '?'; - -var LOOSE = R++; -src[LOOSE] = '^' + LOOSEPLAIN + '$'; - -var GTLT = R++; -src[GTLT] = '((?:<|>)?=?)'; - -// Something like "2.*" or "1.2.x". -// Note that "x.x" is a valid xRange identifer, meaning "any version" -// Only the first item is strictly required. 
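// Sketch of what the x-range, tilde, and caret grammar being assembled here
// ultimately accepts, shown through the public semver API (assumes the npm
// 'semver' package, of which this bundled copy is a vendored build).
const semver = require('semver');

console.log(semver.satisfies('1.2.3', '1.2.x'));  // true  (x-range)
console.log(semver.satisfies('1.3.0', '~1.2.3')); // false (tilde means <1.3.0)
console.log(semver.satisfies('1.9.9', '^1.2.3')); // true  (caret means <2.0.0)
console.log(semver.validRange('1.2.3 - 1.2.5'));  // '>=1.2.3 <=1.2.5'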
-var XRANGEIDENTIFIERLOOSE = R++; -src[XRANGEIDENTIFIERLOOSE] = src[NUMERICIDENTIFIERLOOSE] + '|x|X|\\*'; -var XRANGEIDENTIFIER = R++; -src[XRANGEIDENTIFIER] = src[NUMERICIDENTIFIER] + '|x|X|\\*'; - -var XRANGEPLAIN = R++; -src[XRANGEPLAIN] = '[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + - '(?:' + src[PRERELEASE] + ')?' + - src[BUILD] + '?' + - ')?)?'; - -var XRANGEPLAINLOOSE = R++; -src[XRANGEPLAINLOOSE] = '[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:' + src[PRERELEASELOOSE] + ')?' + - src[BUILD] + '?' + - ')?)?'; - -var XRANGE = R++; -src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'; -var XRANGELOOSE = R++; -src[XRANGELOOSE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAINLOOSE] + '$'; - -// Coercion. -// Extract anything that could conceivably be a part of a valid semver -var COERCE = R++; -src[COERCE] = '(?:^|[^\\d])' + - '(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '})' + - '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' + - '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' + - '(?:$|[^\\d])'; - -// Tilde ranges. -// Meaning is "reasonably at or greater than" -var LONETILDE = R++; -src[LONETILDE] = '(?:~>?)'; - -var TILDETRIM = R++; -src[TILDETRIM] = '(\\s*)' + src[LONETILDE] + '\\s+'; -re[TILDETRIM] = new RegExp(src[TILDETRIM], 'g'); -var tildeTrimReplace = '$1~'; - -var TILDE = R++; -src[TILDE] = '^' + src[LONETILDE] + src[XRANGEPLAIN] + '$'; -var TILDELOOSE = R++; -src[TILDELOOSE] = '^' + src[LONETILDE] + src[XRANGEPLAINLOOSE] + '$'; - -// Caret ranges. -// Meaning is "at least and backwards compatible with" -var LONECARET = R++; -src[LONECARET] = '(?:\\^)'; - -var CARETTRIM = R++; -src[CARETTRIM] = '(\\s*)' + src[LONECARET] + '\\s+'; -re[CARETTRIM] = new RegExp(src[CARETTRIM], 'g'); -var caretTrimReplace = '$1^'; - -var CARET = R++; -src[CARET] = '^' + src[LONECARET] + src[XRANGEPLAIN] + '$'; -var CARETLOOSE = R++; -src[CARETLOOSE] = '^' + src[LONECARET] + src[XRANGEPLAINLOOSE] + '$'; - -// A simple gt/lt/eq thing, or just "" to indicate "any version" -var COMPARATORLOOSE = R++; -src[COMPARATORLOOSE] = '^' + src[GTLT] + '\\s*(' + LOOSEPLAIN + ')$|^$'; -var COMPARATOR = R++; -src[COMPARATOR] = '^' + src[GTLT] + '\\s*(' + FULLPLAIN + ')$|^$'; - - -// An expression to strip any whitespace between the gtlt and the thing -// it modifies, so that `> 1.2.3` ==> `>1.2.3` -var COMPARATORTRIM = R++; -src[COMPARATORTRIM] = '(\\s*)' + src[GTLT] + - '\\s*(' + LOOSEPLAIN + '|' + src[XRANGEPLAIN] + ')'; - -// this one has to use the /g flag -re[COMPARATORTRIM] = new RegExp(src[COMPARATORTRIM], 'g'); -var comparatorTrimReplace = '$1$2$3'; - - -// Something like `1.2.3 - 1.2.4` -// Note that these all use the loose form, because they'll be -// checked against either the strict or loose comparator form -// later. -var HYPHENRANGE = R++; -src[HYPHENRANGE] = '^\\s*(' + src[XRANGEPLAIN] + ')' + - '\\s+-\\s+' + - '(' + src[XRANGEPLAIN] + ')' + - '\\s*$'; - -var HYPHENRANGELOOSE = R++; -src[HYPHENRANGELOOSE] = '^\\s*(' + src[XRANGEPLAINLOOSE] + ')' + - '\\s+-\\s+' + - '(' + src[XRANGEPLAINLOOSE] + ')' + - '\\s*$'; - -// Star ranges basically just allow anything at all. -var STAR = R++; -src[STAR] = '(<|>)?=?\\s*\\*'; - -// Compile to actual regexp objects. -// All are flag-free, unless they were created above with a flag. 
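// Usage sketch for the COERCE regex and the loose/strict parsing defined
// above, via the public semver API (assumes the npm 'semver' package).
const semver = require('semver');

console.log(semver.coerce('v2').version);               // '2.0.0'
console.log(semver.coerce('release-1.2.9-rc').version); // '1.2.9'
console.log(semver.clean('  =v1.2.3  '));               // '1.2.3'
console.log(semver.valid('1.2.3-beta.1'));              // '1.2.3-beta.1'
console.log(semver.valid('not-a-version'));             // null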
-for (var i = 0; i < R; i++) { - debug(i, src[i]); - if (!re[i]) - re[i] = new RegExp(src[i]); -} - -exports.parse = parse; -function parse(version, loose) { - if (version instanceof SemVer) - return version; - - if (typeof version !== 'string') - return null; - - if (version.length > MAX_LENGTH) - return null; - - var r = loose ? re[LOOSE] : re[FULL]; - if (!r.test(version)) - return null; - - try { - return new SemVer(version, loose); - } catch (er) { - return null; - } -} - -exports.valid = valid; -function valid(version, loose) { - var v = parse(version, loose); - return v ? v.version : null; -} - - -exports.clean = clean; -function clean(version, loose) { - var s = parse(version.trim().replace(/^[=v]+/, ''), loose); - return s ? s.version : null; -} - -exports.SemVer = SemVer; - -function SemVer(version, loose) { - if (version instanceof SemVer) { - if (version.loose === loose) - return version; - else - version = version.version; - } else if (typeof version !== 'string') { - throw new TypeError('Invalid Version: ' + version); - } - - if (version.length > MAX_LENGTH) - throw new TypeError('version is longer than ' + MAX_LENGTH + ' characters') - - if (!(this instanceof SemVer)) - return new SemVer(version, loose); - - debug('SemVer', version, loose); - this.loose = loose; - var m = version.trim().match(loose ? re[LOOSE] : re[FULL]); - - if (!m) - throw new TypeError('Invalid Version: ' + version); - - this.raw = version; - - // these are actually numbers - this.major = +m[1]; - this.minor = +m[2]; - this.patch = +m[3]; - - if (this.major > MAX_SAFE_INTEGER || this.major < 0) - throw new TypeError('Invalid major version') - - if (this.minor > MAX_SAFE_INTEGER || this.minor < 0) - throw new TypeError('Invalid minor version') - - if (this.patch > MAX_SAFE_INTEGER || this.patch < 0) - throw new TypeError('Invalid patch version') - - // numberify any prerelease numeric ids - if (!m[4]) - this.prerelease = []; - else - this.prerelease = m[4].split('.').map(function(id) { - if (/^[0-9]+$/.test(id)) { - var num = +id; - if (num >= 0 && num < MAX_SAFE_INTEGER) - return num; - } - return id; - }); - - this.build = m[5] ? m[5].split('.') : []; - this.format(); -} - -SemVer.prototype.format = function() { - this.version = this.major + '.' + this.minor + '.' 
+ this.patch; - if (this.prerelease.length) - this.version += '-' + this.prerelease.join('.'); - return this.version; -}; - -SemVer.prototype.toString = function() { - return this.version; -}; - -SemVer.prototype.compare = function(other) { - debug('SemVer.compare', this.version, this.loose, other); - if (!(other instanceof SemVer)) - other = new SemVer(other, this.loose); - - return this.compareMain(other) || this.comparePre(other); -}; - -SemVer.prototype.compareMain = function(other) { - if (!(other instanceof SemVer)) - other = new SemVer(other, this.loose); - - return compareIdentifiers(this.major, other.major) || - compareIdentifiers(this.minor, other.minor) || - compareIdentifiers(this.patch, other.patch); -}; - -SemVer.prototype.comparePre = function(other) { - if (!(other instanceof SemVer)) - other = new SemVer(other, this.loose); - - // NOT having a prerelease is > having one - if (this.prerelease.length && !other.prerelease.length) - return -1; - else if (!this.prerelease.length && other.prerelease.length) - return 1; - else if (!this.prerelease.length && !other.prerelease.length) - return 0; - - var i = 0; - do { - var a = this.prerelease[i]; - var b = other.prerelease[i]; - debug('prerelease compare', i, a, b); - if (a === undefined && b === undefined) - return 0; - else if (b === undefined) - return 1; - else if (a === undefined) - return -1; - else if (a === b) - continue; - else - return compareIdentifiers(a, b); - } while (++i); -}; - -// preminor will bump the version up to the next minor release, and immediately -// down to pre-release. premajor and prepatch work the same way. -SemVer.prototype.inc = function(release, identifier) { - switch (release) { - case 'premajor': - this.prerelease.length = 0; - this.patch = 0; - this.minor = 0; - this.major++; - this.inc('pre', identifier); - break; - case 'preminor': - this.prerelease.length = 0; - this.patch = 0; - this.minor++; - this.inc('pre', identifier); - break; - case 'prepatch': - // If this is already a prerelease, it will bump to the next version - // drop any prereleases that might already exist, since they are not - // relevant at this point. - this.prerelease.length = 0; - this.inc('patch', identifier); - this.inc('pre', identifier); - break; - // If the input is a non-prerelease version, this acts the same as - // prepatch. - case 'prerelease': - if (this.prerelease.length === 0) - this.inc('patch', identifier); - this.inc('pre', identifier); - break; - - case 'major': - // If this is a pre-major version, bump up to the same major version. - // Otherwise increment major. - // 1.0.0-5 bumps to 1.0.0 - // 1.1.0 bumps to 2.0.0 - if (this.minor !== 0 || this.patch !== 0 || this.prerelease.length === 0) - this.major++; - this.minor = 0; - this.patch = 0; - this.prerelease = []; - break; - case 'minor': - // If this is a pre-minor version, bump up to the same minor version. - // Otherwise increment minor. - // 1.2.0-5 bumps to 1.2.0 - // 1.2.1 bumps to 1.3.0 - if (this.patch !== 0 || this.prerelease.length === 0) - this.minor++; - this.patch = 0; - this.prerelease = []; - break; - case 'patch': - // If this is not a pre-release version, it will increment the patch. - // If it is a pre-release it will bump up to the same patch version. - // 1.2.0-5 patches to 1.2.0 - // 1.2.0 patches to 1.2.1 - if (this.prerelease.length === 0) - this.patch++; - this.prerelease = []; - break; - // This probably shouldn't be used publicly. - // 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction. 
- case 'pre': - if (this.prerelease.length === 0) - this.prerelease = [0]; - else { - var i = this.prerelease.length; - while (--i >= 0) { - if (typeof this.prerelease[i] === 'number') { - this.prerelease[i]++; - i = -2; - } - } - if (i === -1) // didn't increment anything - this.prerelease.push(0); - } - if (identifier) { - // 1.2.0-beta.1 bumps to 1.2.0-beta.2, - // 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0 - if (this.prerelease[0] === identifier) { - if (isNaN(this.prerelease[1])) - this.prerelease = [identifier, 0]; - } else - this.prerelease = [identifier, 0]; - } - break; - - default: - throw new Error('invalid increment argument: ' + release); - } - this.format(); - this.raw = this.version; - return this; -}; - -exports.inc = inc; -function inc(version, release, loose, identifier) { - if (typeof(loose) === 'string') { - identifier = loose; - loose = undefined; - } - - try { - return new SemVer(version, loose).inc(release, identifier).version; - } catch (er) { - return null; - } -} - -exports.diff = diff; -function diff(version1, version2) { - if (eq(version1, version2)) { - return null; - } else { - var v1 = parse(version1); - var v2 = parse(version2); - if (v1.prerelease.length || v2.prerelease.length) { - for (var key in v1) { - if (key === 'major' || key === 'minor' || key === 'patch') { - if (v1[key] !== v2[key]) { - return 'pre'+key; - } - } - } - return 'prerelease'; - } - for (var key in v1) { - if (key === 'major' || key === 'minor' || key === 'patch') { - if (v1[key] !== v2[key]) { - return key; - } - } - } - } -} - -exports.compareIdentifiers = compareIdentifiers; - -var numeric = /^[0-9]+$/; -function compareIdentifiers(a, b) { - var anum = numeric.test(a); - var bnum = numeric.test(b); - - if (anum && bnum) { - a = +a; - b = +b; - } - - return (anum && !bnum) ? -1 : - (bnum && !anum) ? 1 : - a < b ? -1 : - a > b ? 
1 : - 0; -} - -exports.rcompareIdentifiers = rcompareIdentifiers; -function rcompareIdentifiers(a, b) { - return compareIdentifiers(b, a); -} - -exports.major = major; -function major(a, loose) { - return new SemVer(a, loose).major; -} - -exports.minor = minor; -function minor(a, loose) { - return new SemVer(a, loose).minor; -} - -exports.patch = patch; -function patch(a, loose) { - return new SemVer(a, loose).patch; -} - -exports.compare = compare; -function compare(a, b, loose) { - return new SemVer(a, loose).compare(new SemVer(b, loose)); -} - -exports.compareLoose = compareLoose; -function compareLoose(a, b) { - return compare(a, b, true); -} - -exports.rcompare = rcompare; -function rcompare(a, b, loose) { - return compare(b, a, loose); -} - -exports.sort = sort; -function sort(list, loose) { - return list.sort(function(a, b) { - return exports.compare(a, b, loose); - }); -} - -exports.rsort = rsort; -function rsort(list, loose) { - return list.sort(function(a, b) { - return exports.rcompare(a, b, loose); - }); -} - -exports.gt = gt; -function gt(a, b, loose) { - return compare(a, b, loose) > 0; -} - -exports.lt = lt; -function lt(a, b, loose) { - return compare(a, b, loose) < 0; -} - -exports.eq = eq; -function eq(a, b, loose) { - return compare(a, b, loose) === 0; -} - -exports.neq = neq; -function neq(a, b, loose) { - return compare(a, b, loose) !== 0; -} - -exports.gte = gte; -function gte(a, b, loose) { - return compare(a, b, loose) >= 0; -} - -exports.lte = lte; -function lte(a, b, loose) { - return compare(a, b, loose) <= 0; -} - -exports.cmp = cmp; -function cmp(a, op, b, loose) { - var ret; - switch (op) { - case '===': - if (typeof a === 'object') a = a.version; - if (typeof b === 'object') b = b.version; - ret = a === b; - break; - case '!==': - if (typeof a === 'object') a = a.version; - if (typeof b === 'object') b = b.version; - ret = a !== b; - break; - case '': case '=': case '==': ret = eq(a, b, loose); break; - case '!=': ret = neq(a, b, loose); break; - case '>': ret = gt(a, b, loose); break; - case '>=': ret = gte(a, b, loose); break; - case '<': ret = lt(a, b, loose); break; - case '<=': ret = lte(a, b, loose); break; - default: throw new TypeError('Invalid operator: ' + op); - } - return ret; -} - -exports.Comparator = Comparator; -function Comparator(comp, loose) { - if (comp instanceof Comparator) { - if (comp.loose === loose) - return comp; - else - comp = comp.value; - } - - if (!(this instanceof Comparator)) - return new Comparator(comp, loose); - - debug('comparator', comp, loose); - this.loose = loose; - this.parse(comp); - - if (this.semver === ANY) - this.value = ''; - else - this.value = this.operator + this.semver.version; - - debug('comp', this); -} - -var ANY = {}; -Comparator.prototype.parse = function(comp) { - var r = this.loose ? re[COMPARATORLOOSE] : re[COMPARATOR]; - var m = comp.match(r); - - if (!m) - throw new TypeError('Invalid comparator: ' + comp); - - this.operator = m[1]; - if (this.operator === '=') - this.operator = ''; - - // if it literally is just '>' or '' then allow anything. 
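// Usage sketch for the cmp/compare machinery above (assumes the npm
// 'semver' package). Note the numeric, not lexical, identifier comparison,
// and that a prerelease sorts before its release.
const semver = require('semver');

console.log(semver.cmp('1.2.3', '>', '1.2.0'));      // true
console.log(semver.gt('1.10.0', '1.9.0'));           // true (10 > 9 numerically)
console.log(semver.compare('1.0.0-alpha', '1.0.0')); // -1 (prerelease first)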
- if (!m[2]) - this.semver = ANY; - else - this.semver = new SemVer(m[2], this.loose); -}; - -Comparator.prototype.toString = function() { - return this.value; -}; - -Comparator.prototype.test = function(version) { - debug('Comparator.test', version, this.loose); - - if (this.semver === ANY) - return true; - - if (typeof version === 'string') - version = new SemVer(version, this.loose); - - return cmp(version, this.operator, this.semver, this.loose); -}; - -Comparator.prototype.intersects = function(comp, loose) { - if (!(comp instanceof Comparator)) { - throw new TypeError('a Comparator is required'); - } - - var rangeTmp; - - if (this.operator === '') { - rangeTmp = new Range(comp.value, loose); - return satisfies(this.value, rangeTmp, loose); - } else if (comp.operator === '') { - rangeTmp = new Range(this.value, loose); - return satisfies(comp.semver, rangeTmp, loose); - } - - var sameDirectionIncreasing = - (this.operator === '>=' || this.operator === '>') && - (comp.operator === '>=' || comp.operator === '>'); - var sameDirectionDecreasing = - (this.operator === '<=' || this.operator === '<') && - (comp.operator === '<=' || comp.operator === '<'); - var sameSemVer = this.semver.version === comp.semver.version; - var differentDirectionsInclusive = - (this.operator === '>=' || this.operator === '<=') && - (comp.operator === '>=' || comp.operator === '<='); - var oppositeDirectionsLessThan = - cmp(this.semver, '<', comp.semver, loose) && - ((this.operator === '>=' || this.operator === '>') && - (comp.operator === '<=' || comp.operator === '<')); - var oppositeDirectionsGreaterThan = - cmp(this.semver, '>', comp.semver, loose) && - ((this.operator === '<=' || this.operator === '<') && - (comp.operator === '>=' || comp.operator === '>')); - - return sameDirectionIncreasing || sameDirectionDecreasing || - (sameSemVer && differentDirectionsInclusive) || - oppositeDirectionsLessThan || oppositeDirectionsGreaterThan; -}; - - -exports.Range = Range; -function Range(range, loose) { - if (range instanceof Range) { - if (range.loose === loose) { - return range; - } else { - return new Range(range.raw, loose); - } - } - - if (range instanceof Comparator) { - return new Range(range.value, loose); - } - - if (!(this instanceof Range)) - return new Range(range, loose); - - this.loose = loose; - - // First, split based on boolean or || - this.raw = range; - this.set = range.split(/\s*\|\|\s*/).map(function(range) { - return this.parseRange(range.trim()); - }, this).filter(function(c) { - // throw out any that are not relevant for whatever reason - return c.length; - }); - - if (!this.set.length) { - throw new TypeError('Invalid SemVer Range: ' + range); - } - - this.format(); -} - -Range.prototype.format = function() { - this.range = this.set.map(function(comps) { - return comps.join(' ').trim(); - }).join('||').trim(); - return this.range; -}; - -Range.prototype.toString = function() { - return this.range; -}; - -Range.prototype.parseRange = function(range) { - var loose = this.loose; - range = range.trim(); - debug('range', range, loose); - // `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4` - var hr = loose ? 
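// Editor's note (illustrative): the Range built above is a disjunction of
// comparator sets -- '>=1.2.3 <2.0.0 || ^3.0.0' yields set.length === 2 --
// and Range.prototype.test() passes when ANY one set matches in full.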
re[HYPHENRANGELOOSE] : re[HYPHENRANGE]; - range = range.replace(hr, hyphenReplace); - debug('hyphen replace', range); - // `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5` - range = range.replace(re[COMPARATORTRIM], comparatorTrimReplace); - debug('comparator trim', range, re[COMPARATORTRIM]); - - // `~ 1.2.3` => `~1.2.3` - range = range.replace(re[TILDETRIM], tildeTrimReplace); - - // `^ 1.2.3` => `^1.2.3` - range = range.replace(re[CARETTRIM], caretTrimReplace); - - // normalize spaces - range = range.split(/\s+/).join(' '); - - // At this point, the range is completely trimmed and - // ready to be split into comparators. - - var compRe = loose ? re[COMPARATORLOOSE] : re[COMPARATOR]; - var set = range.split(' ').map(function(comp) { - return parseComparator(comp, loose); - }).join(' ').split(/\s+/); - if (this.loose) { - // in loose mode, throw out any that are not valid comparators - set = set.filter(function(comp) { - return !!comp.match(compRe); - }); - } - set = set.map(function(comp) { - return new Comparator(comp, loose); - }); - - return set; -}; - -Range.prototype.intersects = function(range, loose) { - if (!(range instanceof Range)) { - throw new TypeError('a Range is required'); - } - - return this.set.some(function(thisComparators) { - return thisComparators.every(function(thisComparator) { - return range.set.some(function(rangeComparators) { - return rangeComparators.every(function(rangeComparator) { - return thisComparator.intersects(rangeComparator, loose); - }); - }); - }); - }); -}; - -// Mostly just for testing and legacy API reasons -exports.toComparators = toComparators; -function toComparators(range, loose) { - return new Range(range, loose).set.map(function(comp) { - return comp.map(function(c) { - return c.value; - }).join(' ').trim().split(' '); - }); -} - -// comprised of xranges, tildes, stars, and gtlt's at this point. -// already replaced the hyphen ranges -// turn into a set of JUST comparators. -function parseComparator(comp, loose) { - debug('comp', comp); - comp = replaceCarets(comp, loose); - debug('caret', comp); - comp = replaceTildes(comp, loose); - debug('tildes', comp); - comp = replaceXRanges(comp, loose); - debug('xrange', comp); - comp = replaceStars(comp, loose); - debug('stars', comp); - return comp; -} - -function isX(id) { - return !id || id.toLowerCase() === 'x' || id === '*'; -} - -// ~, ~> --> * (any, kinda silly) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0 -function replaceTildes(comp, loose) { - return comp.trim().split(/\s+/).map(function(comp) { - return replaceTilde(comp, loose); - }).join(' '); -} - -function replaceTilde(comp, loose) { - var r = loose ? re[TILDELOOSE] : re[TILDE]; - return comp.replace(r, function(_, M, m, p, pr) { - debug('tilde', comp, _, M, m, p, pr); - var ret; - - if (isX(M)) - ret = ''; - else if (isX(m)) - ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; - else if (isX(p)) - // ~1.2 == >=1.2.0 <1.3.0 - ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; - else if (pr) { - debug('replaceTilde pr', pr); - if (pr.charAt(0) !== '-') - pr = '-' + pr; - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0'; - } else - // ~1.2.3 == >=1.2.3 <1.3.0 - ret = '>=' + M + '.' + m + '.' + p + - ' <' + M + '.' 
+ (+m + 1) + '.0'; - - debug('tilde return', ret); - return ret; - }); -} - -// ^ --> * (any, kinda silly) -// ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0 -// ^2.0, ^2.0.x --> >=2.0.0 <3.0.0 -// ^1.2, ^1.2.x --> >=1.2.0 <2.0.0 -// ^1.2.3 --> >=1.2.3 <2.0.0 -// ^1.2.0 --> >=1.2.0 <2.0.0 -function replaceCarets(comp, loose) { - return comp.trim().split(/\s+/).map(function(comp) { - return replaceCaret(comp, loose); - }).join(' '); -} - -function replaceCaret(comp, loose) { - debug('caret', comp, loose); - var r = loose ? re[CARETLOOSE] : re[CARET]; - return comp.replace(r, function(_, M, m, p, pr) { - debug('caret', comp, _, M, m, p, pr); - var ret; - - if (isX(M)) - ret = ''; - else if (isX(m)) - ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; - else if (isX(p)) { - if (M === '0') - ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; - else - ret = '>=' + M + '.' + m + '.0 <' + (+M + 1) + '.0.0'; - } else if (pr) { - debug('replaceCaret pr', pr); - if (pr.charAt(0) !== '-') - pr = '-' + pr; - if (M === '0') { - if (m === '0') - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + m + '.' + (+p + 1); - else - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0'; - } else - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + (+M + 1) + '.0.0'; - } else { - debug('no pr'); - if (M === '0') { - if (m === '0') - ret = '>=' + M + '.' + m + '.' + p + - ' <' + M + '.' + m + '.' + (+p + 1); - else - ret = '>=' + M + '.' + m + '.' + p + - ' <' + M + '.' + (+m + 1) + '.0'; - } else - ret = '>=' + M + '.' + m + '.' + p + - ' <' + (+M + 1) + '.0.0'; - } - - debug('caret return', ret); - return ret; - }); -} - -function replaceXRanges(comp, loose) { - debug('replaceXRanges', comp, loose); - return comp.split(/\s+/).map(function(comp) { - return replaceXRange(comp, loose); - }).join(' '); -} - -function replaceXRange(comp, loose) { - comp = comp.trim(); - var r = loose ? re[XRANGELOOSE] : re[XRANGE]; - return comp.replace(r, function(ret, gtlt, M, m, p, pr) { - debug('xRange', comp, ret, gtlt, M, m, p, pr); - var xM = isX(M); - var xm = xM || isX(m); - var xp = xm || isX(p); - var anyX = xp; - - if (gtlt === '=' && anyX) - gtlt = ''; - - if (xM) { - if (gtlt === '>' || gtlt === '<') { - // nothing is allowed - ret = '<0.0.0'; - } else { - // nothing is forbidden - ret = '*'; - } - } else if (gtlt && anyX) { - // replace X with 0 - if (xm) - m = 0; - if (xp) - p = 0; - - if (gtlt === '>') { - // >1 => >=2.0.0 - // >1.2 => >=1.3.0 - // >1.2.3 => >= 1.2.4 - gtlt = '>='; - if (xm) { - M = +M + 1; - m = 0; - p = 0; - } else if (xp) { - m = +m + 1; - p = 0; - } - } else if (gtlt === '<=') { - // <=0.7.x is actually <0.8.0, since any 0.7.x should - // pass. Similarly, <=7.x is actually <8.0.0, etc. - gtlt = '<'; - if (xm) - M = +M + 1; - else - m = +m + 1; - } - - ret = gtlt + M + '.' + m + '.' + p; - } else if (xm) { - ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; - } else if (xp) { - ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; - } - - debug('xRange return', ret); - - return ret; - }); -} - -// Because * is AND-ed with everything else in the comparator, -// and '' means "any version", just remove the *s entirely. -function replaceStars(comp, loose) { - debug('replaceStars', comp, loose); - // Looseness is ignored here. star is always as loose as it gets! 
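// Editor's note (illustrative): after the caret/tilde/xrange passes above,
// every range is reduced to plain comparators, e.g.
//   '~1.2.3' -> '>=1.2.3 <1.3.0'
//   '^0.2.3' -> '>=0.2.3 <0.3.0'  (a zero major pins the minor instead)
//   '1.2.x'  -> '>=1.2.0 <1.3.0'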
- return comp.trim().replace(re[STAR], ''); -} - -// This function is passed to string.replace(re[HYPHENRANGE]) -// M, m, patch, prerelease, build -// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5 -// 1.2.3 - 3.4 => >=1.2.3 <3.5.0 Any 3.4.x will do -// 1.2 - 3.4 => >=1.2.0 <3.5.0 -function hyphenReplace($0, - from, fM, fm, fp, fpr, fb, - to, tM, tm, tp, tpr, tb) { - - if (isX(fM)) - from = ''; - else if (isX(fm)) - from = '>=' + fM + '.0.0'; - else if (isX(fp)) - from = '>=' + fM + '.' + fm + '.0'; - else - from = '>=' + from; - - if (isX(tM)) - to = ''; - else if (isX(tm)) - to = '<' + (+tM + 1) + '.0.0'; - else if (isX(tp)) - to = '<' + tM + '.' + (+tm + 1) + '.0'; - else if (tpr) - to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr; - else - to = '<=' + to; - - return (from + ' ' + to).trim(); -} - - -// if ANY of the sets match ALL of its comparators, then pass -Range.prototype.test = function(version) { - if (!version) - return false; - - if (typeof version === 'string') - version = new SemVer(version, this.loose); - - for (var i = 0; i < this.set.length; i++) { - if (testSet(this.set[i], version)) - return true; - } - return false; -}; - -function testSet(set, version) { - for (var i = 0; i < set.length; i++) { - if (!set[i].test(version)) - return false; - } - - if (version.prerelease.length) { - // Find the set of versions that are allowed to have prereleases - // For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0 - // That should allow `1.2.3-pr.2` to pass. - // However, `1.2.4-alpha.notready` should NOT be allowed, - // even though it's within the range set by the comparators. - for (var i = 0; i < set.length; i++) { - debug(set[i].semver); - if (set[i].semver === ANY) - continue; - - if (set[i].semver.prerelease.length > 0) { - var allowed = set[i].semver; - if (allowed.major === version.major && - allowed.minor === version.minor && - allowed.patch === version.patch) - return true; - } - } - - // Version has a -pre, but it's not one of the ones we like. - return false; - } - - return true; -} - -exports.satisfies = satisfies; -function satisfies(version, range, loose) { - try { - range = new Range(range, loose); - } catch (er) { - return false; - } - return range.test(version); -} - -exports.maxSatisfying = maxSatisfying; -function maxSatisfying(versions, range, loose) { - var max = null; - var maxSV = null; - try { - var rangeObj = new Range(range, loose); - } catch (er) { - return null; - } - versions.forEach(function (v) { - if (rangeObj.test(v)) { // satisfies(v, range, loose) - if (!max || maxSV.compare(v) === -1) { // compare(max, v, true) - max = v; - maxSV = new SemVer(max, loose); - } - } - }) - return max; -} - -exports.minSatisfying = minSatisfying; -function minSatisfying(versions, range, loose) { - var min = null; - var minSV = null; - try { - var rangeObj = new Range(range, loose); - } catch (er) { - return null; - } - versions.forEach(function (v) { - if (rangeObj.test(v)) { // satisfies(v, range, loose) - if (!min || minSV.compare(v) === 1) { // compare(min, v, true) - min = v; - minSV = new SemVer(min, loose); - } - } - }) - return min; -} - -exports.validRange = validRange; -function validRange(range, loose) { - try { - // Return '*' instead of '' so that truthiness works.
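// Editor's note (illustrative): testSet() above is why a prerelease version
// only satisfies a range whose comparators opt into the same
// [major, minor, patch] tuple, e.g.
//   satisfies('1.2.3-pr.2', '^1.2.3-pr.1')  -> true
//   satisfies('1.2.4-alpha', '^1.2.3-pr.1') -> false  (different tuple)
//   maxSatisfying(['1.1.0', '1.2.0', '2.0.0'], '^1.0.0') -> '1.2.0'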
- // This will throw if it's invalid anyway - return new Range(range, loose).range || '*'; - } catch (er) { - return null; - } -} - -// Determine if version is less than all the versions possible in the range -exports.ltr = ltr; -function ltr(version, range, loose) { - return outside(version, range, '<', loose); -} - -// Determine if version is greater than all the versions possible in the range. -exports.gtr = gtr; -function gtr(version, range, loose) { - return outside(version, range, '>', loose); -} - -exports.outside = outside; -function outside(version, range, hilo, loose) { - version = new SemVer(version, loose); - range = new Range(range, loose); - - var gtfn, ltefn, ltfn, comp, ecomp; - switch (hilo) { - case '>': - gtfn = gt; - ltefn = lte; - ltfn = lt; - comp = '>'; - ecomp = '>='; - break; - case '<': - gtfn = lt; - ltefn = gte; - ltfn = gt; - comp = '<'; - ecomp = '<='; - break; - default: - throw new TypeError('Must provide a hilo val of "<" or ">"'); - } - - // If it satisfies the range it is not outside - if (satisfies(version, range, loose)) { - return false; - } - - // From now on, variable terms are as if we're in "gtr" mode. - // but note that everything is flipped for the "ltr" function. - - for (var i = 0; i < range.set.length; ++i) { - var comparators = range.set[i]; - - var high = null; - var low = null; - - comparators.forEach(function(comparator) { - if (comparator.semver === ANY) { - comparator = new Comparator('>=0.0.0') - } - high = high || comparator; - low = low || comparator; - if (gtfn(comparator.semver, high.semver, loose)) { - high = comparator; - } else if (ltfn(comparator.semver, low.semver, loose)) { - low = comparator; - } - }); - - // If the edge version comparator has an operator then our version - // isn't outside it - if (high.operator === comp || high.operator === ecomp) { - return false; - } - - // If the lowest version comparator has an operator and our version - // is less than it then it isn't higher than the range - if ((!low.operator || low.operator === comp) && - ltefn(version, low.semver)) { - return false; - } else if (low.operator === ecomp && ltfn(version, low.semver)) { - return false; - } - } - return true; -} - -exports.prerelease = prerelease; -function prerelease(version, loose) { - var parsed = parse(version, loose); - return (parsed && parsed.prerelease.length) ? parsed.prerelease : null; -} - -exports.intersects = intersects; -function intersects(r1, r2, loose) { - r1 = new Range(r1, loose) - r2 = new Range(r2, loose) - return r1.intersects(r2) -} - -exports.coerce = coerce; -function coerce(version) { - if (version instanceof SemVer) - return version; - - if (typeof version !== 'string') - return null; - - var match = version.match(re[COERCE]); - - if (match == null) - return null; - - return parse((match[1] || '0') + '.' + (match[2] || '0') + '.'
+ (match[3] || '0')); -} - - -/***/ }), -/* 23 */ -/***/ (function(module, exports) { - -module.exports = require("stream"); - -/***/ }), -/* 24 */ -/***/ (function(module, exports) { - -module.exports = require("url"); - -/***/ }), -/* 25 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subscription; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__util_isArray__ = __webpack_require__(41); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_isObject__ = __webpack_require__(444); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__util_isFunction__ = __webpack_require__(154); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__util_tryCatch__ = __webpack_require__(56); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__util_errorObject__ = __webpack_require__(47); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__ = __webpack_require__(441); -/** PURE_IMPORTS_START _util_isArray,_util_isObject,_util_isFunction,_util_tryCatch,_util_errorObject,_util_UnsubscriptionError PURE_IMPORTS_END */ - - - - - - -var Subscription = /*@__PURE__*/ (function () { - function Subscription(unsubscribe) { - this.closed = false; - this._parent = null; - this._parents = null; - this._subscriptions = null; - if (unsubscribe) { - this._unsubscribe = unsubscribe; - } - } - Subscription.prototype.unsubscribe = function () { - var hasErrors = false; - var errors; - if (this.closed) { - return; - } - var _a = this, _parent = _a._parent, _parents = _a._parents, _unsubscribe = _a._unsubscribe, _subscriptions = _a._subscriptions; - this.closed = true; - this._parent = null; - this._parents = null; - this._subscriptions = null; - var index = -1; - var len = _parents ? _parents.length : 0; - while (_parent) { - _parent.remove(this); - _parent = ++index < len && _parents[index] || null; - } - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_2__util_isFunction__["a" /* isFunction */])(_unsubscribe)) { - var trial = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_tryCatch__["a" /* tryCatch */])(_unsubscribe).call(this); - if (trial === __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */]) { - hasErrors = true; - errors = errors || (__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */] ? 
- flattenUnsubscriptionErrors(__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e.errors) : [__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e]); - } - } - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_0__util_isArray__["a" /* isArray */])(_subscriptions)) { - index = -1; - len = _subscriptions.length; - while (++index < len) { - var sub = _subscriptions[index]; - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isObject__["a" /* isObject */])(sub)) { - var trial = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_tryCatch__["a" /* tryCatch */])(sub.unsubscribe).call(sub); - if (trial === __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */]) { - hasErrors = true; - errors = errors || []; - var err = __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e; - if (err instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */]) { - errors = errors.concat(flattenUnsubscriptionErrors(err.errors)); - } - else { - errors.push(err); - } - } - } - } - } - if (hasErrors) { - throw new __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */](errors); - } - }; - Subscription.prototype.add = function (teardown) { - if (!teardown || (teardown === Subscription.EMPTY)) { - return Subscription.EMPTY; - } - if (teardown === this) { - return this; - } - var subscription = teardown; - switch (typeof teardown) { - case 'function': - subscription = new Subscription(teardown); - case 'object': - if (subscription.closed || typeof subscription.unsubscribe !== 'function') { - return subscription; - } - else if (this.closed) { - subscription.unsubscribe(); - return subscription; - } - else if (typeof subscription._addParent !== 'function') { - var tmp = subscription; - subscription = new Subscription(); - subscription._subscriptions = [tmp]; - } - break; - default: - throw new Error('unrecognized teardown ' + teardown + ' added to Subscription.'); - } - var subscriptions = this._subscriptions || (this._subscriptions = []); - subscriptions.push(subscription); - subscription._addParent(this); - return subscription; - }; - Subscription.prototype.remove = function (subscription) { - var subscriptions = this._subscriptions; - if (subscriptions) { - var subscriptionIndex = subscriptions.indexOf(subscription); - if (subscriptionIndex !== -1) { - subscriptions.splice(subscriptionIndex, 1); - } - } - }; - Subscription.prototype._addParent = function (parent) { - var _a = this, _parent = _a._parent, _parents = _a._parents; - if (!_parent || _parent === parent) { - this._parent = parent; - } - else if (!_parents) { - this._parents = [parent]; - } - else if (_parents.indexOf(parent) === -1) { - _parents.push(parent); - } - }; - Subscription.EMPTY = (function (empty) { - empty.closed = true; - return empty; - }(new Subscription())); - return Subscription; -}()); - -function flattenUnsubscriptionErrors(errors) { - return errors.reduce(function (errs, err) { return errs.concat((err instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */]) ? err.errors : err); }, []); -} -//# sourceMappingURL=Subscription.js.map - - -/***/ }), -/* 26 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2015 Joyent, Inc. 
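The webpack module beginning here is sshpk's internal utility belt for key material. A hedged, editor-added sketch of the multi-precision helpers it defines below; the deep `sshpk/lib/utils` require path is an assumption about the unbundled package layout:

```
// Illustrative only -- not part of the deleted bundle.
var utils = require('sshpk/lib/utils');

// mpNormalize: strip redundant leading zero bytes, but keep one when the
// next byte's sign bit is set (OpenSSL-style multi-precision integers).
utils.mpNormalize(Buffer.from([0x00, 0x7f])); // => <Buffer 7f>
utils.mpNormalize(Buffer.from([0x80]));       // => <Buffer 00 80>

// countZeros: leading zero *bits*, used below to compute key sizes.
utils.countZeros(Buffer.from([0x01]));        // => 7
```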
- -module.exports = { - bufferSplit: bufferSplit, - addRSAMissing: addRSAMissing, - calculateDSAPublic: calculateDSAPublic, - calculateED25519Public: calculateED25519Public, - calculateX25519Public: calculateX25519Public, - mpNormalize: mpNormalize, - mpDenormalize: mpDenormalize, - ecNormalize: ecNormalize, - countZeros: countZeros, - assertCompatible: assertCompatible, - isCompatible: isCompatible, - opensslKeyDeriv: opensslKeyDeriv, - opensshCipherInfo: opensshCipherInfo, - publicFromPrivateECDSA: publicFromPrivateECDSA, - zeroPadToLength: zeroPadToLength, - writeBitString: writeBitString, - readBitString: readBitString -}; - -var assert = __webpack_require__(16); -var Buffer = __webpack_require__(15).Buffer; -var PrivateKey = __webpack_require__(33); -var Key = __webpack_require__(27); -var crypto = __webpack_require__(11); -var algs = __webpack_require__(32); -var asn1 = __webpack_require__(66); - -var ec, jsbn; -var nacl; - -var MAX_CLASS_DEPTH = 3; - -function isCompatible(obj, klass, needVer) { - if (obj === null || typeof (obj) !== 'object') - return (false); - if (needVer === undefined) - needVer = klass.prototype._sshpkApiVersion; - if (obj instanceof klass && - klass.prototype._sshpkApiVersion[0] == needVer[0]) - return (true); - var proto = Object.getPrototypeOf(obj); - var depth = 0; - while (proto.constructor.name !== klass.name) { - proto = Object.getPrototypeOf(proto); - if (!proto || ++depth > MAX_CLASS_DEPTH) - return (false); - } - if (proto.constructor.name !== klass.name) - return (false); - var ver = proto._sshpkApiVersion; - if (ver === undefined) - ver = klass._oldVersionDetect(obj); - if (ver[0] != needVer[0] || ver[1] < needVer[1]) - return (false); - return (true); -} - -function assertCompatible(obj, klass, needVer, name) { - if (name === undefined) - name = 'object'; - assert.ok(obj, name + ' must not be null'); - assert.object(obj, name + ' must be an object'); - if (needVer === undefined) - needVer = klass.prototype._sshpkApiVersion; - if (obj instanceof klass && - klass.prototype._sshpkApiVersion[0] == needVer[0]) - return; - var proto = Object.getPrototypeOf(obj); - var depth = 0; - while (proto.constructor.name !== klass.name) { - proto = Object.getPrototypeOf(proto); - assert.ok(proto && ++depth <= MAX_CLASS_DEPTH, - name + ' must be a ' + klass.name + ' instance'); - } - assert.strictEqual(proto.constructor.name, klass.name, - name + ' must be a ' + klass.name + ' instance'); - var ver = proto._sshpkApiVersion; - if (ver === undefined) - ver = klass._oldVersionDetect(obj); - assert.ok(ver[0] == needVer[0] && ver[1] >= needVer[1], - name + ' must be compatible with ' + klass.name + ' klass ' + - 'version ' + needVer[0] + '.' 
+ needVer[1]); -} - -var CIPHER_LEN = { - 'des-ede3-cbc': { key: 7, iv: 8 }, - 'aes-128-cbc': { key: 16, iv: 16 } -}; -var PKCS5_SALT_LEN = 8; - -function opensslKeyDeriv(cipher, salt, passphrase, count) { - assert.buffer(salt, 'salt'); - assert.buffer(passphrase, 'passphrase'); - assert.number(count, 'iteration count'); - - var clen = CIPHER_LEN[cipher]; - assert.object(clen, 'supported cipher'); - - salt = salt.slice(0, PKCS5_SALT_LEN); - - var D, D_prev, bufs; - var material = Buffer.alloc(0); - while (material.length < clen.key + clen.iv) { - bufs = []; - if (D_prev) - bufs.push(D_prev); - bufs.push(passphrase); - bufs.push(salt); - D = Buffer.concat(bufs); - for (var j = 0; j < count; ++j) - D = crypto.createHash('md5').update(D).digest(); - material = Buffer.concat([material, D]); - D_prev = D; - } - - return ({ - key: material.slice(0, clen.key), - iv: material.slice(clen.key, clen.key + clen.iv) - }); -} - -/* Count leading zero bits on a buffer */ -function countZeros(buf) { - var o = 0, obit = 8; - while (o < buf.length) { - var mask = (1 << obit); - if ((buf[o] & mask) === mask) - break; - obit--; - if (obit < 0) { - o++; - obit = 8; - } - } - return (o*8 + (8 - obit) - 1); -} - -function bufferSplit(buf, chr) { - assert.buffer(buf); - assert.string(chr); - - var parts = []; - var lastPart = 0; - var matches = 0; - for (var i = 0; i < buf.length; ++i) { - if (buf[i] === chr.charCodeAt(matches)) - ++matches; - else if (buf[i] === chr.charCodeAt(0)) - matches = 1; - else - matches = 0; - - if (matches >= chr.length) { - var newPart = i + 1; - parts.push(buf.slice(lastPart, newPart - matches)); - lastPart = newPart; - matches = 0; - } - } - if (lastPart <= buf.length) - parts.push(buf.slice(lastPart, buf.length)); - - return (parts); -} - -function ecNormalize(buf, addZero) { - assert.buffer(buf); - if (buf[0] === 0x00 && buf[1] === 0x04) { - if (addZero) - return (buf); - return (buf.slice(1)); - } else if (buf[0] === 0x04) { - if (!addZero) - return (buf); - } else { - while (buf[0] === 0x00) - buf = buf.slice(1); - if (buf[0] === 0x02 || buf[0] === 0x03) - throw (new Error('Compressed elliptic curve points ' + - 'are not supported')); - if (buf[0] !== 0x04) - throw (new Error('Not a valid elliptic curve point')); - if (!addZero) - return (buf); - } - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x0; - buf.copy(b, 1); - return (b); -} - -function readBitString(der, tag) { - if (tag === undefined) - tag = asn1.Ber.BitString; - var buf = der.readString(tag, true); - assert.strictEqual(buf[0], 0x00, 'bit strings with unused bits are ' + - 'not supported (0x' + buf[0].toString(16) + ')'); - return (buf.slice(1)); -} - -function writeBitString(der, buf, tag) { - if (tag === undefined) - tag = asn1.Ber.BitString; - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x00; - buf.copy(b, 1); - der.writeBuffer(b, tag); -} - -function mpNormalize(buf) { - assert.buffer(buf); - while (buf.length > 1 && buf[0] === 0x00 && (buf[1] & 0x80) === 0x00) - buf = buf.slice(1); - if ((buf[0] & 0x80) === 0x80) { - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x00; - buf.copy(b, 1); - buf = b; - } - return (buf); -} - -function mpDenormalize(buf) { - assert.buffer(buf); - while (buf.length > 1 && buf[0] === 0x00) - buf = buf.slice(1); - return (buf); -} - -function zeroPadToLength(buf, len) { - assert.buffer(buf); - assert.number(len); - while (buf.length > len) { - assert.equal(buf[0], 0x00); - buf = buf.slice(1); - } - while (buf.length < len) { - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x00; - 
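// Editor's note (illustrative): grow by one zero byte per iteration until
// the target length is reached, e.g. zeroPadToLength(<7f>, 3) -> <00 00 7f>.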
buf.copy(b, 1); - buf = b; - } - return (buf); -} - -function bigintToMpBuf(bigint) { - var buf = Buffer.from(bigint.toByteArray()); - buf = mpNormalize(buf); - return (buf); -} - -function calculateDSAPublic(g, p, x) { - assert.buffer(g); - assert.buffer(p); - assert.buffer(x); - try { - var bigInt = __webpack_require__(81).BigInteger; - } catch (e) { - throw (new Error('To load a PKCS#8 format DSA private key, ' + - 'the node jsbn library is required.')); - } - g = new bigInt(g); - p = new bigInt(p); - x = new bigInt(x); - var y = g.modPow(x, p); - var ybuf = bigintToMpBuf(y); - return (ybuf); -} - -function calculateED25519Public(k) { - assert.buffer(k); - - if (nacl === undefined) - nacl = __webpack_require__(76); - - var kp = nacl.sign.keyPair.fromSeed(new Uint8Array(k)); - return (Buffer.from(kp.publicKey)); -} - -function calculateX25519Public(k) { - assert.buffer(k); - - if (nacl === undefined) - nacl = __webpack_require__(76); - - var kp = nacl.box.keyPair.fromSeed(new Uint8Array(k)); - return (Buffer.from(kp.publicKey)); -} - -function addRSAMissing(key) { - assert.object(key); - assertCompatible(key, PrivateKey, [1, 1]); - try { - var bigInt = __webpack_require__(81).BigInteger; - } catch (e) { - throw (new Error('To write a PEM private key from ' + - 'this source, the node jsbn lib is required.')); - } - - var d = new bigInt(key.part.d.data); - var buf; - - if (!key.part.dmodp) { - var p = new bigInt(key.part.p.data); - var dmodp = d.mod(p.subtract(1)); - - buf = bigintToMpBuf(dmodp); - key.part.dmodp = {name: 'dmodp', data: buf}; - key.parts.push(key.part.dmodp); - } - if (!key.part.dmodq) { - var q = new bigInt(key.part.q.data); - var dmodq = d.mod(q.subtract(1)); - - buf = bigintToMpBuf(dmodq); - key.part.dmodq = {name: 'dmodq', data: buf}; - key.parts.push(key.part.dmodq); - } -} - -function publicFromPrivateECDSA(curveName, priv) { - assert.string(curveName, 'curveName'); - assert.buffer(priv); - if (ec === undefined) - ec = __webpack_require__(139); - if (jsbn === undefined) - jsbn = __webpack_require__(81).BigInteger; - var params = algs.curves[curveName]; - var p = new jsbn(params.p); - var a = new jsbn(params.a); - var b = new jsbn(params.b); - var curve = new ec.ECCurveFp(p, a, b); - var G = curve.decodePointHex(params.G.toString('hex')); - - var d = new jsbn(mpNormalize(priv)); - var pub = G.multiply(d); - pub = Buffer.from(curve.encodePointHex(pub), 'hex'); - - var parts = []; - parts.push({name: 'curve', data: Buffer.from(curveName)}); - parts.push({name: 'Q', data: pub}); - - var key = new Key({type: 'ecdsa', curve: curve, parts: parts}); - return (key); -} - -function opensshCipherInfo(cipher) { - var inf = {}; - switch (cipher) { - case '3des-cbc': - inf.keySize = 24; - inf.blockSize = 8; - inf.opensslName = 'des-ede3-cbc'; - break; - case 'blowfish-cbc': - inf.keySize = 16; - inf.blockSize = 8; - inf.opensslName = 'bf-cbc'; - break; - case 'aes128-cbc': - case 'aes128-ctr': - case 'aes128-gcm@openssh.com': - inf.keySize = 16; - inf.blockSize = 16; - inf.opensslName = 'aes-128-' + cipher.slice(7, 10); - break; - case 'aes192-cbc': - case 'aes192-ctr': - case 'aes192-gcm@openssh.com': - inf.keySize = 24; - inf.blockSize = 16; - inf.opensslName = 'aes-192-' + cipher.slice(7, 10); - break; - case 'aes256-cbc': - case 'aes256-ctr': - case 'aes256-gcm@openssh.com': - inf.keySize = 32; - inf.blockSize = 16; - inf.opensslName = 'aes-256-' + cipher.slice(7, 10); - break; - default: - throw (new Error( - 'Unsupported openssl cipher "' + cipher + '"')); - } - return 
(inf); -} - - -/***/ }), -/* 27 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2017 Joyent, Inc. - -module.exports = Key; - -var assert = __webpack_require__(16); -var algs = __webpack_require__(32); -var crypto = __webpack_require__(11); -var Fingerprint = __webpack_require__(156); -var Signature = __webpack_require__(75); -var DiffieHellman = __webpack_require__(325).DiffieHellman; -var errs = __webpack_require__(74); -var utils = __webpack_require__(26); -var PrivateKey = __webpack_require__(33); -var edCompat; - -try { - edCompat = __webpack_require__(454); -} catch (e) { - /* Just continue through, and bail out if we try to use it. */ -} - -var InvalidAlgorithmError = errs.InvalidAlgorithmError; -var KeyParseError = errs.KeyParseError; - -var formats = {}; -formats['auto'] = __webpack_require__(455); -formats['pem'] = __webpack_require__(86); -formats['pkcs1'] = __webpack_require__(327); -formats['pkcs8'] = __webpack_require__(157); -formats['rfc4253'] = __webpack_require__(103); -formats['ssh'] = __webpack_require__(456); -formats['ssh-private'] = __webpack_require__(192); -formats['openssh'] = formats['ssh-private']; -formats['dnssec'] = __webpack_require__(326); - -function Key(opts) { - assert.object(opts, 'options'); - assert.arrayOfObject(opts.parts, 'options.parts'); - assert.string(opts.type, 'options.type'); - assert.optionalString(opts.comment, 'options.comment'); - - var algInfo = algs.info[opts.type]; - if (typeof (algInfo) !== 'object') - throw (new InvalidAlgorithmError(opts.type)); - - var partLookup = {}; - for (var i = 0; i < opts.parts.length; ++i) { - var part = opts.parts[i]; - partLookup[part.name] = part; - } - - this.type = opts.type; - this.parts = opts.parts; - this.part = partLookup; - this.comment = undefined; - this.source = opts.source; - - /* for speeding up hashing/fingerprint operations */ - this._rfc4253Cache = opts._rfc4253Cache; - this._hashCache = {}; - - var sz; - this.curve = undefined; - if (this.type === 'ecdsa') { - var curve = this.part.curve.data.toString(); - this.curve = curve; - sz = algs.curves[curve].size; - } else if (this.type === 'ed25519' || this.type === 'curve25519') { - sz = 256; - this.curve = 'curve25519'; - } else { - var szPart = this.part[algInfo.sizePart]; - sz = szPart.data.length; - sz = sz * 8 - utils.countZeros(szPart.data); - } - this.size = sz; -} - -Key.formats = formats; - -Key.prototype.toBuffer = function (format, options) { - if (format === undefined) - format = 'ssh'; - assert.string(format, 'format'); - assert.object(formats[format], 'formats[format]'); - assert.optionalObject(options, 'options'); - - if (format === 'rfc4253') { - if (this._rfc4253Cache === undefined) - this._rfc4253Cache = formats['rfc4253'].write(this); - return (this._rfc4253Cache); - } - - return (formats[format].write(this, options)); -}; - -Key.prototype.toString = function (format, options) { - return (this.toBuffer(format, options).toString()); -}; - -Key.prototype.hash = function (algo) { - assert.string(algo, 'algorithm'); - algo = algo.toLowerCase(); - if (algs.hashAlgs[algo] === undefined) - throw (new InvalidAlgorithmError(algo)); - - if (this._hashCache[algo]) - return (this._hashCache[algo]); - var hash = crypto.createHash(algo). 
- update(this.toBuffer('rfc4253')).digest(); - this._hashCache[algo] = hash; - return (hash); -}; - -Key.prototype.fingerprint = function (algo) { - if (algo === undefined) - algo = 'sha256'; - assert.string(algo, 'algorithm'); - var opts = { - type: 'key', - hash: this.hash(algo), - algorithm: algo - }; - return (new Fingerprint(opts)); -}; - -Key.prototype.defaultHashAlgorithm = function () { - var hashAlgo = 'sha1'; - if (this.type === 'rsa') - hashAlgo = 'sha256'; - if (this.type === 'dsa' && this.size > 1024) - hashAlgo = 'sha256'; - if (this.type === 'ed25519') - hashAlgo = 'sha512'; - if (this.type === 'ecdsa') { - if (this.size <= 256) - hashAlgo = 'sha256'; - else if (this.size <= 384) - hashAlgo = 'sha384'; - else - hashAlgo = 'sha512'; - } - return (hashAlgo); -}; - -Key.prototype.createVerify = function (hashAlgo) { - if (hashAlgo === undefined) - hashAlgo = this.defaultHashAlgorithm(); - assert.string(hashAlgo, 'hash algorithm'); - - /* ED25519 is not supported by OpenSSL, use a javascript impl. */ - if (this.type === 'ed25519' && edCompat !== undefined) - return (new edCompat.Verifier(this, hashAlgo)); - if (this.type === 'curve25519') - throw (new Error('Curve25519 keys are not suitable for ' + - 'signing or verification')); - - var v, nm, err; - try { - nm = hashAlgo.toUpperCase(); - v = crypto.createVerify(nm); - } catch (e) { - err = e; - } - if (v === undefined || (err instanceof Error && - err.message.match(/Unknown message digest/))) { - nm = 'RSA-'; - nm += hashAlgo.toUpperCase(); - v = crypto.createVerify(nm); - } - assert.ok(v, 'failed to create verifier'); - var oldVerify = v.verify.bind(v); - var key = this.toBuffer('pkcs8'); - var curve = this.curve; - var self = this; - v.verify = function (signature, fmt) { - if (Signature.isSignature(signature, [2, 0])) { - if (signature.type !== self.type) - return (false); - if (signature.hashAlgorithm && - signature.hashAlgorithm !== hashAlgo) - return (false); - if (signature.curve && self.type === 'ecdsa' && - signature.curve !== curve) - return (false); - return (oldVerify(key, signature.toBuffer('asn1'))); - - } else if (typeof (signature) === 'string' || - Buffer.isBuffer(signature)) { - return (oldVerify(key, signature, fmt)); - - /* - * Avoid doing this on valid arguments, walking the prototype - * chain can be quite slow. 
- */ - } else if (Signature.isSignature(signature, [1, 0])) { - throw (new Error('signature was created by too old ' + - 'a version of sshpk and cannot be verified')); - - } else { - throw (new TypeError('signature must be a string, ' + - 'Buffer, or Signature object')); - } - }; - return (v); -}; - -Key.prototype.createDiffieHellman = function () { - if (this.type === 'rsa') - throw (new Error('RSA keys do not support Diffie-Hellman')); - - return (new DiffieHellman(this)); -}; -Key.prototype.createDH = Key.prototype.createDiffieHellman; - -Key.parse = function (data, format, options) { - if (typeof (data) !== 'string') - assert.buffer(data, 'data'); - if (format === undefined) - format = 'auto'; - assert.string(format, 'format'); - if (typeof (options) === 'string') - options = { filename: options }; - assert.optionalObject(options, 'options'); - if (options === undefined) - options = {}; - assert.optionalString(options.filename, 'options.filename'); - if (options.filename === undefined) - options.filename = '(unnamed)'; - - assert.object(formats[format], 'formats[format]'); - - try { - var k = formats[format].read(data, options); - if (k instanceof PrivateKey) - k = k.toPublic(); - if (!k.comment) - k.comment = options.filename; - return (k); - } catch (e) { - if (e.name === 'KeyEncryptedError') - throw (e); - throw (new KeyParseError(options.filename, format, e)); - } -}; - -Key.isKey = function (obj, ver) { - return (utils.isCompatible(obj, Key, ver)); -}; - -/* - * API versions for Key: - * [1,0] -- initial ver, may take Signature for createVerify or may not - * [1,1] -- added pkcs1, pkcs8 formats - * [1,2] -- added auto, ssh-private, openssh formats - * [1,3] -- added defaultHashAlgorithm - * [1,4] -- added ed support, createDH - * [1,5] -- first explicitly tagged version - * [1,6] -- changed ed25519 part names - */ -Key.prototype._sshpkApiVersion = [1, 6]; - -Key._oldVersionDetect = function (obj) { - assert.func(obj.toBuffer); - assert.func(obj.fingerprint); - if (obj.createDH) - return ([1, 4]); - if (obj.defaultHashAlgorithm) - return ([1, 3]); - if (obj.formats['auto']) - return ([1, 2]); - if (obj.formats['pkcs1']) - return ([1, 1]); - return ([1, 0]); -}; - - -/***/ }), -/* 28 */ -/***/ (function(module, exports) { - -module.exports = require("assert"); - -/***/ }), -/* 29 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.default = nullify; -function nullify(obj = {}) { - if (Array.isArray(obj)) { - for (var _iterator = obj, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? 
_iterator : _iterator[Symbol.iterator]();;) { - var _ref; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref = _i.value; - } - - const item = _ref; - - nullify(item); - } - } else if (obj !== null && typeof obj === 'object' || typeof obj === 'function') { - Object.setPrototypeOf(obj, null); - - // for..in can only be applied to 'object', not 'function' - if (typeof obj === 'object') { - for (const key in obj) { - nullify(obj[key]); - } - } - } - - return obj; -} - -/***/ }), -/* 30 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -const escapeStringRegexp = __webpack_require__(388); -const ansiStyles = __webpack_require__(506); -const stdoutColor = __webpack_require__(598).stdout; - -const template = __webpack_require__(599); - -const isSimpleWindowsTerm = process.platform === 'win32' && !(process.env.TERM || '').toLowerCase().startsWith('xterm'); - -// `supportsColor.level` → `ansiStyles.color[name]` mapping -const levelMapping = ['ansi', 'ansi', 'ansi256', 'ansi16m']; - -// `color-convert` models to exclude from the Chalk API due to conflicts and such -const skipModels = new Set(['gray']); - -const styles = Object.create(null); - -function applyOptions(obj, options) { - options = options || {}; - - // Detect level if not set manually - const scLevel = stdoutColor ? stdoutColor.level : 0; - obj.level = options.level === undefined ? scLevel : options.level; - obj.enabled = 'enabled' in options ? options.enabled : obj.level > 0; -} - -function Chalk(options) { - // We check for this.template here since calling `chalk.constructor()` - // by itself will have a `this` of a previously constructed chalk object - if (!this || !(this instanceof Chalk) || this.template) { - const chalk = {}; - applyOptions(chalk, options); - - chalk.template = function () { - const args = [].slice.call(arguments); - return chalkTag.apply(null, [chalk.template].concat(args)); - }; - - Object.setPrototypeOf(chalk, Chalk.prototype); - Object.setPrototypeOf(chalk.template, chalk); - - chalk.template.constructor = Chalk; - - return chalk.template; - } - - applyOptions(this, options); -} - -// Use bright blue on Windows as the normal blue color is illegible -if (isSimpleWindowsTerm) { - ansiStyles.blue.open = '\u001B[94m'; -} - -for (const key of Object.keys(ansiStyles)) { - ansiStyles[key].closeRe = new RegExp(escapeStringRegexp(ansiStyles[key].close), 'g'); - - styles[key] = { - get() { - const codes = ansiStyles[key]; - return build.call(this, this._styles ? this._styles.concat(codes) : [codes], this._empty, key); - } - }; -} - -styles.visible = { - get() { - return build.call(this, this._styles || [], true, 'visible'); - } -}; - -ansiStyles.color.closeRe = new RegExp(escapeStringRegexp(ansiStyles.color.close), 'g'); -for (const model of Object.keys(ansiStyles.color.ansi)) { - if (skipModels.has(model)) { - continue; - } - - styles[model] = { - get() { - const level = this.level; - return function () { - const open = ansiStyles.color[levelMapping[level]][model].apply(null, arguments); - const codes = { - open, - close: ansiStyles.color.close, - closeRe: ansiStyles.color.closeRe - }; - return build.call(this, this._styles ? 
this._styles.concat(codes) : [codes], this._empty, model); - }; - } - }; -} - -ansiStyles.bgColor.closeRe = new RegExp(escapeStringRegexp(ansiStyles.bgColor.close), 'g'); -for (const model of Object.keys(ansiStyles.bgColor.ansi)) { - if (skipModels.has(model)) { - continue; - } - - const bgModel = 'bg' + model[0].toUpperCase() + model.slice(1); - styles[bgModel] = { - get() { - const level = this.level; - return function () { - const open = ansiStyles.bgColor[levelMapping[level]][model].apply(null, arguments); - const codes = { - open, - close: ansiStyles.bgColor.close, - closeRe: ansiStyles.bgColor.closeRe - }; - return build.call(this, this._styles ? this._styles.concat(codes) : [codes], this._empty, model); - }; - } - }; -} - -const proto = Object.defineProperties(() => {}, styles); - -function build(_styles, _empty, key) { - const builder = function () { - return applyStyle.apply(builder, arguments); - }; - - builder._styles = _styles; - builder._empty = _empty; - - const self = this; - - Object.defineProperty(builder, 'level', { - enumerable: true, - get() { - return self.level; - }, - set(level) { - self.level = level; - } - }); - - Object.defineProperty(builder, 'enabled', { - enumerable: true, - get() { - return self.enabled; - }, - set(enabled) { - self.enabled = enabled; - } - }); - - // See below for fix regarding invisible grey/dim combination on Windows - builder.hasGrey = this.hasGrey || key === 'gray' || key === 'grey'; - - // `__proto__` is used because we must return a function, but there is - // no way to create a function with a different prototype - builder.__proto__ = proto; // eslint-disable-line no-proto - - return builder; -} - -function applyStyle() { - // Support varargs, but simply cast to string in case there's only one arg - const args = arguments; - const argsLen = args.length; - let str = String(arguments[0]); - - if (argsLen === 0) { - return ''; - } - - if (argsLen > 1) { - // Don't slice `arguments`, it prevents V8 optimizations - for (let a = 1; a < argsLen; a++) { - str += ' ' + args[a]; - } - } - - if (!this.enabled || this.level <= 0 || !str) { - return this._empty ? '' : str; - } - - // Turns out that on Windows dimmed gray text becomes invisible in cmd.exe, - // see https://github.com/chalk/chalk/issues/58 - // If we're on Windows and we're dealing with a gray color, temporarily make 'dim' a noop. - const originalDim = ansiStyles.dim.open; - if (isSimpleWindowsTerm && this.hasGrey) { - ansiStyles.dim.open = ''; - } - - for (const code of this._styles.slice().reverse()) { - // Replace any instances already present with a re-opening code - // otherwise only the part of the string until said closing code - // will be colored, and the rest will simply be 'plain'. - str = code.open + str.replace(code.closeRe, code.open) + code.close; - - // Close the styling before a linebreak and reopen - // after next line to fix a bleed issue on macOS - // https://github.com/chalk/chalk/pull/92 - str = str.replace(/\r?\n/g, `${code.close}$&${code.open}`); - } - - // Reset the original `dim` if we changed it to work around the Windows dimmed gray issue - ansiStyles.dim.open = originalDim; - - return str; -} - -function chalkTag(chalk, strings) { - if (!Array.isArray(strings)) { - // If chalk() was called by itself or with a string, - // return the string itself as a string.
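// Editor's note (illustrative): this branch handles plain calls such as
// chalk('hi'), which return the text unstyled, while tagged-template calls
// like chalk`{red hi}` carry a `strings.raw` array and fall through to the
// template() parser invoked below.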
- return [].slice.call(arguments, 1).join(' '); - } - - const args = [].slice.call(arguments, 2); - const parts = [strings.raw[0]]; - - for (let i = 1; i < strings.length; i++) { - parts.push(String(args[i - 1]).replace(/[{}\\]/g, '\\$&')); - parts.push(String(strings.raw[i])); - } - - return template(chalk, parts.join('')); -} - -Object.defineProperties(Chalk.prototype, styles); - -module.exports = Chalk(); // eslint-disable-line new-cap -module.exports.supportsColor = stdoutColor; -module.exports.default = module.exports; // For TypeScript - - -/***/ }), -/* 31 */ -/***/ (function(module, exports) { - -var core = module.exports = { version: '2.5.7' }; -if (typeof __e == 'number') __e = core; // eslint-disable-line no-undef - - -/***/ }), -/* 32 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2015 Joyent, Inc. - -var Buffer = __webpack_require__(15).Buffer; - -var algInfo = { - 'dsa': { - parts: ['p', 'q', 'g', 'y'], - sizePart: 'p' - }, - 'rsa': { - parts: ['e', 'n'], - sizePart: 'n' - }, - 'ecdsa': { - parts: ['curve', 'Q'], - sizePart: 'Q' - }, - 'ed25519': { - parts: ['A'], - sizePart: 'A' - } -}; -algInfo['curve25519'] = algInfo['ed25519']; - -var algPrivInfo = { - 'dsa': { - parts: ['p', 'q', 'g', 'y', 'x'] - }, - 'rsa': { - parts: ['n', 'e', 'd', 'iqmp', 'p', 'q'] - }, - 'ecdsa': { - parts: ['curve', 'Q', 'd'] - }, - 'ed25519': { - parts: ['A', 'k'] - } -}; -algPrivInfo['curve25519'] = algPrivInfo['ed25519']; - -var hashAlgs = { - 'md5': true, - 'sha1': true, - 'sha256': true, - 'sha384': true, - 'sha512': true -}; - -/* - * Taken from - * http://csrc.nist.gov/groups/ST/toolkit/documents/dss/NISTReCur.pdf - */ -var curves = { - 'nistp256': { - size: 256, - pkcs8oid: '1.2.840.10045.3.1.7', - p: Buffer.from(('00' + - 'ffffffff 00000001 00000000 00000000' + - '00000000 ffffffff ffffffff ffffffff'). - replace(/ /g, ''), 'hex'), - a: Buffer.from(('00' + - 'FFFFFFFF 00000001 00000000 00000000' + - '00000000 FFFFFFFF FFFFFFFF FFFFFFFC'). - replace(/ /g, ''), 'hex'), - b: Buffer.from(( - '5ac635d8 aa3a93e7 b3ebbd55 769886bc' + - '651d06b0 cc53b0f6 3bce3c3e 27d2604b'). - replace(/ /g, ''), 'hex'), - s: Buffer.from(('00' + - 'c49d3608 86e70493 6a6678e1 139d26b7' + - '819f7e90'). - replace(/ /g, ''), 'hex'), - n: Buffer.from(('00' + - 'ffffffff 00000000 ffffffff ffffffff' + - 'bce6faad a7179e84 f3b9cac2 fc632551'). - replace(/ /g, ''), 'hex'), - G: Buffer.from(('04' + - '6b17d1f2 e12c4247 f8bce6e5 63a440f2' + - '77037d81 2deb33a0 f4a13945 d898c296' + - '4fe342e2 fe1a7f9b 8ee7eb4a 7c0f9e16' + - '2bce3357 6b315ece cbb64068 37bf51f5'). - replace(/ /g, ''), 'hex') - }, - 'nistp384': { - size: 384, - pkcs8oid: '1.3.132.0.34', - p: Buffer.from(('00' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff fffffffe' + - 'ffffffff 00000000 00000000 ffffffff'). - replace(/ /g, ''), 'hex'), - a: Buffer.from(('00' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE' + - 'FFFFFFFF 00000000 00000000 FFFFFFFC'). - replace(/ /g, ''), 'hex'), - b: Buffer.from(( - 'b3312fa7 e23ee7e4 988e056b e3f82d19' + - '181d9c6e fe814112 0314088f 5013875a' + - 'c656398d 8a2ed19d 2a85c8ed d3ec2aef'). - replace(/ /g, ''), 'hex'), - s: Buffer.from(('00' + - 'a335926a a319a27a 1d00896a 6773a482' + - '7acdac73'). - replace(/ /g, ''), 'hex'), - n: Buffer.from(('00' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff c7634d81 f4372ddf' + - '581a0db2 48b0a77a ecec196a ccc52973'). 
- replace(/ /g, ''), 'hex'), - G: Buffer.from(('04' + - 'aa87ca22 be8b0537 8eb1c71e f320ad74' + - '6e1d3b62 8ba79b98 59f741e0 82542a38' + - '5502f25d bf55296c 3a545e38 72760ab7' + - '3617de4a 96262c6f 5d9e98bf 9292dc29' + - 'f8f41dbd 289a147c e9da3113 b5f0b8c0' + - '0a60b1ce 1d7e819d 7a431d7c 90ea0e5f'). - replace(/ /g, ''), 'hex') - }, - 'nistp521': { - size: 521, - pkcs8oid: '1.3.132.0.35', - p: Buffer.from(( - '01ffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffff').replace(/ /g, ''), 'hex'), - a: Buffer.from(('01FF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFC'). - replace(/ /g, ''), 'hex'), - b: Buffer.from(('51' + - '953eb961 8e1c9a1f 929a21a0 b68540ee' + - 'a2da725b 99b315f3 b8b48991 8ef109e1' + - '56193951 ec7e937b 1652c0bd 3bb1bf07' + - '3573df88 3d2c34f1 ef451fd4 6b503f00'). - replace(/ /g, ''), 'hex'), - s: Buffer.from(('00' + - 'd09e8800 291cb853 96cc6717 393284aa' + - 'a0da64ba').replace(/ /g, ''), 'hex'), - n: Buffer.from(('01ff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff fffffffa' + - '51868783 bf2f966b 7fcc0148 f709a5d0' + - '3bb5c9b8 899c47ae bb6fb71e 91386409'). - replace(/ /g, ''), 'hex'), - G: Buffer.from(('04' + - '00c6 858e06b7 0404e9cd 9e3ecb66 2395b442' + - '9c648139 053fb521 f828af60 6b4d3dba' + - 'a14b5e77 efe75928 fe1dc127 a2ffa8de' + - '3348b3c1 856a429b f97e7e31 c2e5bd66' + - '0118 39296a78 9a3bc004 5c8a5fb4 2c7d1bd9' + - '98f54449 579b4468 17afbd17 273e662c' + - '97ee7299 5ef42640 c550b901 3fad0761' + - '353c7086 a272c240 88be9476 9fd16650'). - replace(/ /g, ''), 'hex') - } -}; - -module.exports = { - info: algInfo, - privInfo: algPrivInfo, - hashAlgs: hashAlgs, - curves: curves -}; - - -/***/ }), -/* 33 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2017 Joyent, Inc. - -module.exports = PrivateKey; - -var assert = __webpack_require__(16); -var Buffer = __webpack_require__(15).Buffer; -var algs = __webpack_require__(32); -var crypto = __webpack_require__(11); -var Fingerprint = __webpack_require__(156); -var Signature = __webpack_require__(75); -var errs = __webpack_require__(74); -var util = __webpack_require__(3); -var utils = __webpack_require__(26); -var dhe = __webpack_require__(325); -var generateECDSA = dhe.generateECDSA; -var generateED25519 = dhe.generateED25519; -var edCompat; -var nacl; - -try { - edCompat = __webpack_require__(454); -} catch (e) { - /* Just continue through, and bail out if we try to use it. 
*/ -} - -var Key = __webpack_require__(27); - -var InvalidAlgorithmError = errs.InvalidAlgorithmError; -var KeyParseError = errs.KeyParseError; -var KeyEncryptedError = errs.KeyEncryptedError; - -var formats = {}; -formats['auto'] = __webpack_require__(455); -formats['pem'] = __webpack_require__(86); -formats['pkcs1'] = __webpack_require__(327); -formats['pkcs8'] = __webpack_require__(157); -formats['rfc4253'] = __webpack_require__(103); -formats['ssh-private'] = __webpack_require__(192); -formats['openssh'] = formats['ssh-private']; -formats['ssh'] = formats['ssh-private']; -formats['dnssec'] = __webpack_require__(326); - -function PrivateKey(opts) { - assert.object(opts, 'options'); - Key.call(this, opts); - - this._pubCache = undefined; -} -util.inherits(PrivateKey, Key); - -PrivateKey.formats = formats; - -PrivateKey.prototype.toBuffer = function (format, options) { - if (format === undefined) - format = 'pkcs1'; - assert.string(format, 'format'); - assert.object(formats[format], 'formats[format]'); - assert.optionalObject(options, 'options'); - - return (formats[format].write(this, options)); -}; - -PrivateKey.prototype.hash = function (algo) { - return (this.toPublic().hash(algo)); -}; - -PrivateKey.prototype.toPublic = function () { - if (this._pubCache) - return (this._pubCache); - - var algInfo = algs.info[this.type]; - var pubParts = []; - for (var i = 0; i < algInfo.parts.length; ++i) { - var p = algInfo.parts[i]; - pubParts.push(this.part[p]); - } - - this._pubCache = new Key({ - type: this.type, - source: this, - parts: pubParts - }); - if (this.comment) - this._pubCache.comment = this.comment; - return (this._pubCache); -}; - -PrivateKey.prototype.derive = function (newType) { - assert.string(newType, 'type'); - var priv, pub, pair; - - if (this.type === 'ed25519' && newType === 'curve25519') { - if (nacl === undefined) - nacl = __webpack_require__(76); - - priv = this.part.k.data; - if (priv[0] === 0x00) - priv = priv.slice(1); - - pair = nacl.box.keyPair.fromSecretKey(new Uint8Array(priv)); - pub = Buffer.from(pair.publicKey); - - return (new PrivateKey({ - type: 'curve25519', - parts: [ - { name: 'A', data: utils.mpNormalize(pub) }, - { name: 'k', data: utils.mpNormalize(priv) } - ] - })); - } else if (this.type === 'curve25519' && newType === 'ed25519') { - if (nacl === undefined) - nacl = __webpack_require__(76); - - priv = this.part.k.data; - if (priv[0] === 0x00) - priv = priv.slice(1); - - pair = nacl.sign.keyPair.fromSeed(new Uint8Array(priv)); - pub = Buffer.from(pair.publicKey); - - return (new PrivateKey({ - type: 'ed25519', - parts: [ - { name: 'A', data: utils.mpNormalize(pub) }, - { name: 'k', data: utils.mpNormalize(priv) } - ] - })); - } - throw (new Error('Key derivation not supported from ' + this.type + - ' to ' + newType)); -}; - -PrivateKey.prototype.createVerify = function (hashAlgo) { - return (this.toPublic().createVerify(hashAlgo)); -}; - -PrivateKey.prototype.createSign = function (hashAlgo) { - if (hashAlgo === undefined) - hashAlgo = this.defaultHashAlgorithm(); - assert.string(hashAlgo, 'hash algorithm'); - - /* ED25519 is not supported by OpenSSL, use a javascript impl. 
*/ - if (this.type === 'ed25519' && edCompat !== undefined) - return (new edCompat.Signer(this, hashAlgo)); - if (this.type === 'curve25519') - throw (new Error('Curve25519 keys are not suitable for ' + - 'signing or verification')); - - var v, nm, err; - try { - nm = hashAlgo.toUpperCase(); - v = crypto.createSign(nm); - } catch (e) { - err = e; - } - if (v === undefined || (err instanceof Error && - err.message.match(/Unknown message digest/))) { - nm = 'RSA-'; - nm += hashAlgo.toUpperCase(); - v = crypto.createSign(nm); - } - assert.ok(v, 'failed to create verifier'); - var oldSign = v.sign.bind(v); - var key = this.toBuffer('pkcs1'); - var type = this.type; - var curve = this.curve; - v.sign = function () { - var sig = oldSign(key); - if (typeof (sig) === 'string') - sig = Buffer.from(sig, 'binary'); - sig = Signature.parse(sig, type, 'asn1'); - sig.hashAlgorithm = hashAlgo; - sig.curve = curve; - return (sig); - }; - return (v); -}; - -PrivateKey.parse = function (data, format, options) { - if (typeof (data) !== 'string') - assert.buffer(data, 'data'); - if (format === undefined) - format = 'auto'; - assert.string(format, 'format'); - if (typeof (options) === 'string') - options = { filename: options }; - assert.optionalObject(options, 'options'); - if (options === undefined) - options = {}; - assert.optionalString(options.filename, 'options.filename'); - if (options.filename === undefined) - options.filename = '(unnamed)'; - - assert.object(formats[format], 'formats[format]'); - - try { - var k = formats[format].read(data, options); - assert.ok(k instanceof PrivateKey, 'key is not a private key'); - if (!k.comment) - k.comment = options.filename; - return (k); - } catch (e) { - if (e.name === 'KeyEncryptedError') - throw (e); - throw (new KeyParseError(options.filename, format, e)); - } -}; - -PrivateKey.isPrivateKey = function (obj, ver) { - return (utils.isCompatible(obj, PrivateKey, ver)); -}; - -PrivateKey.generate = function (type, options) { - if (options === undefined) - options = {}; - assert.object(options, 'options'); - - switch (type) { - case 'ecdsa': - if (options.curve === undefined) - options.curve = 'nistp256'; - assert.string(options.curve, 'options.curve'); - return (generateECDSA(options.curve)); - case 'ed25519': - return (generateED25519()); - default: - throw (new Error('Key generation not supported with key ' + - 'type "' + type + '"')); - } -}; - -/* - * API versions for PrivateKey: - * [1,0] -- initial ver - * [1,1] -- added auto, pkcs[18], openssh/ssh-private formats - * [1,2] -- added defaultHashAlgorithm - * [1,3] -- added derive, ed, createDH - * [1,4] -- first tagged version - * [1,5] -- changed ed25519 part names and format - */ -PrivateKey.prototype._sshpkApiVersion = [1, 5]; - -PrivateKey._oldVersionDetect = function (obj) { - assert.func(obj.toPublic); - assert.func(obj.createSign); - if (obj.derive) - return ([1, 3]); - if (obj.defaultHashAlgorithm) - return ([1, 2]); - if (obj.formats['auto']) - return ([1, 1]); - return ([1, 0]); -}; - - -/***/ }), -/* 34 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.wrapLifecycle = exports.run = exports.install = exports.Install = undefined; - -var _extends2; - -function _load_extends() { - return _extends2 = _interopRequireDefault(__webpack_require__(21)); -} - -var _asyncToGenerator2; - -function _load_asyncToGenerator() { - return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); -} 
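Every dependency of this `install` command module is pulled in through the same memoized `_load_*` indirection shown above, so a module is only required the first time it is touched. A minimal, editor-added sketch of the pattern (`./lockfile` and the helper names are illustrative, not the bundle's real module ids):

```
// Lazy, memoized require: load on first use, reuse the cached binding after.
var _lockfile;
function _load_lockfile() {
  return _lockfile = _interopRequireDefault(require('./lockfile'));
}
function _interopRequireDefault(obj) {
  return obj && obj.__esModule ? obj : { default: obj };
}

// Call sites then read: (_lockfile || _load_lockfile()).default
```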
- -let install = exports.install = (() => { - var _ref29 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, reporter, flags, lockfile) { - yield wrapLifecycle(config, flags, (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const install = new Install(flags, config, reporter, lockfile); - yield install.init(); - })); - }); - - return function install(_x7, _x8, _x9, _x10) { - return _ref29.apply(this, arguments); - }; -})(); - -let run = exports.run = (() => { - var _ref31 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, reporter, flags, args) { - let lockfile; - let error = 'installCommandRenamed'; - if (flags.lockfile === false) { - lockfile = new (_lockfile || _load_lockfile()).default(); - } else { - lockfile = yield (_lockfile || _load_lockfile()).default.fromDirectory(config.lockfileFolder, reporter); - } - - if (args.length) { - const exampleArgs = args.slice(); - - if (flags.saveDev) { - exampleArgs.push('--dev'); - } - if (flags.savePeer) { - exampleArgs.push('--peer'); - } - if (flags.saveOptional) { - exampleArgs.push('--optional'); - } - if (flags.saveExact) { - exampleArgs.push('--exact'); - } - if (flags.saveTilde) { - exampleArgs.push('--tilde'); - } - let command = 'add'; - if (flags.global) { - error = 'globalFlagRemoved'; - command = 'global add'; - } - throw new (_errors || _load_errors()).MessageError(reporter.lang(error, `yarn ${command} ${exampleArgs.join(' ')}`)); - } - - yield install(config, reporter, flags, lockfile); - }); - - return function run(_x11, _x12, _x13, _x14) { - return _ref31.apply(this, arguments); - }; -})(); - -let wrapLifecycle = exports.wrapLifecycle = (() => { - var _ref32 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, flags, factory) { - yield config.executeLifecycleScript('preinstall'); - - yield factory(); - - // npm behaviour, seems kinda funky but yay compatibility - yield config.executeLifecycleScript('install'); - yield config.executeLifecycleScript('postinstall'); - - if (!config.production) { - if (!config.disablePrepublish) { - yield config.executeLifecycleScript('prepublish'); - } - yield config.executeLifecycleScript('prepare'); - } - }); - - return function wrapLifecycle(_x15, _x16, _x17) { - return _ref32.apply(this, arguments); - }; -})(); - -exports.hasWrapper = hasWrapper; -exports.setFlags = setFlags; - -var _objectPath; - -function _load_objectPath() { - return _objectPath = _interopRequireDefault(__webpack_require__(304)); -} - -var _hooks; - -function _load_hooks() { - return _hooks = __webpack_require__(374); -} - -var _index; - -function _load_index() { - return _index = _interopRequireDefault(__webpack_require__(220)); -} - -var _errors; - -function _load_errors() { - return _errors = __webpack_require__(6); -} - -var _integrityChecker; - -function _load_integrityChecker() { - return _integrityChecker = _interopRequireDefault(__webpack_require__(208)); -} - -var _lockfile; - -function _load_lockfile() { - return _lockfile = _interopRequireDefault(__webpack_require__(19)); -} - -var _lockfile2; - -function _load_lockfile2() { - return _lockfile2 = __webpack_require__(19); -} - -var _packageFetcher; - -function _load_packageFetcher() { - return _packageFetcher = _interopRequireWildcard(__webpack_require__(210)); -} - -var _packageInstallScripts; - -function _load_packageInstallScripts() { - return _packageInstallScripts = _interopRequireDefault(__webpack_require__(557)); -} - -var 
_packageCompatibility; - -function _load_packageCompatibility() { - return _packageCompatibility = _interopRequireWildcard(__webpack_require__(209)); -} - -var _packageResolver; - -function _load_packageResolver() { - return _packageResolver = _interopRequireDefault(__webpack_require__(366)); -} - -var _packageLinker; - -function _load_packageLinker() { - return _packageLinker = _interopRequireDefault(__webpack_require__(211)); -} - -var _index2; - -function _load_index2() { - return _index2 = __webpack_require__(57); -} - -var _index3; - -function _load_index3() { - return _index3 = __webpack_require__(78); -} - -var _autoclean; - -function _load_autoclean() { - return _autoclean = __webpack_require__(354); -} - -var _constants; - -function _load_constants() { - return _constants = _interopRequireWildcard(__webpack_require__(8)); -} - -var _normalizePattern; - -function _load_normalizePattern() { - return _normalizePattern = __webpack_require__(37); -} - -var _fs; - -function _load_fs() { - return _fs = _interopRequireWildcard(__webpack_require__(4)); -} - -var _map; - -function _load_map() { - return _map = _interopRequireDefault(__webpack_require__(29)); -} - -var _yarnVersion; - -function _load_yarnVersion() { - return _yarnVersion = __webpack_require__(120); -} - -var _generatePnpMap; - -function _load_generatePnpMap() { - return _generatePnpMap = __webpack_require__(579); -} - -var _workspaceLayout; - -function _load_workspaceLayout() { - return _workspaceLayout = _interopRequireDefault(__webpack_require__(90)); -} - -var _resolutionMap; - -function _load_resolutionMap() { - return _resolutionMap = _interopRequireDefault(__webpack_require__(214)); -} - -var _guessName; - -function _load_guessName() { - return _guessName = _interopRequireDefault(__webpack_require__(169)); -} - -var _audit; - -function _load_audit() { - return _audit = _interopRequireDefault(__webpack_require__(353)); -} - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const deepEqual = __webpack_require__(631); - -const emoji = __webpack_require__(302); -const invariant = __webpack_require__(9); -const path = __webpack_require__(0); -const semver = __webpack_require__(22); -const uuid = __webpack_require__(119); -const ssri = __webpack_require__(65); - -const ONE_DAY = 1000 * 60 * 60 * 24; - -/** - * Try and detect the installation method for Yarn and provide a command to update it with. 
- */ - -function getUpdateCommand(installationMethod) { - if (installationMethod === 'tar') { - return `curl --compressed -o- -L ${(_constants || _load_constants()).YARN_INSTALLER_SH} | bash`; - } - - if (installationMethod === 'homebrew') { - return 'brew upgrade yarn'; - } - - if (installationMethod === 'deb') { - return 'sudo apt-get update && sudo apt-get install yarn'; - } - - if (installationMethod === 'rpm') { - return 'sudo yum install yarn'; - } - - if (installationMethod === 'npm') { - return 'npm install --global yarn'; - } - - if (installationMethod === 'chocolatey') { - return 'choco upgrade yarn'; - } - - if (installationMethod === 'apk') { - return 'apk update && apk add -u yarn'; - } - - if (installationMethod === 'portage') { - return 'sudo emerge --sync && sudo emerge -au sys-apps/yarn'; - } - - return null; -} - -function getUpdateInstaller(installationMethod) { - // Windows - if (installationMethod === 'msi') { - return (_constants || _load_constants()).YARN_INSTALLER_MSI; - } - - return null; -} - -function normalizeFlags(config, rawFlags) { - const flags = { - // install - har: !!rawFlags.har, - ignorePlatform: !!rawFlags.ignorePlatform, - ignoreEngines: !!rawFlags.ignoreEngines, - ignoreScripts: !!rawFlags.ignoreScripts, - ignoreOptional: !!rawFlags.ignoreOptional, - force: !!rawFlags.force, - flat: !!rawFlags.flat, - lockfile: rawFlags.lockfile !== false, - pureLockfile: !!rawFlags.pureLockfile, - updateChecksums: !!rawFlags.updateChecksums, - skipIntegrityCheck: !!rawFlags.skipIntegrityCheck, - frozenLockfile: !!rawFlags.frozenLockfile, - linkDuplicates: !!rawFlags.linkDuplicates, - checkFiles: !!rawFlags.checkFiles, - audit: !!rawFlags.audit, - - // add - peer: !!rawFlags.peer, - dev: !!rawFlags.dev, - optional: !!rawFlags.optional, - exact: !!rawFlags.exact, - tilde: !!rawFlags.tilde, - ignoreWorkspaceRootCheck: !!rawFlags.ignoreWorkspaceRootCheck, - - // outdated, update-interactive - includeWorkspaceDeps: !!rawFlags.includeWorkspaceDeps, - - // add, remove, update - workspaceRootIsCwd: rawFlags.workspaceRootIsCwd !== false - }; - - if (config.getOption('ignore-scripts')) { - flags.ignoreScripts = true; - } - - if (config.getOption('ignore-platform')) { - flags.ignorePlatform = true; - } - - if (config.getOption('ignore-engines')) { - flags.ignoreEngines = true; - } - - if (config.getOption('ignore-optional')) { - flags.ignoreOptional = true; - } - - if (config.getOption('force')) { - flags.force = true; - } - - return flags; -} - -class Install { - constructor(flags, config, reporter, lockfile) { - this.rootManifestRegistries = []; - this.rootPatternsToOrigin = (0, (_map || _load_map()).default)(); - this.lockfile = lockfile; - this.reporter = reporter; - this.config = config; - this.flags = normalizeFlags(config, flags); - this.resolutions = (0, (_map || _load_map()).default)(); // Legacy resolutions field used for flat install mode - this.resolutionMap = new (_resolutionMap || _load_resolutionMap()).default(config); // Selective resolutions for nested dependencies - this.resolver = new (_packageResolver || _load_packageResolver()).default(config, lockfile, this.resolutionMap); - this.integrityChecker = new (_integrityChecker || _load_integrityChecker()).default(config); - this.linker = new (_packageLinker || _load_packageLinker()).default(config, this.resolver); - this.scripts = new (_packageInstallScripts || _load_packageInstallScripts()).default(config, this.resolver, this.flags.force); - } - - /** - * Create a list of dependency requests from the current 
directories manifests. - */ - - fetchRequestFromCwd(excludePatterns = [], ignoreUnusedPatterns = false) { - var _this = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const patterns = []; - const deps = []; - let resolutionDeps = []; - const manifest = {}; - - const ignorePatterns = []; - const usedPatterns = []; - let workspaceLayout; - - // some commands should always run in the context of the entire workspace - const cwd = _this.flags.includeWorkspaceDeps || _this.flags.workspaceRootIsCwd ? _this.config.lockfileFolder : _this.config.cwd; - - // non-workspaces are always root, otherwise check for workspace root - const cwdIsRoot = !_this.config.workspaceRootFolder || _this.config.lockfileFolder === cwd; - - // exclude package names that are in install args - const excludeNames = []; - for (var _iterator = excludePatterns, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { - var _ref; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref = _i.value; - } - - const pattern = _ref; - - if ((0, (_index3 || _load_index3()).getExoticResolver)(pattern)) { - excludeNames.push((0, (_guessName || _load_guessName()).default)(pattern)); - } else { - // extract the name - const parts = (0, (_normalizePattern || _load_normalizePattern()).normalizePattern)(pattern); - excludeNames.push(parts.name); - } - } - - const stripExcluded = function stripExcluded(manifest) { - for (var _iterator2 = excludeNames, _isArray2 = Array.isArray(_iterator2), _i2 = 0, _iterator2 = _isArray2 ? _iterator2 : _iterator2[Symbol.iterator]();;) { - var _ref2; - - if (_isArray2) { - if (_i2 >= _iterator2.length) break; - _ref2 = _iterator2[_i2++]; - } else { - _i2 = _iterator2.next(); - if (_i2.done) break; - _ref2 = _i2.value; - } - - const exclude = _ref2; - - if (manifest.dependencies && manifest.dependencies[exclude]) { - delete manifest.dependencies[exclude]; - } - if (manifest.devDependencies && manifest.devDependencies[exclude]) { - delete manifest.devDependencies[exclude]; - } - if (manifest.optionalDependencies && manifest.optionalDependencies[exclude]) { - delete manifest.optionalDependencies[exclude]; - } - } - }; - - for (var _iterator3 = Object.keys((_index2 || _load_index2()).registries), _isArray3 = Array.isArray(_iterator3), _i3 = 0, _iterator3 = _isArray3 ? _iterator3 : _iterator3[Symbol.iterator]();;) { - var _ref3; - - if (_isArray3) { - if (_i3 >= _iterator3.length) break; - _ref3 = _iterator3[_i3++]; - } else { - _i3 = _iterator3.next(); - if (_i3.done) break; - _ref3 = _i3.value; - } - - const registry = _ref3; - - const filename = (_index2 || _load_index2()).registries[registry].filename; - - const loc = path.join(cwd, filename); - if (!(yield (_fs || _load_fs()).exists(loc))) { - continue; - } - - _this.rootManifestRegistries.push(registry); - - const projectManifestJson = yield _this.config.readJson(loc); - yield (0, (_index || _load_index()).default)(projectManifestJson, cwd, _this.config, cwdIsRoot); - - Object.assign(_this.resolutions, projectManifestJson.resolutions); - Object.assign(manifest, projectManifestJson); - - _this.resolutionMap.init(_this.resolutions); - for (var _iterator4 = Object.keys(_this.resolutionMap.resolutionsByPackage), _isArray4 = Array.isArray(_iterator4), _i4 = 0, _iterator4 = _isArray4 ? 
_iterator4 : _iterator4[Symbol.iterator]();;) { - var _ref4; - - if (_isArray4) { - if (_i4 >= _iterator4.length) break; - _ref4 = _iterator4[_i4++]; - } else { - _i4 = _iterator4.next(); - if (_i4.done) break; - _ref4 = _i4.value; - } - - const packageName = _ref4; - - const optional = (_objectPath || _load_objectPath()).default.has(manifest.optionalDependencies, packageName) && _this.flags.ignoreOptional; - for (var _iterator8 = _this.resolutionMap.resolutionsByPackage[packageName], _isArray8 = Array.isArray(_iterator8), _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) { - var _ref9; - - if (_isArray8) { - if (_i8 >= _iterator8.length) break; - _ref9 = _iterator8[_i8++]; - } else { - _i8 = _iterator8.next(); - if (_i8.done) break; - _ref9 = _i8.value; - } - - const _ref8 = _ref9; - const pattern = _ref8.pattern; - - resolutionDeps = [...resolutionDeps, { registry, pattern, optional, hint: 'resolution' }]; - } - } - - const pushDeps = function pushDeps(depType, manifest, { hint, optional }, isUsed) { - if (ignoreUnusedPatterns && !isUsed) { - return; - } - // We only take unused dependencies into consideration to get deterministic hoisting. - // Since flat mode doesn't care about hoisting and everything is top level and specified then we can safely - // leave these out. - if (_this.flags.flat && !isUsed) { - return; - } - const depMap = manifest[depType]; - for (const name in depMap) { - if (excludeNames.indexOf(name) >= 0) { - continue; - } - - let pattern = name; - if (!_this.lockfile.getLocked(pattern)) { - // when we use --save we save the dependency to the lockfile with just the name rather than the - // version combo - pattern += '@' + depMap[name]; - } - - // normalization made sure packages are mentioned only once - if (isUsed) { - usedPatterns.push(pattern); - } else { - ignorePatterns.push(pattern); - } - - _this.rootPatternsToOrigin[pattern] = depType; - patterns.push(pattern); - deps.push({ pattern, registry, hint, optional, workspaceName: manifest.name, workspaceLoc: manifest._loc }); - } - }; - - if (cwdIsRoot) { - pushDeps('dependencies', projectManifestJson, { hint: null, optional: false }, true); - pushDeps('devDependencies', projectManifestJson, { hint: 'dev', optional: false }, !_this.config.production); - pushDeps('optionalDependencies', projectManifestJson, { hint: 'optional', optional: true }, true); - } - - if (_this.config.workspaceRootFolder) { - const workspaceLoc = cwdIsRoot ? loc : path.join(_this.config.lockfileFolder, filename); - const workspacesRoot = path.dirname(workspaceLoc); - - let workspaceManifestJson = projectManifestJson; - if (!cwdIsRoot) { - // the manifest we read before was a child workspace, so get the root - workspaceManifestJson = yield _this.config.readJson(workspaceLoc); - yield (0, (_index || _load_index()).default)(workspaceManifestJson, workspacesRoot, _this.config, true); - } - - const workspaces = yield _this.config.resolveWorkspaces(workspacesRoot, workspaceManifestJson); - workspaceLayout = new (_workspaceLayout || _load_workspaceLayout()).default(workspaces, _this.config); - - // add virtual manifest that depends on all workspaces, this way package hoisters and resolvers will work fine - const workspaceDependencies = (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.dependencies); - for (var _iterator5 = Object.keys(workspaces), _isArray5 = Array.isArray(_iterator5), _i5 = 0, _iterator5 = _isArray5 ? 
_iterator5 : _iterator5[Symbol.iterator]();;) { - var _ref5; - - if (_isArray5) { - if (_i5 >= _iterator5.length) break; - _ref5 = _iterator5[_i5++]; - } else { - _i5 = _iterator5.next(); - if (_i5.done) break; - _ref5 = _i5.value; - } - - const workspaceName = _ref5; - - const workspaceManifest = workspaces[workspaceName].manifest; - workspaceDependencies[workspaceName] = workspaceManifest.version; - - // include dependencies from all workspaces - if (_this.flags.includeWorkspaceDeps) { - pushDeps('dependencies', workspaceManifest, { hint: null, optional: false }, true); - pushDeps('devDependencies', workspaceManifest, { hint: 'dev', optional: false }, !_this.config.production); - pushDeps('optionalDependencies', workspaceManifest, { hint: 'optional', optional: true }, true); - } - } - const virtualDependencyManifest = { - _uid: '', - name: `workspace-aggregator-${uuid.v4()}`, - version: '1.0.0', - _registry: 'npm', - _loc: workspacesRoot, - dependencies: workspaceDependencies, - devDependencies: (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.devDependencies), - optionalDependencies: (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.optionalDependencies), - private: workspaceManifestJson.private, - workspaces: workspaceManifestJson.workspaces - }; - workspaceLayout.virtualManifestName = virtualDependencyManifest.name; - const virtualDep = {}; - virtualDep[virtualDependencyManifest.name] = virtualDependencyManifest.version; - workspaces[virtualDependencyManifest.name] = { loc: workspacesRoot, manifest: virtualDependencyManifest }; - - // ensure dependencies that should be excluded are stripped from the correct manifest - stripExcluded(cwdIsRoot ? virtualDependencyManifest : workspaces[projectManifestJson.name].manifest); - - pushDeps('workspaces', { workspaces: virtualDep }, { hint: 'workspaces', optional: false }, true); - - const implicitWorkspaceDependencies = (0, (_extends2 || _load_extends()).default)({}, workspaceDependencies); - - for (var _iterator6 = (_constants || _load_constants()).OWNED_DEPENDENCY_TYPES, _isArray6 = Array.isArray(_iterator6), _i6 = 0, _iterator6 = _isArray6 ? _iterator6 : _iterator6[Symbol.iterator]();;) { - var _ref6; - - if (_isArray6) { - if (_i6 >= _iterator6.length) break; - _ref6 = _iterator6[_i6++]; - } else { - _i6 = _iterator6.next(); - if (_i6.done) break; - _ref6 = _i6.value; - } - - const type = _ref6; - - for (var _iterator7 = Object.keys(projectManifestJson[type] || {}), _isArray7 = Array.isArray(_iterator7), _i7 = 0, _iterator7 = _isArray7 ? 
_iterator7 : _iterator7[Symbol.iterator]();;) { - var _ref7; - - if (_isArray7) { - if (_i7 >= _iterator7.length) break; - _ref7 = _iterator7[_i7++]; - } else { - _i7 = _iterator7.next(); - if (_i7.done) break; - _ref7 = _i7.value; - } - - const dependencyName = _ref7; - - delete implicitWorkspaceDependencies[dependencyName]; - } - } - - pushDeps('dependencies', { dependencies: implicitWorkspaceDependencies }, { hint: 'workspaces', optional: false }, true); - } - - break; - } - - // inherit root flat flag - if (manifest.flat) { - _this.flags.flat = true; - } - - return { - requests: [...resolutionDeps, ...deps], - patterns, - manifest, - usedPatterns, - ignorePatterns, - workspaceLayout - }; - })(); - } - - /** - * TODO description - */ - - prepareRequests(requests) { - return requests; - } - - preparePatterns(patterns) { - return patterns; - } - preparePatternsForLinking(patterns, cwdManifest, cwdIsRoot) { - return patterns; - } - - prepareManifests() { - var _this2 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const manifests = yield _this2.config.getRootManifests(); - return manifests; - })(); - } - - bailout(patterns, workspaceLayout) { - var _this3 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // We don't want to skip the audit - it could yield important errors - if (_this3.flags.audit) { - return false; - } - // PNP is so fast that the integrity check isn't pertinent - if (_this3.config.plugnplayEnabled) { - return false; - } - if (_this3.flags.skipIntegrityCheck || _this3.flags.force) { - return false; - } - const lockfileCache = _this3.lockfile.cache; - if (!lockfileCache) { - return false; - } - const lockfileClean = _this3.lockfile.parseResultType === 'success'; - const match = yield _this3.integrityChecker.check(patterns, lockfileCache, _this3.flags, workspaceLayout); - if (_this3.flags.frozenLockfile && (!lockfileClean || match.missingPatterns.length > 0)) { - throw new (_errors || _load_errors()).MessageError(_this3.reporter.lang('frozenLockfileError')); - } - - const haveLockfile = yield (_fs || _load_fs()).exists(path.join(_this3.config.lockfileFolder, (_constants || _load_constants()).LOCKFILE_FILENAME)); - - const lockfileIntegrityPresent = !_this3.lockfile.hasEntriesExistWithoutIntegrity(); - const integrityBailout = lockfileIntegrityPresent || !_this3.config.autoAddIntegrity; - - if (match.integrityMatches && haveLockfile && lockfileClean && integrityBailout) { - _this3.reporter.success(_this3.reporter.lang('upToDate')); - return true; - } - - if (match.integrityFileMissing && haveLockfile) { - // Integrity file missing, force script installations - _this3.scripts.setForce(true); - return false; - } - - if (match.hardRefreshRequired) { - // e.g. node version doesn't match, force script installations - _this3.scripts.setForce(true); - return false; - } - - if (!patterns.length && !match.integrityFileMissing) { - _this3.reporter.success(_this3.reporter.lang('nothingToInstall')); - yield _this3.createEmptyManifestFolders(); - yield _this3.saveLockfileAndIntegrity(patterns, workspaceLayout); - return true; - } - - return false; - })(); - } - - /** - * Produce empty folders for all used root manifests. 
- */ - - createEmptyManifestFolders() { - var _this4 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - if (_this4.config.modulesFolder) { - // already created - return; - } - - for (var _iterator9 = _this4.rootManifestRegistries, _isArray9 = Array.isArray(_iterator9), _i9 = 0, _iterator9 = _isArray9 ? _iterator9 : _iterator9[Symbol.iterator]();;) { - var _ref10; - - if (_isArray9) { - if (_i9 >= _iterator9.length) break; - _ref10 = _iterator9[_i9++]; - } else { - _i9 = _iterator9.next(); - if (_i9.done) break; - _ref10 = _i9.value; - } - - const registryName = _ref10; - const folder = _this4.config.registries[registryName].folder; - - yield (_fs || _load_fs()).mkdirp(path.join(_this4.config.lockfileFolder, folder)); - } - })(); - } - - /** - * TODO description - */ - - markIgnored(patterns) { - for (var _iterator10 = patterns, _isArray10 = Array.isArray(_iterator10), _i10 = 0, _iterator10 = _isArray10 ? _iterator10 : _iterator10[Symbol.iterator]();;) { - var _ref11; - - if (_isArray10) { - if (_i10 >= _iterator10.length) break; - _ref11 = _iterator10[_i10++]; - } else { - _i10 = _iterator10.next(); - if (_i10.done) break; - _ref11 = _i10.value; - } - - const pattern = _ref11; - - const manifest = this.resolver.getStrictResolvedPattern(pattern); - const ref = manifest._reference; - invariant(ref, 'expected package reference'); - - // just mark the package as ignored. if the package is used by a required package, the hoister - // will take care of that. - ref.ignore = true; - } - } - - /** - * helper method that gets only recent manifests - * used by global.ls command - */ - getFlattenedDeps() { - var _this5 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - var _ref12 = yield _this5.fetchRequestFromCwd(); - - const depRequests = _ref12.requests, - rawPatterns = _ref12.patterns; - - - yield _this5.resolver.init(depRequests, {}); - - const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this5.resolver.getManifests(), _this5.config); - _this5.resolver.updateManifests(manifests); - - return _this5.flatten(rawPatterns); - })(); - } - - /** - * TODO description - */ - - init() { - var _this6 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.checkUpdate(); - - // warn if we have a shrinkwrap - if (yield (_fs || _load_fs()).exists(path.join(_this6.config.lockfileFolder, (_constants || _load_constants()).NPM_SHRINKWRAP_FILENAME))) { - _this6.reporter.warn(_this6.reporter.lang('shrinkwrapWarning')); - } - - // warn if we have an npm lockfile - if (yield (_fs || _load_fs()).exists(path.join(_this6.config.lockfileFolder, (_constants || _load_constants()).NPM_LOCK_FILENAME))) { - _this6.reporter.warn(_this6.reporter.lang('npmLockfileWarning')); - } - - if (_this6.config.plugnplayEnabled) { - _this6.reporter.info(_this6.reporter.lang('plugnplaySuggestV2L1')); - _this6.reporter.info(_this6.reporter.lang('plugnplaySuggestV2L2')); - } - - let flattenedTopLevelPatterns = []; - const steps = []; - - var _ref13 = yield _this6.fetchRequestFromCwd(); - - const depRequests = _ref13.requests, - rawPatterns = _ref13.patterns, - ignorePatterns = _ref13.ignorePatterns, - workspaceLayout = _ref13.workspaceLayout, - manifest = _ref13.manifest; - - let topLevelPatterns = []; - - const artifacts = yield _this6.integrityChecker.getArtifacts(); - if (artifacts) { - _this6.linker.setArtifacts(artifacts); - _this6.scripts.setArtifacts(artifacts); - } - - if 
((_packageCompatibility || _load_packageCompatibility()).shouldCheck(manifest, _this6.flags)) { - steps.push((() => { - var _ref14 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { - _this6.reporter.step(curr, total, _this6.reporter.lang('checkingManifest'), emoji.get('mag')); - yield _this6.checkCompatibility(); - }); - - return function (_x, _x2) { - return _ref14.apply(this, arguments); - }; - })()); - } - - const audit = new (_audit || _load_audit()).default(_this6.config, _this6.reporter, { groups: (_constants || _load_constants()).OWNED_DEPENDENCY_TYPES }); - let auditFoundProblems = false; - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('resolveStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.reporter.step(curr, total, _this6.reporter.lang('resolvingPackages'), emoji.get('mag')); - yield _this6.resolver.init(_this6.prepareRequests(depRequests), { - isFlat: _this6.flags.flat, - isFrozen: _this6.flags.frozenLockfile, - workspaceLayout - }); - topLevelPatterns = _this6.preparePatterns(rawPatterns); - flattenedTopLevelPatterns = yield _this6.flatten(topLevelPatterns); - return { bailout: !_this6.flags.audit && (yield _this6.bailout(topLevelPatterns, workspaceLayout)) }; - })); - }); - - if (_this6.flags.audit) { - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('auditStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.reporter.step(curr, total, _this6.reporter.lang('auditRunning'), emoji.get('mag')); - if (_this6.flags.offline) { - _this6.reporter.warn(_this6.reporter.lang('auditOffline')); - return { bailout: false }; - } - const preparedManifests = yield _this6.prepareManifests(); - // $FlowFixMe - Flow considers `m` in the map operation to be "mixed", so does not recognize `m.object` - const mergedManifest = Object.assign({}, ...Object.values(preparedManifests).map(function (m) { - return m.object; - })); - const auditVulnerabilityCounts = yield audit.performAudit(mergedManifest, _this6.lockfile, _this6.resolver, _this6.linker, topLevelPatterns); - auditFoundProblems = auditVulnerabilityCounts.info || auditVulnerabilityCounts.low || auditVulnerabilityCounts.moderate || auditVulnerabilityCounts.high || auditVulnerabilityCounts.critical; - return { bailout: yield _this6.bailout(topLevelPatterns, workspaceLayout) }; - })); - }); - } - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('fetchStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.markIgnored(ignorePatterns); - _this6.reporter.step(curr, total, _this6.reporter.lang('fetchingPackages'), emoji.get('truck')); - const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this6.resolver.getManifests(), _this6.config); - _this6.resolver.updateManifests(manifests); - yield (_packageCompatibility || _load_packageCompatibility()).check(_this6.resolver.getManifests(), _this6.config, _this6.flags.ignoreEngines); - })); - }); - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('linkStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // remove integrity hash to make this operation atomic - yield _this6.integrityChecker.removeIntegrityFile(); - _this6.reporter.step(curr, total, _this6.reporter.lang('linkingDependencies'), emoji.get('link')); - 
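The `removeIntegrityFile()` call just above is the "invalidate before you mutate" half of the comment's atomicity claim: the integrity stamp is deleted before `node_modules` is touched and only rewritten after a fully successful install, so a crash mid-link can never leave a stale "up to date" marker for the `bailout()` check to trust. A minimal sketch of that idea, assuming Node >= 14.14 for `fs.promises.rm`; the stamp path and helper name are illustrative, not yarn's real layout:

```js
'use strict';
const fs = require('fs');

// Illustrative stamp path, not yarn's real integrity-file location.
const STAMP = '.integrity-stamp';

async function withInvalidation(mutate) {
  // Invalidate first, so a crash mid-mutation leaves no stale "ok" marker.
  await fs.promises.rm(STAMP, { force: true }); // force: ignore a missing file
  await mutate();
  // Re-stamp only after the risky work fully succeeded.
  await fs.promises.writeFile(STAMP, String(Date.now()));
}

// Usage: a later run can trust the stamp only if the previous run finished.
withInvalidation(async () => console.log('relinking node_modules...'))
  .then(() => console.log('stamped'));
```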
flattenedTopLevelPatterns = _this6.preparePatternsForLinking(flattenedTopLevelPatterns, manifest, _this6.config.lockfileFolder === _this6.config.cwd); - yield _this6.linker.init(flattenedTopLevelPatterns, workspaceLayout, { - linkDuplicates: _this6.flags.linkDuplicates, - ignoreOptional: _this6.flags.ignoreOptional - }); - })); - }); - - if (_this6.config.plugnplayEnabled) { - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('pnpStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const pnpPath = `${_this6.config.lockfileFolder}/${(_constants || _load_constants()).PNP_FILENAME}`; - - const code = yield (0, (_generatePnpMap || _load_generatePnpMap()).generatePnpMap)(_this6.config, flattenedTopLevelPatterns, { - resolver: _this6.resolver, - reporter: _this6.reporter, - targetPath: pnpPath, - workspaceLayout - }); - - try { - const file = yield (_fs || _load_fs()).readFile(pnpPath); - if (file === code) { - return; - } - } catch (error) {} - - yield (_fs || _load_fs()).writeFile(pnpPath, code); - yield (_fs || _load_fs()).chmod(pnpPath, 0o755); - })); - }); - } - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('buildStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.reporter.step(curr, total, _this6.flags.force ? _this6.reporter.lang('rebuildingPackages') : _this6.reporter.lang('buildingFreshPackages'), emoji.get('hammer')); - - if (_this6.config.ignoreScripts) { - _this6.reporter.warn(_this6.reporter.lang('ignoredScripts')); - } else { - yield _this6.scripts.init(flattenedTopLevelPatterns); - } - })); - }); - - if (_this6.flags.har) { - steps.push((() => { - var _ref21 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { - const formattedDate = new Date().toISOString().replace(/:/g, '-'); - const filename = `yarn-install_${formattedDate}.har`; - _this6.reporter.step(curr, total, _this6.reporter.lang('savingHar', filename), emoji.get('black_circle_for_record')); - yield _this6.config.requestManager.saveHar(filename); - }); - - return function (_x3, _x4) { - return _ref21.apply(this, arguments); - }; - })()); - } - - if (yield _this6.shouldClean()) { - steps.push((() => { - var _ref22 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { - _this6.reporter.step(curr, total, _this6.reporter.lang('cleaningModules'), emoji.get('recycle')); - yield (0, (_autoclean || _load_autoclean()).clean)(_this6.config, _this6.reporter); - }); - - return function (_x5, _x6) { - return _ref22.apply(this, arguments); - }; - })()); - } - - let currentStep = 0; - for (var _iterator11 = steps, _isArray11 = Array.isArray(_iterator11), _i11 = 0, _iterator11 = _isArray11 ? _iterator11 : _iterator11[Symbol.iterator]();;) { - var _ref23; - - if (_isArray11) { - if (_i11 >= _iterator11.length) break; - _ref23 = _iterator11[_i11++]; - } else { - _i11 = _iterator11.next(); - if (_i11.done) break; - _ref23 = _i11.value; - } - - const step = _ref23; - - const stepResult = yield step(++currentStep, steps.length); - if (stepResult && stepResult.bailout) { - if (_this6.flags.audit) { - audit.summary(); - } - if (auditFoundProblems) { - _this6.reporter.warn(_this6.reporter.lang('auditRunAuditForDetails')); - } - _this6.maybeOutputUpdate(); - return flattenedTopLevelPatterns; - } - } - - // fin! 
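The loop above is what drives the whole install: each queued step is an async function receiving `(curr, total)` for progress reporting, and any step may return `{ bailout: true }` to short-circuit the remaining pipeline (which is how the integrity check skips work when everything is already up to date). A minimal sketch of that runner pattern, with illustrative names and plain async/await standing in for the bundle's compiled generator plumbing:

```js
'use strict';

// Run steps in order; a step may return { bailout: true } to stop early,
// as the compiled loop above does after the resolve/audit steps.
async function runSteps(steps) {
  let current = 0;
  for (const step of steps) {
    const result = await step(++current, steps.length);
    if (result && result.bailout) return false; // short-circuit the pipeline
  }
  return true;
}

runSteps([
  async (curr, total) => console.log(`[${curr}/${total}] resolving packages`),
  async (curr, total) => ({ bailout: false }),
  async (curr, total) => console.log(`[${curr}/${total}] linking dependencies`),
]).then(done => console.log(done ? 'finished' : 'bailed out early'));
```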
- if (_this6.flags.audit) { - audit.summary(); - } - if (auditFoundProblems) { - _this6.reporter.warn(_this6.reporter.lang('auditRunAuditForDetails')); - } - yield _this6.saveLockfileAndIntegrity(topLevelPatterns, workspaceLayout); - yield _this6.persistChanges(); - _this6.maybeOutputUpdate(); - _this6.config.requestManager.clearCache(); - return flattenedTopLevelPatterns; - })(); - } - - checkCompatibility() { - var _this7 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - var _ref24 = yield _this7.fetchRequestFromCwd(); - - const manifest = _ref24.manifest; - - yield (_packageCompatibility || _load_packageCompatibility()).checkOne(manifest, _this7.config, _this7.flags.ignoreEngines); - })(); - } - - persistChanges() { - var _this8 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // get all the different registry manifests in this folder - const manifests = yield _this8.config.getRootManifests(); - - if (yield _this8.applyChanges(manifests)) { - yield _this8.config.saveRootManifests(manifests); - } - })(); - } - - applyChanges(manifests) { - let hasChanged = false; - - if (this.config.plugnplayPersist) { - const object = manifests.npm.object; - - - if (typeof object.installConfig !== 'object') { - object.installConfig = {}; - } - - if (this.config.plugnplayEnabled && object.installConfig.pnp !== true) { - object.installConfig.pnp = true; - hasChanged = true; - } else if (!this.config.plugnplayEnabled && typeof object.installConfig.pnp !== 'undefined') { - delete object.installConfig.pnp; - hasChanged = true; - } - - if (Object.keys(object.installConfig).length === 0) { - delete object.installConfig; - } - } - - return Promise.resolve(hasChanged); - } - - /** - * Check if we should run the cleaning step. - */ - - shouldClean() { - return (_fs || _load_fs()).exists(path.join(this.config.lockfileFolder, (_constants || _load_constants()).CLEAN_FILENAME)); - } - - /** - * TODO - */ - - flatten(patterns) { - var _this9 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - if (!_this9.flags.flat) { - return patterns; - } - - const flattenedPatterns = []; - - for (var _iterator12 = _this9.resolver.getAllDependencyNamesByLevelOrder(patterns), _isArray12 = Array.isArray(_iterator12), _i12 = 0, _iterator12 = _isArray12 ? 
_iterator12 : _iterator12[Symbol.iterator]();;) { - var _ref25; - - if (_isArray12) { - if (_i12 >= _iterator12.length) break; - _ref25 = _iterator12[_i12++]; - } else { - _i12 = _iterator12.next(); - if (_i12.done) break; - _ref25 = _i12.value; - } - - const name = _ref25; - - const infos = _this9.resolver.getAllInfoForPackageName(name).filter(function (manifest) { - const ref = manifest._reference; - invariant(ref, 'expected package reference'); - return !ref.ignore; - }); - - if (infos.length === 0) { - continue; - } - - if (infos.length === 1) { - // single version of this package - // take out a single pattern as multiple patterns may have resolved to this package - flattenedPatterns.push(_this9.resolver.patternsByPackage[name][0]); - continue; - } - - const options = infos.map(function (info) { - const ref = info._reference; - invariant(ref, 'expected reference'); - return { - // TODO `and is required by {PARENT}`, - name: _this9.reporter.lang('manualVersionResolutionOption', ref.patterns.join(', '), info.version), - - value: info.version - }; - }); - const versions = infos.map(function (info) { - return info.version; - }); - let version; - - const resolutionVersion = _this9.resolutions[name]; - if (resolutionVersion && versions.indexOf(resolutionVersion) >= 0) { - // use json `resolution` version - version = resolutionVersion; - } else { - version = yield _this9.reporter.select(_this9.reporter.lang('manualVersionResolution', name), _this9.reporter.lang('answer'), options); - _this9.resolutions[name] = version; - } - - flattenedPatterns.push(_this9.resolver.collapseAllVersionsOfPackage(name, version)); - } - - // save resolutions to their appropriate root manifest - if (Object.keys(_this9.resolutions).length) { - const manifests = yield _this9.config.getRootManifests(); - - for (const name in _this9.resolutions) { - const version = _this9.resolutions[name]; - - const patterns = _this9.resolver.patternsByPackage[name]; - if (!patterns) { - continue; - } - - let manifest; - for (var _iterator13 = patterns, _isArray13 = Array.isArray(_iterator13), _i13 = 0, _iterator13 = _isArray13 ? 
_iterator13 : _iterator13[Symbol.iterator]();;) { - var _ref26; - - if (_isArray13) { - if (_i13 >= _iterator13.length) break; - _ref26 = _iterator13[_i13++]; - } else { - _i13 = _iterator13.next(); - if (_i13.done) break; - _ref26 = _i13.value; - } - - const pattern = _ref26; - - manifest = _this9.resolver.getResolvedPattern(pattern); - if (manifest) { - break; - } - } - invariant(manifest, 'expected manifest'); - - const ref = manifest._reference; - invariant(ref, 'expected reference'); - - const object = manifests[ref.registry].object; - object.resolutions = object.resolutions || {}; - object.resolutions[name] = version; - } - - yield _this9.config.saveRootManifests(manifests); - } - - return flattenedPatterns; - })(); - } - - /** - * Remove offline tarballs that are no longer required - */ - - pruneOfflineMirror(lockfile) { - var _this10 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const mirror = _this10.config.getOfflineMirrorPath(); - if (!mirror) { - return; - } - - const requiredTarballs = new Set(); - for (const dependency in lockfile) { - const resolved = lockfile[dependency].resolved; - if (resolved) { - const basename = path.basename(resolved.split('#')[0]); - if (dependency[0] === '@' && basename[0] !== '@') { - requiredTarballs.add(`${dependency.split('/')[0]}-${basename}`); - } - requiredTarballs.add(basename); - } - } - - const mirrorFiles = yield (_fs || _load_fs()).walk(mirror); - for (var _iterator14 = mirrorFiles, _isArray14 = Array.isArray(_iterator14), _i14 = 0, _iterator14 = _isArray14 ? _iterator14 : _iterator14[Symbol.iterator]();;) { - var _ref27; - - if (_isArray14) { - if (_i14 >= _iterator14.length) break; - _ref27 = _iterator14[_i14++]; - } else { - _i14 = _iterator14.next(); - if (_i14.done) break; - _ref27 = _i14.value; - } - - const file = _ref27; - - const isTarball = path.extname(file.basename) === '.tgz'; - // if using experimental-pack-script-packages-in-mirror flag, don't unlink prebuilt packages - const hasPrebuiltPackage = file.relative.startsWith('prebuilt/'); - if (isTarball && !hasPrebuiltPackage && !requiredTarballs.has(file.basename)) { - yield (_fs || _load_fs()).unlink(file.absolute); - } - } - })(); - } - - /** - * Save updated integrity and lockfiles. 
- */ - - saveLockfileAndIntegrity(patterns, workspaceLayout) { - var _this11 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const resolvedPatterns = {}; - Object.keys(_this11.resolver.patterns).forEach(function (pattern) { - if (!workspaceLayout || !workspaceLayout.getManifestByPattern(pattern)) { - resolvedPatterns[pattern] = _this11.resolver.patterns[pattern]; - } - }); - - // TODO this code is duplicated in a few places, need a common way to filter out workspace patterns from lockfile - patterns = patterns.filter(function (p) { - return !workspaceLayout || !workspaceLayout.getManifestByPattern(p); - }); - - const lockfileBasedOnResolver = _this11.lockfile.getLockfile(resolvedPatterns); - - if (_this11.config.pruneOfflineMirror) { - yield _this11.pruneOfflineMirror(lockfileBasedOnResolver); - } - - // write integrity hash - if (!_this11.config.plugnplayEnabled) { - yield _this11.integrityChecker.save(patterns, lockfileBasedOnResolver, _this11.flags, workspaceLayout, _this11.scripts.getArtifacts()); - } - - // --no-lockfile or --pure-lockfile or --frozen-lockfile - if (_this11.flags.lockfile === false || _this11.flags.pureLockfile || _this11.flags.frozenLockfile) { - return; - } - - const lockFileHasAllPatterns = patterns.every(function (p) { - return _this11.lockfile.getLocked(p); - }); - const lockfilePatternsMatch = Object.keys(_this11.lockfile.cache || {}).every(function (p) { - return lockfileBasedOnResolver[p]; - }); - const resolverPatternsAreSameAsInLockfile = Object.keys(lockfileBasedOnResolver).every(function (pattern) { - const manifest = _this11.lockfile.getLocked(pattern); - return manifest && manifest.resolved === lockfileBasedOnResolver[pattern].resolved && deepEqual(manifest.prebuiltVariants, lockfileBasedOnResolver[pattern].prebuiltVariants); - }); - const integrityPatternsAreSameAsInLockfile = Object.keys(lockfileBasedOnResolver).every(function (pattern) { - const existingIntegrityInfo = lockfileBasedOnResolver[pattern].integrity; - if (!existingIntegrityInfo) { - // if this entry does not have an integrity, no need to re-write the lockfile because of it - return true; - } - const manifest = _this11.lockfile.getLocked(pattern); - if (manifest && manifest.integrity) { - const manifestIntegrity = ssri.stringify(manifest.integrity); - return manifestIntegrity === existingIntegrityInfo; - } - return false; - }); - - // remove command is followed by install with force, lockfile will be rewritten in any case then - if (!_this11.flags.force && _this11.lockfile.parseResultType === 'success' && lockFileHasAllPatterns && lockfilePatternsMatch && resolverPatternsAreSameAsInLockfile && integrityPatternsAreSameAsInLockfile && patterns.length) { - return; - } - - // build lockfile location - const loc = path.join(_this11.config.lockfileFolder, (_constants || _load_constants()).LOCKFILE_FILENAME); - - // write lockfile - const lockSource = (0, (_lockfile2 || _load_lockfile2()).stringify)(lockfileBasedOnResolver, false, _this11.config.enableLockfileVersions); - yield (_fs || _load_fs()).writeFilePreservingEol(loc, lockSource); - - _this11._logSuccessSaveLockfile(); - })(); - } - - _logSuccessSaveLockfile() { - this.reporter.success(this.reporter.lang('savedLockfile')); - } - - /** - * Load the dependency graph of the current install. Only does package resolving and wont write to the cwd. 
- */ - hydrate(ignoreUnusedPatterns) { - var _this12 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const request = yield _this12.fetchRequestFromCwd([], ignoreUnusedPatterns); - const depRequests = request.requests, - rawPatterns = request.patterns, - ignorePatterns = request.ignorePatterns, - workspaceLayout = request.workspaceLayout; - - - yield _this12.resolver.init(depRequests, { - isFlat: _this12.flags.flat, - isFrozen: _this12.flags.frozenLockfile, - workspaceLayout - }); - yield _this12.flatten(rawPatterns); - _this12.markIgnored(ignorePatterns); - - // fetch packages, should hit cache most of the time - const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this12.resolver.getManifests(), _this12.config); - _this12.resolver.updateManifests(manifests); - yield (_packageCompatibility || _load_packageCompatibility()).check(_this12.resolver.getManifests(), _this12.config, _this12.flags.ignoreEngines); - - // expand minimal manifests - for (var _iterator15 = _this12.resolver.getManifests(), _isArray15 = Array.isArray(_iterator15), _i15 = 0, _iterator15 = _isArray15 ? _iterator15 : _iterator15[Symbol.iterator]();;) { - var _ref28; - - if (_isArray15) { - if (_i15 >= _iterator15.length) break; - _ref28 = _iterator15[_i15++]; - } else { - _i15 = _iterator15.next(); - if (_i15.done) break; - _ref28 = _i15.value; - } - - const manifest = _ref28; - - const ref = manifest._reference; - invariant(ref, 'expected reference'); - const type = ref.remote.type; - // link specifier won't ever hit cache - - let loc = ''; - if (type === 'link') { - continue; - } else if (type === 'workspace') { - if (!ref.remote.reference) { - continue; - } - loc = ref.remote.reference; - } else { - loc = _this12.config.generateModuleCachePath(ref); - } - const newPkg = yield _this12.config.readManifest(loc); - yield _this12.resolver.updateManifest(ref, newPkg); - } - - return request; - })(); - } - - /** - * Check for updates every day and output a nag message if there's a newer version. 
- */ - - checkUpdate() { - if (this.config.nonInteractive) { - // don't show upgrade dialog on CI or non-TTY terminals - return; - } - - // don't check if disabled - if (this.config.getOption('disable-self-update-check')) { - return; - } - - // only check for updates once a day - const lastUpdateCheck = Number(this.config.getOption('lastUpdateCheck')) || 0; - if (lastUpdateCheck && Date.now() - lastUpdateCheck < ONE_DAY) { - return; - } - - // don't bug for updates on tagged releases - if ((_yarnVersion || _load_yarnVersion()).version.indexOf('-') >= 0) { - return; - } - - this._checkUpdate().catch(() => { - // swallow errors - }); - } - - _checkUpdate() { - var _this13 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - let latestVersion = yield _this13.config.requestManager.request({ - url: (_constants || _load_constants()).SELF_UPDATE_VERSION_URL - }); - invariant(typeof latestVersion === 'string', 'expected string'); - latestVersion = latestVersion.trim(); - if (!semver.valid(latestVersion)) { - return; - } - - // ensure we only check for updates periodically - _this13.config.registries.yarn.saveHomeConfig({ - lastUpdateCheck: Date.now() - }); - - if (semver.gt(latestVersion, (_yarnVersion || _load_yarnVersion()).version)) { - const installationMethod = yield (0, (_yarnVersion || _load_yarnVersion()).getInstallationMethod)(); - _this13.maybeOutputUpdate = function () { - _this13.reporter.warn(_this13.reporter.lang('yarnOutdated', latestVersion, (_yarnVersion || _load_yarnVersion()).version)); - - const command = getUpdateCommand(installationMethod); - if (command) { - _this13.reporter.info(_this13.reporter.lang('yarnOutdatedCommand')); - _this13.reporter.command(command); - } else { - const installer = getUpdateInstaller(installationMethod); - if (installer) { - _this13.reporter.info(_this13.reporter.lang('yarnOutdatedInstaller', installer)); - } - } - }; - } - })(); - } - - /** - * Method to override with a possible upgrade message. 
- */ - - maybeOutputUpdate() {} -} - -exports.Install = Install; -function hasWrapper(commander, args) { - return true; -} - -function setFlags(commander) { - commander.description('Yarn install is used to install all dependencies for a project.'); - commander.usage('install [flags]'); - commander.option('-A, --audit', 'Run vulnerability audit on installed packages'); - commander.option('-g, --global', 'DEPRECATED'); - commander.option('-S, --save', 'DEPRECATED - save package to your `dependencies`'); - commander.option('-D, --save-dev', 'DEPRECATED - save package to your `devDependencies`'); - commander.option('-P, --save-peer', 'DEPRECATED - save package to your `peerDependencies`'); - commander.option('-O, --save-optional', 'DEPRECATED - save package to your `optionalDependencies`'); - commander.option('-E, --save-exact', 'DEPRECATED'); - commander.option('-T, --save-tilde', 'DEPRECATED'); -} - -/***/ }), -/* 35 */ -/***/ (function(module, exports, __webpack_require__) { - -var isObject = __webpack_require__(52); -module.exports = function (it) { - if (!isObject(it)) throw TypeError(it + ' is not an object!'); - return it; -}; - - -/***/ }), -/* 36 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return SubjectSubscriber; }); -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subject; }); -/* unused harmony export AnonymousSubject */ -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__Observable__ = __webpack_require__(12); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__Subscriber__ = __webpack_require__(7); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__Subscription__ = __webpack_require__(25); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__ = __webpack_require__(189); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__SubjectSubscription__ = __webpack_require__(422); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__internal_symbol_rxSubscriber__ = __webpack_require__(321); -/** PURE_IMPORTS_START tslib,_Observable,_Subscriber,_Subscription,_util_ObjectUnsubscribedError,_SubjectSubscription,_internal_symbol_rxSubscriber PURE_IMPORTS_END */ - - - - - - - -var SubjectSubscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](SubjectSubscriber, _super); - function SubjectSubscriber(destination) { - var _this = _super.call(this, destination) || this; - _this.destination = destination; - return _this; - } - return SubjectSubscriber; -}(__WEBPACK_IMPORTED_MODULE_2__Subscriber__["a" /* Subscriber */])); - -var Subject = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](Subject, _super); - function Subject() { - var _this = _super.call(this) || this; - _this.observers = []; - _this.closed = false; - _this.isStopped = false; - _this.hasError = false; - _this.thrownError = null; - return _this; - } - Subject.prototype[__WEBPACK_IMPORTED_MODULE_6__internal_symbol_rxSubscriber__["a" /* rxSubscriber */]] = function () { - return new SubjectSubscriber(this); - }; - Subject.prototype.lift = function (operator) { - var subject = new AnonymousSubject(this, this); - subject.operator = operator; - return subject; - }; - Subject.prototype.next = function (value) { - if (this.closed) { 
- throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - if (!this.isStopped) { - var observers = this.observers; - var len = observers.length; - var copy = observers.slice(); - for (var i = 0; i < len; i++) { - copy[i].next(value); - } - } - }; - Subject.prototype.error = function (err) { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - this.hasError = true; - this.thrownError = err; - this.isStopped = true; - var observers = this.observers; - var len = observers.length; - var copy = observers.slice(); - for (var i = 0; i < len; i++) { - copy[i].error(err); - } - this.observers.length = 0; - }; - Subject.prototype.complete = function () { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - this.isStopped = true; - var observers = this.observers; - var len = observers.length; - var copy = observers.slice(); - for (var i = 0; i < len; i++) { - copy[i].complete(); - } - this.observers.length = 0; - }; - Subject.prototype.unsubscribe = function () { - this.isStopped = true; - this.closed = true; - this.observers = null; - }; - Subject.prototype._trySubscribe = function (subscriber) { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - else { - return _super.prototype._trySubscribe.call(this, subscriber); - } - }; - Subject.prototype._subscribe = function (subscriber) { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - else if (this.hasError) { - subscriber.error(this.thrownError); - return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; - } - else if (this.isStopped) { - subscriber.complete(); - return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; - } - else { - this.observers.push(subscriber); - return new __WEBPACK_IMPORTED_MODULE_5__SubjectSubscription__["a" /* SubjectSubscription */](this, subscriber); - } - }; - Subject.prototype.asObservable = function () { - var observable = new __WEBPACK_IMPORTED_MODULE_1__Observable__["a" /* Observable */](); - observable.source = this; - return observable; - }; - Subject.create = function (destination, source) { - return new AnonymousSubject(destination, source); - }; - return Subject; -}(__WEBPACK_IMPORTED_MODULE_1__Observable__["a" /* Observable */])); - -var AnonymousSubject = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](AnonymousSubject, _super); - function AnonymousSubject(destination, source) { - var _this = _super.call(this) || this; - _this.destination = destination; - _this.source = source; - return _this; - } - AnonymousSubject.prototype.next = function (value) { - var destination = this.destination; - if (destination && destination.next) { - destination.next(value); - } - }; - AnonymousSubject.prototype.error = function (err) { - var destination = this.destination; - if (destination && destination.error) { - this.destination.error(err); - } - }; - AnonymousSubject.prototype.complete = function () { - var destination = this.destination; - if (destination && destination.complete) { - this.destination.complete(); - } - }; - AnonymousSubject.prototype._subscribe = function (subscriber) { - var source = this.source; - if (source) { - return 
this.source.subscribe(subscriber); - } - else { - return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; - } - }; - return AnonymousSubject; -}(Subject)); - -//# sourceMappingURL=Subject.js.map - - -/***/ }), -/* 37 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.normalizePattern = normalizePattern; - -/** - * Explode and normalize a pattern into its name and range. - */ - -function normalizePattern(pattern) { - let hasVersion = false; - let range = 'latest'; - let name = pattern; - - // if we're a scope then remove the @ and add it back later - let isScoped = false; - if (name[0] === '@') { - isScoped = true; - name = name.slice(1); - } - - // take first part as the name - const parts = name.split('@'); - if (parts.length > 1) { - name = parts.shift(); - range = parts.join('@'); - - if (range) { - hasVersion = true; - } else { - range = '*'; - } - } - - // add back @ scope suffix - if (isScoped) { - name = `@${name}`; - } - - return { name, range, hasVersion }; -} - -/***/ }), -/* 38 */ -/***/ (function(module, exports, __webpack_require__) { - -/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;/** - * @license - * Lodash - * Copyright JS Foundation and other contributors - * Released under MIT license - * Based on Underscore.js 1.8.3 - * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors - */ -;(function() { - - /** Used as a safe reference for `undefined` in pre-ES5 environments. */ - var undefined; - - /** Used as the semantic version number. */ - var VERSION = '4.17.10'; - - /** Used as the size to enable large array optimizations. */ - var LARGE_ARRAY_SIZE = 200; - - /** Error message constants. */ - var CORE_ERROR_TEXT = 'Unsupported core-js use. Try https://npms.io/search?q=ponyfill.', - FUNC_ERROR_TEXT = 'Expected a function'; - - /** Used to stand-in for `undefined` hash values. */ - var HASH_UNDEFINED = '__lodash_hash_undefined__'; - - /** Used as the maximum memoize cache size. */ - var MAX_MEMOIZE_SIZE = 500; - - /** Used as the internal argument placeholder. */ - var PLACEHOLDER = '__lodash_placeholder__'; - - /** Used to compose bitmasks for cloning. */ - var CLONE_DEEP_FLAG = 1, - CLONE_FLAT_FLAG = 2, - CLONE_SYMBOLS_FLAG = 4; - - /** Used to compose bitmasks for value comparisons. */ - var COMPARE_PARTIAL_FLAG = 1, - COMPARE_UNORDERED_FLAG = 2; - - /** Used to compose bitmasks for function metadata. */ - var WRAP_BIND_FLAG = 1, - WRAP_BIND_KEY_FLAG = 2, - WRAP_CURRY_BOUND_FLAG = 4, - WRAP_CURRY_FLAG = 8, - WRAP_CURRY_RIGHT_FLAG = 16, - WRAP_PARTIAL_FLAG = 32, - WRAP_PARTIAL_RIGHT_FLAG = 64, - WRAP_ARY_FLAG = 128, - WRAP_REARG_FLAG = 256, - WRAP_FLIP_FLAG = 512; - - /** Used as default options for `_.truncate`. */ - var DEFAULT_TRUNC_LENGTH = 30, - DEFAULT_TRUNC_OMISSION = '...'; - - /** Used to detect hot functions by number of calls within a span of milliseconds. */ - var HOT_COUNT = 800, - HOT_SPAN = 16; - - /** Used to indicate the type of lazy iteratees. */ - var LAZY_FILTER_FLAG = 1, - LAZY_MAP_FLAG = 2, - LAZY_WHILE_FLAG = 3; - - /** Used as references for various `Number` constants. */ - var INFINITY = 1 / 0, - MAX_SAFE_INTEGER = 9007199254740991, - MAX_INTEGER = 1.7976931348623157e+308, - NAN = 0 / 0; - - /** Used as references for the maximum length and index of an array. 
*/ - var MAX_ARRAY_LENGTH = 4294967295, - MAX_ARRAY_INDEX = MAX_ARRAY_LENGTH - 1, - HALF_MAX_ARRAY_LENGTH = MAX_ARRAY_LENGTH >>> 1; - - /** Used to associate wrap methods with their bit flags. */ - var wrapFlags = [ - ['ary', WRAP_ARY_FLAG], - ['bind', WRAP_BIND_FLAG], - ['bindKey', WRAP_BIND_KEY_FLAG], - ['curry', WRAP_CURRY_FLAG], - ['curryRight', WRAP_CURRY_RIGHT_FLAG], - ['flip', WRAP_FLIP_FLAG], - ['partial', WRAP_PARTIAL_FLAG], - ['partialRight', WRAP_PARTIAL_RIGHT_FLAG], - ['rearg', WRAP_REARG_FLAG] - ]; - - /** `Object#toString` result references. */ - var argsTag = '[object Arguments]', - arrayTag = '[object Array]', - asyncTag = '[object AsyncFunction]', - boolTag = '[object Boolean]', - dateTag = '[object Date]', - domExcTag = '[object DOMException]', - errorTag = '[object Error]', - funcTag = '[object Function]', - genTag = '[object GeneratorFunction]', - mapTag = '[object Map]', - numberTag = '[object Number]', - nullTag = '[object Null]', - objectTag = '[object Object]', - promiseTag = '[object Promise]', - proxyTag = '[object Proxy]', - regexpTag = '[object RegExp]', - setTag = '[object Set]', - stringTag = '[object String]', - symbolTag = '[object Symbol]', - undefinedTag = '[object Undefined]', - weakMapTag = '[object WeakMap]', - weakSetTag = '[object WeakSet]'; - - var arrayBufferTag = '[object ArrayBuffer]', - dataViewTag = '[object DataView]', - float32Tag = '[object Float32Array]', - float64Tag = '[object Float64Array]', - int8Tag = '[object Int8Array]', - int16Tag = '[object Int16Array]', - int32Tag = '[object Int32Array]', - uint8Tag = '[object Uint8Array]', - uint8ClampedTag = '[object Uint8ClampedArray]', - uint16Tag = '[object Uint16Array]', - uint32Tag = '[object Uint32Array]'; - - /** Used to match empty string literals in compiled template source. */ - var reEmptyStringLeading = /\b__p \+= '';/g, - reEmptyStringMiddle = /\b(__p \+=) '' \+/g, - reEmptyStringTrailing = /(__e\(.*?\)|\b__t\)) \+\n'';/g; - - /** Used to match HTML entities and HTML characters. */ - var reEscapedHtml = /&(?:amp|lt|gt|quot|#39);/g, - reUnescapedHtml = /[&<>"']/g, - reHasEscapedHtml = RegExp(reEscapedHtml.source), - reHasUnescapedHtml = RegExp(reUnescapedHtml.source); - - /** Used to match template delimiters. */ - var reEscape = /<%-([\s\S]+?)%>/g, - reEvaluate = /<%([\s\S]+?)%>/g, - reInterpolate = /<%=([\s\S]+?)%>/g; - - /** Used to match property names within property paths. */ - var reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/, - reIsPlainProp = /^\w*$/, - rePropName = /[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g; - - /** - * Used to match `RegExp` - * [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns). - */ - var reRegExpChar = /[\\^$.*+?()[\]{}|]/g, - reHasRegExpChar = RegExp(reRegExpChar.source); - - /** Used to match leading and trailing whitespace. */ - var reTrim = /^\s+|\s+$/g, - reTrimStart = /^\s+/, - reTrimEnd = /\s+$/; - - /** Used to match wrap detail comments. */ - var reWrapComment = /\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/, - reWrapDetails = /\{\n\/\* \[wrapped with (.+)\] \*/, - reSplitDetails = /,? & /; - - /** Used to match words composed of alphanumeric characters. */ - var reAsciiWord = /[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g; - - /** Used to match backslashes in property paths. */ - var reEscapeChar = /\\(\\)?/g; - - /** - * Used to match - * [ES template delimiters](http://ecma-international.org/ecma-262/7.0/#sec-template-literal-lexical-components). 
- */ - var reEsTemplate = /\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g; - - /** Used to match `RegExp` flags from their coerced string values. */ - var reFlags = /\w*$/; - - /** Used to detect bad signed hexadecimal string values. */ - var reIsBadHex = /^[-+]0x[0-9a-f]+$/i; - - /** Used to detect binary string values. */ - var reIsBinary = /^0b[01]+$/i; - - /** Used to detect host constructors (Safari). */ - var reIsHostCtor = /^\[object .+?Constructor\]$/; - - /** Used to detect octal string values. */ - var reIsOctal = /^0o[0-7]+$/i; - - /** Used to detect unsigned integer values. */ - var reIsUint = /^(?:0|[1-9]\d*)$/; - - /** Used to match Latin Unicode letters (excluding mathematical operators). */ - var reLatin = /[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g; - - /** Used to ensure capturing order of template delimiters. */ - var reNoMatch = /($^)/; - - /** Used to match unescaped characters in compiled string literals. */ - var reUnescapedString = /['\n\r\u2028\u2029\\]/g; - - /** Used to compose unicode character classes. */ - var rsAstralRange = '\\ud800-\\udfff', - rsComboMarksRange = '\\u0300-\\u036f', - reComboHalfMarksRange = '\\ufe20-\\ufe2f', - rsComboSymbolsRange = '\\u20d0-\\u20ff', - rsComboRange = rsComboMarksRange + reComboHalfMarksRange + rsComboSymbolsRange, - rsDingbatRange = '\\u2700-\\u27bf', - rsLowerRange = 'a-z\\xdf-\\xf6\\xf8-\\xff', - rsMathOpRange = '\\xac\\xb1\\xd7\\xf7', - rsNonCharRange = '\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf', - rsPunctuationRange = '\\u2000-\\u206f', - rsSpaceRange = ' \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000', - rsUpperRange = 'A-Z\\xc0-\\xd6\\xd8-\\xde', - rsVarRange = '\\ufe0e\\ufe0f', - rsBreakRange = rsMathOpRange + rsNonCharRange + rsPunctuationRange + rsSpaceRange; - - /** Used to compose unicode capture groups. */ - var rsApos = "['\u2019]", - rsAstral = '[' + rsAstralRange + ']', - rsBreak = '[' + rsBreakRange + ']', - rsCombo = '[' + rsComboRange + ']', - rsDigits = '\\d+', - rsDingbat = '[' + rsDingbatRange + ']', - rsLower = '[' + rsLowerRange + ']', - rsMisc = '[^' + rsAstralRange + rsBreakRange + rsDigits + rsDingbatRange + rsLowerRange + rsUpperRange + ']', - rsFitz = '\\ud83c[\\udffb-\\udfff]', - rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')', - rsNonAstral = '[^' + rsAstralRange + ']', - rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}', - rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]', - rsUpper = '[' + rsUpperRange + ']', - rsZWJ = '\\u200d'; - - /** Used to compose unicode regexes. */ - var rsMiscLower = '(?:' + rsLower + '|' + rsMisc + ')', - rsMiscUpper = '(?:' + rsUpper + '|' + rsMisc + ')', - rsOptContrLower = '(?:' + rsApos + '(?:d|ll|m|re|s|t|ve))?', - rsOptContrUpper = '(?:' + rsApos + '(?:D|LL|M|RE|S|T|VE))?', - reOptMod = rsModifier + '?', - rsOptVar = '[' + rsVarRange + ']?', - rsOptJoin = '(?:' + rsZWJ + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*', - rsOrdLower = '\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])', - rsOrdUpper = '\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])', - rsSeq = rsOptVar + reOptMod + rsOptJoin, - rsEmoji = '(?:' + [rsDingbat, rsRegional, rsSurrPair].join('|') + ')' + rsSeq, - rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')'; - - /** Used to match apostrophes. 
*/ - var reApos = RegExp(rsApos, 'g'); - - /** - * Used to match [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks) and - * [combining diacritical marks for symbols](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks_for_Symbols). - */ - var reComboMark = RegExp(rsCombo, 'g'); - - /** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). */ - var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g'); - - /** Used to match complex or compound words. */ - var reUnicodeWord = RegExp([ - rsUpper + '?' + rsLower + '+' + rsOptContrLower + '(?=' + [rsBreak, rsUpper, '$'].join('|') + ')', - rsMiscUpper + '+' + rsOptContrUpper + '(?=' + [rsBreak, rsUpper + rsMiscLower, '$'].join('|') + ')', - rsUpper + '?' + rsMiscLower + '+' + rsOptContrLower, - rsUpper + '+' + rsOptContrUpper, - rsOrdUpper, - rsOrdLower, - rsDigits, - rsEmoji - ].join('|'), 'g'); - - /** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */ - var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboRange + rsVarRange + ']'); - - /** Used to detect strings that need a more robust regexp to match words. */ - var reHasUnicodeWord = /[a-z][A-Z]|[A-Z]{2,}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/; - - /** Used to assign default `context` object properties. */ - var contextProps = [ - 'Array', 'Buffer', 'DataView', 'Date', 'Error', 'Float32Array', 'Float64Array', - 'Function', 'Int8Array', 'Int16Array', 'Int32Array', 'Map', 'Math', 'Object', - 'Promise', 'RegExp', 'Set', 'String', 'Symbol', 'TypeError', 'Uint8Array', - 'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', 'WeakMap', - '_', 'clearTimeout', 'isFinite', 'parseInt', 'setTimeout' - ]; - - /** Used to make template sourceURLs easier to identify. */ - var templateCounter = -1; - - /** Used to identify `toStringTag` values of typed arrays. */ - var typedArrayTags = {}; - typedArrayTags[float32Tag] = typedArrayTags[float64Tag] = - typedArrayTags[int8Tag] = typedArrayTags[int16Tag] = - typedArrayTags[int32Tag] = typedArrayTags[uint8Tag] = - typedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] = - typedArrayTags[uint32Tag] = true; - typedArrayTags[argsTag] = typedArrayTags[arrayTag] = - typedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] = - typedArrayTags[dataViewTag] = typedArrayTags[dateTag] = - typedArrayTags[errorTag] = typedArrayTags[funcTag] = - typedArrayTags[mapTag] = typedArrayTags[numberTag] = - typedArrayTags[objectTag] = typedArrayTags[regexpTag] = - typedArrayTags[setTag] = typedArrayTags[stringTag] = - typedArrayTags[weakMapTag] = false; - - /** Used to identify `toStringTag` values supported by `_.clone`. 
*/ - var cloneableTags = {}; - cloneableTags[argsTag] = cloneableTags[arrayTag] = - cloneableTags[arrayBufferTag] = cloneableTags[dataViewTag] = - cloneableTags[boolTag] = cloneableTags[dateTag] = - cloneableTags[float32Tag] = cloneableTags[float64Tag] = - cloneableTags[int8Tag] = cloneableTags[int16Tag] = - cloneableTags[int32Tag] = cloneableTags[mapTag] = - cloneableTags[numberTag] = cloneableTags[objectTag] = - cloneableTags[regexpTag] = cloneableTags[setTag] = - cloneableTags[stringTag] = cloneableTags[symbolTag] = - cloneableTags[uint8Tag] = cloneableTags[uint8ClampedTag] = - cloneableTags[uint16Tag] = cloneableTags[uint32Tag] = true; - cloneableTags[errorTag] = cloneableTags[funcTag] = - cloneableTags[weakMapTag] = false; - - /** Used to map Latin Unicode letters to basic Latin letters. */ - var deburredLetters = { - // Latin-1 Supplement block. - '\xc0': 'A', '\xc1': 'A', '\xc2': 'A', '\xc3': 'A', '\xc4': 'A', '\xc5': 'A', - '\xe0': 'a', '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a', - '\xc7': 'C', '\xe7': 'c', - '\xd0': 'D', '\xf0': 'd', - '\xc8': 'E', '\xc9': 'E', '\xca': 'E', '\xcb': 'E', - '\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e', - '\xcc': 'I', '\xcd': 'I', '\xce': 'I', '\xcf': 'I', - '\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i', - '\xd1': 'N', '\xf1': 'n', - '\xd2': 'O', '\xd3': 'O', '\xd4': 'O', '\xd5': 'O', '\xd6': 'O', '\xd8': 'O', - '\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o', '\xf6': 'o', '\xf8': 'o', - '\xd9': 'U', '\xda': 'U', '\xdb': 'U', '\xdc': 'U', - '\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u', - '\xdd': 'Y', '\xfd': 'y', '\xff': 'y', - '\xc6': 'Ae', '\xe6': 'ae', - '\xde': 'Th', '\xfe': 'th', - '\xdf': 'ss', - // Latin Extended-A block. - '\u0100': 'A', '\u0102': 'A', '\u0104': 'A', - '\u0101': 'a', '\u0103': 'a', '\u0105': 'a', - '\u0106': 'C', '\u0108': 'C', '\u010a': 'C', '\u010c': 'C', - '\u0107': 'c', '\u0109': 'c', '\u010b': 'c', '\u010d': 'c', - '\u010e': 'D', '\u0110': 'D', '\u010f': 'd', '\u0111': 'd', - '\u0112': 'E', '\u0114': 'E', '\u0116': 'E', '\u0118': 'E', '\u011a': 'E', - '\u0113': 'e', '\u0115': 'e', '\u0117': 'e', '\u0119': 'e', '\u011b': 'e', - '\u011c': 'G', '\u011e': 'G', '\u0120': 'G', '\u0122': 'G', - '\u011d': 'g', '\u011f': 'g', '\u0121': 'g', '\u0123': 'g', - '\u0124': 'H', '\u0126': 'H', '\u0125': 'h', '\u0127': 'h', - '\u0128': 'I', '\u012a': 'I', '\u012c': 'I', '\u012e': 'I', '\u0130': 'I', - '\u0129': 'i', '\u012b': 'i', '\u012d': 'i', '\u012f': 'i', '\u0131': 'i', - '\u0134': 'J', '\u0135': 'j', - '\u0136': 'K', '\u0137': 'k', '\u0138': 'k', - '\u0139': 'L', '\u013b': 'L', '\u013d': 'L', '\u013f': 'L', '\u0141': 'L', - '\u013a': 'l', '\u013c': 'l', '\u013e': 'l', '\u0140': 'l', '\u0142': 'l', - '\u0143': 'N', '\u0145': 'N', '\u0147': 'N', '\u014a': 'N', - '\u0144': 'n', '\u0146': 'n', '\u0148': 'n', '\u014b': 'n', - '\u014c': 'O', '\u014e': 'O', '\u0150': 'O', - '\u014d': 'o', '\u014f': 'o', '\u0151': 'o', - '\u0154': 'R', '\u0156': 'R', '\u0158': 'R', - '\u0155': 'r', '\u0157': 'r', '\u0159': 'r', - '\u015a': 'S', '\u015c': 'S', '\u015e': 'S', '\u0160': 'S', - '\u015b': 's', '\u015d': 's', '\u015f': 's', '\u0161': 's', - '\u0162': 'T', '\u0164': 'T', '\u0166': 'T', - '\u0163': 't', '\u0165': 't', '\u0167': 't', - '\u0168': 'U', '\u016a': 'U', '\u016c': 'U', '\u016e': 'U', '\u0170': 'U', '\u0172': 'U', - '\u0169': 'u', '\u016b': 'u', '\u016d': 'u', '\u016f': 'u', '\u0171': 'u', '\u0173': 'u', - '\u0174': 'W', '\u0175': 'w', - '\u0176': 'Y', '\u0177': 'y', '\u0178': 'Y', - '\u0179': 'Z', 
'\u017b': 'Z', '\u017d': 'Z', - '\u017a': 'z', '\u017c': 'z', '\u017e': 'z', - '\u0132': 'IJ', '\u0133': 'ij', - '\u0152': 'Oe', '\u0153': 'oe', - '\u0149': "'n", '\u017f': 's' - }; - - /** Used to map characters to HTML entities. */ - var htmlEscapes = { - '&': '&amp;', - '<': '&lt;', - '>': '&gt;', - '"': '&quot;', - "'": '&#39;' - }; - - /** Used to map HTML entities to characters. */ - var htmlUnescapes = { - '&amp;': '&', - '&lt;': '<', - '&gt;': '>', - '&quot;': '"', - '&#39;': "'" - }; - - /** Used to escape characters for inclusion in compiled string literals. */ - var stringEscapes = { - '\\': '\\', - "'": "'", - '\n': 'n', - '\r': 'r', - '\u2028': 'u2028', - '\u2029': 'u2029' - }; - - /** Built-in method references without a dependency on `root`. */ - var freeParseFloat = parseFloat, - freeParseInt = parseInt; - - /** Detect free variable `global` from Node.js. */ - var freeGlobal = typeof global == 'object' && global && global.Object === Object && global; - - /** Detect free variable `self`. */ - var freeSelf = typeof self == 'object' && self && self.Object === Object && self; - - /** Used as a reference to the global object. */ - var root = freeGlobal || freeSelf || Function('return this')(); - - /** Detect free variable `exports`. */ - var freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports; - - /** Detect free variable `module`. */ - var freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module; - - /** Detect the popular CommonJS extension `module.exports`. */ - var moduleExports = freeModule && freeModule.exports === freeExports; - - /** Detect free variable `process` from Node.js. */ - var freeProcess = moduleExports && freeGlobal.process; - - /** Used to access faster Node.js helpers. */ - var nodeUtil = (function() { - try { - // Use `util.types` for Node.js 10+. - var types = freeModule && freeModule.require && freeModule.require('util').types; - - if (types) { - return types; - } - - // Legacy `process.binding('util')` for Node.js < 10. - return freeProcess && freeProcess.binding && freeProcess.binding('util'); - } catch (e) {} - }()); - - /* Node.js helper references. */ - var nodeIsArrayBuffer = nodeUtil && nodeUtil.isArrayBuffer, - nodeIsDate = nodeUtil && nodeUtil.isDate, - nodeIsMap = nodeUtil && nodeUtil.isMap, - nodeIsRegExp = nodeUtil && nodeUtil.isRegExp, - nodeIsSet = nodeUtil && nodeUtil.isSet, - nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray; - - /*--------------------------------------------------------------------------*/ - - /** - * A faster alternative to `Function#apply`, this function invokes `func` - * with the `this` binding of `thisArg` and the arguments of `args`. - * - * @private - * @param {Function} func The function to invoke. - * @param {*} thisArg The `this` binding of `func`. - * @param {Array} args The arguments to invoke `func` with. - * @returns {*} Returns the result of `func`. - */ - function apply(func, thisArg, args) { - switch (args.length) { - case 0: return func.call(thisArg); - case 1: return func.call(thisArg, args[0]); - case 2: return func.call(thisArg, args[0], args[1]); - case 3: return func.call(thisArg, args[0], args[1], args[2]); - } - return func.apply(thisArg, args); - } - - /** - * A specialized version of `baseAggregator` for arrays. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} setter The function to set `accumulator` values. - * @param {Function} iteratee The iteratee to transform keys. 
- * @param {Object} accumulator The initial aggregated object. - * @returns {Function} Returns `accumulator`. - */ - function arrayAggregator(array, setter, iteratee, accumulator) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - var value = array[index]; - setter(accumulator, value, iteratee(value), array); - } - return accumulator; - } - - /** - * A specialized version of `_.forEach` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns `array`. - */ - function arrayEach(array, iteratee) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - if (iteratee(array[index], index, array) === false) { - break; - } - } - return array; - } - - /** - * A specialized version of `_.forEachRight` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns `array`. - */ - function arrayEachRight(array, iteratee) { - var length = array == null ? 0 : array.length; - - while (length--) { - if (iteratee(array[length], length, array) === false) { - break; - } - } - return array; - } - - /** - * A specialized version of `_.every` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if all elements pass the predicate check, - * else `false`. - */ - function arrayEvery(array, predicate) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - if (!predicate(array[index], index, array)) { - return false; - } - } - return true; - } - - /** - * A specialized version of `_.filter` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - */ - function arrayFilter(array, predicate) { - var index = -1, - length = array == null ? 0 : array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index]; - if (predicate(value, index, array)) { - result[resIndex++] = value; - } - } - return result; - } - - /** - * A specialized version of `_.includes` for arrays without support for - * specifying an index to search from. - * - * @private - * @param {Array} [array] The array to inspect. - * @param {*} target The value to search for. - * @returns {boolean} Returns `true` if `target` is found, else `false`. - */ - function arrayIncludes(array, value) { - var length = array == null ? 0 : array.length; - return !!length && baseIndexOf(array, value, 0) > -1; - } - - /** - * This function is like `arrayIncludes` except that it accepts a comparator. - * - * @private - * @param {Array} [array] The array to inspect. - * @param {*} target The value to search for. - * @param {Function} comparator The comparator invoked per element. - * @returns {boolean} Returns `true` if `target` is found, else `false`. - */ - function arrayIncludesWith(array, value, comparator) { - var index = -1, - length = array == null ? 
0 : array.length; - - while (++index < length) { - if (comparator(value, array[index])) { - return true; - } - } - return false; - } - - /** - * A specialized version of `_.map` for arrays without support for iteratee - * shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - */ - function arrayMap(array, iteratee) { - var index = -1, - length = array == null ? 0 : array.length, - result = Array(length); - - while (++index < length) { - result[index] = iteratee(array[index], index, array); - } - return result; - } - - /** - * Appends the elements of `values` to `array`. - * - * @private - * @param {Array} array The array to modify. - * @param {Array} values The values to append. - * @returns {Array} Returns `array`. - */ - function arrayPush(array, values) { - var index = -1, - length = values.length, - offset = array.length; - - while (++index < length) { - array[offset + index] = values[index]; - } - return array; - } - - /** - * A specialized version of `_.reduce` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @param {boolean} [initAccum] Specify using the first element of `array` as - * the initial value. - * @returns {*} Returns the accumulated value. - */ - function arrayReduce(array, iteratee, accumulator, initAccum) { - var index = -1, - length = array == null ? 0 : array.length; - - if (initAccum && length) { - accumulator = array[++index]; - } - while (++index < length) { - accumulator = iteratee(accumulator, array[index], index, array); - } - return accumulator; - } - - /** - * A specialized version of `_.reduceRight` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @param {boolean} [initAccum] Specify using the last element of `array` as - * the initial value. - * @returns {*} Returns the accumulated value. - */ - function arrayReduceRight(array, iteratee, accumulator, initAccum) { - var length = array == null ? 0 : array.length; - if (initAccum && length) { - accumulator = array[--length]; - } - while (length--) { - accumulator = iteratee(accumulator, array[length], length, array); - } - return accumulator; - } - - /** - * A specialized version of `_.some` for arrays without support for iteratee - * shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if any element passes the predicate check, - * else `false`. - */ - function arraySome(array, predicate) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - if (predicate(array[index], index, array)) { - return true; - } - } - return false; - } - - /** - * Gets the size of an ASCII `string`. - * - * @private - * @param {string} string The string inspect. - * @returns {number} Returns the string size. - */ - var asciiSize = baseProperty('length'); - - /** - * Converts an ASCII `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. 
- */ - function asciiToArray(string) { - return string.split(''); - } - - /** - * Splits an ASCII `string` into an array of its words. - * - * @private - * @param {string} The string to inspect. - * @returns {Array} Returns the words of `string`. - */ - function asciiWords(string) { - return string.match(reAsciiWord) || []; - } - - /** - * The base implementation of methods like `_.findKey` and `_.findLastKey`, - * without support for iteratee shorthands, which iterates over `collection` - * using `eachFunc`. - * - * @private - * @param {Array|Object} collection The collection to inspect. - * @param {Function} predicate The function invoked per iteration. - * @param {Function} eachFunc The function to iterate over `collection`. - * @returns {*} Returns the found element or its key, else `undefined`. - */ - function baseFindKey(collection, predicate, eachFunc) { - var result; - eachFunc(collection, function(value, key, collection) { - if (predicate(value, key, collection)) { - result = key; - return false; - } - }); - return result; - } - - /** - * The base implementation of `_.findIndex` and `_.findLastIndex` without - * support for iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Function} predicate The function invoked per iteration. - * @param {number} fromIndex The index to search from. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function baseFindIndex(array, predicate, fromIndex, fromRight) { - var length = array.length, - index = fromIndex + (fromRight ? 1 : -1); - - while ((fromRight ? index-- : ++index < length)) { - if (predicate(array[index], index, array)) { - return index; - } - } - return -1; - } - - /** - * The base implementation of `_.indexOf` without `fromIndex` bounds checks. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function baseIndexOf(array, value, fromIndex) { - return value === value - ? strictIndexOf(array, value, fromIndex) - : baseFindIndex(array, baseIsNaN, fromIndex); - } - - /** - * This function is like `baseIndexOf` except that it accepts a comparator. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @param {Function} comparator The comparator invoked per element. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function baseIndexOfWith(array, value, fromIndex, comparator) { - var index = fromIndex - 1, - length = array.length; - - while (++index < length) { - if (comparator(array[index], value)) { - return index; - } - } - return -1; - } - - /** - * The base implementation of `_.isNaN` without support for number objects. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `NaN`, else `false`. - */ - function baseIsNaN(value) { - return value !== value; - } - - /** - * The base implementation of `_.mean` and `_.meanBy` without support for - * iteratee shorthands. - * - * @private - * @param {Array} array The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {number} Returns the mean. 
- */ - function baseMean(array, iteratee) { - var length = array == null ? 0 : array.length; - return length ? (baseSum(array, iteratee) / length) : NAN; - } - - /** - * The base implementation of `_.property` without support for deep paths. - * - * @private - * @param {string} key The key of the property to get. - * @returns {Function} Returns the new accessor function. - */ - function baseProperty(key) { - return function(object) { - return object == null ? undefined : object[key]; - }; - } - - /** - * The base implementation of `_.propertyOf` without support for deep paths. - * - * @private - * @param {Object} object The object to query. - * @returns {Function} Returns the new accessor function. - */ - function basePropertyOf(object) { - return function(key) { - return object == null ? undefined : object[key]; - }; - } - - /** - * The base implementation of `_.reduce` and `_.reduceRight`, without support - * for iteratee shorthands, which iterates over `collection` using `eachFunc`. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} accumulator The initial value. - * @param {boolean} initAccum Specify using the first or last element of - * `collection` as the initial value. - * @param {Function} eachFunc The function to iterate over `collection`. - * @returns {*} Returns the accumulated value. - */ - function baseReduce(collection, iteratee, accumulator, initAccum, eachFunc) { - eachFunc(collection, function(value, index, collection) { - accumulator = initAccum - ? (initAccum = false, value) - : iteratee(accumulator, value, index, collection); - }); - return accumulator; - } - - /** - * The base implementation of `_.sortBy` which uses `comparer` to define the - * sort order of `array` and replaces criteria objects with their corresponding - * values. - * - * @private - * @param {Array} array The array to sort. - * @param {Function} comparer The function to define sort order. - * @returns {Array} Returns `array`. - */ - function baseSortBy(array, comparer) { - var length = array.length; - - array.sort(comparer); - while (length--) { - array[length] = array[length].value; - } - return array; - } - - /** - * The base implementation of `_.sum` and `_.sumBy` without support for - * iteratee shorthands. - * - * @private - * @param {Array} array The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {number} Returns the sum. - */ - function baseSum(array, iteratee) { - var result, - index = -1, - length = array.length; - - while (++index < length) { - var current = iteratee(array[index]); - if (current !== undefined) { - result = result === undefined ? current : (result + current); - } - } - return result; - } - - /** - * The base implementation of `_.times` without support for iteratee shorthands - * or max array length checks. - * - * @private - * @param {number} n The number of times to invoke `iteratee`. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the array of results. - */ - function baseTimes(n, iteratee) { - var index = -1, - result = Array(n); - - while (++index < n) { - result[index] = iteratee(index); - } - return result; - } - - /** - * The base implementation of `_.toPairs` and `_.toPairsIn` which creates an array - * of key-value pairs for `object` corresponding to the property names of `props`. - * - * @private - * @param {Object} object The object to query. 
- * @param {Array} props The property names to get values for. - * @returns {Object} Returns the key-value pairs. - */ - function baseToPairs(object, props) { - return arrayMap(props, function(key) { - return [key, object[key]]; - }); - } - - /** - * The base implementation of `_.unary` without support for storing metadata. - * - * @private - * @param {Function} func The function to cap arguments for. - * @returns {Function} Returns the new capped function. - */ - function baseUnary(func) { - return function(value) { - return func(value); - }; - } - - /** - * The base implementation of `_.values` and `_.valuesIn` which creates an - * array of `object` property values corresponding to the property names - * of `props`. - * - * @private - * @param {Object} object The object to query. - * @param {Array} props The property names to get values for. - * @returns {Object} Returns the array of property values. - */ - function baseValues(object, props) { - return arrayMap(props, function(key) { - return object[key]; - }); - } - - /** - * Checks if a `cache` value for `key` exists. - * - * @private - * @param {Object} cache The cache to query. - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function cacheHas(cache, key) { - return cache.has(key); - } - - /** - * Used by `_.trim` and `_.trimStart` to get the index of the first string symbol - * that is not found in the character symbols. - * - * @private - * @param {Array} strSymbols The string symbols to inspect. - * @param {Array} chrSymbols The character symbols to find. - * @returns {number} Returns the index of the first unmatched string symbol. - */ - function charsStartIndex(strSymbols, chrSymbols) { - var index = -1, - length = strSymbols.length; - - while (++index < length && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {} - return index; - } - - /** - * Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol - * that is not found in the character symbols. - * - * @private - * @param {Array} strSymbols The string symbols to inspect. - * @param {Array} chrSymbols The character symbols to find. - * @returns {number} Returns the index of the last unmatched string symbol. - */ - function charsEndIndex(strSymbols, chrSymbols) { - var index = strSymbols.length; - - while (index-- && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {} - return index; - } - - /** - * Gets the number of `placeholder` occurrences in `array`. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} placeholder The placeholder to search for. - * @returns {number} Returns the placeholder count. - */ - function countHolders(array, placeholder) { - var length = array.length, - result = 0; - - while (length--) { - if (array[length] === placeholder) { - ++result; - } - } - return result; - } - - /** - * Used by `_.deburr` to convert Latin-1 Supplement and Latin Extended-A - * letters to basic Latin letters. - * - * @private - * @param {string} letter The matched letter to deburr. - * @returns {string} Returns the deburred letter. - */ - var deburrLetter = basePropertyOf(deburredLetters); - - /** - * Used by `_.escape` to convert characters to HTML entities. - * - * @private - * @param {string} chr The matched character to escape. - * @returns {string} Returns the escaped character. 
- */ - var escapeHtmlChar = basePropertyOf(htmlEscapes); - - /** - * Used by `_.template` to escape characters for inclusion in compiled string literals. - * - * @private - * @param {string} chr The matched character to escape. - * @returns {string} Returns the escaped character. - */ - function escapeStringChar(chr) { - return '\\' + stringEscapes[chr]; - } - - /** - * Gets the value at `key` of `object`. - * - * @private - * @param {Object} [object] The object to query. - * @param {string} key The key of the property to get. - * @returns {*} Returns the property value. - */ - function getValue(object, key) { - return object == null ? undefined : object[key]; - } - - /** - * Checks if `string` contains Unicode symbols. - * - * @private - * @param {string} string The string to inspect. - * @returns {boolean} Returns `true` if a symbol is found, else `false`. - */ - function hasUnicode(string) { - return reHasUnicode.test(string); - } - - /** - * Checks if `string` contains a word composed of Unicode symbols. - * - * @private - * @param {string} string The string to inspect. - * @returns {boolean} Returns `true` if a word is found, else `false`. - */ - function hasUnicodeWord(string) { - return reHasUnicodeWord.test(string); - } - - /** - * Converts `iterator` to an array. - * - * @private - * @param {Object} iterator The iterator to convert. - * @returns {Array} Returns the converted array. - */ - function iteratorToArray(iterator) { - var data, - result = []; - - while (!(data = iterator.next()).done) { - result.push(data.value); - } - return result; - } - - /** - * Converts `map` to its key-value pairs. - * - * @private - * @param {Object} map The map to convert. - * @returns {Array} Returns the key-value pairs. - */ - function mapToArray(map) { - var index = -1, - result = Array(map.size); - - map.forEach(function(value, key) { - result[++index] = [key, value]; - }); - return result; - } - - /** - * Creates a unary function that invokes `func` with its argument transformed. - * - * @private - * @param {Function} func The function to wrap. - * @param {Function} transform The argument transform. - * @returns {Function} Returns the new function. - */ - function overArg(func, transform) { - return function(arg) { - return func(transform(arg)); - }; - } - - /** - * Replaces all `placeholder` elements in `array` with an internal placeholder - * and returns an array of their indexes. - * - * @private - * @param {Array} array The array to modify. - * @param {*} placeholder The placeholder to replace. - * @returns {Array} Returns the new array of placeholder indexes. - */ - function replaceHolders(array, placeholder) { - var index = -1, - length = array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index]; - if (value === placeholder || value === PLACEHOLDER) { - array[index] = PLACEHOLDER; - result[resIndex++] = index; - } - } - return result; - } - - /** - * Gets the value at `key`, unless `key` is "__proto__". - * - * @private - * @param {Object} object The object to query. - * @param {string} key The key of the property to get. - * @returns {*} Returns the property value. - */ - function safeGet(object, key) { - return key == '__proto__' - ? undefined - : object[key]; - } - - /** - * Converts `set` to an array of its values. - * - * @private - * @param {Object} set The set to convert. - * @returns {Array} Returns the values. 
- */ - function setToArray(set) { - var index = -1, - result = Array(set.size); - - set.forEach(function(value) { - result[++index] = value; - }); - return result; - } - - /** - * Converts `set` to its value-value pairs. - * - * @private - * @param {Object} set The set to convert. - * @returns {Array} Returns the value-value pairs. - */ - function setToPairs(set) { - var index = -1, - result = Array(set.size); - - set.forEach(function(value) { - result[++index] = [value, value]; - }); - return result; - } - - /** - * A specialized version of `_.indexOf` which performs strict equality - * comparisons of values, i.e. `===`. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function strictIndexOf(array, value, fromIndex) { - var index = fromIndex - 1, - length = array.length; - - while (++index < length) { - if (array[index] === value) { - return index; - } - } - return -1; - } - - /** - * A specialized version of `_.lastIndexOf` which performs strict equality - * comparisons of values, i.e. `===`. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function strictLastIndexOf(array, value, fromIndex) { - var index = fromIndex + 1; - while (index--) { - if (array[index] === value) { - return index; - } - } - return index; - } - - /** - * Gets the number of symbols in `string`. - * - * @private - * @param {string} string The string to inspect. - * @returns {number} Returns the string size. - */ - function stringSize(string) { - return hasUnicode(string) - ? unicodeSize(string) - : asciiSize(string); - } - - /** - * Converts `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ - function stringToArray(string) { - return hasUnicode(string) - ? unicodeToArray(string) - : asciiToArray(string); - } - - /** - * Used by `_.unescape` to convert HTML entities to characters. - * - * @private - * @param {string} chr The matched character to unescape. - * @returns {string} Returns the unescaped character. - */ - var unescapeHtmlChar = basePropertyOf(htmlUnescapes); - - /** - * Gets the size of a Unicode `string`. - * - * @private - * @param {string} string The string inspect. - * @returns {number} Returns the string size. - */ - function unicodeSize(string) { - var result = reUnicode.lastIndex = 0; - while (reUnicode.test(string)) { - ++result; - } - return result; - } - - /** - * Converts a Unicode `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ - function unicodeToArray(string) { - return string.match(reUnicode) || []; - } - - /** - * Splits a Unicode `string` into an array of its words. - * - * @private - * @param {string} The string to inspect. - * @returns {Array} Returns the words of `string`. - */ - function unicodeWords(string) { - return string.match(reUnicodeWord) || []; - } - - /*--------------------------------------------------------------------------*/ - - /** - * Create a new pristine `lodash` function using the `context` object. 
- * - * @static - * @memberOf _ - * @since 1.1.0 - * @category Util - * @param {Object} [context=root] The context object. - * @returns {Function} Returns a new `lodash` function. - * @example - * - * _.mixin({ 'foo': _.constant('foo') }); - * - * var lodash = _.runInContext(); - * lodash.mixin({ 'bar': lodash.constant('bar') }); - * - * _.isFunction(_.foo); - * // => true - * _.isFunction(_.bar); - * // => false - * - * lodash.isFunction(lodash.foo); - * // => false - * lodash.isFunction(lodash.bar); - * // => true - * - * // Create a suped-up `defer` in Node.js. - * var defer = _.runInContext({ 'setTimeout': setImmediate }).defer; - */ - var runInContext = (function runInContext(context) { - context = context == null ? root : _.defaults(root.Object(), context, _.pick(root, contextProps)); - - /** Built-in constructor references. */ - var Array = context.Array, - Date = context.Date, - Error = context.Error, - Function = context.Function, - Math = context.Math, - Object = context.Object, - RegExp = context.RegExp, - String = context.String, - TypeError = context.TypeError; - - /** Used for built-in method references. */ - var arrayProto = Array.prototype, - funcProto = Function.prototype, - objectProto = Object.prototype; - - /** Used to detect overreaching core-js shims. */ - var coreJsData = context['__core-js_shared__']; - - /** Used to resolve the decompiled source of functions. */ - var funcToString = funcProto.toString; - - /** Used to check objects for own properties. */ - var hasOwnProperty = objectProto.hasOwnProperty; - - /** Used to generate unique IDs. */ - var idCounter = 0; - - /** Used to detect methods masquerading as native. */ - var maskSrcKey = (function() { - var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || ''); - return uid ? ('Symbol(src)_1.' + uid) : ''; - }()); - - /** - * Used to resolve the - * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) - * of values. - */ - var nativeObjectToString = objectProto.toString; - - /** Used to infer the `Object` constructor. */ - var objectCtorString = funcToString.call(Object); - - /** Used to restore the original `_` reference in `_.noConflict`. */ - var oldDash = root._; - - /** Used to detect if a method is native. */ - var reIsNative = RegExp('^' + - funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\$&') - .replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, '$1.*?') + '$' - ); - - /** Built-in value references. */ - var Buffer = moduleExports ? context.Buffer : undefined, - Symbol = context.Symbol, - Uint8Array = context.Uint8Array, - allocUnsafe = Buffer ? Buffer.allocUnsafe : undefined, - getPrototype = overArg(Object.getPrototypeOf, Object), - objectCreate = Object.create, - propertyIsEnumerable = objectProto.propertyIsEnumerable, - splice = arrayProto.splice, - spreadableSymbol = Symbol ? Symbol.isConcatSpreadable : undefined, - symIterator = Symbol ? Symbol.iterator : undefined, - symToStringTag = Symbol ? Symbol.toStringTag : undefined; - - var defineProperty = (function() { - try { - var func = getNative(Object, 'defineProperty'); - func({}, '', {}); - return func; - } catch (e) {} - }()); - - /** Mocked built-ins. 
*/ - var ctxClearTimeout = context.clearTimeout !== root.clearTimeout && context.clearTimeout, - ctxNow = Date && Date.now !== root.Date.now && Date.now, - ctxSetTimeout = context.setTimeout !== root.setTimeout && context.setTimeout; - - /* Built-in method references for those with the same name as other `lodash` methods. */ - var nativeCeil = Math.ceil, - nativeFloor = Math.floor, - nativeGetSymbols = Object.getOwnPropertySymbols, - nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined, - nativeIsFinite = context.isFinite, - nativeJoin = arrayProto.join, - nativeKeys = overArg(Object.keys, Object), - nativeMax = Math.max, - nativeMin = Math.min, - nativeNow = Date.now, - nativeParseInt = context.parseInt, - nativeRandom = Math.random, - nativeReverse = arrayProto.reverse; - - /* Built-in method references that are verified to be native. */ - var DataView = getNative(context, 'DataView'), - Map = getNative(context, 'Map'), - Promise = getNative(context, 'Promise'), - Set = getNative(context, 'Set'), - WeakMap = getNative(context, 'WeakMap'), - nativeCreate = getNative(Object, 'create'); - - /** Used to store function metadata. */ - var metaMap = WeakMap && new WeakMap; - - /** Used to lookup unminified function names. */ - var realNames = {}; - - /** Used to detect maps, sets, and weakmaps. */ - var dataViewCtorString = toSource(DataView), - mapCtorString = toSource(Map), - promiseCtorString = toSource(Promise), - setCtorString = toSource(Set), - weakMapCtorString = toSource(WeakMap); - - /** Used to convert symbols to primitives and strings. */ - var symbolProto = Symbol ? Symbol.prototype : undefined, - symbolValueOf = symbolProto ? symbolProto.valueOf : undefined, - symbolToString = symbolProto ? symbolProto.toString : undefined; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a `lodash` object which wraps `value` to enable implicit method - * chain sequences. Methods that operate on and return arrays, collections, - * and functions can be chained together. Methods that retrieve a single value - * or may return a primitive value will automatically end the chain sequence - * and return the unwrapped value. Otherwise, the value must be unwrapped - * with `_#value`. - * - * Explicit chain sequences, which must be unwrapped with `_#value`, may be - * enabled using `_.chain`. - * - * The execution of chained methods is lazy, that is, it's deferred until - * `_#value` is implicitly or explicitly called. - * - * Lazy evaluation allows several methods to support shortcut fusion. - * Shortcut fusion is an optimization to merge iteratee calls; this avoids - * the creation of intermediate arrays and can greatly reduce the number of - * iteratee executions. Sections of a chain sequence qualify for shortcut - * fusion if the section is applied to an array and iteratees accept only - * one argument. The heuristic for whether a section qualifies for shortcut - * fusion is subject to change. - * - * Chaining is supported in custom builds as long as the `_#value` method is - * directly or indirectly included in the build. - * - * In addition to lodash methods, wrappers have `Array` and `String` methods. 
- * - * The wrapper `Array` methods are: - * `concat`, `join`, `pop`, `push`, `shift`, `sort`, `splice`, and `unshift` - * - * The wrapper `String` methods are: - * `replace` and `split` - * - * The wrapper methods that support shortcut fusion are: - * `at`, `compact`, `drop`, `dropRight`, `dropWhile`, `filter`, `find`, - * `findLast`, `head`, `initial`, `last`, `map`, `reject`, `reverse`, `slice`, - * `tail`, `take`, `takeRight`, `takeRightWhile`, `takeWhile`, and `toArray` - * - * The chainable wrapper methods are: - * `after`, `ary`, `assign`, `assignIn`, `assignInWith`, `assignWith`, `at`, - * `before`, `bind`, `bindAll`, `bindKey`, `castArray`, `chain`, `chunk`, - * `commit`, `compact`, `concat`, `conforms`, `constant`, `countBy`, `create`, - * `curry`, `debounce`, `defaults`, `defaultsDeep`, `defer`, `delay`, - * `difference`, `differenceBy`, `differenceWith`, `drop`, `dropRight`, - * `dropRightWhile`, `dropWhile`, `extend`, `extendWith`, `fill`, `filter`, - * `flatMap`, `flatMapDeep`, `flatMapDepth`, `flatten`, `flattenDeep`, - * `flattenDepth`, `flip`, `flow`, `flowRight`, `fromPairs`, `functions`, - * `functionsIn`, `groupBy`, `initial`, `intersection`, `intersectionBy`, - * `intersectionWith`, `invert`, `invertBy`, `invokeMap`, `iteratee`, `keyBy`, - * `keys`, `keysIn`, `map`, `mapKeys`, `mapValues`, `matches`, `matchesProperty`, - * `memoize`, `merge`, `mergeWith`, `method`, `methodOf`, `mixin`, `negate`, - * `nthArg`, `omit`, `omitBy`, `once`, `orderBy`, `over`, `overArgs`, - * `overEvery`, `overSome`, `partial`, `partialRight`, `partition`, `pick`, - * `pickBy`, `plant`, `property`, `propertyOf`, `pull`, `pullAll`, `pullAllBy`, - * `pullAllWith`, `pullAt`, `push`, `range`, `rangeRight`, `rearg`, `reject`, - * `remove`, `rest`, `reverse`, `sampleSize`, `set`, `setWith`, `shuffle`, - * `slice`, `sort`, `sortBy`, `splice`, `spread`, `tail`, `take`, `takeRight`, - * `takeRightWhile`, `takeWhile`, `tap`, `throttle`, `thru`, `toArray`, - * `toPairs`, `toPairsIn`, `toPath`, `toPlainObject`, `transform`, `unary`, - * `union`, `unionBy`, `unionWith`, `uniq`, `uniqBy`, `uniqWith`, `unset`, - * `unshift`, `unzip`, `unzipWith`, `update`, `updateWith`, `values`, - * `valuesIn`, `without`, `wrap`, `xor`, `xorBy`, `xorWith`, `zip`, - * `zipObject`, `zipObjectDeep`, and `zipWith` - * - * The wrapper methods that are **not** chainable by default are: - * `add`, `attempt`, `camelCase`, `capitalize`, `ceil`, `clamp`, `clone`, - * `cloneDeep`, `cloneDeepWith`, `cloneWith`, `conformsTo`, `deburr`, - * `defaultTo`, `divide`, `each`, `eachRight`, `endsWith`, `eq`, `escape`, - * `escapeRegExp`, `every`, `find`, `findIndex`, `findKey`, `findLast`, - * `findLastIndex`, `findLastKey`, `first`, `floor`, `forEach`, `forEachRight`, - * `forIn`, `forInRight`, `forOwn`, `forOwnRight`, `get`, `gt`, `gte`, `has`, - * `hasIn`, `head`, `identity`, `includes`, `indexOf`, `inRange`, `invoke`, - * `isArguments`, `isArray`, `isArrayBuffer`, `isArrayLike`, `isArrayLikeObject`, - * `isBoolean`, `isBuffer`, `isDate`, `isElement`, `isEmpty`, `isEqual`, - * `isEqualWith`, `isError`, `isFinite`, `isFunction`, `isInteger`, `isLength`, - * `isMap`, `isMatch`, `isMatchWith`, `isNaN`, `isNative`, `isNil`, `isNull`, - * `isNumber`, `isObject`, `isObjectLike`, `isPlainObject`, `isRegExp`, - * `isSafeInteger`, `isSet`, `isString`, `isUndefined`, `isTypedArray`, - * `isWeakMap`, `isWeakSet`, `join`, `kebabCase`, `last`, `lastIndexOf`, - * `lowerCase`, `lowerFirst`, `lt`, `lte`, `max`, `maxBy`, `mean`, `meanBy`, - * `min`, 
`minBy`, `multiply`, `noConflict`, `noop`, `now`, `nth`, `pad`, - * `padEnd`, `padStart`, `parseInt`, `pop`, `random`, `reduce`, `reduceRight`, - * `repeat`, `result`, `round`, `runInContext`, `sample`, `shift`, `size`, - * `snakeCase`, `some`, `sortedIndex`, `sortedIndexBy`, `sortedLastIndex`, - * `sortedLastIndexBy`, `startCase`, `startsWith`, `stubArray`, `stubFalse`, - * `stubObject`, `stubString`, `stubTrue`, `subtract`, `sum`, `sumBy`, - * `template`, `times`, `toFinite`, `toInteger`, `toJSON`, `toLength`, - * `toLower`, `toNumber`, `toSafeInteger`, `toString`, `toUpper`, `trim`, - * `trimEnd`, `trimStart`, `truncate`, `unescape`, `uniqueId`, `upperCase`, - * `upperFirst`, `value`, and `words` - * - * @name _ - * @constructor - * @category Seq - * @param {*} value The value to wrap in a `lodash` instance. - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * function square(n) { - * return n * n; - * } - * - * var wrapped = _([1, 2, 3]); - * - * // Returns an unwrapped value. - * wrapped.reduce(_.add); - * // => 6 - * - * // Returns a wrapped value. - * var squares = wrapped.map(square); - * - * _.isArray(squares); - * // => false - * - * _.isArray(squares.value()); - * // => true - */ - function lodash(value) { - if (isObjectLike(value) && !isArray(value) && !(value instanceof LazyWrapper)) { - if (value instanceof LodashWrapper) { - return value; - } - if (hasOwnProperty.call(value, '__wrapped__')) { - return wrapperClone(value); - } - } - return new LodashWrapper(value); - } - - /** - * The base implementation of `_.create` without support for assigning - * properties to the created object. - * - * @private - * @param {Object} proto The object to inherit from. - * @returns {Object} Returns the new object. - */ - var baseCreate = (function() { - function object() {} - return function(proto) { - if (!isObject(proto)) { - return {}; - } - if (objectCreate) { - return objectCreate(proto); - } - object.prototype = proto; - var result = new object; - object.prototype = undefined; - return result; - }; - }()); - - /** - * The function whose prototype chain sequence wrappers inherit from. - * - * @private - */ - function baseLodash() { - // No operation performed. - } - - /** - * The base constructor for creating `lodash` wrapper objects. - * - * @private - * @param {*} value The value to wrap. - * @param {boolean} [chainAll] Enable explicit method chain sequences. - */ - function LodashWrapper(value, chainAll) { - this.__wrapped__ = value; - this.__actions__ = []; - this.__chain__ = !!chainAll; - this.__index__ = 0; - this.__values__ = undefined; - } - - /** - * By default, the template delimiters used by lodash are like those in - * embedded Ruby (ERB) as well as ES2015 template strings. Change the - * following template settings to use alternative delimiters. - * - * @static - * @memberOf _ - * @type {Object} - */ - lodash.templateSettings = { - - /** - * Used to detect `data` property values to be HTML-escaped. - * - * @memberOf _.templateSettings - * @type {RegExp} - */ - 'escape': reEscape, - - /** - * Used to detect code to be evaluated. - * - * @memberOf _.templateSettings - * @type {RegExp} - */ - 'evaluate': reEvaluate, - - /** - * Used to detect `data` property values to inject. - * - * @memberOf _.templateSettings - * @type {RegExp} - */ - 'interpolate': reInterpolate, - - /** - * Used to reference the data object in the template text. 
- * - * @memberOf _.templateSettings - * @type {string} - */ - 'variable': '', - - /** - * Used to import variables into the compiled template. - * - * @memberOf _.templateSettings - * @type {Object} - */ - 'imports': { - - /** - * A reference to the `lodash` function. - * - * @memberOf _.templateSettings.imports - * @type {Function} - */ - '_': lodash - } - }; - - // Ensure wrappers are instances of `baseLodash`. - lodash.prototype = baseLodash.prototype; - lodash.prototype.constructor = lodash; - - LodashWrapper.prototype = baseCreate(baseLodash.prototype); - LodashWrapper.prototype.constructor = LodashWrapper; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a lazy wrapper object which wraps `value` to enable lazy evaluation. - * - * @private - * @constructor - * @param {*} value The value to wrap. - */ - function LazyWrapper(value) { - this.__wrapped__ = value; - this.__actions__ = []; - this.__dir__ = 1; - this.__filtered__ = false; - this.__iteratees__ = []; - this.__takeCount__ = MAX_ARRAY_LENGTH; - this.__views__ = []; - } - - /** - * Creates a clone of the lazy wrapper object. - * - * @private - * @name clone - * @memberOf LazyWrapper - * @returns {Object} Returns the cloned `LazyWrapper` object. - */ - function lazyClone() { - var result = new LazyWrapper(this.__wrapped__); - result.__actions__ = copyArray(this.__actions__); - result.__dir__ = this.__dir__; - result.__filtered__ = this.__filtered__; - result.__iteratees__ = copyArray(this.__iteratees__); - result.__takeCount__ = this.__takeCount__; - result.__views__ = copyArray(this.__views__); - return result; - } - - /** - * Reverses the direction of lazy iteration. - * - * @private - * @name reverse - * @memberOf LazyWrapper - * @returns {Object} Returns the new reversed `LazyWrapper` object. - */ - function lazyReverse() { - if (this.__filtered__) { - var result = new LazyWrapper(this); - result.__dir__ = -1; - result.__filtered__ = true; - } else { - result = this.clone(); - result.__dir__ *= -1; - } - return result; - } - - /** - * Extracts the unwrapped value from its lazy wrapper. - * - * @private - * @name value - * @memberOf LazyWrapper - * @returns {*} Returns the unwrapped value. - */ - function lazyValue() { - var array = this.__wrapped__.value(), - dir = this.__dir__, - isArr = isArray(array), - isRight = dir < 0, - arrLength = isArr ? array.length : 0, - view = getView(0, arrLength, this.__views__), - start = view.start, - end = view.end, - length = end - start, - index = isRight ? end : (start - 1), - iteratees = this.__iteratees__, - iterLength = iteratees.length, - resIndex = 0, - takeCount = nativeMin(length, this.__takeCount__); - - if (!isArr || (!isRight && arrLength == length && takeCount == length)) { - return baseWrapperValue(array, this.__actions__); - } - var result = []; - - outer: - while (length-- && resIndex < takeCount) { - index += dir; - - var iterIndex = -1, - value = array[index]; - - while (++iterIndex < iterLength) { - var data = iteratees[iterIndex], - iteratee = data.iteratee, - type = data.type, - computed = iteratee(value); - - if (type == LAZY_MAP_FLAG) { - value = computed; - } else if (!computed) { - if (type == LAZY_FILTER_FLAG) { - continue outer; - } else { - break outer; - } - } - } - result[resIndex++] = value; - } - return result; - } - - // Ensure `LazyWrapper` is an instance of `baseLodash`. 
-  LazyWrapper.prototype = baseCreate(baseLodash.prototype);
-  LazyWrapper.prototype.constructor = LazyWrapper;
-
-  /*------------------------------------------------------------------------*/
-
-  /**
-   * Creates a hash object.
-   *
-   * @private
-   * @constructor
-   * @param {Array} [entries] The key-value pairs to cache.
-   */
-  function Hash(entries) {
-    var index = -1,
-        length = entries == null ? 0 : entries.length;
-
-    this.clear();
-    while (++index < length) {
-      var entry = entries[index];
-      this.set(entry[0], entry[1]);
-    }
-  }
-
-  /**
-   * Removes all key-value entries from the hash.
-   *
-   * @private
-   * @name clear
-   * @memberOf Hash
-   */
-  function hashClear() {
-    this.__data__ = nativeCreate ? nativeCreate(null) : {};
-    this.size = 0;
-  }
-
-  /**
-   * Removes `key` and its value from the hash.
-   *
-   * @private
-   * @name delete
-   * @memberOf Hash
-   * @param {string} key The key of the value to remove.
-   * @returns {boolean} Returns `true` if the entry was removed, else `false`.
-   */
-  function hashDelete(key) {
-    var result = this.has(key) && delete this.__data__[key];
-    this.size -= result ? 1 : 0;
-    return result;
-  }
-
-  /**
-   * Gets the hash value for `key`.
-   *
-   * @private
-   * @name get
-   * @memberOf Hash
-   * @param {string} key The key of the value to get.
-   * @returns {*} Returns the entry value.
-   */
-  function hashGet(key) {
-    var data = this.__data__;
-    if (nativeCreate) {
-      var result = data[key];
-      return result === HASH_UNDEFINED ? undefined : result;
-    }
-    return hasOwnProperty.call(data, key) ? data[key] : undefined;
-  }
-
-  /**
-   * Checks if a hash value for `key` exists.
-   *
-   * @private
-   * @name has
-   * @memberOf Hash
-   * @param {string} key The key of the entry to check.
-   * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
-   */
-  function hashHas(key) {
-    var data = this.__data__;
-    return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key);
-  }
-
-  /**
-   * Sets the hash `key` to `value`.
-   *
-   * @private
-   * @name set
-   * @memberOf Hash
-   * @param {string} key The key of the value to set.
-   * @param {*} value The value to set.
-   * @returns {Object} Returns the hash instance.
-   */
-  function hashSet(key, value) {
-    var data = this.__data__;
-    this.size += this.has(key) ? 0 : 1;
-    data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value;
-    return this;
-  }
-
-  // Add methods to `Hash`.
-  Hash.prototype.clear = hashClear;
-  Hash.prototype['delete'] = hashDelete;
-  Hash.prototype.get = hashGet;
-  Hash.prototype.has = hashHas;
-  Hash.prototype.set = hashSet;
-
-  /*------------------------------------------------------------------------*/
-
-  /**
-   * Creates a list cache object.
-   *
-   * @private
-   * @constructor
-   * @param {Array} [entries] The key-value pairs to cache.
-   */
-  function ListCache(entries) {
-    var index = -1,
-        length = entries == null ? 0 : entries.length;
-
-    this.clear();
-    while (++index < length) {
-      var entry = entries[index];
-      this.set(entry[0], entry[1]);
-    }
-  }
-
-  /**
-   * Removes all key-value entries from the list cache.
-   *
-   * @private
-   * @name clear
-   * @memberOf ListCache
-   */
-  function listCacheClear() {
-    this.__data__ = [];
-    this.size = 0;
-  }
-
-  /**
-   * Removes `key` and its value from the list cache.
-   *
-   * @private
-   * @name delete
-   * @memberOf ListCache
-   * @param {string} key The key of the value to remove.
-   * @returns {boolean} Returns `true` if the entry was removed, else `false`.
- */ - function listCacheDelete(key) { - var data = this.__data__, - index = assocIndexOf(data, key); - - if (index < 0) { - return false; - } - var lastIndex = data.length - 1; - if (index == lastIndex) { - data.pop(); - } else { - splice.call(data, index, 1); - } - --this.size; - return true; - } - - /** - * Gets the list cache value for `key`. - * - * @private - * @name get - * @memberOf ListCache - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ - function listCacheGet(key) { - var data = this.__data__, - index = assocIndexOf(data, key); - - return index < 0 ? undefined : data[index][1]; - } - - /** - * Checks if a list cache value for `key` exists. - * - * @private - * @name has - * @memberOf ListCache - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function listCacheHas(key) { - return assocIndexOf(this.__data__, key) > -1; - } - - /** - * Sets the list cache `key` to `value`. - * - * @private - * @name set - * @memberOf ListCache - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the list cache instance. - */ - function listCacheSet(key, value) { - var data = this.__data__, - index = assocIndexOf(data, key); - - if (index < 0) { - ++this.size; - data.push([key, value]); - } else { - data[index][1] = value; - } - return this; - } - - // Add methods to `ListCache`. - ListCache.prototype.clear = listCacheClear; - ListCache.prototype['delete'] = listCacheDelete; - ListCache.prototype.get = listCacheGet; - ListCache.prototype.has = listCacheHas; - ListCache.prototype.set = listCacheSet; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a map cache object to store key-value pairs. - * - * @private - * @constructor - * @param {Array} [entries] The key-value pairs to cache. - */ - function MapCache(entries) { - var index = -1, - length = entries == null ? 0 : entries.length; - - this.clear(); - while (++index < length) { - var entry = entries[index]; - this.set(entry[0], entry[1]); - } - } - - /** - * Removes all key-value entries from the map. - * - * @private - * @name clear - * @memberOf MapCache - */ - function mapCacheClear() { - this.size = 0; - this.__data__ = { - 'hash': new Hash, - 'map': new (Map || ListCache), - 'string': new Hash - }; - } - - /** - * Removes `key` and its value from the map. - * - * @private - * @name delete - * @memberOf MapCache - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. - */ - function mapCacheDelete(key) { - var result = getMapData(this, key)['delete'](key); - this.size -= result ? 1 : 0; - return result; - } - - /** - * Gets the map value for `key`. - * - * @private - * @name get - * @memberOf MapCache - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ - function mapCacheGet(key) { - return getMapData(this, key).get(key); - } - - /** - * Checks if a map value for `key` exists. - * - * @private - * @name has - * @memberOf MapCache - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function mapCacheHas(key) { - return getMapData(this, key).has(key); - } - - /** - * Sets the map `key` to `value`. 
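-   * (Keys are routed through `getMapData`: string keys use the 'string' hash,
-   * most other primitives the 'hash', and remaining values the 'map', so each
-   * key type keeps roughly O(1) access.)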
-   *
-   * @private
-   * @name set
-   * @memberOf MapCache
-   * @param {string} key The key of the value to set.
-   * @param {*} value The value to set.
-   * @returns {Object} Returns the map cache instance.
-   */
-  function mapCacheSet(key, value) {
-    var data = getMapData(this, key),
-        size = data.size;
-
-    data.set(key, value);
-    this.size += data.size == size ? 0 : 1;
-    return this;
-  }
-
-  // Add methods to `MapCache`.
-  MapCache.prototype.clear = mapCacheClear;
-  MapCache.prototype['delete'] = mapCacheDelete;
-  MapCache.prototype.get = mapCacheGet;
-  MapCache.prototype.has = mapCacheHas;
-  MapCache.prototype.set = mapCacheSet;
-
-  /*------------------------------------------------------------------------*/
-
-  /**
-   * Creates an array cache object to store unique values.
-   *
-   * @private
-   * @constructor
-   * @param {Array} [values] The values to cache.
-   */
-  function SetCache(values) {
-    var index = -1,
-        length = values == null ? 0 : values.length;
-
-    this.__data__ = new MapCache;
-    while (++index < length) {
-      this.add(values[index]);
-    }
-  }
-
-  /**
-   * Adds `value` to the array cache.
-   *
-   * @private
-   * @name add
-   * @memberOf SetCache
-   * @alias push
-   * @param {*} value The value to cache.
-   * @returns {Object} Returns the cache instance.
-   */
-  function setCacheAdd(value) {
-    this.__data__.set(value, HASH_UNDEFINED);
-    return this;
-  }
-
-  /**
-   * Checks if `value` is in the array cache.
-   *
-   * @private
-   * @name has
-   * @memberOf SetCache
-   * @param {*} value The value to search for.
-   * @returns {boolean} Returns `true` if `value` is found, else `false`.
-   */
-  function setCacheHas(value) {
-    return this.__data__.has(value);
-  }
-
-  // Add methods to `SetCache`.
-  SetCache.prototype.add = SetCache.prototype.push = setCacheAdd;
-  SetCache.prototype.has = setCacheHas;
-
-  /*------------------------------------------------------------------------*/
-
-  /**
-   * Creates a stack cache object to store key-value pairs.
-   *
-   * @private
-   * @constructor
-   * @param {Array} [entries] The key-value pairs to cache.
-   */
-  function Stack(entries) {
-    var data = this.__data__ = new ListCache(entries);
-    this.size = data.size;
-  }
-
-  /**
-   * Removes all key-value entries from the stack.
-   *
-   * @private
-   * @name clear
-   * @memberOf Stack
-   */
-  function stackClear() {
-    this.__data__ = new ListCache;
-    this.size = 0;
-  }
-
-  /**
-   * Removes `key` and its value from the stack.
-   *
-   * @private
-   * @name delete
-   * @memberOf Stack
-   * @param {string} key The key of the value to remove.
-   * @returns {boolean} Returns `true` if the entry was removed, else `false`.
-   */
-  function stackDelete(key) {
-    var data = this.__data__,
-        result = data['delete'](key);
-
-    this.size = data.size;
-    return result;
-  }
-
-  /**
-   * Gets the stack value for `key`.
-   *
-   * @private
-   * @name get
-   * @memberOf Stack
-   * @param {string} key The key of the value to get.
-   * @returns {*} Returns the entry value.
-   */
-  function stackGet(key) {
-    return this.__data__.get(key);
-  }
-
-  /**
-   * Checks if a stack value for `key` exists.
-   *
-   * @private
-   * @name has
-   * @memberOf Stack
-   * @param {string} key The key of the entry to check.
-   * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
-   */
-  function stackHas(key) {
-    return this.__data__.has(key);
-  }
-
-  /**
-   * Sets the stack `key` to `value`.
-   *
-   * @private
-   * @name set
-   * @memberOf Stack
-   * @param {string} key The key of the value to set.
-   * @param {*} value The value to set.
- * @returns {Object} Returns the stack cache instance. - */ - function stackSet(key, value) { - var data = this.__data__; - if (data instanceof ListCache) { - var pairs = data.__data__; - if (!Map || (pairs.length < LARGE_ARRAY_SIZE - 1)) { - pairs.push([key, value]); - this.size = ++data.size; - return this; - } - data = this.__data__ = new MapCache(pairs); - } - data.set(key, value); - this.size = data.size; - return this; - } - - // Add methods to `Stack`. - Stack.prototype.clear = stackClear; - Stack.prototype['delete'] = stackDelete; - Stack.prototype.get = stackGet; - Stack.prototype.has = stackHas; - Stack.prototype.set = stackSet; - - /*------------------------------------------------------------------------*/ - - /** - * Creates an array of the enumerable property names of the array-like `value`. - * - * @private - * @param {*} value The value to query. - * @param {boolean} inherited Specify returning inherited property names. - * @returns {Array} Returns the array of property names. - */ - function arrayLikeKeys(value, inherited) { - var isArr = isArray(value), - isArg = !isArr && isArguments(value), - isBuff = !isArr && !isArg && isBuffer(value), - isType = !isArr && !isArg && !isBuff && isTypedArray(value), - skipIndexes = isArr || isArg || isBuff || isType, - result = skipIndexes ? baseTimes(value.length, String) : [], - length = result.length; - - for (var key in value) { - if ((inherited || hasOwnProperty.call(value, key)) && - !(skipIndexes && ( - // Safari 9 has enumerable `arguments.length` in strict mode. - key == 'length' || - // Node.js 0.10 has enumerable non-index properties on buffers. - (isBuff && (key == 'offset' || key == 'parent')) || - // PhantomJS 2 has enumerable non-index properties on typed arrays. - (isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) || - // Skip index properties. - isIndex(key, length) - ))) { - result.push(key); - } - } - return result; - } - - /** - * A specialized version of `_.sample` for arrays. - * - * @private - * @param {Array} array The array to sample. - * @returns {*} Returns the random element. - */ - function arraySample(array) { - var length = array.length; - return length ? array[baseRandom(0, length - 1)] : undefined; - } - - /** - * A specialized version of `_.sampleSize` for arrays. - * - * @private - * @param {Array} array The array to sample. - * @param {number} n The number of elements to sample. - * @returns {Array} Returns the random elements. - */ - function arraySampleSize(array, n) { - return shuffleSelf(copyArray(array), baseClamp(n, 0, array.length)); - } - - /** - * A specialized version of `_.shuffle` for arrays. - * - * @private - * @param {Array} array The array to shuffle. - * @returns {Array} Returns the new shuffled array. - */ - function arrayShuffle(array) { - return shuffleSelf(copyArray(array)); - } - - /** - * This function is like `assignValue` except that it doesn't assign - * `undefined` values. - * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ - function assignMergeValue(object, key, value) { - if ((value !== undefined && !eq(object[key], value)) || - (value === undefined && !(key in object))) { - baseAssignValue(object, key, value); - } - } - - /** - * Assigns `value` to `key` of `object` if the existing value is not equivalent - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. 
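-   * (SameValueZero behaves like `===` except that `NaN` is equal to itself;
-   * e.g. `eq(NaN, NaN)` is `true` even though `NaN === NaN` is `false`.)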
- * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ - function assignValue(object, key, value) { - var objValue = object[key]; - if (!(hasOwnProperty.call(object, key) && eq(objValue, value)) || - (value === undefined && !(key in object))) { - baseAssignValue(object, key, value); - } - } - - /** - * Gets the index at which the `key` is found in `array` of key-value pairs. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} key The key to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function assocIndexOf(array, key) { - var length = array.length; - while (length--) { - if (eq(array[length][0], key)) { - return length; - } - } - return -1; - } - - /** - * Aggregates elements of `collection` on `accumulator` with keys transformed - * by `iteratee` and values set by `setter`. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} setter The function to set `accumulator` values. - * @param {Function} iteratee The iteratee to transform keys. - * @param {Object} accumulator The initial aggregated object. - * @returns {Function} Returns `accumulator`. - */ - function baseAggregator(collection, setter, iteratee, accumulator) { - baseEach(collection, function(value, key, collection) { - setter(accumulator, value, iteratee(value), collection); - }); - return accumulator; - } - - /** - * The base implementation of `_.assign` without support for multiple sources - * or `customizer` functions. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @returns {Object} Returns `object`. - */ - function baseAssign(object, source) { - return object && copyObject(source, keys(source), object); - } - - /** - * The base implementation of `_.assignIn` without support for multiple sources - * or `customizer` functions. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @returns {Object} Returns `object`. - */ - function baseAssignIn(object, source) { - return object && copyObject(source, keysIn(source), object); - } - - /** - * The base implementation of `assignValue` and `assignMergeValue` without - * value checks. - * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ - function baseAssignValue(object, key, value) { - if (key == '__proto__' && defineProperty) { - defineProperty(object, key, { - 'configurable': true, - 'enumerable': true, - 'value': value, - 'writable': true - }); - } else { - object[key] = value; - } - } - - /** - * The base implementation of `_.at` without support for individual paths. - * - * @private - * @param {Object} object The object to iterate over. - * @param {string[]} paths The property paths to pick. - * @returns {Array} Returns the picked elements. - */ - function baseAt(object, paths) { - var index = -1, - length = paths.length, - result = Array(length), - skip = object == null; - - while (++index < length) { - result[index] = skip ? undefined : get(object, paths[index]); - } - return result; - } - - /** - * The base implementation of `_.clamp` which doesn't coerce arguments. - * - * @private - * @param {number} number The number to clamp. - * @param {number} [lower] The lower bound. 
- * @param {number} upper The upper bound. - * @returns {number} Returns the clamped number. - */ - function baseClamp(number, lower, upper) { - if (number === number) { - if (upper !== undefined) { - number = number <= upper ? number : upper; - } - if (lower !== undefined) { - number = number >= lower ? number : lower; - } - } - return number; - } - - /** - * The base implementation of `_.clone` and `_.cloneDeep` which tracks - * traversed objects. - * - * @private - * @param {*} value The value to clone. - * @param {boolean} bitmask The bitmask flags. - * 1 - Deep clone - * 2 - Flatten inherited properties - * 4 - Clone symbols - * @param {Function} [customizer] The function to customize cloning. - * @param {string} [key] The key of `value`. - * @param {Object} [object] The parent object of `value`. - * @param {Object} [stack] Tracks traversed objects and their clone counterparts. - * @returns {*} Returns the cloned value. - */ - function baseClone(value, bitmask, customizer, key, object, stack) { - var result, - isDeep = bitmask & CLONE_DEEP_FLAG, - isFlat = bitmask & CLONE_FLAT_FLAG, - isFull = bitmask & CLONE_SYMBOLS_FLAG; - - if (customizer) { - result = object ? customizer(value, key, object, stack) : customizer(value); - } - if (result !== undefined) { - return result; - } - if (!isObject(value)) { - return value; - } - var isArr = isArray(value); - if (isArr) { - result = initCloneArray(value); - if (!isDeep) { - return copyArray(value, result); - } - } else { - var tag = getTag(value), - isFunc = tag == funcTag || tag == genTag; - - if (isBuffer(value)) { - return cloneBuffer(value, isDeep); - } - if (tag == objectTag || tag == argsTag || (isFunc && !object)) { - result = (isFlat || isFunc) ? {} : initCloneObject(value); - if (!isDeep) { - return isFlat - ? copySymbolsIn(value, baseAssignIn(result, value)) - : copySymbols(value, baseAssign(result, value)); - } - } else { - if (!cloneableTags[tag]) { - return object ? value : {}; - } - result = initCloneByTag(value, tag, isDeep); - } - } - // Check for circular references and return its corresponding clone. - stack || (stack = new Stack); - var stacked = stack.get(value); - if (stacked) { - return stacked; - } - stack.set(value, result); - - if (isSet(value)) { - value.forEach(function(subValue) { - result.add(baseClone(subValue, bitmask, customizer, subValue, value, stack)); - }); - - return result; - } - - if (isMap(value)) { - value.forEach(function(subValue, key) { - result.set(key, baseClone(subValue, bitmask, customizer, key, value, stack)); - }); - - return result; - } - - var keysFunc = isFull - ? (isFlat ? getAllKeysIn : getAllKeys) - : (isFlat ? keysIn : keys); - - var props = isArr ? undefined : keysFunc(value); - arrayEach(props || value, function(subValue, key) { - if (props) { - key = subValue; - subValue = value[key]; - } - // Recursively populate clone (susceptible to call stack limits). - assignValue(result, key, baseClone(subValue, bitmask, customizer, key, value, stack)); - }); - return result; - } - - /** - * The base implementation of `_.conforms` which doesn't clone `source`. - * - * @private - * @param {Object} source The object of property predicates to conform to. - * @returns {Function} Returns the new spec function. - */ - function baseConforms(source) { - var props = keys(source); - return function(object) { - return baseConformsTo(object, source, props); - }; - } - - /** - * The base implementation of `_.conformsTo` which accepts `props` to check. 
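-   * (Illustrative: `baseConformsTo({ 'a': 1, 'b': 2 }, { 'b': function(n) { return n > 1; } }, ['b'])`
-   * is `true` because the predicate holds for the corresponding property.)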
- * - * @private - * @param {Object} object The object to inspect. - * @param {Object} source The object of property predicates to conform to. - * @returns {boolean} Returns `true` if `object` conforms, else `false`. - */ - function baseConformsTo(object, source, props) { - var length = props.length; - if (object == null) { - return !length; - } - object = Object(object); - while (length--) { - var key = props[length], - predicate = source[key], - value = object[key]; - - if ((value === undefined && !(key in object)) || !predicate(value)) { - return false; - } - } - return true; - } - - /** - * The base implementation of `_.delay` and `_.defer` which accepts `args` - * to provide to `func`. - * - * @private - * @param {Function} func The function to delay. - * @param {number} wait The number of milliseconds to delay invocation. - * @param {Array} args The arguments to provide to `func`. - * @returns {number|Object} Returns the timer id or timeout object. - */ - function baseDelay(func, wait, args) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - return setTimeout(function() { func.apply(undefined, args); }, wait); - } - - /** - * The base implementation of methods like `_.difference` without support - * for excluding multiple arrays or iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Array} values The values to exclude. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of filtered values. - */ - function baseDifference(array, values, iteratee, comparator) { - var index = -1, - includes = arrayIncludes, - isCommon = true, - length = array.length, - result = [], - valuesLength = values.length; - - if (!length) { - return result; - } - if (iteratee) { - values = arrayMap(values, baseUnary(iteratee)); - } - if (comparator) { - includes = arrayIncludesWith; - isCommon = false; - } - else if (values.length >= LARGE_ARRAY_SIZE) { - includes = cacheHas; - isCommon = false; - values = new SetCache(values); - } - outer: - while (++index < length) { - var value = array[index], - computed = iteratee == null ? value : iteratee(value); - - value = (comparator || value !== 0) ? value : 0; - if (isCommon && computed === computed) { - var valuesIndex = valuesLength; - while (valuesIndex--) { - if (values[valuesIndex] === computed) { - continue outer; - } - } - result.push(value); - } - else if (!includes(values, computed, comparator)) { - result.push(value); - } - } - return result; - } - - /** - * The base implementation of `_.forEach` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - */ - var baseEach = createBaseEach(baseForOwn); - - /** - * The base implementation of `_.forEachRight` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - */ - var baseEachRight = createBaseEach(baseForOwnRight, true); - - /** - * The base implementation of `_.every` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. 
-   * @param {Function} predicate The function invoked per iteration.
-   * @returns {boolean} Returns `true` if all elements pass the predicate check,
-   *  else `false`.
-   */
-  function baseEvery(collection, predicate) {
-    var result = true;
-    baseEach(collection, function(value, index, collection) {
-      result = !!predicate(value, index, collection);
-      return result;
-    });
-    return result;
-  }
-
-  /**
-   * The base implementation of methods like `_.max` and `_.min` which accepts a
-   * `comparator` to determine the extremum value.
-   *
-   * @private
-   * @param {Array} array The array to iterate over.
-   * @param {Function} iteratee The iteratee invoked per iteration.
-   * @param {Function} comparator The comparator used to compare values.
-   * @returns {*} Returns the extremum value.
-   */
-  function baseExtremum(array, iteratee, comparator) {
-    var index = -1,
-        length = array.length;
-
-    while (++index < length) {
-      var value = array[index],
-          current = iteratee(value);
-
-      if (current != null && (computed === undefined
-            ? (current === current && !isSymbol(current))
-            : comparator(current, computed)
-          )) {
-        var computed = current,
-            result = value;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * The base implementation of `_.fill` without an iteratee call guard.
-   *
-   * @private
-   * @param {Array} array The array to fill.
-   * @param {*} value The value to fill `array` with.
-   * @param {number} [start=0] The start position.
-   * @param {number} [end=array.length] The end position.
-   * @returns {Array} Returns `array`.
-   */
-  function baseFill(array, value, start, end) {
-    var length = array.length;
-
-    start = toInteger(start);
-    if (start < 0) {
-      start = -start > length ? 0 : (length + start);
-    }
-    end = (end === undefined || end > length) ? length : toInteger(end);
-    if (end < 0) {
-      end += length;
-    }
-    end = start > end ? 0 : toLength(end);
-    while (start < end) {
-      array[start++] = value;
-    }
-    return array;
-  }
-
-  /**
-   * The base implementation of `_.filter` without support for iteratee shorthands.
-   *
-   * @private
-   * @param {Array|Object} collection The collection to iterate over.
-   * @param {Function} predicate The function invoked per iteration.
-   * @returns {Array} Returns the new filtered array.
-   */
-  function baseFilter(collection, predicate) {
-    var result = [];
-    baseEach(collection, function(value, index, collection) {
-      if (predicate(value, index, collection)) {
-        result.push(value);
-      }
-    });
-    return result;
-  }
-
-  /**
-   * The base implementation of `_.flatten` with support for restricting flattening.
-   *
-   * @private
-   * @param {Array} array The array to flatten.
-   * @param {number} depth The maximum recursion depth.
-   * @param {Function} [predicate=isFlattenable] The function invoked per iteration.
-   * @param {boolean} [isStrict] Restrict to values that pass `predicate` checks.
-   * @param {Array} [result=[]] The initial result value.
-   * @returns {Array} Returns the new flattened array.
-   */
-  function baseFlatten(array, depth, predicate, isStrict, result) {
-    var index = -1,
-        length = array.length;
-
-    predicate || (predicate = isFlattenable);
-    result || (result = []);
-
-    while (++index < length) {
-      var value = array[index];
-      if (depth > 0 && predicate(value)) {
-        if (depth > 1) {
-          // Recursively flatten arrays (susceptible to call stack limits).
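-          // (For a sense of `depth`: flattening `[1, [2, [3]]]` with a depth of 1
-          // yields `[1, 2, [3]]`, while a depth of 2, or `Infinity`, yields `[1, 2, 3]`.)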
- baseFlatten(value, depth - 1, predicate, isStrict, result); - } else { - arrayPush(result, value); - } - } else if (!isStrict) { - result[result.length] = value; - } - } - return result; - } - - /** - * The base implementation of `baseForOwn` which iterates over `object` - * properties returned by `keysFunc` and invokes `iteratee` for each property. - * Iteratee functions may exit iteration early by explicitly returning `false`. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {Function} keysFunc The function to get the keys of `object`. - * @returns {Object} Returns `object`. - */ - var baseFor = createBaseFor(); - - /** - * This function is like `baseFor` except that it iterates over properties - * in the opposite order. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {Function} keysFunc The function to get the keys of `object`. - * @returns {Object} Returns `object`. - */ - var baseForRight = createBaseFor(true); - - /** - * The base implementation of `_.forOwn` without support for iteratee shorthands. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Object} Returns `object`. - */ - function baseForOwn(object, iteratee) { - return object && baseFor(object, iteratee, keys); - } - - /** - * The base implementation of `_.forOwnRight` without support for iteratee shorthands. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Object} Returns `object`. - */ - function baseForOwnRight(object, iteratee) { - return object && baseForRight(object, iteratee, keys); - } - - /** - * The base implementation of `_.functions` which creates an array of - * `object` function property names filtered from `props`. - * - * @private - * @param {Object} object The object to inspect. - * @param {Array} props The property names to filter. - * @returns {Array} Returns the function names. - */ - function baseFunctions(object, props) { - return arrayFilter(props, function(key) { - return isFunction(object[key]); - }); - } - - /** - * The base implementation of `_.get` without support for default values. - * - * @private - * @param {Object} object The object to query. - * @param {Array|string} path The path of the property to get. - * @returns {*} Returns the resolved value. - */ - function baseGet(object, path) { - path = castPath(path, object); - - var index = 0, - length = path.length; - - while (object != null && index < length) { - object = object[toKey(path[index++])]; - } - return (index && index == length) ? object : undefined; - } - - /** - * The base implementation of `getAllKeys` and `getAllKeysIn` which uses - * `keysFunc` and `symbolsFunc` to get the enumerable property names and - * symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @param {Function} keysFunc The function to get the keys of `object`. - * @param {Function} symbolsFunc The function to get the symbols of `object`. - * @returns {Array} Returns the array of property names and symbols. - */ - function baseGetAllKeys(object, keysFunc, symbolsFunc) { - var result = keysFunc(object); - return isArray(object) ? 
result : arrayPush(result, symbolsFunc(object)); - } - - /** - * The base implementation of `getTag` without fallbacks for buggy environments. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the `toStringTag`. - */ - function baseGetTag(value) { - if (value == null) { - return value === undefined ? undefinedTag : nullTag; - } - return (symToStringTag && symToStringTag in Object(value)) - ? getRawTag(value) - : objectToString(value); - } - - /** - * The base implementation of `_.gt` which doesn't coerce arguments. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is greater than `other`, - * else `false`. - */ - function baseGt(value, other) { - return value > other; - } - - /** - * The base implementation of `_.has` without support for deep paths. - * - * @private - * @param {Object} [object] The object to query. - * @param {Array|string} key The key to check. - * @returns {boolean} Returns `true` if `key` exists, else `false`. - */ - function baseHas(object, key) { - return object != null && hasOwnProperty.call(object, key); - } - - /** - * The base implementation of `_.hasIn` without support for deep paths. - * - * @private - * @param {Object} [object] The object to query. - * @param {Array|string} key The key to check. - * @returns {boolean} Returns `true` if `key` exists, else `false`. - */ - function baseHasIn(object, key) { - return object != null && key in Object(object); - } - - /** - * The base implementation of `_.inRange` which doesn't coerce arguments. - * - * @private - * @param {number} number The number to check. - * @param {number} start The start of the range. - * @param {number} end The end of the range. - * @returns {boolean} Returns `true` if `number` is in the range, else `false`. - */ - function baseInRange(number, start, end) { - return number >= nativeMin(start, end) && number < nativeMax(start, end); - } - - /** - * The base implementation of methods like `_.intersection`, without support - * for iteratee shorthands, that accepts an array of arrays to inspect. - * - * @private - * @param {Array} arrays The arrays to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of shared values. - */ - function baseIntersection(arrays, iteratee, comparator) { - var includes = comparator ? arrayIncludesWith : arrayIncludes, - length = arrays[0].length, - othLength = arrays.length, - othIndex = othLength, - caches = Array(othLength), - maxLength = Infinity, - result = []; - - while (othIndex--) { - var array = arrays[othIndex]; - if (othIndex && iteratee) { - array = arrayMap(array, baseUnary(iteratee)); - } - maxLength = nativeMin(array.length, maxLength); - caches[othIndex] = !comparator && (iteratee || (length >= 120 && array.length >= 120)) - ? new SetCache(othIndex && array) - : undefined; - } - array = arrays[0]; - - var index = -1, - seen = caches[0]; - - outer: - while (++index < length && result.length < maxLength) { - var value = array[index], - computed = iteratee ? iteratee(value) : value; - - value = (comparator || value !== 0) ? value : 0; - if (!(seen - ? cacheHas(seen, computed) - : includes(result, computed, comparator) - )) { - othIndex = othLength; - while (--othIndex) { - var cache = caches[othIndex]; - if (!(cache - ? 
cacheHas(cache, computed)
-                : includes(arrays[othIndex], computed, comparator))
-              ) {
-            continue outer;
-          }
-        }
-        if (seen) {
-          seen.push(computed);
-        }
-        result.push(value);
-      }
-    }
-    return result;
-  }
-
-  /**
-   * The base implementation of `_.invert` and `_.invertBy` which inverts
-   * `object` with values transformed by `iteratee` and set by `setter`.
-   *
-   * @private
-   * @param {Object} object The object to iterate over.
-   * @param {Function} setter The function to set `accumulator` values.
-   * @param {Function} iteratee The iteratee to transform values.
-   * @param {Object} accumulator The initial inverted object.
-   * @returns {Object} Returns `accumulator`.
-   */
-  function baseInverter(object, setter, iteratee, accumulator) {
-    baseForOwn(object, function(value, key, object) {
-      setter(accumulator, iteratee(value), key, object);
-    });
-    return accumulator;
-  }
-
-  /**
-   * The base implementation of `_.invoke` without support for individual
-   * method arguments.
-   *
-   * @private
-   * @param {Object} object The object to query.
-   * @param {Array|string} path The path of the method to invoke.
-   * @param {Array} args The arguments to invoke the method with.
-   * @returns {*} Returns the result of the invoked method.
-   */
-  function baseInvoke(object, path, args) {
-    path = castPath(path, object);
-    object = parent(object, path);
-    var func = object == null ? object : object[toKey(last(path))];
-    return func == null ? undefined : apply(func, object, args);
-  }
-
-  /**
-   * The base implementation of `_.isArguments`.
-   *
-   * @private
-   * @param {*} value The value to check.
-   * @returns {boolean} Returns `true` if `value` is an `arguments` object, else `false`.
-   */
-  function baseIsArguments(value) {
-    return isObjectLike(value) && baseGetTag(value) == argsTag;
-  }
-
-  /**
-   * The base implementation of `_.isArrayBuffer` without Node.js optimizations.
-   *
-   * @private
-   * @param {*} value The value to check.
-   * @returns {boolean} Returns `true` if `value` is an array buffer, else `false`.
-   */
-  function baseIsArrayBuffer(value) {
-    return isObjectLike(value) && baseGetTag(value) == arrayBufferTag;
-  }
-
-  /**
-   * The base implementation of `_.isDate` without Node.js optimizations.
-   *
-   * @private
-   * @param {*} value The value to check.
-   * @returns {boolean} Returns `true` if `value` is a date object, else `false`.
-   */
-  function baseIsDate(value) {
-    return isObjectLike(value) && baseGetTag(value) == dateTag;
-  }
-
-  /**
-   * The base implementation of `_.isEqual` which supports partial comparisons
-   * and tracks traversed objects.
-   *
-   * @private
-   * @param {*} value The value to compare.
-   * @param {*} other The other value to compare.
-   * @param {number} bitmask The bitmask flags.
-   *  1 - Unordered comparison
-   *  2 - Partial comparison
-   * @param {Function} [customizer] The function to customize comparisons.
-   * @param {Object} [stack] Tracks traversed `value` and `other` objects.
-   * @returns {boolean} Returns `true` if the values are equivalent, else `false`.
-   */
-  function baseIsEqual(value, other, bitmask, customizer, stack) {
-    if (value === other) {
-      return true;
-    }
-    if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) {
-      return value !== value && other !== other;
-    }
-    return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack);
-  }
-
-  /**
-   * A specialized version of `baseIsEqual` for arrays and objects which performs
-   * deep comparisons and tracks traversed objects enabling objects with circular
-   * references to be compared.
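-   * (Each visited pair is recorded on `stack`, so self-referential structures,
-   * e.g. comparing `a.self = a` with `b.self = b`, terminate instead of
-   * recursing forever.)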
- * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} [stack] Tracks traversed `object` and `other` objects. - * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. - */ - function baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) { - var objIsArr = isArray(object), - othIsArr = isArray(other), - objTag = objIsArr ? arrayTag : getTag(object), - othTag = othIsArr ? arrayTag : getTag(other); - - objTag = objTag == argsTag ? objectTag : objTag; - othTag = othTag == argsTag ? objectTag : othTag; - - var objIsObj = objTag == objectTag, - othIsObj = othTag == objectTag, - isSameTag = objTag == othTag; - - if (isSameTag && isBuffer(object)) { - if (!isBuffer(other)) { - return false; - } - objIsArr = true; - objIsObj = false; - } - if (isSameTag && !objIsObj) { - stack || (stack = new Stack); - return (objIsArr || isTypedArray(object)) - ? equalArrays(object, other, bitmask, customizer, equalFunc, stack) - : equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack); - } - if (!(bitmask & COMPARE_PARTIAL_FLAG)) { - var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'), - othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__'); - - if (objIsWrapped || othIsWrapped) { - var objUnwrapped = objIsWrapped ? object.value() : object, - othUnwrapped = othIsWrapped ? other.value() : other; - - stack || (stack = new Stack); - return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack); - } - } - if (!isSameTag) { - return false; - } - stack || (stack = new Stack); - return equalObjects(object, other, bitmask, customizer, equalFunc, stack); - } - - /** - * The base implementation of `_.isMap` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a map, else `false`. - */ - function baseIsMap(value) { - return isObjectLike(value) && getTag(value) == mapTag; - } - - /** - * The base implementation of `_.isMatch` without support for iteratee shorthands. - * - * @private - * @param {Object} object The object to inspect. - * @param {Object} source The object of property values to match. - * @param {Array} matchData The property names, values, and compare flags to match. - * @param {Function} [customizer] The function to customize comparisons. - * @returns {boolean} Returns `true` if `object` is a match, else `false`. - */ - function baseIsMatch(object, source, matchData, customizer) { - var index = matchData.length, - length = index, - noCustomizer = !customizer; - - if (object == null) { - return !length; - } - object = Object(object); - while (index--) { - var data = matchData[index]; - if ((noCustomizer && data[2]) - ? data[1] !== object[data[0]] - : !(data[0] in object) - ) { - return false; - } - } - while (++index < length) { - data = matchData[index]; - var key = data[0], - objValue = object[key], - srcValue = data[1]; - - if (noCustomizer && data[2]) { - if (objValue === undefined && !(key in object)) { - return false; - } - } else { - var stack = new Stack; - if (customizer) { - var result = customizer(objValue, srcValue, key, object, source, stack); - } - if (!(result === undefined - ? 
baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG, customizer, stack) - : result - )) { - return false; - } - } - } - return true; - } - - /** - * The base implementation of `_.isNative` without bad shim checks. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a native function, - * else `false`. - */ - function baseIsNative(value) { - if (!isObject(value) || isMasked(value)) { - return false; - } - var pattern = isFunction(value) ? reIsNative : reIsHostCtor; - return pattern.test(toSource(value)); - } - - /** - * The base implementation of `_.isRegExp` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a regexp, else `false`. - */ - function baseIsRegExp(value) { - return isObjectLike(value) && baseGetTag(value) == regexpTag; - } - - /** - * The base implementation of `_.isSet` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a set, else `false`. - */ - function baseIsSet(value) { - return isObjectLike(value) && getTag(value) == setTag; - } - - /** - * The base implementation of `_.isTypedArray` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. - */ - function baseIsTypedArray(value) { - return isObjectLike(value) && - isLength(value.length) && !!typedArrayTags[baseGetTag(value)]; - } - - /** - * The base implementation of `_.iteratee`. - * - * @private - * @param {*} [value=_.identity] The value to convert to an iteratee. - * @returns {Function} Returns the iteratee. - */ - function baseIteratee(value) { - // Don't store the `typeof` result in a variable to avoid a JIT bug in Safari 9. - // See https://bugs.webkit.org/show_bug.cgi?id=156034 for more details. - if (typeof value == 'function') { - return value; - } - if (value == null) { - return identity; - } - if (typeof value == 'object') { - return isArray(value) - ? baseMatchesProperty(value[0], value[1]) - : baseMatches(value); - } - return property(value); - } - - /** - * The base implementation of `_.keys` which doesn't treat sparse arrays as dense. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - */ - function baseKeys(object) { - if (!isPrototype(object)) { - return nativeKeys(object); - } - var result = []; - for (var key in Object(object)) { - if (hasOwnProperty.call(object, key) && key != 'constructor') { - result.push(key); - } - } - return result; - } - - /** - * The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - */ - function baseKeysIn(object) { - if (!isObject(object)) { - return nativeKeysIn(object); - } - var isProto = isPrototype(object), - result = []; - - for (var key in object) { - if (!(key == 'constructor' && (isProto || !hasOwnProperty.call(object, key)))) { - result.push(key); - } - } - return result; - } - - /** - * The base implementation of `_.lt` which doesn't coerce arguments. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is less than `other`, - * else `false`. 
- */ - function baseLt(value, other) { - return value < other; - } - - /** - * The base implementation of `_.map` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - */ - function baseMap(collection, iteratee) { - var index = -1, - result = isArrayLike(collection) ? Array(collection.length) : []; - - baseEach(collection, function(value, key, collection) { - result[++index] = iteratee(value, key, collection); - }); - return result; - } - - /** - * The base implementation of `_.matches` which doesn't clone `source`. - * - * @private - * @param {Object} source The object of property values to match. - * @returns {Function} Returns the new spec function. - */ - function baseMatches(source) { - var matchData = getMatchData(source); - if (matchData.length == 1 && matchData[0][2]) { - return matchesStrictComparable(matchData[0][0], matchData[0][1]); - } - return function(object) { - return object === source || baseIsMatch(object, source, matchData); - }; - } - - /** - * The base implementation of `_.matchesProperty` which doesn't clone `srcValue`. - * - * @private - * @param {string} path The path of the property to get. - * @param {*} srcValue The value to match. - * @returns {Function} Returns the new spec function. - */ - function baseMatchesProperty(path, srcValue) { - if (isKey(path) && isStrictComparable(srcValue)) { - return matchesStrictComparable(toKey(path), srcValue); - } - return function(object) { - var objValue = get(object, path); - return (objValue === undefined && objValue === srcValue) - ? hasIn(object, path) - : baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG); - }; - } - - /** - * The base implementation of `_.merge` without support for multiple sources. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @param {number} srcIndex The index of `source`. - * @param {Function} [customizer] The function to customize merged values. - * @param {Object} [stack] Tracks traversed source values and their merged - * counterparts. - */ - function baseMerge(object, source, srcIndex, customizer, stack) { - if (object === source) { - return; - } - baseFor(source, function(srcValue, key) { - if (isObject(srcValue)) { - stack || (stack = new Stack); - baseMergeDeep(object, source, key, srcIndex, baseMerge, customizer, stack); - } - else { - var newValue = customizer - ? customizer(safeGet(object, key), srcValue, (key + ''), object, source, stack) - : undefined; - - if (newValue === undefined) { - newValue = srcValue; - } - assignMergeValue(object, key, newValue); - } - }, keysIn); - } - - /** - * A specialized version of `baseMerge` for arrays and objects which performs - * deep merges and tracks traversed objects enabling objects with circular - * references to be merged. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @param {string} key The key of the value to merge. - * @param {number} srcIndex The index of `source`. - * @param {Function} mergeFunc The function to merge values. - * @param {Function} [customizer] The function to customize assigned values. - * @param {Object} [stack] Tracks traversed source values and their merged - * counterparts. 
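-   * (Broadly: plain objects, arrays, buffers, and typed arrays from `source`
-   * are merged or cloned recursively, other values overwrite, and an
-   * `undefined` source value never clobbers an existing one.)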
- */ - function baseMergeDeep(object, source, key, srcIndex, mergeFunc, customizer, stack) { - var objValue = safeGet(object, key), - srcValue = safeGet(source, key), - stacked = stack.get(srcValue); - - if (stacked) { - assignMergeValue(object, key, stacked); - return; - } - var newValue = customizer - ? customizer(objValue, srcValue, (key + ''), object, source, stack) - : undefined; - - var isCommon = newValue === undefined; - - if (isCommon) { - var isArr = isArray(srcValue), - isBuff = !isArr && isBuffer(srcValue), - isTyped = !isArr && !isBuff && isTypedArray(srcValue); - - newValue = srcValue; - if (isArr || isBuff || isTyped) { - if (isArray(objValue)) { - newValue = objValue; - } - else if (isArrayLikeObject(objValue)) { - newValue = copyArray(objValue); - } - else if (isBuff) { - isCommon = false; - newValue = cloneBuffer(srcValue, true); - } - else if (isTyped) { - isCommon = false; - newValue = cloneTypedArray(srcValue, true); - } - else { - newValue = []; - } - } - else if (isPlainObject(srcValue) || isArguments(srcValue)) { - newValue = objValue; - if (isArguments(objValue)) { - newValue = toPlainObject(objValue); - } - else if (!isObject(objValue) || (srcIndex && isFunction(objValue))) { - newValue = initCloneObject(srcValue); - } - } - else { - isCommon = false; - } - } - if (isCommon) { - // Recursively merge objects and arrays (susceptible to call stack limits). - stack.set(srcValue, newValue); - mergeFunc(newValue, srcValue, srcIndex, customizer, stack); - stack['delete'](srcValue); - } - assignMergeValue(object, key, newValue); - } - - /** - * The base implementation of `_.nth` which doesn't coerce arguments. - * - * @private - * @param {Array} array The array to query. - * @param {number} n The index of the element to return. - * @returns {*} Returns the nth element of `array`. - */ - function baseNth(array, n) { - var length = array.length; - if (!length) { - return; - } - n += n < 0 ? length : 0; - return isIndex(n, length) ? array[n] : undefined; - } - - /** - * The base implementation of `_.orderBy` without param guards. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function[]|Object[]|string[]} iteratees The iteratees to sort by. - * @param {string[]} orders The sort orders of `iteratees`. - * @returns {Array} Returns the new sorted array. - */ - function baseOrderBy(collection, iteratees, orders) { - var index = -1; - iteratees = arrayMap(iteratees.length ? iteratees : [identity], baseUnary(getIteratee())); - - var result = baseMap(collection, function(value, key, collection) { - var criteria = arrayMap(iteratees, function(iteratee) { - return iteratee(value); - }); - return { 'criteria': criteria, 'index': ++index, 'value': value }; - }); - - return baseSortBy(result, function(object, other) { - return compareMultiple(object, other, orders); - }); - } - - /** - * The base implementation of `_.pick` without support for individual - * property identifiers. - * - * @private - * @param {Object} object The source object. - * @param {string[]} paths The property paths to pick. - * @returns {Object} Returns the new object. - */ - function basePick(object, paths) { - return basePickBy(object, paths, function(value, path) { - return hasIn(object, path); - }); - } - - /** - * The base implementation of `_.pickBy` without support for iteratee shorthands. - * - * @private - * @param {Object} object The source object. - * @param {string[]} paths The property paths to pick. 
- * @param {Function} predicate The function invoked per property. - * @returns {Object} Returns the new object. - */ - function basePickBy(object, paths, predicate) { - var index = -1, - length = paths.length, - result = {}; - - while (++index < length) { - var path = paths[index], - value = baseGet(object, path); - - if (predicate(value, path)) { - baseSet(result, castPath(path, object), value); - } - } - return result; - } - - /** - * A specialized version of `baseProperty` which supports deep paths. - * - * @private - * @param {Array|string} path The path of the property to get. - * @returns {Function} Returns the new accessor function. - */ - function basePropertyDeep(path) { - return function(object) { - return baseGet(object, path); - }; - } - - /** - * The base implementation of `_.pullAllBy` without support for iteratee - * shorthands. - * - * @private - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns `array`. - */ - function basePullAll(array, values, iteratee, comparator) { - var indexOf = comparator ? baseIndexOfWith : baseIndexOf, - index = -1, - length = values.length, - seen = array; - - if (array === values) { - values = copyArray(values); - } - if (iteratee) { - seen = arrayMap(array, baseUnary(iteratee)); - } - while (++index < length) { - var fromIndex = 0, - value = values[index], - computed = iteratee ? iteratee(value) : value; - - while ((fromIndex = indexOf(seen, computed, fromIndex, comparator)) > -1) { - if (seen !== array) { - splice.call(seen, fromIndex, 1); - } - splice.call(array, fromIndex, 1); - } - } - return array; - } - - /** - * The base implementation of `_.pullAt` without support for individual - * indexes or capturing the removed elements. - * - * @private - * @param {Array} array The array to modify. - * @param {number[]} indexes The indexes of elements to remove. - * @returns {Array} Returns `array`. - */ - function basePullAt(array, indexes) { - var length = array ? indexes.length : 0, - lastIndex = length - 1; - - while (length--) { - var index = indexes[length]; - if (length == lastIndex || index !== previous) { - var previous = index; - if (isIndex(index)) { - splice.call(array, index, 1); - } else { - baseUnset(array, index); - } - } - } - return array; - } - - /** - * The base implementation of `_.random` without support for returning - * floating-point numbers. - * - * @private - * @param {number} lower The lower bound. - * @param {number} upper The upper bound. - * @returns {number} Returns the random number. - */ - function baseRandom(lower, upper) { - return lower + nativeFloor(nativeRandom() * (upper - lower + 1)); - } - - /** - * The base implementation of `_.range` and `_.rangeRight` which doesn't - * coerce arguments. - * - * @private - * @param {number} start The start of the range. - * @param {number} end The end of the range. - * @param {number} step The value to increment or decrement by. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Array} Returns the range of numbers. - */ - function baseRange(start, end, step, fromRight) { - var index = -1, - length = nativeMax(nativeCeil((end - start) / (step || 1)), 0), - result = Array(length); - - while (length--) { - result[fromRight ? 
length : ++index] = start; - start += step; - } - return result; - } - - /** - * The base implementation of `_.repeat` which doesn't coerce arguments. - * - * @private - * @param {string} string The string to repeat. - * @param {number} n The number of times to repeat the string. - * @returns {string} Returns the repeated string. - */ - function baseRepeat(string, n) { - var result = ''; - if (!string || n < 1 || n > MAX_SAFE_INTEGER) { - return result; - } - // Leverage the exponentiation by squaring algorithm for a faster repeat. - // See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more details. - do { - if (n % 2) { - result += string; - } - n = nativeFloor(n / 2); - if (n) { - string += string; - } - } while (n); - - return result; - } - - /** - * The base implementation of `_.rest` which doesn't validate or coerce arguments. - * - * @private - * @param {Function} func The function to apply a rest parameter to. - * @param {number} [start=func.length-1] The start position of the rest parameter. - * @returns {Function} Returns the new function. - */ - function baseRest(func, start) { - return setToString(overRest(func, start, identity), func + ''); - } - - /** - * The base implementation of `_.sample`. - * - * @private - * @param {Array|Object} collection The collection to sample. - * @returns {*} Returns the random element. - */ - function baseSample(collection) { - return arraySample(values(collection)); - } - - /** - * The base implementation of `_.sampleSize` without param guards. - * - * @private - * @param {Array|Object} collection The collection to sample. - * @param {number} n The number of elements to sample. - * @returns {Array} Returns the random elements. - */ - function baseSampleSize(collection, n) { - var array = values(collection); - return shuffleSelf(array, baseClamp(n, 0, array.length)); - } - - /** - * The base implementation of `_.set`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @param {Function} [customizer] The function to customize path creation. - * @returns {Object} Returns `object`. - */ - function baseSet(object, path, value, customizer) { - if (!isObject(object)) { - return object; - } - path = castPath(path, object); - - var index = -1, - length = path.length, - lastIndex = length - 1, - nested = object; - - while (nested != null && ++index < length) { - var key = toKey(path[index]), - newValue = value; - - if (index != lastIndex) { - var objValue = nested[key]; - newValue = customizer ? customizer(objValue, key, nested) : undefined; - if (newValue === undefined) { - newValue = isObject(objValue) - ? objValue - : (isIndex(path[index + 1]) ? [] : {}); - } - } - assignValue(nested, key, newValue); - nested = nested[key]; - } - return object; - } - - /** - * The base implementation of `setData` without support for hot loop shorting. - * - * @private - * @param {Function} func The function to associate metadata with. - * @param {*} data The metadata. - * @returns {Function} Returns `func`. - */ - var baseSetData = !metaMap ? identity : function(func, data) { - metaMap.set(func, data); - return func; - }; - - /** - * The base implementation of `setToString` without support for hot loop shorting. - * - * @private - * @param {Function} func The function to modify. - * @param {Function} string The `toString` result. - * @returns {Function} Returns `func`. - */ - var baseSetToString = !defineProperty ? 
identity : function(func, string) { - return defineProperty(func, 'toString', { - 'configurable': true, - 'enumerable': false, - 'value': constant(string), - 'writable': true - }); - }; - - /** - * The base implementation of `_.shuffle`. - * - * @private - * @param {Array|Object} collection The collection to shuffle. - * @returns {Array} Returns the new shuffled array. - */ - function baseShuffle(collection) { - return shuffleSelf(values(collection)); - } - - /** - * The base implementation of `_.slice` without an iteratee call guard. - * - * @private - * @param {Array} array The array to slice. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the slice of `array`. - */ - function baseSlice(array, start, end) { - var index = -1, - length = array.length; - - if (start < 0) { - start = -start > length ? 0 : (length + start); - } - end = end > length ? length : end; - if (end < 0) { - end += length; - } - length = start > end ? 0 : ((end - start) >>> 0); - start >>>= 0; - - var result = Array(length); - while (++index < length) { - result[index] = array[index + start]; - } - return result; - } - - /** - * The base implementation of `_.some` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if any element passes the predicate check, - * else `false`. - */ - function baseSome(collection, predicate) { - var result; - - baseEach(collection, function(value, index, collection) { - result = predicate(value, index, collection); - return !result; - }); - return !!result; - } - - /** - * The base implementation of `_.sortedIndex` and `_.sortedLastIndex` which - * performs a binary search of `array` to determine the index at which `value` - * should be inserted into `array` in order to maintain its sort order. - * - * @private - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {boolean} [retHighest] Specify returning the highest qualified index. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - */ - function baseSortedIndex(array, value, retHighest) { - var low = 0, - high = array == null ? low : array.length; - - if (typeof value == 'number' && value === value && high <= HALF_MAX_ARRAY_LENGTH) { - while (low < high) { - var mid = (low + high) >>> 1, - computed = array[mid]; - - if (computed !== null && !isSymbol(computed) && - (retHighest ? (computed <= value) : (computed < value))) { - low = mid + 1; - } else { - high = mid; - } - } - return high; - } - return baseSortedIndexBy(array, value, identity, retHighest); - } - - /** - * The base implementation of `_.sortedIndexBy` and `_.sortedLastIndexBy` - * which invokes `iteratee` for `value` and each element of `array` to compute - * their sort ranking. The iteratee is invoked with one argument; (value). - * - * @private - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {Function} iteratee The iteratee invoked per element. - * @param {boolean} [retHighest] Specify returning the highest qualified index. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. 
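 - * @example
 - * // Illustrative sketch, not part of the original source: with a plain
 - * // identity iteratee, the lowest insertion index that keeps the array
 - * // sorted is returned.
 - * // baseSortedIndexBy([30, 50], 40, function(n) { return n; });
 - * // => 1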
- */ - function baseSortedIndexBy(array, value, iteratee, retHighest) { - value = iteratee(value); - - var low = 0, - high = array == null ? 0 : array.length, - valIsNaN = value !== value, - valIsNull = value === null, - valIsSymbol = isSymbol(value), - valIsUndefined = value === undefined; - - while (low < high) { - var mid = nativeFloor((low + high) / 2), - computed = iteratee(array[mid]), - othIsDefined = computed !== undefined, - othIsNull = computed === null, - othIsReflexive = computed === computed, - othIsSymbol = isSymbol(computed); - - if (valIsNaN) { - var setLow = retHighest || othIsReflexive; - } else if (valIsUndefined) { - setLow = othIsReflexive && (retHighest || othIsDefined); - } else if (valIsNull) { - setLow = othIsReflexive && othIsDefined && (retHighest || !othIsNull); - } else if (valIsSymbol) { - setLow = othIsReflexive && othIsDefined && !othIsNull && (retHighest || !othIsSymbol); - } else if (othIsNull || othIsSymbol) { - setLow = false; - } else { - setLow = retHighest ? (computed <= value) : (computed < value); - } - if (setLow) { - low = mid + 1; - } else { - high = mid; - } - } - return nativeMin(high, MAX_ARRAY_INDEX); - } - - /** - * The base implementation of `_.sortedUniq` and `_.sortedUniqBy` without - * support for iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @returns {Array} Returns the new duplicate free array. - */ - function baseSortedUniq(array, iteratee) { - var index = -1, - length = array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index], - computed = iteratee ? iteratee(value) : value; - - if (!index || !eq(computed, seen)) { - var seen = computed; - result[resIndex++] = value === 0 ? 0 : value; - } - } - return result; - } - - /** - * The base implementation of `_.toNumber` which doesn't ensure correct - * conversions of binary, hexadecimal, or octal string values. - * - * @private - * @param {*} value The value to process. - * @returns {number} Returns the number. - */ - function baseToNumber(value) { - if (typeof value == 'number') { - return value; - } - if (isSymbol(value)) { - return NAN; - } - return +value; - } - - /** - * The base implementation of `_.toString` which doesn't convert nullish - * values to empty strings. - * - * @private - * @param {*} value The value to process. - * @returns {string} Returns the string. - */ - function baseToString(value) { - // Exit early for strings to avoid a performance hit in some environments. - if (typeof value == 'string') { - return value; - } - if (isArray(value)) { - // Recursively convert values (susceptible to call stack limits). - return arrayMap(value, baseToString) + ''; - } - if (isSymbol(value)) { - return symbolToString ? symbolToString.call(value) : ''; - } - var result = (value + ''); - return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; - } - - /** - * The base implementation of `_.uniqBy` without support for iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new duplicate free array. 
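 - * @example
 - * // Illustrative sketch, not part of the original source: with no
 - * // iteratee or comparator, the first occurrence of each value is kept.
 - * // baseUniq([2, 1, 2]);
 - * // => [2, 1]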
- */ - function baseUniq(array, iteratee, comparator) { - var index = -1, - includes = arrayIncludes, - length = array.length, - isCommon = true, - result = [], - seen = result; - - if (comparator) { - isCommon = false; - includes = arrayIncludesWith; - } - else if (length >= LARGE_ARRAY_SIZE) { - var set = iteratee ? null : createSet(array); - if (set) { - return setToArray(set); - } - isCommon = false; - includes = cacheHas; - seen = new SetCache; - } - else { - seen = iteratee ? [] : result; - } - outer: - while (++index < length) { - var value = array[index], - computed = iteratee ? iteratee(value) : value; - - value = (comparator || value !== 0) ? value : 0; - if (isCommon && computed === computed) { - var seenIndex = seen.length; - while (seenIndex--) { - if (seen[seenIndex] === computed) { - continue outer; - } - } - if (iteratee) { - seen.push(computed); - } - result.push(value); - } - else if (!includes(seen, computed, comparator)) { - if (seen !== result) { - seen.push(computed); - } - result.push(value); - } - } - return result; - } - - /** - * The base implementation of `_.unset`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The property path to unset. - * @returns {boolean} Returns `true` if the property is deleted, else `false`. - */ - function baseUnset(object, path) { - path = castPath(path, object); - object = parent(object, path); - return object == null || delete object[toKey(last(path))]; - } - - /** - * The base implementation of `_.update`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to update. - * @param {Function} updater The function to produce the updated value. - * @param {Function} [customizer] The function to customize path creation. - * @returns {Object} Returns `object`. - */ - function baseUpdate(object, path, updater, customizer) { - return baseSet(object, path, updater(baseGet(object, path)), customizer); - } - - /** - * The base implementation of methods like `_.dropWhile` and `_.takeWhile` - * without support for iteratee shorthands. - * - * @private - * @param {Array} array The array to query. - * @param {Function} predicate The function invoked per iteration. - * @param {boolean} [isDrop] Specify dropping elements instead of taking them. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Array} Returns the slice of `array`. - */ - function baseWhile(array, predicate, isDrop, fromRight) { - var length = array.length, - index = fromRight ? length : -1; - - while ((fromRight ? index-- : ++index < length) && - predicate(array[index], index, array)) {} - - return isDrop - ? baseSlice(array, (fromRight ? 0 : index), (fromRight ? index + 1 : length)) - : baseSlice(array, (fromRight ? index + 1 : 0), (fromRight ? length : index)); - } - - /** - * The base implementation of `wrapperValue` which returns the result of - * performing a sequence of actions on the unwrapped `value`, where each - * successive action is supplied the return value of the previous. - * - * @private - * @param {*} value The unwrapped value. - * @param {Array} actions Actions to perform to resolve the unwrapped value. - * @returns {*} Returns the resolved value. 
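 - * @example
 - * // Illustrative sketch, not part of the original source: each action is
 - * // a `{ func, thisArg, args }` record and receives the previous result
 - * // as its first argument.
 - * // baseWrapperValue([1, 2, 3], [{
 - * //   'func': function(array) { return array.slice(1); },
 - * //   'thisArg': undefined,
 - * //   'args': []
 - * // }]);
 - * // => [2, 3]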
- */ - function baseWrapperValue(value, actions) { - var result = value; - if (result instanceof LazyWrapper) { - result = result.value(); - } - return arrayReduce(actions, function(result, action) { - return action.func.apply(action.thisArg, arrayPush([result], action.args)); - }, result); - } - - /** - * The base implementation of methods like `_.xor`, without support for - * iteratee shorthands, that accepts an array of arrays to inspect. - * - * @private - * @param {Array} arrays The arrays to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of values. - */ - function baseXor(arrays, iteratee, comparator) { - var length = arrays.length; - if (length < 2) { - return length ? baseUniq(arrays[0]) : []; - } - var index = -1, - result = Array(length); - - while (++index < length) { - var array = arrays[index], - othIndex = -1; - - while (++othIndex < length) { - if (othIndex != index) { - result[index] = baseDifference(result[index] || array, arrays[othIndex], iteratee, comparator); - } - } - } - return baseUniq(baseFlatten(result, 1), iteratee, comparator); - } - - /** - * The base implementation of `_.zipObject` which assigns values using `assignFunc`. - * - * @private - * @param {Array} props The property identifiers. - * @param {Array} values The property values. - * @param {Function} assignFunc The function to assign values. - * @returns {Object} Returns the new object. - */ - function baseZipObject(props, values, assignFunc) { - var index = -1, - length = props.length, - valsLength = values.length, - result = {}; - - while (++index < length) { - var value = index < valsLength ? values[index] : undefined; - assignFunc(result, props[index], value); - } - return result; - } - - /** - * Casts `value` to an empty array if it's not an array-like object. - * - * @private - * @param {*} value The value to inspect. - * @returns {Array|Object} Returns the cast array-like object. - */ - function castArrayLikeObject(value) { - return isArrayLikeObject(value) ? value : []; - } - - /** - * Casts `value` to `identity` if it's not a function. - * - * @private - * @param {*} value The value to inspect. - * @returns {Function} Returns the cast function. - */ - function castFunction(value) { - return typeof value == 'function' ? value : identity; - } - - /** - * Casts `value` to a path array if it's not one. - * - * @private - * @param {*} value The value to inspect. - * @param {Object} [object] The object to query keys on. - * @returns {Array} Returns the cast property path array. - */ - function castPath(value, object) { - if (isArray(value)) { - return value; - } - return isKey(value, object) ? [value] : stringToPath(toString(value)); - } - - /** - * A `baseRest` alias which can be replaced with `identity` by module - * replacement plugins. - * - * @private - * @type {Function} - * @param {Function} func The function to apply a rest parameter to. - * @returns {Function} Returns the new function. - */ - var castRest = baseRest; - - /** - * Casts `array` to a slice if it's needed. - * - * @private - * @param {Array} array The array to inspect. - * @param {number} start The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the cast slice. - */ - function castSlice(array, start, end) { - var length = array.length; - end = end === undefined ? length : end; - return (!start && end >= length) ? 
array : baseSlice(array, start, end); - } - - /** - * A simple wrapper around the global [`clearTimeout`](https://mdn.io/clearTimeout). - * - * @private - * @param {number|Object} id The timer id or timeout object of the timer to clear. - */ - var clearTimeout = ctxClearTimeout || function(id) { - return root.clearTimeout(id); - }; - - /** - * Creates a clone of `buffer`. - * - * @private - * @param {Buffer} buffer The buffer to clone. - * @param {boolean} [isDeep] Specify a deep clone. - * @returns {Buffer} Returns the cloned buffer. - */ - function cloneBuffer(buffer, isDeep) { - if (isDeep) { - return buffer.slice(); - } - var length = buffer.length, - result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length); - - buffer.copy(result); - return result; - } - - /** - * Creates a clone of `arrayBuffer`. - * - * @private - * @param {ArrayBuffer} arrayBuffer The array buffer to clone. - * @returns {ArrayBuffer} Returns the cloned array buffer. - */ - function cloneArrayBuffer(arrayBuffer) { - var result = new arrayBuffer.constructor(arrayBuffer.byteLength); - new Uint8Array(result).set(new Uint8Array(arrayBuffer)); - return result; - } - - /** - * Creates a clone of `dataView`. - * - * @private - * @param {Object} dataView The data view to clone. - * @param {boolean} [isDeep] Specify a deep clone. - * @returns {Object} Returns the cloned data view. - */ - function cloneDataView(dataView, isDeep) { - var buffer = isDeep ? cloneArrayBuffer(dataView.buffer) : dataView.buffer; - return new dataView.constructor(buffer, dataView.byteOffset, dataView.byteLength); - } - - /** - * Creates a clone of `regexp`. - * - * @private - * @param {Object} regexp The regexp to clone. - * @returns {Object} Returns the cloned regexp. - */ - function cloneRegExp(regexp) { - var result = new regexp.constructor(regexp.source, reFlags.exec(regexp)); - result.lastIndex = regexp.lastIndex; - return result; - } - - /** - * Creates a clone of the `symbol` object. - * - * @private - * @param {Object} symbol The symbol object to clone. - * @returns {Object} Returns the cloned symbol object. - */ - function cloneSymbol(symbol) { - return symbolValueOf ? Object(symbolValueOf.call(symbol)) : {}; - } - - /** - * Creates a clone of `typedArray`. - * - * @private - * @param {Object} typedArray The typed array to clone. - * @param {boolean} [isDeep] Specify a deep clone. - * @returns {Object} Returns the cloned typed array. - */ - function cloneTypedArray(typedArray, isDeep) { - var buffer = isDeep ? cloneArrayBuffer(typedArray.buffer) : typedArray.buffer; - return new typedArray.constructor(buffer, typedArray.byteOffset, typedArray.length); - } - - /** - * Compares values to sort them in ascending order. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {number} Returns the sort order indicator for `value`. 
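 - * @example
 - * // Illustrative sketch, not part of the original source:
 - * // compareAscending(1, 2);         // => -1
 - * // compareAscending(2, 1);         // => 1
 - * // compareAscending(undefined, 1); // => 1 (`undefined` sorts to the end)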
- */ - function compareAscending(value, other) { - if (value !== other) { - var valIsDefined = value !== undefined, - valIsNull = value === null, - valIsReflexive = value === value, - valIsSymbol = isSymbol(value); - - var othIsDefined = other !== undefined, - othIsNull = other === null, - othIsReflexive = other === other, - othIsSymbol = isSymbol(other); - - if ((!othIsNull && !othIsSymbol && !valIsSymbol && value > other) || - (valIsSymbol && othIsDefined && othIsReflexive && !othIsNull && !othIsSymbol) || - (valIsNull && othIsDefined && othIsReflexive) || - (!valIsDefined && othIsReflexive) || - !valIsReflexive) { - return 1; - } - if ((!valIsNull && !valIsSymbol && !othIsSymbol && value < other) || - (othIsSymbol && valIsDefined && valIsReflexive && !valIsNull && !valIsSymbol) || - (othIsNull && valIsDefined && valIsReflexive) || - (!othIsDefined && valIsReflexive) || - !othIsReflexive) { - return -1; - } - } - return 0; - } - - /** - * Used by `_.orderBy` to compare multiple properties of a value to another - * and stable sort them. - * - * If `orders` is unspecified, all values are sorted in ascending order. Otherwise, - * specify an order of "desc" for descending or "asc" for ascending sort order - * of corresponding values. - * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {boolean[]|string[]} orders The order to sort by for each property. - * @returns {number} Returns the sort order indicator for `object`. - */ - function compareMultiple(object, other, orders) { - var index = -1, - objCriteria = object.criteria, - othCriteria = other.criteria, - length = objCriteria.length, - ordersLength = orders.length; - - while (++index < length) { - var result = compareAscending(objCriteria[index], othCriteria[index]); - if (result) { - if (index >= ordersLength) { - return result; - } - var order = orders[index]; - return result * (order == 'desc' ? -1 : 1); - } - } - // Fixes an `Array#sort` bug in the JS engine embedded in Adobe applications - // that causes it, under certain circumstances, to provide the same value for - // `object` and `other`. See https://github.com/jashkenas/underscore/pull/1247 - // for more details. - // - // This also ensures a stable sort in V8 and other engines. - // See https://bugs.chromium.org/p/v8/issues/detail?id=90 for more details. - return object.index - other.index; - } - - /** - * Creates an array that is the composition of partially applied arguments, - * placeholders, and provided arguments into a single array of arguments. - * - * @private - * @param {Array} args The provided arguments. - * @param {Array} partials The arguments to prepend to those provided. - * @param {Array} holders The `partials` placeholder indexes. - * @param {boolean} [isCurried] Specify composing for a curried function. - * @returns {Array} Returns the new array of composed arguments. 
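 - * @example
 - * // Illustrative sketch, not part of the original source; `ph` stands in
 - * // for the wrapper's placeholder value:
 - * // composeArgs(['a', 'b'], [1, ph], [1]);
 - * // => [1, 'a', 'b']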
- */ - function composeArgs(args, partials, holders, isCurried) { - var argsIndex = -1, - argsLength = args.length, - holdersLength = holders.length, - leftIndex = -1, - leftLength = partials.length, - rangeLength = nativeMax(argsLength - holdersLength, 0), - result = Array(leftLength + rangeLength), - isUncurried = !isCurried; - - while (++leftIndex < leftLength) { - result[leftIndex] = partials[leftIndex]; - } - while (++argsIndex < holdersLength) { - if (isUncurried || argsIndex < argsLength) { - result[holders[argsIndex]] = args[argsIndex]; - } - } - while (rangeLength--) { - result[leftIndex++] = args[argsIndex++]; - } - return result; - } - - /** - * This function is like `composeArgs` except that the arguments composition - * is tailored for `_.partialRight`. - * - * @private - * @param {Array} args The provided arguments. - * @param {Array} partials The arguments to append to those provided. - * @param {Array} holders The `partials` placeholder indexes. - * @param {boolean} [isCurried] Specify composing for a curried function. - * @returns {Array} Returns the new array of composed arguments. - */ - function composeArgsRight(args, partials, holders, isCurried) { - var argsIndex = -1, - argsLength = args.length, - holdersIndex = -1, - holdersLength = holders.length, - rightIndex = -1, - rightLength = partials.length, - rangeLength = nativeMax(argsLength - holdersLength, 0), - result = Array(rangeLength + rightLength), - isUncurried = !isCurried; - - while (++argsIndex < rangeLength) { - result[argsIndex] = args[argsIndex]; - } - var offset = argsIndex; - while (++rightIndex < rightLength) { - result[offset + rightIndex] = partials[rightIndex]; - } - while (++holdersIndex < holdersLength) { - if (isUncurried || argsIndex < argsLength) { - result[offset + holders[holdersIndex]] = args[argsIndex++]; - } - } - return result; - } - - /** - * Copies the values of `source` to `array`. - * - * @private - * @param {Array} source The array to copy values from. - * @param {Array} [array=[]] The array to copy values to. - * @returns {Array} Returns `array`. - */ - function copyArray(source, array) { - var index = -1, - length = source.length; - - array || (array = Array(length)); - while (++index < length) { - array[index] = source[index]; - } - return array; - } - - /** - * Copies properties of `source` to `object`. - * - * @private - * @param {Object} source The object to copy properties from. - * @param {Array} props The property identifiers to copy. - * @param {Object} [object={}] The object to copy properties to. - * @param {Function} [customizer] The function to customize copied values. - * @returns {Object} Returns `object`. - */ - function copyObject(source, props, object, customizer) { - var isNew = !object; - object || (object = {}); - - var index = -1, - length = props.length; - - while (++index < length) { - var key = props[index]; - - var newValue = customizer - ? customizer(object[key], source[key], key, object, source) - : undefined; - - if (newValue === undefined) { - newValue = source[key]; - } - if (isNew) { - baseAssignValue(object, key, newValue); - } else { - assignValue(object, key, newValue); - } - } - return object; - } - - /** - * Copies own symbols of `source` to `object`. - * - * @private - * @param {Object} source The object to copy symbols from. - * @param {Object} [object={}] The object to copy symbols to. - * @returns {Object} Returns `object`. 
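 - * @example
 - * // Illustrative sketch, not part of the original source (assumes `Symbol`
 - * // support): only symbol-keyed properties are copied, string keys are not.
 - * // var key = Symbol('a'), source = { 'b': 1 }; source[key] = 2;
 - * // copySymbols(source, {});
 - * // => an object with `key` set to `2` and no `'b'` property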
- */ - function copySymbols(source, object) { - return copyObject(source, getSymbols(source), object); - } - - /** - * Copies own and inherited symbols of `source` to `object`. - * - * @private - * @param {Object} source The object to copy symbols from. - * @param {Object} [object={}] The object to copy symbols to. - * @returns {Object} Returns `object`. - */ - function copySymbolsIn(source, object) { - return copyObject(source, getSymbolsIn(source), object); - } - - /** - * Creates a function like `_.groupBy`. - * - * @private - * @param {Function} setter The function to set accumulator values. - * @param {Function} [initializer] The accumulator object initializer. - * @returns {Function} Returns the new aggregator function. - */ - function createAggregator(setter, initializer) { - return function(collection, iteratee) { - var func = isArray(collection) ? arrayAggregator : baseAggregator, - accumulator = initializer ? initializer() : {}; - - return func(collection, setter, getIteratee(iteratee, 2), accumulator); - }; - } - - /** - * Creates a function like `_.assign`. - * - * @private - * @param {Function} assigner The function to assign values. - * @returns {Function} Returns the new assigner function. - */ - function createAssigner(assigner) { - return baseRest(function(object, sources) { - var index = -1, - length = sources.length, - customizer = length > 1 ? sources[length - 1] : undefined, - guard = length > 2 ? sources[2] : undefined; - - customizer = (assigner.length > 3 && typeof customizer == 'function') - ? (length--, customizer) - : undefined; - - if (guard && isIterateeCall(sources[0], sources[1], guard)) { - customizer = length < 3 ? undefined : customizer; - length = 1; - } - object = Object(object); - while (++index < length) { - var source = sources[index]; - if (source) { - assigner(object, source, index, customizer); - } - } - return object; - }); - } - - /** - * Creates a `baseEach` or `baseEachRight` function. - * - * @private - * @param {Function} eachFunc The function to iterate over a collection. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new base function. - */ - function createBaseEach(eachFunc, fromRight) { - return function(collection, iteratee) { - if (collection == null) { - return collection; - } - if (!isArrayLike(collection)) { - return eachFunc(collection, iteratee); - } - var length = collection.length, - index = fromRight ? length : -1, - iterable = Object(collection); - - while ((fromRight ? index-- : ++index < length)) { - if (iteratee(iterable[index], index, iterable) === false) { - break; - } - } - return collection; - }; - } - - /** - * Creates a base function for methods like `_.forIn` and `_.forOwn`. - * - * @private - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new base function. - */ - function createBaseFor(fromRight) { - return function(object, iteratee, keysFunc) { - var index = -1, - iterable = Object(object), - props = keysFunc(object), - length = props.length; - - while (length--) { - var key = props[fromRight ? length : ++index]; - if (iteratee(iterable[key], key, iterable) === false) { - break; - } - } - return object; - }; - } - - /** - * Creates a function that wraps `func` to invoke it with the optional `this` - * binding of `thisArg`. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. 
- * @param {*} [thisArg] The `this` binding of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createBind(func, bitmask, thisArg) { - var isBind = bitmask & WRAP_BIND_FLAG, - Ctor = createCtor(func); - - function wrapper() { - var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; - return fn.apply(isBind ? thisArg : this, arguments); - } - return wrapper; - } - - /** - * Creates a function like `_.lowerFirst`. - * - * @private - * @param {string} methodName The name of the `String` case method to use. - * @returns {Function} Returns the new case function. - */ - function createCaseFirst(methodName) { - return function(string) { - string = toString(string); - - var strSymbols = hasUnicode(string) - ? stringToArray(string) - : undefined; - - var chr = strSymbols - ? strSymbols[0] - : string.charAt(0); - - var trailing = strSymbols - ? castSlice(strSymbols, 1).join('') - : string.slice(1); - - return chr[methodName]() + trailing; - }; - } - - /** - * Creates a function like `_.camelCase`. - * - * @private - * @param {Function} callback The function to combine each word. - * @returns {Function} Returns the new compounder function. - */ - function createCompounder(callback) { - return function(string) { - return arrayReduce(words(deburr(string).replace(reApos, '')), callback, ''); - }; - } - - /** - * Creates a function that produces an instance of `Ctor` regardless of - * whether it was invoked as part of a `new` expression or by `call` or `apply`. - * - * @private - * @param {Function} Ctor The constructor to wrap. - * @returns {Function} Returns the new wrapped function. - */ - function createCtor(Ctor) { - return function() { - // Use a `switch` statement to work with class constructors. See - // http://ecma-international.org/ecma-262/7.0/#sec-ecmascript-function-objects-call-thisargument-argumentslist - // for more details. - var args = arguments; - switch (args.length) { - case 0: return new Ctor; - case 1: return new Ctor(args[0]); - case 2: return new Ctor(args[0], args[1]); - case 3: return new Ctor(args[0], args[1], args[2]); - case 4: return new Ctor(args[0], args[1], args[2], args[3]); - case 5: return new Ctor(args[0], args[1], args[2], args[3], args[4]); - case 6: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5]); - case 7: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5], args[6]); - } - var thisBinding = baseCreate(Ctor.prototype), - result = Ctor.apply(thisBinding, args); - - // Mimic the constructor's `return` behavior. - // See https://es5.github.io/#x13.2.2 for more details. - return isObject(result) ? result : thisBinding; - }; - } - - /** - * Creates a function that wraps `func` to enable currying. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {number} arity The arity of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createCurry(func, bitmask, arity) { - var Ctor = createCtor(func); - - function wrapper() { - var length = arguments.length, - args = Array(length), - index = length, - placeholder = getHolder(wrapper); - - while (index--) { - args[index] = arguments[index]; - } - var holders = (length < 3 && args[0] !== placeholder && args[length - 1] !== placeholder) - ? 
[] - : replaceHolders(args, placeholder); - - length -= holders.length; - if (length < arity) { - return createRecurry( - func, bitmask, createHybrid, wrapper.placeholder, undefined, - args, holders, undefined, undefined, arity - length); - } - var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; - return apply(fn, this, args); - } - return wrapper; - } - - /** - * Creates a `_.find` or `_.findLast` function. - * - * @private - * @param {Function} findIndexFunc The function to find the collection index. - * @returns {Function} Returns the new find function. - */ - function createFind(findIndexFunc) { - return function(collection, predicate, fromIndex) { - var iterable = Object(collection); - if (!isArrayLike(collection)) { - var iteratee = getIteratee(predicate, 3); - collection = keys(collection); - predicate = function(key) { return iteratee(iterable[key], key, iterable); }; - } - var index = findIndexFunc(collection, predicate, fromIndex); - return index > -1 ? iterable[iteratee ? collection[index] : index] : undefined; - }; - } - - /** - * Creates a `_.flow` or `_.flowRight` function. - * - * @private - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new flow function. - */ - function createFlow(fromRight) { - return flatRest(function(funcs) { - var length = funcs.length, - index = length, - prereq = LodashWrapper.prototype.thru; - - if (fromRight) { - funcs.reverse(); - } - while (index--) { - var func = funcs[index]; - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - if (prereq && !wrapper && getFuncName(func) == 'wrapper') { - var wrapper = new LodashWrapper([], true); - } - } - index = wrapper ? index : length; - while (++index < length) { - func = funcs[index]; - - var funcName = getFuncName(func), - data = funcName == 'wrapper' ? getData(func) : undefined; - - if (data && isLaziable(data[0]) && - data[1] == (WRAP_ARY_FLAG | WRAP_CURRY_FLAG | WRAP_PARTIAL_FLAG | WRAP_REARG_FLAG) && - !data[4].length && data[9] == 1 - ) { - wrapper = wrapper[getFuncName(data[0])].apply(wrapper, data[3]); - } else { - wrapper = (func.length == 1 && isLaziable(func)) - ? wrapper[funcName]() - : wrapper.thru(func); - } - } - return function() { - var args = arguments, - value = args[0]; - - if (wrapper && args.length == 1 && isArray(value)) { - return wrapper.plant(value).value(); - } - var index = 0, - result = length ? funcs[index].apply(this, args) : value; - - while (++index < length) { - result = funcs[index].call(this, result); - } - return result; - }; - }); - } - - /** - * Creates a function that wraps `func` to invoke it with optional `this` - * binding of `thisArg`, partial application, and currying. - * - * @private - * @param {Function|string} func The function or method name to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {*} [thisArg] The `this` binding of `func`. - * @param {Array} [partials] The arguments to prepend to those provided to - * the new function. - * @param {Array} [holders] The `partials` placeholder indexes. - * @param {Array} [partialsRight] The arguments to append to those provided - * to the new function. - * @param {Array} [holdersRight] The `partialsRight` placeholder indexes. - * @param {Array} [argPos] The argument positions of the new function. - * @param {number} [ary] The arity cap of `func`. - * @param {number} [arity] The arity of `func`. - * @returns {Function} Returns the new wrapped function. 
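 - * @example
 - * // Illustrative sketch, not part of the original source: public wrappers
 - * // built with placeholders route through this hybrid wrapper, e.g.
 - * // var greet = _.partial(function(a, b) { return a + ' ' + b; }, _, 'fred');
 - * // greet('hi');
 - * // => 'hi fred'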
- */ - function createHybrid(func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, argPos, ary, arity) { - var isAry = bitmask & WRAP_ARY_FLAG, - isBind = bitmask & WRAP_BIND_FLAG, - isBindKey = bitmask & WRAP_BIND_KEY_FLAG, - isCurried = bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG), - isFlip = bitmask & WRAP_FLIP_FLAG, - Ctor = isBindKey ? undefined : createCtor(func); - - function wrapper() { - var length = arguments.length, - args = Array(length), - index = length; - - while (index--) { - args[index] = arguments[index]; - } - if (isCurried) { - var placeholder = getHolder(wrapper), - holdersCount = countHolders(args, placeholder); - } - if (partials) { - args = composeArgs(args, partials, holders, isCurried); - } - if (partialsRight) { - args = composeArgsRight(args, partialsRight, holdersRight, isCurried); - } - length -= holdersCount; - if (isCurried && length < arity) { - var newHolders = replaceHolders(args, placeholder); - return createRecurry( - func, bitmask, createHybrid, wrapper.placeholder, thisArg, - args, newHolders, argPos, ary, arity - length - ); - } - var thisBinding = isBind ? thisArg : this, - fn = isBindKey ? thisBinding[func] : func; - - length = args.length; - if (argPos) { - args = reorder(args, argPos); - } else if (isFlip && length > 1) { - args.reverse(); - } - if (isAry && ary < length) { - args.length = ary; - } - if (this && this !== root && this instanceof wrapper) { - fn = Ctor || createCtor(fn); - } - return fn.apply(thisBinding, args); - } - return wrapper; - } - - /** - * Creates a function like `_.invertBy`. - * - * @private - * @param {Function} setter The function to set accumulator values. - * @param {Function} toIteratee The function to resolve iteratees. - * @returns {Function} Returns the new inverter function. - */ - function createInverter(setter, toIteratee) { - return function(object, iteratee) { - return baseInverter(object, setter, toIteratee(iteratee), {}); - }; - } - - /** - * Creates a function that performs a mathematical operation on two values. - * - * @private - * @param {Function} operator The function to perform the operation. - * @param {number} [defaultValue] The value used for `undefined` arguments. - * @returns {Function} Returns the new mathematical operation function. - */ - function createMathOperation(operator, defaultValue) { - return function(value, other) { - var result; - if (value === undefined && other === undefined) { - return defaultValue; - } - if (value !== undefined) { - result = value; - } - if (other !== undefined) { - if (result === undefined) { - return other; - } - if (typeof value == 'string' || typeof other == 'string') { - value = baseToString(value); - other = baseToString(other); - } else { - value = baseToNumber(value); - other = baseToNumber(other); - } - result = operator(value, other); - } - return result; - }; - } - - /** - * Creates a function like `_.over`. - * - * @private - * @param {Function} arrayFunc The function to iterate over iteratees. - * @returns {Function} Returns the new over function. - */ - function createOver(arrayFunc) { - return flatRest(function(iteratees) { - iteratees = arrayMap(iteratees, baseUnary(getIteratee())); - return baseRest(function(args) { - var thisArg = this; - return arrayFunc(iteratees, function(iteratee) { - return apply(iteratee, thisArg, args); - }); - }); - }); - } - - /** - * Creates the padding for `string` based on `length`. The `chars` string - * is truncated if the number of characters exceeds `length`. 
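 - *
 - * For example (illustrative, not part of the original source),
 - * `createPadding(5, 'ab')` repeats `'ab'` to `'ababab'` and truncates the
 - * result to `'ababa'`.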
- * - * @private - * @param {number} length The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padding for `string`. - */ - function createPadding(length, chars) { - chars = chars === undefined ? ' ' : baseToString(chars); - - var charsLength = chars.length; - if (charsLength < 2) { - return charsLength ? baseRepeat(chars, length) : chars; - } - var result = baseRepeat(chars, nativeCeil(length / stringSize(chars))); - return hasUnicode(chars) - ? castSlice(stringToArray(result), 0, length).join('') - : result.slice(0, length); - } - - /** - * Creates a function that wraps `func` to invoke it with the `this` binding - * of `thisArg` and `partials` prepended to the arguments it receives. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {*} thisArg The `this` binding of `func`. - * @param {Array} partials The arguments to prepend to those provided to - * the new function. - * @returns {Function} Returns the new wrapped function. - */ - function createPartial(func, bitmask, thisArg, partials) { - var isBind = bitmask & WRAP_BIND_FLAG, - Ctor = createCtor(func); - - function wrapper() { - var argsIndex = -1, - argsLength = arguments.length, - leftIndex = -1, - leftLength = partials.length, - args = Array(leftLength + argsLength), - fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; - - while (++leftIndex < leftLength) { - args[leftIndex] = partials[leftIndex]; - } - while (argsLength--) { - args[leftIndex++] = arguments[++argsIndex]; - } - return apply(fn, isBind ? thisArg : this, args); - } - return wrapper; - } - - /** - * Creates a `_.range` or `_.rangeRight` function. - * - * @private - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new range function. - */ - function createRange(fromRight) { - return function(start, end, step) { - if (step && typeof step != 'number' && isIterateeCall(start, end, step)) { - end = step = undefined; - } - // Ensure the sign of `-0` is preserved. - start = toFinite(start); - if (end === undefined) { - end = start; - start = 0; - } else { - end = toFinite(end); - } - step = step === undefined ? (start < end ? 1 : -1) : toFinite(step); - return baseRange(start, end, step, fromRight); - }; - } - - /** - * Creates a function that performs a relational operation on two values. - * - * @private - * @param {Function} operator The function to perform the operation. - * @returns {Function} Returns the new relational operation function. - */ - function createRelationalOperation(operator) { - return function(value, other) { - if (!(typeof value == 'string' && typeof other == 'string')) { - value = toNumber(value); - other = toNumber(other); - } - return operator(value, other); - }; - } - - /** - * Creates a function that wraps `func` to continue currying. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {Function} wrapFunc The function to create the `func` wrapper. - * @param {*} placeholder The placeholder value. - * @param {*} [thisArg] The `this` binding of `func`. - * @param {Array} [partials] The arguments to prepend to those provided to - * the new function. - * @param {Array} [holders] The `partials` placeholder indexes. - * @param {Array} [argPos] The argument positions of the new function. 
- * @param {number} [ary] The arity cap of `func`. - * @param {number} [arity] The arity of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createRecurry(func, bitmask, wrapFunc, placeholder, thisArg, partials, holders, argPos, ary, arity) { - var isCurry = bitmask & WRAP_CURRY_FLAG, - newHolders = isCurry ? holders : undefined, - newHoldersRight = isCurry ? undefined : holders, - newPartials = isCurry ? partials : undefined, - newPartialsRight = isCurry ? undefined : partials; - - bitmask |= (isCurry ? WRAP_PARTIAL_FLAG : WRAP_PARTIAL_RIGHT_FLAG); - bitmask &= ~(isCurry ? WRAP_PARTIAL_RIGHT_FLAG : WRAP_PARTIAL_FLAG); - - if (!(bitmask & WRAP_CURRY_BOUND_FLAG)) { - bitmask &= ~(WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG); - } - var newData = [ - func, bitmask, thisArg, newPartials, newHolders, newPartialsRight, - newHoldersRight, argPos, ary, arity - ]; - - var result = wrapFunc.apply(undefined, newData); - if (isLaziable(func)) { - setData(result, newData); - } - result.placeholder = placeholder; - return setWrapToString(result, func, bitmask); - } - - /** - * Creates a function like `_.round`. - * - * @private - * @param {string} methodName The name of the `Math` method to use when rounding. - * @returns {Function} Returns the new round function. - */ - function createRound(methodName) { - var func = Math[methodName]; - return function(number, precision) { - number = toNumber(number); - precision = precision == null ? 0 : nativeMin(toInteger(precision), 292); - if (precision) { - // Shift with exponential notation to avoid floating-point issues. - // See [MDN](https://mdn.io/round#Examples) for more details. - var pair = (toString(number) + 'e').split('e'), - value = func(pair[0] + 'e' + (+pair[1] + precision)); - - pair = (toString(value) + 'e').split('e'); - return +(pair[0] + 'e' + (+pair[1] - precision)); - } - return func(number); - }; - } - - /** - * Creates a set object of `values`. - * - * @private - * @param {Array} values The values to add to the set. - * @returns {Object} Returns the new set. - */ - var createSet = !(Set && (1 / setToArray(new Set([,-0]))[1]) == INFINITY) ? noop : function(values) { - return new Set(values); - }; - - /** - * Creates a `_.toPairs` or `_.toPairsIn` function. - * - * @private - * @param {Function} keysFunc The function to get the keys of a given object. - * @returns {Function} Returns the new pairs function. - */ - function createToPairs(keysFunc) { - return function(object) { - var tag = getTag(object); - if (tag == mapTag) { - return mapToArray(object); - } - if (tag == setTag) { - return setToPairs(object); - } - return baseToPairs(object, keysFunc(object)); - }; - } - - /** - * Creates a function that either curries or invokes `func` with optional - * `this` binding and partially applied arguments. - * - * @private - * @param {Function|string} func The function or method name to wrap. - * @param {number} bitmask The bitmask flags. - * 1 - `_.bind` - * 2 - `_.bindKey` - * 4 - `_.curry` or `_.curryRight` of a bound function - * 8 - `_.curry` - * 16 - `_.curryRight` - * 32 - `_.partial` - * 64 - `_.partialRight` - * 128 - `_.rearg` - * 256 - `_.ary` - * 512 - `_.flip` - * @param {*} [thisArg] The `this` binding of `func`. - * @param {Array} [partials] The arguments to be partially applied. - * @param {Array} [holders] The `partials` placeholder indexes. - * @param {Array} [argPos] The argument positions of the new function. - * @param {number} [ary] The arity cap of `func`. 
- * @param {number} [arity] The arity of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createWrap(func, bitmask, thisArg, partials, holders, argPos, ary, arity) { - var isBindKey = bitmask & WRAP_BIND_KEY_FLAG; - if (!isBindKey && typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - var length = partials ? partials.length : 0; - if (!length) { - bitmask &= ~(WRAP_PARTIAL_FLAG | WRAP_PARTIAL_RIGHT_FLAG); - partials = holders = undefined; - } - ary = ary === undefined ? ary : nativeMax(toInteger(ary), 0); - arity = arity === undefined ? arity : toInteger(arity); - length -= holders ? holders.length : 0; - - if (bitmask & WRAP_PARTIAL_RIGHT_FLAG) { - var partialsRight = partials, - holdersRight = holders; - - partials = holders = undefined; - } - var data = isBindKey ? undefined : getData(func); - - var newData = [ - func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, - argPos, ary, arity - ]; - - if (data) { - mergeData(newData, data); - } - func = newData[0]; - bitmask = newData[1]; - thisArg = newData[2]; - partials = newData[3]; - holders = newData[4]; - arity = newData[9] = newData[9] === undefined - ? (isBindKey ? 0 : func.length) - : nativeMax(newData[9] - length, 0); - - if (!arity && bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG)) { - bitmask &= ~(WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG); - } - if (!bitmask || bitmask == WRAP_BIND_FLAG) { - var result = createBind(func, bitmask, thisArg); - } else if (bitmask == WRAP_CURRY_FLAG || bitmask == WRAP_CURRY_RIGHT_FLAG) { - result = createCurry(func, bitmask, arity); - } else if ((bitmask == WRAP_PARTIAL_FLAG || bitmask == (WRAP_BIND_FLAG | WRAP_PARTIAL_FLAG)) && !holders.length) { - result = createPartial(func, bitmask, thisArg, partials); - } else { - result = createHybrid.apply(undefined, newData); - } - var setter = data ? baseSetData : setData; - return setWrapToString(setter(result, newData), func, bitmask); - } - - /** - * Used by `_.defaults` to customize its `_.assignIn` use to assign properties - * of source objects to the destination object for all destination properties - * that resolve to `undefined`. - * - * @private - * @param {*} objValue The destination value. - * @param {*} srcValue The source value. - * @param {string} key The key of the property to assign. - * @param {Object} object The parent object of `objValue`. - * @returns {*} Returns the value to assign. - */ - function customDefaultsAssignIn(objValue, srcValue, key, object) { - if (objValue === undefined || - (eq(objValue, objectProto[key]) && !hasOwnProperty.call(object, key))) { - return srcValue; - } - return objValue; - } - - /** - * Used by `_.defaultsDeep` to customize its `_.merge` use to merge source - * objects into destination objects that are passed thru. - * - * @private - * @param {*} objValue The destination value. - * @param {*} srcValue The source value. - * @param {string} key The key of the property to merge. - * @param {Object} object The parent object of `objValue`. - * @param {Object} source The parent object of `srcValue`. - * @param {Object} [stack] Tracks traversed source values and their merged - * counterparts. - * @returns {*} Returns the value to assign. - */ - function customDefaultsMerge(objValue, srcValue, key, object, source, stack) { - if (isObject(objValue) && isObject(srcValue)) { - // Recursively merge objects and arrays (susceptible to call stack limits). 
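 - // Seeding `stack` with the pair before recursing (and deleting it after)
 - // lets cyclic source values be detected and merged once instead of
 - // recursing forever.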
- stack.set(srcValue, objValue); - baseMerge(objValue, srcValue, undefined, customDefaultsMerge, stack); - stack['delete'](srcValue); - } - return objValue; - } - - /** - * Used by `_.omit` to customize its `_.cloneDeep` use to only clone plain - * objects. - * - * @private - * @param {*} value The value to inspect. - * @param {string} key The key of the property to inspect. - * @returns {*} Returns the uncloned value or `undefined` to defer cloning to `_.cloneDeep`. - */ - function customOmitClone(value) { - return isPlainObject(value) ? undefined : value; - } - - /** - * A specialized version of `baseIsEqualDeep` for arrays with support for - * partial deep comparisons. - * - * @private - * @param {Array} array The array to compare. - * @param {Array} other The other array to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} stack Tracks traversed `array` and `other` objects. - * @returns {boolean} Returns `true` if the arrays are equivalent, else `false`. - */ - function equalArrays(array, other, bitmask, customizer, equalFunc, stack) { - var isPartial = bitmask & COMPARE_PARTIAL_FLAG, - arrLength = array.length, - othLength = other.length; - - if (arrLength != othLength && !(isPartial && othLength > arrLength)) { - return false; - } - // Assume cyclic values are equal. - var stacked = stack.get(array); - if (stacked && stack.get(other)) { - return stacked == other; - } - var index = -1, - result = true, - seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined; - - stack.set(array, other); - stack.set(other, array); - - // Ignore non-index properties. - while (++index < arrLength) { - var arrValue = array[index], - othValue = other[index]; - - if (customizer) { - var compared = isPartial - ? customizer(othValue, arrValue, index, other, array, stack) - : customizer(arrValue, othValue, index, array, other, stack); - } - if (compared !== undefined) { - if (compared) { - continue; - } - result = false; - break; - } - // Recursively compare arrays (susceptible to call stack limits). - if (seen) { - if (!arraySome(other, function(othValue, othIndex) { - if (!cacheHas(seen, othIndex) && - (arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) { - return seen.push(othIndex); - } - })) { - result = false; - break; - } - } else if (!( - arrValue === othValue || - equalFunc(arrValue, othValue, bitmask, customizer, stack) - )) { - result = false; - break; - } - } - stack['delete'](array); - stack['delete'](other); - return result; - } - - /** - * A specialized version of `baseIsEqualDeep` for comparing objects of - * the same `toStringTag`. - * - * **Note:** This function only supports comparing values with tags of - * `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`. - * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {string} tag The `toStringTag` of the objects to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} stack Tracks traversed `object` and `other` objects. 
- * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. - */ - function equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) { - switch (tag) { - case dataViewTag: - if ((object.byteLength != other.byteLength) || - (object.byteOffset != other.byteOffset)) { - return false; - } - object = object.buffer; - other = other.buffer; - - case arrayBufferTag: - if ((object.byteLength != other.byteLength) || - !equalFunc(new Uint8Array(object), new Uint8Array(other))) { - return false; - } - return true; - - case boolTag: - case dateTag: - case numberTag: - // Coerce booleans to `1` or `0` and dates to milliseconds. - // Invalid dates are coerced to `NaN`. - return eq(+object, +other); - - case errorTag: - return object.name == other.name && object.message == other.message; - - case regexpTag: - case stringTag: - // Coerce regexes to strings and treat strings, primitives and objects, - // as equal. See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring - // for more details. - return object == (other + ''); - - case mapTag: - var convert = mapToArray; - - case setTag: - var isPartial = bitmask & COMPARE_PARTIAL_FLAG; - convert || (convert = setToArray); - - if (object.size != other.size && !isPartial) { - return false; - } - // Assume cyclic values are equal. - var stacked = stack.get(object); - if (stacked) { - return stacked == other; - } - bitmask |= COMPARE_UNORDERED_FLAG; - - // Recursively compare objects (susceptible to call stack limits). - stack.set(object, other); - var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack); - stack['delete'](object); - return result; - - case symbolTag: - if (symbolValueOf) { - return symbolValueOf.call(object) == symbolValueOf.call(other); - } - } - return false; - } - - /** - * A specialized version of `baseIsEqualDeep` for objects with support for - * partial deep comparisons. - * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} stack Tracks traversed `object` and `other` objects. - * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. - */ - function equalObjects(object, other, bitmask, customizer, equalFunc, stack) { - var isPartial = bitmask & COMPARE_PARTIAL_FLAG, - objProps = getAllKeys(object), - objLength = objProps.length, - othProps = getAllKeys(other), - othLength = othProps.length; - - if (objLength != othLength && !isPartial) { - return false; - } - var index = objLength; - while (index--) { - var key = objProps[index]; - if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) { - return false; - } - } - // Assume cyclic values are equal. - var stacked = stack.get(object); - if (stacked && stack.get(other)) { - return stacked == other; - } - var result = true; - stack.set(object, other); - stack.set(other, object); - - var skipCtor = isPartial; - while (++index < objLength) { - key = objProps[index]; - var objValue = object[key], - othValue = other[key]; - - if (customizer) { - var compared = isPartial - ? 
customizer(othValue, objValue, key, other, object, stack) - : customizer(objValue, othValue, key, object, other, stack); - } - // Recursively compare objects (susceptible to call stack limits). - if (!(compared === undefined - ? (objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack)) - : compared - )) { - result = false; - break; - } - skipCtor || (skipCtor = key == 'constructor'); - } - if (result && !skipCtor) { - var objCtor = object.constructor, - othCtor = other.constructor; - - // Non `Object` object instances with different constructors are not equal. - if (objCtor != othCtor && - ('constructor' in object && 'constructor' in other) && - !(typeof objCtor == 'function' && objCtor instanceof objCtor && - typeof othCtor == 'function' && othCtor instanceof othCtor)) { - result = false; - } - } - stack['delete'](object); - stack['delete'](other); - return result; - } - - /** - * A specialized version of `baseRest` which flattens the rest array. - * - * @private - * @param {Function} func The function to apply a rest parameter to. - * @returns {Function} Returns the new function. - */ - function flatRest(func) { - return setToString(overRest(func, undefined, flatten), func + ''); - } - - /** - * Creates an array of own enumerable property names and symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names and symbols. - */ - function getAllKeys(object) { - return baseGetAllKeys(object, keys, getSymbols); - } - - /** - * Creates an array of own and inherited enumerable property names and - * symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names and symbols. - */ - function getAllKeysIn(object) { - return baseGetAllKeys(object, keysIn, getSymbolsIn); - } - - /** - * Gets metadata for `func`. - * - * @private - * @param {Function} func The function to query. - * @returns {*} Returns the metadata for `func`. - */ - var getData = !metaMap ? noop : function(func) { - return metaMap.get(func); - }; - - /** - * Gets the name of `func`. - * - * @private - * @param {Function} func The function to query. - * @returns {string} Returns the function name. - */ - function getFuncName(func) { - var result = (func.name + ''), - array = realNames[result], - length = hasOwnProperty.call(realNames, result) ? array.length : 0; - - while (length--) { - var data = array[length], - otherFunc = data.func; - if (otherFunc == null || otherFunc == func) { - return data.name; - } - } - return result; - } - - /** - * Gets the argument placeholder value for `func`. - * - * @private - * @param {Function} func The function to inspect. - * @returns {*} Returns the placeholder value. - */ - function getHolder(func) { - var object = hasOwnProperty.call(lodash, 'placeholder') ? lodash : func; - return object.placeholder; - } - - /** - * Gets the appropriate "iteratee" function. If `_.iteratee` is customized, - * this function returns the custom method, otherwise it returns `baseIteratee`. - * If arguments are provided, the chosen function is invoked with them and - * its result is returned. - * - * @private - * @param {*} [value] The value to convert to an iteratee. - * @param {number} [arity] The arity of the created iteratee. - * @returns {Function} Returns the chosen function or its result. - */ - function getIteratee() { - var result = lodash.iteratee || iteratee; - result = result === iteratee ? 
baseIteratee : result; - return arguments.length ? result(arguments[0], arguments[1]) : result; - } - - /** - * Gets the data for `map`. - * - * @private - * @param {Object} map The map to query. - * @param {string} key The reference key. - * @returns {*} Returns the map data. - */ - function getMapData(map, key) { - var data = map.__data__; - return isKeyable(key) - ? data[typeof key == 'string' ? 'string' : 'hash'] - : data.map; - } - - /** - * Gets the property names, values, and compare flags of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the match data of `object`. - */ - function getMatchData(object) { - var result = keys(object), - length = result.length; - - while (length--) { - var key = result[length], - value = object[key]; - - result[length] = [key, value, isStrictComparable(value)]; - } - return result; - } - - /** - * Gets the native function at `key` of `object`. - * - * @private - * @param {Object} object The object to query. - * @param {string} key The key of the method to get. - * @returns {*} Returns the function if it's native, else `undefined`. - */ - function getNative(object, key) { - var value = getValue(object, key); - return baseIsNative(value) ? value : undefined; - } - - /** - * A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the raw `toStringTag`. - */ - function getRawTag(value) { - var isOwn = hasOwnProperty.call(value, symToStringTag), - tag = value[symToStringTag]; - - try { - value[symToStringTag] = undefined; - var unmasked = true; - } catch (e) {} - - var result = nativeObjectToString.call(value); - if (unmasked) { - if (isOwn) { - value[symToStringTag] = tag; - } else { - delete value[symToStringTag]; - } - } - return result; - } - - /** - * Creates an array of the own enumerable symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of symbols. - */ - var getSymbols = !nativeGetSymbols ? stubArray : function(object) { - if (object == null) { - return []; - } - object = Object(object); - return arrayFilter(nativeGetSymbols(object), function(symbol) { - return propertyIsEnumerable.call(object, symbol); - }); - }; - - /** - * Creates an array of the own and inherited enumerable symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of symbols. - */ - var getSymbolsIn = !nativeGetSymbols ? stubArray : function(object) { - var result = []; - while (object) { - arrayPush(result, getSymbols(object)); - object = getPrototype(object); - } - return result; - }; - - /** - * Gets the `toStringTag` of `value`. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the `toStringTag`. - */ - var getTag = baseGetTag; - - // Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6. - if ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) || - (Map && getTag(new Map) != mapTag) || - (Promise && getTag(Promise.resolve()) != promiseTag) || - (Set && getTag(new Set) != setTag) || - (WeakMap && getTag(new WeakMap) != weakMapTag)) { - getTag = function(value) { - var result = baseGetTag(value), - Ctor = result == objectTag ? value.constructor : undefined, - ctorString = Ctor ? 
toSource(Ctor) : ''; - - if (ctorString) { - switch (ctorString) { - case dataViewCtorString: return dataViewTag; - case mapCtorString: return mapTag; - case promiseCtorString: return promiseTag; - case setCtorString: return setTag; - case weakMapCtorString: return weakMapTag; - } - } - return result; - }; - } - - /** - * Gets the view, applying any `transforms` to the `start` and `end` positions. - * - * @private - * @param {number} start The start of the view. - * @param {number} end The end of the view. - * @param {Array} transforms The transformations to apply to the view. - * @returns {Object} Returns an object containing the `start` and `end` - * positions of the view. - */ - function getView(start, end, transforms) { - var index = -1, - length = transforms.length; - - while (++index < length) { - var data = transforms[index], - size = data.size; - - switch (data.type) { - case 'drop': start += size; break; - case 'dropRight': end -= size; break; - case 'take': end = nativeMin(end, start + size); break; - case 'takeRight': start = nativeMax(start, end - size); break; - } - } - return { 'start': start, 'end': end }; - } - - /** - * Extracts wrapper details from the `source` body comment. - * - * @private - * @param {string} source The source to inspect. - * @returns {Array} Returns the wrapper details. - */ - function getWrapDetails(source) { - var match = source.match(reWrapDetails); - return match ? match[1].split(reSplitDetails) : []; - } - - /** - * Checks if `path` exists on `object`. - * - * @private - * @param {Object} object The object to query. - * @param {Array|string} path The path to check. - * @param {Function} hasFunc The function to check properties. - * @returns {boolean} Returns `true` if `path` exists, else `false`. - */ - function hasPath(object, path, hasFunc) { - path = castPath(path, object); - - var index = -1, - length = path.length, - result = false; - - while (++index < length) { - var key = toKey(path[index]); - if (!(result = object != null && hasFunc(object, key))) { - break; - } - object = object[key]; - } - if (result || ++index != length) { - return result; - } - length = object == null ? 0 : object.length; - return !!length && isLength(length) && isIndex(key, length) && - (isArray(object) || isArguments(object)); - } - - /** - * Initializes an array clone. - * - * @private - * @param {Array} array The array to clone. - * @returns {Array} Returns the initialized clone. - */ - function initCloneArray(array) { - var length = array.length, - result = new array.constructor(length); - - // Add properties assigned by `RegExp#exec`. - if (length && typeof array[0] == 'string' && hasOwnProperty.call(array, 'index')) { - result.index = array.index; - result.input = array.input; - } - return result; - } - - /** - * Initializes an object clone. - * - * @private - * @param {Object} object The object to clone. - * @returns {Object} Returns the initialized clone. - */ - function initCloneObject(object) { - return (typeof object.constructor == 'function' && !isPrototype(object)) - ? baseCreate(getPrototype(object)) - : {}; - } - - /** - * Initializes an object clone based on its `toStringTag`. - * - * **Note:** This function only supports cloning values with tags of - * `Boolean`, `Date`, `Error`, `Map`, `Number`, `RegExp`, `Set`, or `String`. - * - * @private - * @param {Object} object The object to clone. - * @param {string} tag The `toStringTag` of the object to clone. - * @param {boolean} [isDeep] Specify a deep clone. 
- * @returns {Object} Returns the initialized clone.
- */
- function initCloneByTag(object, tag, isDeep) {
- var Ctor = object.constructor;
- switch (tag) {
- case arrayBufferTag:
- return cloneArrayBuffer(object);
-
- case boolTag:
- case dateTag:
- return new Ctor(+object);
-
- case dataViewTag:
- return cloneDataView(object, isDeep);
-
- case float32Tag: case float64Tag:
- case int8Tag: case int16Tag: case int32Tag:
- case uint8Tag: case uint8ClampedTag: case uint16Tag: case uint32Tag:
- return cloneTypedArray(object, isDeep);
-
- case mapTag:
- return new Ctor;
-
- case numberTag:
- case stringTag:
- return new Ctor(object);
-
- case regexpTag:
- return cloneRegExp(object);
-
- case setTag:
- return new Ctor;
-
- case symbolTag:
- return cloneSymbol(object);
- }
- }
-
- /**
- * Inserts wrapper `details` in a comment at the top of the `source` body.
- *
- * @private
- * @param {string} source The source to modify.
- * @param {Array} details The details to insert.
- * @returns {string} Returns the modified source.
- */
- function insertWrapDetails(source, details) {
- var length = details.length;
- if (!length) {
- return source;
- }
- var lastIndex = length - 1;
- details[lastIndex] = (length > 1 ? '& ' : '') + details[lastIndex];
- details = details.join(length > 2 ? ', ' : ' ');
- return source.replace(reWrapComment, '{\n/* [wrapped with ' + details + '] */\n');
- }
-
- /**
- * Checks if `value` is a flattenable `arguments` object or array.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is flattenable, else `false`.
- */
- function isFlattenable(value) {
- return isArray(value) || isArguments(value) ||
- !!(spreadableSymbol && value && value[spreadableSymbol]);
- }
-
- /**
- * Checks if `value` is a valid array-like index.
- *
- * @private
- * @param {*} value The value to check.
- * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.
- * @returns {boolean} Returns `true` if `value` is a valid index, else `false`.
- */
- function isIndex(value, length) {
- var type = typeof value;
- length = length == null ? MAX_SAFE_INTEGER : length;
-
- return !!length &&
- (type == 'number' ||
- (type != 'symbol' && reIsUint.test(value))) &&
- (value > -1 && value % 1 == 0 && value < length);
- }
-
- /**
- * Checks if the given arguments are from an iteratee call.
- *
- * @private
- * @param {*} value The potential iteratee value argument.
- * @param {*} index The potential iteratee index or key argument.
- * @param {*} object The potential iteratee object argument.
- * @returns {boolean} Returns `true` if the arguments are from an iteratee call,
- * else `false`.
- */
- function isIterateeCall(value, index, object) {
- if (!isObject(object)) {
- return false;
- }
- var type = typeof index;
- if (type == 'number'
- ? (isArrayLike(object) && isIndex(index, object.length))
- : (type == 'string' && index in object)
- ) {
- return eq(object[index], value);
- }
- return false;
- }
-
- /**
- * Checks if `value` is a property name and not a property path.
- *
- * @private
- * @param {*} value The value to check.
- * @param {Object} [object] The object to query keys on.
- * @returns {boolean} Returns `true` if `value` is a property name, else `false`.
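- * @example
- *
- * // Illustrative sketch only (private helper, not public API); behavior
- * // follows from the `reIsPlainProp`/`reIsDeepProp` checks above:
- * // isKey('a')                 => true  (plain property name)
- * // isKey('a.b')               => false (parsed as a property path)
- * // isKey('a.b', { 'a.b': 1 }) => true  (an own key wins over a path)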
- */
- function isKey(value, object) {
- if (isArray(value)) {
- return false;
- }
- var type = typeof value;
- if (type == 'number' || type == 'symbol' || type == 'boolean' ||
- value == null || isSymbol(value)) {
- return true;
- }
- return reIsPlainProp.test(value) || !reIsDeepProp.test(value) ||
- (object != null && value in Object(object));
- }
-
- /**
- * Checks if `value` is suitable for use as a unique object key.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is suitable, else `false`.
- */
- function isKeyable(value) {
- var type = typeof value;
- return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean')
- ? (value !== '__proto__')
- : (value === null);
- }
-
- /**
- * Checks if `func` has a lazy counterpart.
- *
- * @private
- * @param {Function} func The function to check.
- * @returns {boolean} Returns `true` if `func` has a lazy counterpart,
- * else `false`.
- */
- function isLaziable(func) {
- var funcName = getFuncName(func),
- other = lodash[funcName];
-
- if (typeof other != 'function' || !(funcName in LazyWrapper.prototype)) {
- return false;
- }
- if (func === other) {
- return true;
- }
- var data = getData(other);
- return !!data && func === data[0];
- }
-
- /**
- * Checks if `func` has its source masked.
- *
- * @private
- * @param {Function} func The function to check.
- * @returns {boolean} Returns `true` if `func` is masked, else `false`.
- */
- function isMasked(func) {
- return !!maskSrcKey && (maskSrcKey in func);
- }
-
- /**
- * Checks if `func` is capable of being masked.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `func` is maskable, else `false`.
- */
- var isMaskable = coreJsData ? isFunction : stubFalse;
-
- /**
- * Checks if `value` is likely a prototype object.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is a prototype, else `false`.
- */
- function isPrototype(value) {
- var Ctor = value && value.constructor,
- proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;
-
- return value === proto;
- }
-
- /**
- * Checks if `value` is suitable for strict equality comparisons, i.e. `===`.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is suitable for strict
- * equality comparisons, else `false`.
- */
- function isStrictComparable(value) {
- return value === value && !isObject(value);
- }
-
- /**
- * A specialized version of `matchesProperty` for source values suitable
- * for strict equality comparisons, i.e. `===`.
- *
- * @private
- * @param {string} key The key of the property to get.
- * @param {*} srcValue The value to match.
- * @returns {Function} Returns the new spec function.
- */
- function matchesStrictComparable(key, srcValue) {
- return function(object) {
- if (object == null) {
- return false;
- }
- return object[key] === srcValue &&
- (srcValue !== undefined || (key in Object(object)));
- };
- }
-
- /**
- * A specialized version of `_.memoize` which clears the memoized function's
- * cache when it exceeds `MAX_MEMOIZE_SIZE`.
- *
- * @private
- * @param {Function} func The function to have its output memoized.
- * @returns {Function} Returns the new memoized function.
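- * @example
- *
- * // Rough sketch: the resolver clears the cache once it holds
- * // `MAX_MEMOIZE_SIZE` (500) entries, trading cache hits for bounded memory.
- * // var upper = memoizeCapped(function(key) { return key.toUpperCase(); });
- * // upper('a'); // => 'A' (cached until the size cap triggers a clear)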
- */
- function memoizeCapped(func) {
- var result = memoize(func, function(key) {
- if (cache.size === MAX_MEMOIZE_SIZE) {
- cache.clear();
- }
- return key;
- });
-
- var cache = result.cache;
- return result;
- }
-
- /**
- * Merges the function metadata of `source` into `data`.
- *
- * Merging metadata reduces the number of wrappers used to invoke a function.
- * This is possible because methods like `_.bind`, `_.curry`, and `_.partial`
- * may be applied regardless of execution order. Methods like `_.ary` and
- * `_.rearg` modify function arguments, making the order in which they are
- * executed important, preventing the merging of metadata. However, we make
- * an exception for a safe combined case where curried functions have `_.ary`
- * and/or `_.rearg` applied.
- *
- * @private
- * @param {Array} data The destination metadata.
- * @param {Array} source The source metadata.
- * @returns {Array} Returns `data`.
- */
- function mergeData(data, source) {
- var bitmask = data[1],
- srcBitmask = source[1],
- newBitmask = bitmask | srcBitmask,
- isCommon = newBitmask < (WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG | WRAP_ARY_FLAG);
-
- var isCombo =
- ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_CURRY_FLAG)) ||
- ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_REARG_FLAG) && (data[7].length <= source[8])) ||
- ((srcBitmask == (WRAP_ARY_FLAG | WRAP_REARG_FLAG)) && (source[7].length <= source[8]) && (bitmask == WRAP_CURRY_FLAG));
-
- // Exit early if metadata can't be merged.
- if (!(isCommon || isCombo)) {
- return data;
- }
- // Use source `thisArg` if available.
- if (srcBitmask & WRAP_BIND_FLAG) {
- data[2] = source[2];
- // Set when currying a bound function.
- newBitmask |= bitmask & WRAP_BIND_FLAG ? 0 : WRAP_CURRY_BOUND_FLAG;
- }
- // Compose partial arguments.
- var value = source[3];
- if (value) {
- var partials = data[3];
- data[3] = partials ? composeArgs(partials, value, source[4]) : value;
- data[4] = partials ? replaceHolders(data[3], PLACEHOLDER) : source[4];
- }
- // Compose partial right arguments.
- value = source[5];
- if (value) {
- partials = data[5];
- data[5] = partials ? composeArgsRight(partials, value, source[6]) : value;
- data[6] = partials ? replaceHolders(data[5], PLACEHOLDER) : source[6];
- }
- // Use source `argPos` if available.
- value = source[7];
- if (value) {
- data[7] = value;
- }
- // Use source `ary` if it's smaller.
- if (srcBitmask & WRAP_ARY_FLAG) {
- data[8] = data[8] == null ? source[8] : nativeMin(data[8], source[8]);
- }
- // Use source `arity` if one is not provided.
- if (data[9] == null) {
- data[9] = source[9];
- }
- // Use source `func` and merge bitmasks.
- data[0] = source[0];
- data[1] = newBitmask;
-
- return data;
- }
-
- /**
- * This function is like
- * [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
- * except that it includes inherited enumerable properties.
- *
- * @private
- * @param {Object} object The object to query.
- * @returns {Array} Returns the array of property names.
- */
- function nativeKeysIn(object) {
- var result = [];
- if (object != null) {
- for (var key in Object(object)) {
- result.push(key);
- }
- }
- return result;
- }
-
- /**
- * Converts `value` to a string using `Object.prototype.toString`.
- *
- * @private
- * @param {*} value The value to convert.
- * @returns {string} Returns the converted string.
- */
- function objectToString(value) {
- return nativeObjectToString.call(value);
- }
-
- /**
- * A specialized version of `baseRest` which transforms the rest array.
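- * For instance, `overRest(fn, 1, flatten)` would gather the arguments from
- * index 1 onward into an array, flatten that array, and invoke `fn` as
- * `fn(args[0], flattenedRest)` (an illustrative sketch; `fn` and
- * `flattenedRest` are placeholder names, not lodash internals).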
- * - * @private - * @param {Function} func The function to apply a rest parameter to. - * @param {number} [start=func.length-1] The start position of the rest parameter. - * @param {Function} transform The rest array transform. - * @returns {Function} Returns the new function. - */ - function overRest(func, start, transform) { - start = nativeMax(start === undefined ? (func.length - 1) : start, 0); - return function() { - var args = arguments, - index = -1, - length = nativeMax(args.length - start, 0), - array = Array(length); - - while (++index < length) { - array[index] = args[start + index]; - } - index = -1; - var otherArgs = Array(start + 1); - while (++index < start) { - otherArgs[index] = args[index]; - } - otherArgs[start] = transform(array); - return apply(func, this, otherArgs); - }; - } - - /** - * Gets the parent value at `path` of `object`. - * - * @private - * @param {Object} object The object to query. - * @param {Array} path The path to get the parent value of. - * @returns {*} Returns the parent value. - */ - function parent(object, path) { - return path.length < 2 ? object : baseGet(object, baseSlice(path, 0, -1)); - } - - /** - * Reorder `array` according to the specified indexes where the element at - * the first index is assigned as the first element, the element at - * the second index is assigned as the second element, and so on. - * - * @private - * @param {Array} array The array to reorder. - * @param {Array} indexes The arranged array indexes. - * @returns {Array} Returns `array`. - */ - function reorder(array, indexes) { - var arrLength = array.length, - length = nativeMin(indexes.length, arrLength), - oldArray = copyArray(array); - - while (length--) { - var index = indexes[length]; - array[length] = isIndex(index, arrLength) ? oldArray[index] : undefined; - } - return array; - } - - /** - * Sets metadata for `func`. - * - * **Note:** If this function becomes hot, i.e. is invoked a lot in a short - * period of time, it will trip its breaker and transition to an identity - * function to avoid garbage collection pauses in V8. See - * [V8 issue 2070](https://bugs.chromium.org/p/v8/issues/detail?id=2070) - * for more details. - * - * @private - * @param {Function} func The function to associate metadata with. - * @param {*} data The metadata. - * @returns {Function} Returns `func`. - */ - var setData = shortOut(baseSetData); - - /** - * A simple wrapper around the global [`setTimeout`](https://mdn.io/setTimeout). - * - * @private - * @param {Function} func The function to delay. - * @param {number} wait The number of milliseconds to delay invocation. - * @returns {number|Object} Returns the timer id or timeout object. - */ - var setTimeout = ctxSetTimeout || function(func, wait) { - return root.setTimeout(func, wait); - }; - - /** - * Sets the `toString` method of `func` to return `string`. - * - * @private - * @param {Function} func The function to modify. - * @param {Function} string The `toString` result. - * @returns {Function} Returns `func`. - */ - var setToString = shortOut(baseSetToString); - - /** - * Sets the `toString` method of `wrapper` to mimic the source of `reference` - * with wrapper details in a comment at the top of the source body. - * - * @private - * @param {Function} wrapper The function to modify. - * @param {Function} reference The reference function. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @returns {Function} Returns `wrapper`. 
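- * @example
- *
- * // Sketch (private helper): the returned wrapper's `toString` mimics
- * // `reference`, with a '[wrapped with _.bind]'-style details comment
- * // spliced into the reported source.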
- */
- function setWrapToString(wrapper, reference, bitmask) {
- var source = (reference + '');
- return setToString(wrapper, insertWrapDetails(source, updateWrapDetails(getWrapDetails(source), bitmask)));
- }
-
- /**
- * Creates a function that'll short out and invoke `identity` instead
- * of `func` when it's called `HOT_COUNT` or more times in `HOT_SPAN`
- * milliseconds.
- *
- * @private
- * @param {Function} func The function to restrict.
- * @returns {Function} Returns the new shortable function.
- */
- function shortOut(func) {
- var count = 0,
- lastCalled = 0;
-
- return function() {
- var stamp = nativeNow(),
- remaining = HOT_SPAN - (stamp - lastCalled);
-
- lastCalled = stamp;
- if (remaining > 0) {
- if (++count >= HOT_COUNT) {
- return arguments[0];
- }
- } else {
- count = 0;
- }
- return func.apply(undefined, arguments);
- };
- }
-
- /**
- * A specialized version of `_.shuffle` which mutates and sets the size of `array`.
- *
- * @private
- * @param {Array} array The array to shuffle.
- * @param {number} [size=array.length] The size of `array`.
- * @returns {Array} Returns `array`.
- */
- function shuffleSelf(array, size) {
- var index = -1,
- length = array.length,
- lastIndex = length - 1;
-
- size = size === undefined ? length : size;
- while (++index < size) {
- var rand = baseRandom(index, lastIndex),
- value = array[rand];
-
- array[rand] = array[index];
- array[index] = value;
- }
- array.length = size;
- return array;
- }
-
- /**
- * Converts `string` to a property path array.
- *
- * @private
- * @param {string} string The string to convert.
- * @returns {Array} Returns the property path array.
- */
- var stringToPath = memoizeCapped(function(string) {
- var result = [];
- if (string.charCodeAt(0) === 46 /* . */) {
- result.push('');
- }
- string.replace(rePropName, function(match, number, quote, subString) {
- result.push(quote ? subString.replace(reEscapeChar, '$1') : (number || match));
- });
- return result;
- });
-
- /**
- * Converts `value` to a string key if it's not a string or symbol.
- *
- * @private
- * @param {*} value The value to inspect.
- * @returns {string|symbol} Returns the key.
- */
- function toKey(value) {
- if (typeof value == 'string' || isSymbol(value)) {
- return value;
- }
- var result = (value + '');
- return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
- }
-
- /**
- * Converts `func` to its source code.
- *
- * @private
- * @param {Function} func The function to convert.
- * @returns {string} Returns the source code.
- */
- function toSource(func) {
- if (func != null) {
- try {
- return funcToString.call(func);
- } catch (e) {}
- try {
- return (func + '');
- } catch (e) {}
- }
- return '';
- }
-
- /**
- * Updates wrapper `details` based on `bitmask` flags.
- *
- * @private
- * @param {Array} details The details to modify.
- * @param {number} bitmask The bitmask flags. See `createWrap` for more details.
- * @returns {Array} Returns `details`.
- */
- function updateWrapDetails(details, bitmask) {
- arrayEach(wrapFlags, function(pair) {
- var value = '_.' + pair[0];
- if ((bitmask & pair[1]) && !arrayIncludes(details, value)) {
- details.push(value);
- }
- });
- return details.sort();
- }
-
- /**
- * Creates a clone of `wrapper`.
- *
- * @private
- * @param {Object} wrapper The wrapper to clone.
- * @returns {Object} Returns the cloned wrapper.
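- * @example
- *
- * // Rough sketch (private helper):
- * // var copy = wrapperClone(_([1, 2, 3]));
- * // `copy.__actions__` is a fresh copy, so actions queued on the clone
- * // don't mutate the original wrapper's pending action list.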
- */ - function wrapperClone(wrapper) { - if (wrapper instanceof LazyWrapper) { - return wrapper.clone(); - } - var result = new LodashWrapper(wrapper.__wrapped__, wrapper.__chain__); - result.__actions__ = copyArray(wrapper.__actions__); - result.__index__ = wrapper.__index__; - result.__values__ = wrapper.__values__; - return result; - } - - /*------------------------------------------------------------------------*/ - - /** - * Creates an array of elements split into groups the length of `size`. - * If `array` can't be split evenly, the final chunk will be the remaining - * elements. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to process. - * @param {number} [size=1] The length of each chunk - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the new array of chunks. - * @example - * - * _.chunk(['a', 'b', 'c', 'd'], 2); - * // => [['a', 'b'], ['c', 'd']] - * - * _.chunk(['a', 'b', 'c', 'd'], 3); - * // => [['a', 'b', 'c'], ['d']] - */ - function chunk(array, size, guard) { - if ((guard ? isIterateeCall(array, size, guard) : size === undefined)) { - size = 1; - } else { - size = nativeMax(toInteger(size), 0); - } - var length = array == null ? 0 : array.length; - if (!length || size < 1) { - return []; - } - var index = 0, - resIndex = 0, - result = Array(nativeCeil(length / size)); - - while (index < length) { - result[resIndex++] = baseSlice(array, index, (index += size)); - } - return result; - } - - /** - * Creates an array with all falsey values removed. The values `false`, `null`, - * `0`, `""`, `undefined`, and `NaN` are falsey. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to compact. - * @returns {Array} Returns the new array of filtered values. - * @example - * - * _.compact([0, 1, false, 2, '', 3]); - * // => [1, 2, 3] - */ - function compact(array) { - var index = -1, - length = array == null ? 0 : array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index]; - if (value) { - result[resIndex++] = value; - } - } - return result; - } - - /** - * Creates a new array concatenating `array` with any additional arrays - * and/or values. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to concatenate. - * @param {...*} [values] The values to concatenate. - * @returns {Array} Returns the new concatenated array. - * @example - * - * var array = [1]; - * var other = _.concat(array, 2, [3], [[4]]); - * - * console.log(other); - * // => [1, 2, 3, [4]] - * - * console.log(array); - * // => [1] - */ - function concat() { - var length = arguments.length; - if (!length) { - return []; - } - var args = Array(length - 1), - array = arguments[0], - index = length; - - while (index--) { - args[index - 1] = arguments[index]; - } - return arrayPush(isArray(array) ? copyArray(array) : [array], baseFlatten(args, 1)); - } - - /** - * Creates an array of `array` values not included in the other given arrays - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. The order and references of result values are - * determined by the first array. - * - * **Note:** Unlike `_.pullAll`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to inspect. 
- * @param {...Array} [values] The values to exclude. - * @returns {Array} Returns the new array of filtered values. - * @see _.without, _.xor - * @example - * - * _.difference([2, 1], [2, 3]); - * // => [1] - */ - var difference = baseRest(function(array, values) { - return isArrayLikeObject(array) - ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true)) - : []; - }); - - /** - * This method is like `_.difference` except that it accepts `iteratee` which - * is invoked for each element of `array` and `values` to generate the criterion - * by which they're compared. The order and references of result values are - * determined by the first array. The iteratee is invoked with one argument: - * (value). - * - * **Note:** Unlike `_.pullAllBy`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {...Array} [values] The values to exclude. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new array of filtered values. - * @example - * - * _.differenceBy([2.1, 1.2], [2.3, 3.4], Math.floor); - * // => [1.2] - * - * // The `_.property` iteratee shorthand. - * _.differenceBy([{ 'x': 2 }, { 'x': 1 }], [{ 'x': 1 }], 'x'); - * // => [{ 'x': 2 }] - */ - var differenceBy = baseRest(function(array, values) { - var iteratee = last(values); - if (isArrayLikeObject(iteratee)) { - iteratee = undefined; - } - return isArrayLikeObject(array) - ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), getIteratee(iteratee, 2)) - : []; - }); - - /** - * This method is like `_.difference` except that it accepts `comparator` - * which is invoked to compare elements of `array` to `values`. The order and - * references of result values are determined by the first array. The comparator - * is invoked with two arguments: (arrVal, othVal). - * - * **Note:** Unlike `_.pullAllWith`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {...Array} [values] The values to exclude. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of filtered values. - * @example - * - * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; - * - * _.differenceWith(objects, [{ 'x': 1, 'y': 2 }], _.isEqual); - * // => [{ 'x': 2, 'y': 1 }] - */ - var differenceWith = baseRest(function(array, values) { - var comparator = last(values); - if (isArrayLikeObject(comparator)) { - comparator = undefined; - } - return isArrayLikeObject(array) - ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), undefined, comparator) - : []; - }); - - /** - * Creates a slice of `array` with `n` elements dropped from the beginning. - * - * @static - * @memberOf _ - * @since 0.5.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to drop. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.drop([1, 2, 3]); - * // => [2, 3] - * - * _.drop([1, 2, 3], 2); - * // => [3] - * - * _.drop([1, 2, 3], 5); - * // => [] - * - * _.drop([1, 2, 3], 0); - * // => [1, 2, 3] - */ - function drop(array, n, guard) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - n = (guard || n === undefined) ? 
1 : toInteger(n); - return baseSlice(array, n < 0 ? 0 : n, length); - } - - /** - * Creates a slice of `array` with `n` elements dropped from the end. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to drop. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.dropRight([1, 2, 3]); - * // => [1, 2] - * - * _.dropRight([1, 2, 3], 2); - * // => [1] - * - * _.dropRight([1, 2, 3], 5); - * // => [] - * - * _.dropRight([1, 2, 3], 0); - * // => [1, 2, 3] - */ - function dropRight(array, n, guard) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - n = (guard || n === undefined) ? 1 : toInteger(n); - n = length - n; - return baseSlice(array, 0, n < 0 ? 0 : n); - } - - /** - * Creates a slice of `array` excluding elements dropped from the end. - * Elements are dropped until `predicate` returns falsey. The predicate is - * invoked with three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': false } - * ]; - * - * _.dropRightWhile(users, function(o) { return !o.active; }); - * // => objects for ['barney'] - * - * // The `_.matches` iteratee shorthand. - * _.dropRightWhile(users, { 'user': 'pebbles', 'active': false }); - * // => objects for ['barney', 'fred'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.dropRightWhile(users, ['active', false]); - * // => objects for ['barney'] - * - * // The `_.property` iteratee shorthand. - * _.dropRightWhile(users, 'active'); - * // => objects for ['barney', 'fred', 'pebbles'] - */ - function dropRightWhile(array, predicate) { - return (array && array.length) - ? baseWhile(array, getIteratee(predicate, 3), true, true) - : []; - } - - /** - * Creates a slice of `array` excluding elements dropped from the beginning. - * Elements are dropped until `predicate` returns falsey. The predicate is - * invoked with three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': false }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': true } - * ]; - * - * _.dropWhile(users, function(o) { return !o.active; }); - * // => objects for ['pebbles'] - * - * // The `_.matches` iteratee shorthand. - * _.dropWhile(users, { 'user': 'barney', 'active': false }); - * // => objects for ['fred', 'pebbles'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.dropWhile(users, ['active', false]); - * // => objects for ['pebbles'] - * - * // The `_.property` iteratee shorthand. - * _.dropWhile(users, 'active'); - * // => objects for ['barney', 'fred', 'pebbles'] - */ - function dropWhile(array, predicate) { - return (array && array.length) - ? 
baseWhile(array, getIteratee(predicate, 3), true) - : []; - } - - /** - * Fills elements of `array` with `value` from `start` up to, but not - * including, `end`. - * - * **Note:** This method mutates `array`. - * - * @static - * @memberOf _ - * @since 3.2.0 - * @category Array - * @param {Array} array The array to fill. - * @param {*} value The value to fill `array` with. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns `array`. - * @example - * - * var array = [1, 2, 3]; - * - * _.fill(array, 'a'); - * console.log(array); - * // => ['a', 'a', 'a'] - * - * _.fill(Array(3), 2); - * // => [2, 2, 2] - * - * _.fill([4, 6, 8, 10], '*', 1, 3); - * // => [4, '*', '*', 10] - */ - function fill(array, value, start, end) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - if (start && typeof start != 'number' && isIterateeCall(array, value, start)) { - start = 0; - end = length; - } - return baseFill(array, value, start, end); - } - - /** - * This method is like `_.find` except that it returns the index of the first - * element `predicate` returns truthy for instead of the element itself. - * - * @static - * @memberOf _ - * @since 1.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=0] The index to search from. - * @returns {number} Returns the index of the found element, else `-1`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': false }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': true } - * ]; - * - * _.findIndex(users, function(o) { return o.user == 'barney'; }); - * // => 0 - * - * // The `_.matches` iteratee shorthand. - * _.findIndex(users, { 'user': 'fred', 'active': false }); - * // => 1 - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findIndex(users, ['active', false]); - * // => 0 - * - * // The `_.property` iteratee shorthand. - * _.findIndex(users, 'active'); - * // => 2 - */ - function findIndex(array, predicate, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = fromIndex == null ? 0 : toInteger(fromIndex); - if (index < 0) { - index = nativeMax(length + index, 0); - } - return baseFindIndex(array, getIteratee(predicate, 3), index); - } - - /** - * This method is like `_.findIndex` except that it iterates over elements - * of `collection` from right to left. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=array.length-1] The index to search from. - * @returns {number} Returns the index of the found element, else `-1`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': false } - * ]; - * - * _.findLastIndex(users, function(o) { return o.user == 'pebbles'; }); - * // => 2 - * - * // The `_.matches` iteratee shorthand. - * _.findLastIndex(users, { 'user': 'barney', 'active': true }); - * // => 0 - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findLastIndex(users, ['active', false]); - * // => 2 - * - * // The `_.property` iteratee shorthand. 
- * _.findLastIndex(users, 'active'); - * // => 0 - */ - function findLastIndex(array, predicate, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = length - 1; - if (fromIndex !== undefined) { - index = toInteger(fromIndex); - index = fromIndex < 0 - ? nativeMax(length + index, 0) - : nativeMin(index, length - 1); - } - return baseFindIndex(array, getIteratee(predicate, 3), index, true); - } - - /** - * Flattens `array` a single level deep. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to flatten. - * @returns {Array} Returns the new flattened array. - * @example - * - * _.flatten([1, [2, [3, [4]], 5]]); - * // => [1, 2, [3, [4]], 5] - */ - function flatten(array) { - var length = array == null ? 0 : array.length; - return length ? baseFlatten(array, 1) : []; - } - - /** - * Recursively flattens `array`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to flatten. - * @returns {Array} Returns the new flattened array. - * @example - * - * _.flattenDeep([1, [2, [3, [4]], 5]]); - * // => [1, 2, 3, 4, 5] - */ - function flattenDeep(array) { - var length = array == null ? 0 : array.length; - return length ? baseFlatten(array, INFINITY) : []; - } - - /** - * Recursively flatten `array` up to `depth` times. - * - * @static - * @memberOf _ - * @since 4.4.0 - * @category Array - * @param {Array} array The array to flatten. - * @param {number} [depth=1] The maximum recursion depth. - * @returns {Array} Returns the new flattened array. - * @example - * - * var array = [1, [2, [3, [4]], 5]]; - * - * _.flattenDepth(array, 1); - * // => [1, 2, [3, [4]], 5] - * - * _.flattenDepth(array, 2); - * // => [1, 2, 3, [4], 5] - */ - function flattenDepth(array, depth) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - depth = depth === undefined ? 1 : toInteger(depth); - return baseFlatten(array, depth); - } - - /** - * The inverse of `_.toPairs`; this method returns an object composed - * from key-value `pairs`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} pairs The key-value pairs. - * @returns {Object} Returns the new object. - * @example - * - * _.fromPairs([['a', 1], ['b', 2]]); - * // => { 'a': 1, 'b': 2 } - */ - function fromPairs(pairs) { - var index = -1, - length = pairs == null ? 0 : pairs.length, - result = {}; - - while (++index < length) { - var pair = pairs[index]; - result[pair[0]] = pair[1]; - } - return result; - } - - /** - * Gets the first element of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @alias first - * @category Array - * @param {Array} array The array to query. - * @returns {*} Returns the first element of `array`. - * @example - * - * _.head([1, 2, 3]); - * // => 1 - * - * _.head([]); - * // => undefined - */ - function head(array) { - return (array && array.length) ? array[0] : undefined; - } - - /** - * Gets the index at which the first occurrence of `value` is found in `array` - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. If `fromIndex` is negative, it's used as the - * offset from the end of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} [fromIndex=0] The index to search from. 
- * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.indexOf([1, 2, 1, 2], 2); - * // => 1 - * - * // Search from the `fromIndex`. - * _.indexOf([1, 2, 1, 2], 2, 2); - * // => 3 - */ - function indexOf(array, value, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = fromIndex == null ? 0 : toInteger(fromIndex); - if (index < 0) { - index = nativeMax(length + index, 0); - } - return baseIndexOf(array, value, index); - } - - /** - * Gets all but the last element of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to query. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.initial([1, 2, 3]); - * // => [1, 2] - */ - function initial(array) { - var length = array == null ? 0 : array.length; - return length ? baseSlice(array, 0, -1) : []; - } - - /** - * Creates an array of unique values that are included in all given arrays - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. The order and references of result values are - * determined by the first array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @returns {Array} Returns the new array of intersecting values. - * @example - * - * _.intersection([2, 1], [2, 3]); - * // => [2] - */ - var intersection = baseRest(function(arrays) { - var mapped = arrayMap(arrays, castArrayLikeObject); - return (mapped.length && mapped[0] === arrays[0]) - ? baseIntersection(mapped) - : []; - }); - - /** - * This method is like `_.intersection` except that it accepts `iteratee` - * which is invoked for each element of each `arrays` to generate the criterion - * by which they're compared. The order and references of result values are - * determined by the first array. The iteratee is invoked with one argument: - * (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new array of intersecting values. - * @example - * - * _.intersectionBy([2.1, 1.2], [2.3, 3.4], Math.floor); - * // => [2.1] - * - * // The `_.property` iteratee shorthand. - * _.intersectionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); - * // => [{ 'x': 1 }] - */ - var intersectionBy = baseRest(function(arrays) { - var iteratee = last(arrays), - mapped = arrayMap(arrays, castArrayLikeObject); - - if (iteratee === last(mapped)) { - iteratee = undefined; - } else { - mapped.pop(); - } - return (mapped.length && mapped[0] === arrays[0]) - ? baseIntersection(mapped, getIteratee(iteratee, 2)) - : []; - }); - - /** - * This method is like `_.intersection` except that it accepts `comparator` - * which is invoked to compare elements of `arrays`. The order and references - * of result values are determined by the first array. The comparator is - * invoked with two arguments: (arrVal, othVal). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of intersecting values. 
- * @example - * - * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; - * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }]; - * - * _.intersectionWith(objects, others, _.isEqual); - * // => [{ 'x': 1, 'y': 2 }] - */ - var intersectionWith = baseRest(function(arrays) { - var comparator = last(arrays), - mapped = arrayMap(arrays, castArrayLikeObject); - - comparator = typeof comparator == 'function' ? comparator : undefined; - if (comparator) { - mapped.pop(); - } - return (mapped.length && mapped[0] === arrays[0]) - ? baseIntersection(mapped, undefined, comparator) - : []; - }); - - /** - * Converts all elements in `array` into a string separated by `separator`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to convert. - * @param {string} [separator=','] The element separator. - * @returns {string} Returns the joined string. - * @example - * - * _.join(['a', 'b', 'c'], '~'); - * // => 'a~b~c' - */ - function join(array, separator) { - return array == null ? '' : nativeJoin.call(array, separator); - } - - /** - * Gets the last element of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to query. - * @returns {*} Returns the last element of `array`. - * @example - * - * _.last([1, 2, 3]); - * // => 3 - */ - function last(array) { - var length = array == null ? 0 : array.length; - return length ? array[length - 1] : undefined; - } - - /** - * This method is like `_.indexOf` except that it iterates over elements of - * `array` from right to left. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} [fromIndex=array.length-1] The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.lastIndexOf([1, 2, 1, 2], 2); - * // => 3 - * - * // Search from the `fromIndex`. - * _.lastIndexOf([1, 2, 1, 2], 2, 2); - * // => 1 - */ - function lastIndexOf(array, value, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = length; - if (fromIndex !== undefined) { - index = toInteger(fromIndex); - index = index < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1); - } - return value === value - ? strictLastIndexOf(array, value, index) - : baseFindIndex(array, baseIsNaN, index, true); - } - - /** - * Gets the element at index `n` of `array`. If `n` is negative, the nth - * element from the end is returned. - * - * @static - * @memberOf _ - * @since 4.11.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=0] The index of the element to return. - * @returns {*} Returns the nth element of `array`. - * @example - * - * var array = ['a', 'b', 'c', 'd']; - * - * _.nth(array, 1); - * // => 'b' - * - * _.nth(array, -2); - * // => 'c'; - */ - function nth(array, n) { - return (array && array.length) ? baseNth(array, toInteger(n)) : undefined; - } - - /** - * Removes all given values from `array` using - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. - * - * **Note:** Unlike `_.without`, this method mutates `array`. Use `_.remove` - * to remove elements from an array by predicate. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Array - * @param {Array} array The array to modify. 
- * @param {...*} [values] The values to remove. - * @returns {Array} Returns `array`. - * @example - * - * var array = ['a', 'b', 'c', 'a', 'b', 'c']; - * - * _.pull(array, 'a', 'c'); - * console.log(array); - * // => ['b', 'b'] - */ - var pull = baseRest(pullAll); - - /** - * This method is like `_.pull` except that it accepts an array of values to remove. - * - * **Note:** Unlike `_.difference`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @returns {Array} Returns `array`. - * @example - * - * var array = ['a', 'b', 'c', 'a', 'b', 'c']; - * - * _.pullAll(array, ['a', 'c']); - * console.log(array); - * // => ['b', 'b'] - */ - function pullAll(array, values) { - return (array && array.length && values && values.length) - ? basePullAll(array, values) - : array; - } - - /** - * This method is like `_.pullAll` except that it accepts `iteratee` which is - * invoked for each element of `array` and `values` to generate the criterion - * by which they're compared. The iteratee is invoked with one argument: (value). - * - * **Note:** Unlike `_.differenceBy`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns `array`. - * @example - * - * var array = [{ 'x': 1 }, { 'x': 2 }, { 'x': 3 }, { 'x': 1 }]; - * - * _.pullAllBy(array, [{ 'x': 1 }, { 'x': 3 }], 'x'); - * console.log(array); - * // => [{ 'x': 2 }] - */ - function pullAllBy(array, values, iteratee) { - return (array && array.length && values && values.length) - ? basePullAll(array, values, getIteratee(iteratee, 2)) - : array; - } - - /** - * This method is like `_.pullAll` except that it accepts `comparator` which - * is invoked to compare elements of `array` to `values`. The comparator is - * invoked with two arguments: (arrVal, othVal). - * - * **Note:** Unlike `_.differenceWith`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 4.6.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns `array`. - * @example - * - * var array = [{ 'x': 1, 'y': 2 }, { 'x': 3, 'y': 4 }, { 'x': 5, 'y': 6 }]; - * - * _.pullAllWith(array, [{ 'x': 3, 'y': 4 }], _.isEqual); - * console.log(array); - * // => [{ 'x': 1, 'y': 2 }, { 'x': 5, 'y': 6 }] - */ - function pullAllWith(array, values, comparator) { - return (array && array.length && values && values.length) - ? basePullAll(array, values, undefined, comparator) - : array; - } - - /** - * Removes elements from `array` corresponding to `indexes` and returns an - * array of removed elements. - * - * **Note:** Unlike `_.at`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {...(number|number[])} [indexes] The indexes of elements to remove. - * @returns {Array} Returns the new array of removed elements. 
- * @example - * - * var array = ['a', 'b', 'c', 'd']; - * var pulled = _.pullAt(array, [1, 3]); - * - * console.log(array); - * // => ['a', 'c'] - * - * console.log(pulled); - * // => ['b', 'd'] - */ - var pullAt = flatRest(function(array, indexes) { - var length = array == null ? 0 : array.length, - result = baseAt(array, indexes); - - basePullAt(array, arrayMap(indexes, function(index) { - return isIndex(index, length) ? +index : index; - }).sort(compareAscending)); - - return result; - }); - - /** - * Removes all elements from `array` that `predicate` returns truthy for - * and returns an array of the removed elements. The predicate is invoked - * with three arguments: (value, index, array). - * - * **Note:** Unlike `_.filter`, this method mutates `array`. Use `_.pull` - * to pull elements from an array by value. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new array of removed elements. - * @example - * - * var array = [1, 2, 3, 4]; - * var evens = _.remove(array, function(n) { - * return n % 2 == 0; - * }); - * - * console.log(array); - * // => [1, 3] - * - * console.log(evens); - * // => [2, 4] - */ - function remove(array, predicate) { - var result = []; - if (!(array && array.length)) { - return result; - } - var index = -1, - indexes = [], - length = array.length; - - predicate = getIteratee(predicate, 3); - while (++index < length) { - var value = array[index]; - if (predicate(value, index, array)) { - result.push(value); - indexes.push(index); - } - } - basePullAt(array, indexes); - return result; - } - - /** - * Reverses `array` so that the first element becomes the last, the second - * element becomes the second to last, and so on. - * - * **Note:** This method mutates `array` and is based on - * [`Array#reverse`](https://mdn.io/Array/reverse). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to modify. - * @returns {Array} Returns `array`. - * @example - * - * var array = [1, 2, 3]; - * - * _.reverse(array); - * // => [3, 2, 1] - * - * console.log(array); - * // => [3, 2, 1] - */ - function reverse(array) { - return array == null ? array : nativeReverse.call(array); - } - - /** - * Creates a slice of `array` from `start` up to, but not including, `end`. - * - * **Note:** This method is used instead of - * [`Array#slice`](https://mdn.io/Array/slice) to ensure dense arrays are - * returned. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to slice. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the slice of `array`. - */ - function slice(array, start, end) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - if (end && typeof end != 'number' && isIterateeCall(array, start, end)) { - start = 0; - end = length; - } - else { - start = start == null ? 0 : toInteger(start); - end = end === undefined ? length : toInteger(end); - } - return baseSlice(array, start, end); - } - - /** - * Uses a binary search to determine the lowest index at which `value` - * should be inserted into `array` in order to maintain its sort order. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The sorted array to inspect. 
- * @param {*} value The value to evaluate. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * _.sortedIndex([30, 50], 40); - * // => 1 - */ - function sortedIndex(array, value) { - return baseSortedIndex(array, value); - } - - /** - * This method is like `_.sortedIndex` except that it accepts `iteratee` - * which is invoked for `value` and each element of `array` to compute their - * sort ranking. The iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * var objects = [{ 'x': 4 }, { 'x': 5 }]; - * - * _.sortedIndexBy(objects, { 'x': 4 }, function(o) { return o.x; }); - * // => 0 - * - * // The `_.property` iteratee shorthand. - * _.sortedIndexBy(objects, { 'x': 4 }, 'x'); - * // => 0 - */ - function sortedIndexBy(array, value, iteratee) { - return baseSortedIndexBy(array, value, getIteratee(iteratee, 2)); - } - - /** - * This method is like `_.indexOf` except that it performs a binary - * search on a sorted `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.sortedIndexOf([4, 5, 5, 5, 6], 5); - * // => 1 - */ - function sortedIndexOf(array, value) { - var length = array == null ? 0 : array.length; - if (length) { - var index = baseSortedIndex(array, value); - if (index < length && eq(array[index], value)) { - return index; - } - } - return -1; - } - - /** - * This method is like `_.sortedIndex` except that it returns the highest - * index at which `value` should be inserted into `array` in order to - * maintain its sort order. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * _.sortedLastIndex([4, 5, 5, 5, 6], 5); - * // => 4 - */ - function sortedLastIndex(array, value) { - return baseSortedIndex(array, value, true); - } - - /** - * This method is like `_.sortedLastIndex` except that it accepts `iteratee` - * which is invoked for `value` and each element of `array` to compute their - * sort ranking. The iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * var objects = [{ 'x': 4 }, { 'x': 5 }]; - * - * _.sortedLastIndexBy(objects, { 'x': 4 }, function(o) { return o.x; }); - * // => 1 - * - * // The `_.property` iteratee shorthand. 
- * _.sortedLastIndexBy(objects, { 'x': 4 }, 'x'); - * // => 1 - */ - function sortedLastIndexBy(array, value, iteratee) { - return baseSortedIndexBy(array, value, getIteratee(iteratee, 2), true); - } - - /** - * This method is like `_.lastIndexOf` except that it performs a binary - * search on a sorted `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.sortedLastIndexOf([4, 5, 5, 5, 6], 5); - * // => 3 - */ - function sortedLastIndexOf(array, value) { - var length = array == null ? 0 : array.length; - if (length) { - var index = baseSortedIndex(array, value, true) - 1; - if (eq(array[index], value)) { - return index; - } - } - return -1; - } - - /** - * This method is like `_.uniq` except that it's designed and optimized - * for sorted arrays. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @returns {Array} Returns the new duplicate free array. - * @example - * - * _.sortedUniq([1, 1, 2]); - * // => [1, 2] - */ - function sortedUniq(array) { - return (array && array.length) - ? baseSortedUniq(array) - : []; - } - - /** - * This method is like `_.uniqBy` except that it's designed and optimized - * for sorted arrays. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @returns {Array} Returns the new duplicate free array. - * @example - * - * _.sortedUniqBy([1.1, 1.2, 2.3, 2.4], Math.floor); - * // => [1.1, 2.3] - */ - function sortedUniqBy(array, iteratee) { - return (array && array.length) - ? baseSortedUniq(array, getIteratee(iteratee, 2)) - : []; - } - - /** - * Gets all but the first element of `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to query. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.tail([1, 2, 3]); - * // => [2, 3] - */ - function tail(array) { - var length = array == null ? 0 : array.length; - return length ? baseSlice(array, 1, length) : []; - } - - /** - * Creates a slice of `array` with `n` elements taken from the beginning. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to take. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.take([1, 2, 3]); - * // => [1] - * - * _.take([1, 2, 3], 2); - * // => [1, 2] - * - * _.take([1, 2, 3], 5); - * // => [1, 2, 3] - * - * _.take([1, 2, 3], 0); - * // => [] - */ - function take(array, n, guard) { - if (!(array && array.length)) { - return []; - } - n = (guard || n === undefined) ? 1 : toInteger(n); - return baseSlice(array, 0, n < 0 ? 0 : n); - } - - /** - * Creates a slice of `array` with `n` elements taken from the end. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to take. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. 
- * @example - * - * _.takeRight([1, 2, 3]); - * // => [3] - * - * _.takeRight([1, 2, 3], 2); - * // => [2, 3] - * - * _.takeRight([1, 2, 3], 5); - * // => [1, 2, 3] - * - * _.takeRight([1, 2, 3], 0); - * // => [] - */ - function takeRight(array, n, guard) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - n = (guard || n === undefined) ? 1 : toInteger(n); - n = length - n; - return baseSlice(array, n < 0 ? 0 : n, length); - } - - /** - * Creates a slice of `array` with elements taken from the end. Elements are - * taken until `predicate` returns falsey. The predicate is invoked with - * three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': false } - * ]; - * - * _.takeRightWhile(users, function(o) { return !o.active; }); - * // => objects for ['fred', 'pebbles'] - * - * // The `_.matches` iteratee shorthand. - * _.takeRightWhile(users, { 'user': 'pebbles', 'active': false }); - * // => objects for ['pebbles'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.takeRightWhile(users, ['active', false]); - * // => objects for ['fred', 'pebbles'] - * - * // The `_.property` iteratee shorthand. - * _.takeRightWhile(users, 'active'); - * // => [] - */ - function takeRightWhile(array, predicate) { - return (array && array.length) - ? baseWhile(array, getIteratee(predicate, 3), false, true) - : []; - } - - /** - * Creates a slice of `array` with elements taken from the beginning. Elements - * are taken until `predicate` returns falsey. The predicate is invoked with - * three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': false }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': true } - * ]; - * - * _.takeWhile(users, function(o) { return !o.active; }); - * // => objects for ['barney', 'fred'] - * - * // The `_.matches` iteratee shorthand. - * _.takeWhile(users, { 'user': 'barney', 'active': false }); - * // => objects for ['barney'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.takeWhile(users, ['active', false]); - * // => objects for ['barney', 'fred'] - * - * // The `_.property` iteratee shorthand. - * _.takeWhile(users, 'active'); - * // => [] - */ - function takeWhile(array, predicate) { - return (array && array.length) - ? baseWhile(array, getIteratee(predicate, 3)) - : []; - } - - /** - * Creates an array of unique values, in order, from all given arrays using - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @returns {Array} Returns the new array of combined values. 
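One consequence of the SameValueZero comparisons used by `_.union` (and `_.uniq`) is that `NaN` matches itself, unlike with `Array#indexOf`; a small sketch (assuming lodash is loaded as `_`):

```
_.union([NaN], [NaN]);
// => [NaN]

[NaN].indexOf(NaN);
// => -1, because `===` treats NaN as unequal to itself
```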
- * @example - * - * _.union([2], [1, 2]); - * // => [2, 1] - */ - var union = baseRest(function(arrays) { - return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true)); - }); - - /** - * This method is like `_.union` except that it accepts `iteratee` which is - * invoked for each element of each `arrays` to generate the criterion by - * which uniqueness is computed. Result values are chosen from the first - * array in which the value occurs. The iteratee is invoked with one argument: - * (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new array of combined values. - * @example - * - * _.unionBy([2.1], [1.2, 2.3], Math.floor); - * // => [2.1, 1.2] - * - * // The `_.property` iteratee shorthand. - * _.unionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); - * // => [{ 'x': 1 }, { 'x': 2 }] - */ - var unionBy = baseRest(function(arrays) { - var iteratee = last(arrays); - if (isArrayLikeObject(iteratee)) { - iteratee = undefined; - } - return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), getIteratee(iteratee, 2)); - }); - - /** - * This method is like `_.union` except that it accepts `comparator` which - * is invoked to compare elements of `arrays`. Result values are chosen from - * the first array in which the value occurs. The comparator is invoked - * with two arguments: (arrVal, othVal). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of combined values. - * @example - * - * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; - * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }]; - * - * _.unionWith(objects, others, _.isEqual); - * // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }] - */ - var unionWith = baseRest(function(arrays) { - var comparator = last(arrays); - comparator = typeof comparator == 'function' ? comparator : undefined; - return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), undefined, comparator); - }); - - /** - * Creates a duplicate-free version of an array, using - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons, in which only the first occurrence of each element - * is kept. The order of result values is determined by the order they occur - * in the array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @returns {Array} Returns the new duplicate free array. - * @example - * - * _.uniq([2, 1, 2]); - * // => [2, 1] - */ - function uniq(array) { - return (array && array.length) ? baseUniq(array) : []; - } - - /** - * This method is like `_.uniq` except that it accepts `iteratee` which is - * invoked for each element in `array` to generate the criterion by which - * uniqueness is computed. The order of result values is determined by the - * order they occur in the array. The iteratee is invoked with one argument: - * (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new duplicate free array. 
- * @example
- *
- * _.uniqBy([2.1, 1.2, 2.3], Math.floor);
- * // => [2.1, 1.2]
- *
- * // The `_.property` iteratee shorthand.
- * _.uniqBy([{ 'x': 1 }, { 'x': 2 }, { 'x': 1 }], 'x');
- * // => [{ 'x': 1 }, { 'x': 2 }]
- */
- function uniqBy(array, iteratee) {
- return (array && array.length) ? baseUniq(array, getIteratee(iteratee, 2)) : [];
- }
-
- /**
- * This method is like `_.uniq` except that it accepts `comparator` which
- * is invoked to compare elements of `array`. The order of result values is
- * determined by the order they occur in the array. The comparator is invoked
- * with two arguments: (arrVal, othVal).
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Array
- * @param {Array} array The array to inspect.
- * @param {Function} [comparator] The comparator invoked per element.
- * @returns {Array} Returns the new duplicate free array.
- * @example
- *
- * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 2 }];
- *
- * _.uniqWith(objects, _.isEqual);
- * // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]
- */
- function uniqWith(array, comparator) {
- comparator = typeof comparator == 'function' ? comparator : undefined;
- return (array && array.length) ? baseUniq(array, undefined, comparator) : [];
- }
-
- /**
- * This method is like `_.zip` except that it accepts an array of grouped
- * elements and creates an array regrouping the elements to their pre-zip
- * configuration.
- *
- * @static
- * @memberOf _
- * @since 1.2.0
- * @category Array
- * @param {Array} array The array of grouped elements to process.
- * @returns {Array} Returns the new array of regrouped elements.
- * @example
- *
- * var zipped = _.zip(['a', 'b'], [1, 2], [true, false]);
- * // => [['a', 1, true], ['b', 2, false]]
- *
- * _.unzip(zipped);
- * // => [['a', 'b'], [1, 2], [true, false]]
- */
- function unzip(array) {
- if (!(array && array.length)) {
- return [];
- }
- var length = 0;
- array = arrayFilter(array, function(group) {
- if (isArrayLikeObject(group)) {
- length = nativeMax(group.length, length);
- return true;
- }
- });
- return baseTimes(length, function(index) {
- return arrayMap(array, baseProperty(index));
- });
- }
-
- /**
- * This method is like `_.unzip` except that it accepts `iteratee` to specify
- * how regrouped values should be combined. The iteratee is invoked with the
- * elements of each group: (...group).
- *
- * @static
- * @memberOf _
- * @since 3.8.0
- * @category Array
- * @param {Array} array The array of grouped elements to process.
- * @param {Function} [iteratee=_.identity] The function to combine
- * regrouped values.
- * @returns {Array} Returns the new array of regrouped elements.
- * @example
- *
- * var zipped = _.zip([1, 2], [10, 20], [100, 200]);
- * // => [[1, 10, 100], [2, 20, 200]]
- *
- * _.unzipWith(zipped, _.add);
- * // => [3, 30, 300]
- */
- function unzipWith(array, iteratee) {
- if (!(array && array.length)) {
- return [];
- }
- var result = unzip(array);
- if (iteratee == null) {
- return result;
- }
- return arrayMap(result, function(group) {
- return apply(iteratee, undefined, group);
- });
- }
-
- /**
- * Creates an array excluding all given values using
- * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
- * for equality comparisons.
- *
- * **Note:** Unlike `_.pull`, this method returns a new array.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Array
- * @param {Array} array The array to inspect.
- * @param {...*} [values] The values to exclude.
- * @returns {Array} Returns the new array of filtered values.
- * @see _.difference, _.xor
- * @example
- *
- * _.without([2, 1, 2, 3], 1, 2);
- * // => [3]
- */
- var without = baseRest(function(array, values) {
- return isArrayLikeObject(array)
- ? baseDifference(array, values)
- : [];
- });
-
- /**
- * Creates an array of unique values that is the
- * [symmetric difference](https://en.wikipedia.org/wiki/Symmetric_difference)
- * of the given arrays. The order of result values is determined by the order
- * they occur in the arrays.
- *
- * @static
- * @memberOf _
- * @since 2.4.0
- * @category Array
- * @param {...Array} [arrays] The arrays to inspect.
- * @returns {Array} Returns the new array of filtered values.
- * @see _.difference, _.without
- * @example
- *
- * _.xor([2, 1], [2, 3]);
- * // => [1, 3]
- */
- var xor = baseRest(function(arrays) {
- return baseXor(arrayFilter(arrays, isArrayLikeObject));
- });
-
- /**
- * This method is like `_.xor` except that it accepts `iteratee` which is
- * invoked for each element of each `arrays` to generate the criterion by
- * which they're compared. The order of result values is determined by the
- * order they occur in the arrays. The iteratee is invoked with one
- * argument: (value).
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Array
- * @param {...Array} [arrays] The arrays to inspect.
- * @param {Function} [iteratee=_.identity] The iteratee invoked per element.
- * @returns {Array} Returns the new array of filtered values.
- * @example
- *
- * _.xorBy([2.1, 1.2], [2.3, 3.4], Math.floor);
- * // => [1.2, 3.4]
- *
- * // The `_.property` iteratee shorthand.
- * _.xorBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
- * // => [{ 'x': 2 }]
- */
- var xorBy = baseRest(function(arrays) {
- var iteratee = last(arrays);
- if (isArrayLikeObject(iteratee)) {
- iteratee = undefined;
- }
- return baseXor(arrayFilter(arrays, isArrayLikeObject), getIteratee(iteratee, 2));
- });
-
- /**
- * This method is like `_.xor` except that it accepts `comparator` which is
- * invoked to compare elements of `arrays`. The order of result values is
- * determined by the order they occur in the arrays. The comparator is invoked
- * with two arguments: (arrVal, othVal).
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Array
- * @param {...Array} [arrays] The arrays to inspect.
- * @param {Function} [comparator] The comparator invoked per element.
- * @returns {Array} Returns the new array of filtered values.
- * @example
- *
- * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
- * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
- *
- * _.xorWith(objects, others, _.isEqual);
- * // => [{ 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }]
- */
- var xorWith = baseRest(function(arrays) {
- var comparator = last(arrays);
- comparator = typeof comparator == 'function' ? comparator : undefined;
- return baseXor(arrayFilter(arrays, isArrayLikeObject), undefined, comparator);
- });
-
- /**
- * Creates an array of grouped elements, the first of which contains the
- * first elements of the given arrays, the second of which contains the
- * second elements of the given arrays, and so on.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Array
- * @param {...Array} [arrays] The arrays to process.
- * @returns {Array} Returns the new array of grouped elements.
- * @example
- *
- * _.zip(['a', 'b'], [1, 2], [true, false]);
- * // => [['a', 1, true], ['b', 2, false]]
- */
- var zip = baseRest(unzip);
-
- /**
- * This method is like `_.fromPairs` except that it accepts two arrays,
- * one of property identifiers and one of corresponding values.
- *
- * @static
- * @memberOf _
- * @since 0.4.0
- * @category Array
- * @param {Array} [props=[]] The property identifiers.
- * @param {Array} [values=[]] The property values.
- * @returns {Object} Returns the new object.
- * @example
- *
- * _.zipObject(['a', 'b'], [1, 2]);
- * // => { 'a': 1, 'b': 2 }
- */
- function zipObject(props, values) {
- return baseZipObject(props || [], values || [], assignValue);
- }
-
- /**
- * This method is like `_.zipObject` except that it supports property paths.
- *
- * @static
- * @memberOf _
- * @since 4.1.0
- * @category Array
- * @param {Array} [props=[]] The property identifiers.
- * @param {Array} [values=[]] The property values.
- * @returns {Object} Returns the new object.
- * @example
- *
- * _.zipObjectDeep(['a.b[0].c', 'a.b[1].d'], [1, 2]);
- * // => { 'a': { 'b': [{ 'c': 1 }, { 'd': 2 }] } }
- */
- function zipObjectDeep(props, values) {
- return baseZipObject(props || [], values || [], baseSet);
- }
-
- /**
- * This method is like `_.zip` except that it accepts `iteratee` to specify
- * how grouped values should be combined. The iteratee is invoked with the
- * elements of each group: (...group).
- *
- * @static
- * @memberOf _
- * @since 3.8.0
- * @category Array
- * @param {...Array} [arrays] The arrays to process.
- * @param {Function} [iteratee=_.identity] The function to combine
- * grouped values.
- * @returns {Array} Returns the new array of grouped elements.
- * @example
- *
- * _.zipWith([1, 2], [10, 20], [100, 200], function(a, b, c) {
- * return a + b + c;
- * });
- * // => [111, 222]
- */
- var zipWith = baseRest(function(arrays) {
- var length = arrays.length,
- iteratee = length > 1 ? arrays[length - 1] : undefined;
-
- iteratee = typeof iteratee == 'function' ? (arrays.pop(), iteratee) : undefined;
- return unzipWith(arrays, iteratee);
- });
-
- /*------------------------------------------------------------------------*/
-
- /**
- * Creates a `lodash` wrapper instance that wraps `value` with explicit method
- * chain sequences enabled. The result of such sequences must be unwrapped
- * with `_#value`.
- *
- * @static
- * @memberOf _
- * @since 1.3.0
- * @category Seq
- * @param {*} value The value to wrap.
- * @returns {Object} Returns the new `lodash` wrapper instance.
- * @example
- *
- * var users = [
- * { 'user': 'barney', 'age': 36 },
- * { 'user': 'fred', 'age': 40 },
- * { 'user': 'pebbles', 'age': 1 }
- * ];
- *
- * var youngest = _
- * .chain(users)
- * .sortBy('age')
- * .map(function(o) {
- * return o.user + ' is ' + o.age;
- * })
- * .head()
- * .value();
- * // => 'pebbles is 1'
- */
- function chain(value) {
- var result = lodash(value);
- result.__chain__ = true;
- return result;
- }
-
- /**
- * This method invokes `interceptor` and returns `value`. The interceptor
- * is invoked with one argument: (value). The purpose of this method is to
- * "tap into" a method chain sequence in order to modify intermediate results.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Seq
- * @param {*} value The value to provide to `interceptor`.
- * @param {Function} interceptor The function to invoke.
- * @returns {*} Returns `value`.
- * @example
- *
- * _([1, 2, 3])
- * .tap(function(array) {
- * // Mutate input array.
- * array.pop(); - * }) - * .reverse() - * .value(); - * // => [2, 1] - */ - function tap(value, interceptor) { - interceptor(value); - return value; - } - - /** - * This method is like `_.tap` except that it returns the result of `interceptor`. - * The purpose of this method is to "pass thru" values replacing intermediate - * results in a method chain sequence. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Seq - * @param {*} value The value to provide to `interceptor`. - * @param {Function} interceptor The function to invoke. - * @returns {*} Returns the result of `interceptor`. - * @example - * - * _(' abc ') - * .chain() - * .trim() - * .thru(function(value) { - * return [value]; - * }) - * .value(); - * // => ['abc'] - */ - function thru(value, interceptor) { - return interceptor(value); - } - - /** - * This method is the wrapper version of `_.at`. - * - * @name at - * @memberOf _ - * @since 1.0.0 - * @category Seq - * @param {...(string|string[])} [paths] The property paths to pick. - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }, 4] }; - * - * _(object).at(['a[0].b.c', 'a[1]']).value(); - * // => [3, 4] - */ - var wrapperAt = flatRest(function(paths) { - var length = paths.length, - start = length ? paths[0] : 0, - value = this.__wrapped__, - interceptor = function(object) { return baseAt(object, paths); }; - - if (length > 1 || this.__actions__.length || - !(value instanceof LazyWrapper) || !isIndex(start)) { - return this.thru(interceptor); - } - value = value.slice(start, +start + (length ? 1 : 0)); - value.__actions__.push({ - 'func': thru, - 'args': [interceptor], - 'thisArg': undefined - }); - return new LodashWrapper(value, this.__chain__).thru(function(array) { - if (length && !array.length) { - array.push(undefined); - } - return array; - }); - }); - - /** - * Creates a `lodash` wrapper instance with explicit method chain sequences enabled. - * - * @name chain - * @memberOf _ - * @since 0.1.0 - * @category Seq - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36 }, - * { 'user': 'fred', 'age': 40 } - * ]; - * - * // A sequence without explicit chaining. - * _(users).head(); - * // => { 'user': 'barney', 'age': 36 } - * - * // A sequence with explicit chaining. - * _(users) - * .chain() - * .head() - * .pick('user') - * .value(); - * // => { 'user': 'barney' } - */ - function wrapperChain() { - return chain(this); - } - - /** - * Executes the chain sequence and returns the wrapped result. - * - * @name commit - * @memberOf _ - * @since 3.2.0 - * @category Seq - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var array = [1, 2]; - * var wrapped = _(array).push(3); - * - * console.log(array); - * // => [1, 2] - * - * wrapped = wrapped.commit(); - * console.log(array); - * // => [1, 2, 3] - * - * wrapped.last(); - * // => 3 - * - * console.log(array); - * // => [1, 2, 3] - */ - function wrapperCommit() { - return new LodashWrapper(this.value(), this.__chain__); - } - - /** - * Gets the next value on a wrapped object following the - * [iterator protocol](https://mdn.io/iteration_protocols#iterator). - * - * @name next - * @memberOf _ - * @since 4.0.0 - * @category Seq - * @returns {Object} Returns the next iterator value. 
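The difference between `_.tap` and `_.thru` is easiest to see side by side in one explicit chain; a sketch (assuming lodash is loaded as `_`):

```
_([1, 2, 3])
  .chain()
  .tap(function(array) {
    array.pop();          // return value is ignored; chain keeps [1, 2]
  })
  .thru(function(array) {
    return array.length;  // return value replaces the chain value
  })
  .value();
// => 2
```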
- * @example - * - * var wrapped = _([1, 2]); - * - * wrapped.next(); - * // => { 'done': false, 'value': 1 } - * - * wrapped.next(); - * // => { 'done': false, 'value': 2 } - * - * wrapped.next(); - * // => { 'done': true, 'value': undefined } - */ - function wrapperNext() { - if (this.__values__ === undefined) { - this.__values__ = toArray(this.value()); - } - var done = this.__index__ >= this.__values__.length, - value = done ? undefined : this.__values__[this.__index__++]; - - return { 'done': done, 'value': value }; - } - - /** - * Enables the wrapper to be iterable. - * - * @name Symbol.iterator - * @memberOf _ - * @since 4.0.0 - * @category Seq - * @returns {Object} Returns the wrapper object. - * @example - * - * var wrapped = _([1, 2]); - * - * wrapped[Symbol.iterator]() === wrapped; - * // => true - * - * Array.from(wrapped); - * // => [1, 2] - */ - function wrapperToIterator() { - return this; - } - - /** - * Creates a clone of the chain sequence planting `value` as the wrapped value. - * - * @name plant - * @memberOf _ - * @since 3.2.0 - * @category Seq - * @param {*} value The value to plant. - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * function square(n) { - * return n * n; - * } - * - * var wrapped = _([1, 2]).map(square); - * var other = wrapped.plant([3, 4]); - * - * other.value(); - * // => [9, 16] - * - * wrapped.value(); - * // => [1, 4] - */ - function wrapperPlant(value) { - var result, - parent = this; - - while (parent instanceof baseLodash) { - var clone = wrapperClone(parent); - clone.__index__ = 0; - clone.__values__ = undefined; - if (result) { - previous.__wrapped__ = clone; - } else { - result = clone; - } - var previous = clone; - parent = parent.__wrapped__; - } - previous.__wrapped__ = value; - return result; - } - - /** - * This method is the wrapper version of `_.reverse`. - * - * **Note:** This method mutates the wrapped array. - * - * @name reverse - * @memberOf _ - * @since 0.1.0 - * @category Seq - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var array = [1, 2, 3]; - * - * _(array).reverse().value() - * // => [3, 2, 1] - * - * console.log(array); - * // => [3, 2, 1] - */ - function wrapperReverse() { - var value = this.__wrapped__; - if (value instanceof LazyWrapper) { - var wrapped = value; - if (this.__actions__.length) { - wrapped = new LazyWrapper(this); - } - wrapped = wrapped.reverse(); - wrapped.__actions__.push({ - 'func': thru, - 'args': [reverse], - 'thisArg': undefined - }); - return new LodashWrapper(wrapped, this.__chain__); - } - return this.thru(reverse); - } - - /** - * Executes the chain sequence to resolve the unwrapped value. - * - * @name value - * @memberOf _ - * @since 0.1.0 - * @alias toJSON, valueOf - * @category Seq - * @returns {*} Returns the resolved unwrapped value. - * @example - * - * _([1, 2, 3]).value(); - * // => [1, 2, 3] - */ - function wrapperValue() { - return baseWrapperValue(this.__wrapped__, this.__actions__); - } - - /*------------------------------------------------------------------------*/ - - /** - * Creates an object composed of keys generated from the results of running - * each element of `collection` thru `iteratee`. The corresponding value of - * each key is the number of times the key was returned by `iteratee`. The - * iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 0.5.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. 
- * @param {Function} [iteratee=_.identity] The iteratee to transform keys. - * @returns {Object} Returns the composed aggregate object. - * @example - * - * _.countBy([6.1, 4.2, 6.3], Math.floor); - * // => { '4': 1, '6': 2 } - * - * // The `_.property` iteratee shorthand. - * _.countBy(['one', 'two', 'three'], 'length'); - * // => { '3': 2, '5': 1 } - */ - var countBy = createAggregator(function(result, value, key) { - if (hasOwnProperty.call(result, key)) { - ++result[key]; - } else { - baseAssignValue(result, key, 1); - } - }); - - /** - * Checks if `predicate` returns truthy for **all** elements of `collection`. - * Iteration is stopped once `predicate` returns falsey. The predicate is - * invoked with three arguments: (value, index|key, collection). - * - * **Note:** This method returns `true` for - * [empty collections](https://en.wikipedia.org/wiki/Empty_set) because - * [everything is true](https://en.wikipedia.org/wiki/Vacuous_truth) of - * elements of empty collections. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {boolean} Returns `true` if all elements pass the predicate check, - * else `false`. - * @example - * - * _.every([true, 1, null, 'yes'], Boolean); - * // => false - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': false }, - * { 'user': 'fred', 'age': 40, 'active': false } - * ]; - * - * // The `_.matches` iteratee shorthand. - * _.every(users, { 'user': 'barney', 'active': false }); - * // => false - * - * // The `_.matchesProperty` iteratee shorthand. - * _.every(users, ['active', false]); - * // => true - * - * // The `_.property` iteratee shorthand. - * _.every(users, 'active'); - * // => false - */ - function every(collection, predicate, guard) { - var func = isArray(collection) ? arrayEvery : baseEvery; - if (guard && isIterateeCall(collection, predicate, guard)) { - predicate = undefined; - } - return func(collection, getIteratee(predicate, 3)); - } - - /** - * Iterates over elements of `collection`, returning an array of all elements - * `predicate` returns truthy for. The predicate is invoked with three - * arguments: (value, index|key, collection). - * - * **Note:** Unlike `_.remove`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - * @see _.reject - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': true }, - * { 'user': 'fred', 'age': 40, 'active': false } - * ]; - * - * _.filter(users, function(o) { return !o.active; }); - * // => objects for ['fred'] - * - * // The `_.matches` iteratee shorthand. - * _.filter(users, { 'age': 36, 'active': true }); - * // => objects for ['barney'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.filter(users, ['active', false]); - * // => objects for ['fred'] - * - * // The `_.property` iteratee shorthand. - * _.filter(users, 'active'); - * // => objects for ['barney'] - */ - function filter(collection, predicate) { - var func = isArray(collection) ? 
arrayFilter : baseFilter; - return func(collection, getIteratee(predicate, 3)); - } - - /** - * Iterates over elements of `collection`, returning the first element - * `predicate` returns truthy for. The predicate is invoked with three - * arguments: (value, index|key, collection). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=0] The index to search from. - * @returns {*} Returns the matched element, else `undefined`. - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': true }, - * { 'user': 'fred', 'age': 40, 'active': false }, - * { 'user': 'pebbles', 'age': 1, 'active': true } - * ]; - * - * _.find(users, function(o) { return o.age < 40; }); - * // => object for 'barney' - * - * // The `_.matches` iteratee shorthand. - * _.find(users, { 'age': 1, 'active': true }); - * // => object for 'pebbles' - * - * // The `_.matchesProperty` iteratee shorthand. - * _.find(users, ['active', false]); - * // => object for 'fred' - * - * // The `_.property` iteratee shorthand. - * _.find(users, 'active'); - * // => object for 'barney' - */ - var find = createFind(findIndex); - - /** - * This method is like `_.find` except that it iterates over elements of - * `collection` from right to left. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Collection - * @param {Array|Object} collection The collection to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=collection.length-1] The index to search from. - * @returns {*} Returns the matched element, else `undefined`. - * @example - * - * _.findLast([1, 2, 3, 4], function(n) { - * return n % 2 == 1; - * }); - * // => 3 - */ - var findLast = createFind(findLastIndex); - - /** - * Creates a flattened array of values by running each element in `collection` - * thru `iteratee` and flattening the mapped results. The iteratee is invoked - * with three arguments: (value, index|key, collection). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new flattened array. - * @example - * - * function duplicate(n) { - * return [n, n]; - * } - * - * _.flatMap([1, 2], duplicate); - * // => [1, 1, 2, 2] - */ - function flatMap(collection, iteratee) { - return baseFlatten(map(collection, iteratee), 1); - } - - /** - * This method is like `_.flatMap` except that it recursively flattens the - * mapped results. - * - * @static - * @memberOf _ - * @since 4.7.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new flattened array. - * @example - * - * function duplicate(n) { - * return [[[n, n]]]; - * } - * - * _.flatMapDeep([1, 2], duplicate); - * // => [1, 1, 2, 2] - */ - function flatMapDeep(collection, iteratee) { - return baseFlatten(map(collection, iteratee), INFINITY); - } - - /** - * This method is like `_.flatMap` except that it recursively flattens the - * mapped results up to `depth` times. 
- * - * @static - * @memberOf _ - * @since 4.7.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {number} [depth=1] The maximum recursion depth. - * @returns {Array} Returns the new flattened array. - * @example - * - * function duplicate(n) { - * return [[[n, n]]]; - * } - * - * _.flatMapDepth([1, 2], duplicate, 2); - * // => [[1, 1], [2, 2]] - */ - function flatMapDepth(collection, iteratee, depth) { - depth = depth === undefined ? 1 : toInteger(depth); - return baseFlatten(map(collection, iteratee), depth); - } - - /** - * Iterates over elements of `collection` and invokes `iteratee` for each element. - * The iteratee is invoked with three arguments: (value, index|key, collection). - * Iteratee functions may exit iteration early by explicitly returning `false`. - * - * **Note:** As with other "Collections" methods, objects with a "length" - * property are iterated like arrays. To avoid this behavior use `_.forIn` - * or `_.forOwn` for object iteration. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @alias each - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - * @see _.forEachRight - * @example - * - * _.forEach([1, 2], function(value) { - * console.log(value); - * }); - * // => Logs `1` then `2`. - * - * _.forEach({ 'a': 1, 'b': 2 }, function(value, key) { - * console.log(key); - * }); - * // => Logs 'a' then 'b' (iteration order is not guaranteed). - */ - function forEach(collection, iteratee) { - var func = isArray(collection) ? arrayEach : baseEach; - return func(collection, getIteratee(iteratee, 3)); - } - - /** - * This method is like `_.forEach` except that it iterates over elements of - * `collection` from right to left. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @alias eachRight - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - * @see _.forEach - * @example - * - * _.forEachRight([1, 2], function(value) { - * console.log(value); - * }); - * // => Logs `2` then `1`. - */ - function forEachRight(collection, iteratee) { - var func = isArray(collection) ? arrayEachRight : baseEachRight; - return func(collection, getIteratee(iteratee, 3)); - } - - /** - * Creates an object composed of keys generated from the results of running - * each element of `collection` thru `iteratee`. The order of grouped values - * is determined by the order they occur in `collection`. The corresponding - * value of each key is an array of elements responsible for generating the - * key. The iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The iteratee to transform keys. - * @returns {Object} Returns the composed aggregate object. - * @example - * - * _.groupBy([6.1, 4.2, 6.3], Math.floor); - * // => { '4': [4.2], '6': [6.1, 6.3] } - * - * // The `_.property` iteratee shorthand. 
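The early-exit behavior the `_.forEach` doc above mentions, but its example doesn't show: returning `false` from the iteratee stops iteration (a sketch, assuming lodash is loaded as `_`):

```
_.forEach([1, 2, 3, 4], function(value) {
  console.log(value);
  return value < 2;  // returning `false` stops iteration after `2`
});
// => Logs `1` then `2`.
```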
- * _.groupBy(['one', 'two', 'three'], 'length');
- * // => { '3': ['one', 'two'], '5': ['three'] }
- */
- var groupBy = createAggregator(function(result, value, key) {
- if (hasOwnProperty.call(result, key)) {
- result[key].push(value);
- } else {
- baseAssignValue(result, key, [value]);
- }
- });
-
- /**
- * Checks if `value` is in `collection`. If `collection` is a string, it's
- * checked for `value` as a substring, otherwise
- * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
- * is used for equality comparisons. If `fromIndex` is negative, it's used as
- * the offset from the end of `collection`.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Collection
- * @param {Array|Object|string} collection The collection to inspect.
- * @param {*} value The value to search for.
- * @param {number} [fromIndex=0] The index to search from.
- * @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`.
- * @returns {boolean} Returns `true` if `value` is found, else `false`.
- * @example
- *
- * _.includes([1, 2, 3], 1);
- * // => true
- *
- * _.includes([1, 2, 3], 1, 2);
- * // => false
- *
- * _.includes({ 'a': 1, 'b': 2 }, 1);
- * // => true
- *
- * _.includes('abcd', 'bc');
- * // => true
- */
- function includes(collection, value, fromIndex, guard) {
- collection = isArrayLike(collection) ? collection : values(collection);
- fromIndex = (fromIndex && !guard) ? toInteger(fromIndex) : 0;
-
- var length = collection.length;
- if (fromIndex < 0) {
- fromIndex = nativeMax(length + fromIndex, 0);
- }
- return isString(collection)
- ? (fromIndex <= length && collection.indexOf(value, fromIndex) > -1)
- : (!!length && baseIndexOf(collection, value, fromIndex) > -1);
- }
-
- /**
- * Invokes the method at `path` of each element in `collection`, returning
- * an array of the results of each invoked method. Any additional arguments
- * are provided to each invoked method. If `path` is a function, it's invoked
- * for, and `this` bound to, each element in `collection`.
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Collection
- * @param {Array|Object} collection The collection to iterate over.
- * @param {Array|Function|string} path The path of the method to invoke or
- * the function invoked per iteration.
- * @param {...*} [args] The arguments to invoke each method with.
- * @returns {Array} Returns the array of results.
- * @example
- *
- * _.invokeMap([[5, 1, 7], [3, 2, 1]], 'sort');
- * // => [[1, 5, 7], [1, 2, 3]]
- *
- * _.invokeMap([123, 456], String.prototype.split, '');
- * // => [['1', '2', '3'], ['4', '5', '6']]
- */
- var invokeMap = baseRest(function(collection, path, args) {
- var index = -1,
- isFunc = typeof path == 'function',
- result = isArrayLike(collection) ? Array(collection.length) : [];
-
- baseEach(collection, function(value) {
- result[++index] = isFunc ? apply(path, value, args) : baseInvoke(value, path, args);
- });
- return result;
- });
-
- /**
- * Creates an object composed of keys generated from the results of running
- * each element of `collection` thru `iteratee`. The corresponding value of
- * each key is the last element responsible for generating the key. The
- * iteratee is invoked with one argument: (value).
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Collection
- * @param {Array|Object} collection The collection to iterate over.
- * @param {Function} [iteratee=_.identity] The iteratee to transform keys.
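The negative-`fromIndex` offset described in the `_.includes` doc above, which its examples skip (a sketch, assuming lodash is loaded as `_`):

```
_.includes([1, 2, 3], 3, -1);
// => true, the search starts at index 2

_.includes('abcd', 'a', -2);
// => false, the search starts at index 2 ('cd')
```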
- * @returns {Object} Returns the composed aggregate object. - * @example - * - * var array = [ - * { 'dir': 'left', 'code': 97 }, - * { 'dir': 'right', 'code': 100 } - * ]; - * - * _.keyBy(array, function(o) { - * return String.fromCharCode(o.code); - * }); - * // => { 'a': { 'dir': 'left', 'code': 97 }, 'd': { 'dir': 'right', 'code': 100 } } - * - * _.keyBy(array, 'dir'); - * // => { 'left': { 'dir': 'left', 'code': 97 }, 'right': { 'dir': 'right', 'code': 100 } } - */ - var keyBy = createAggregator(function(result, value, key) { - baseAssignValue(result, key, value); - }); - - /** - * Creates an array of values by running each element in `collection` thru - * `iteratee`. The iteratee is invoked with three arguments: - * (value, index|key, collection). - * - * Many lodash methods are guarded to work as iteratees for methods like - * `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`. - * - * The guarded methods are: - * `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`, - * `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`, - * `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`, - * `template`, `trim`, `trimEnd`, `trimStart`, and `words` - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - * @example - * - * function square(n) { - * return n * n; - * } - * - * _.map([4, 8], square); - * // => [16, 64] - * - * _.map({ 'a': 4, 'b': 8 }, square); - * // => [16, 64] (iteration order is not guaranteed) - * - * var users = [ - * { 'user': 'barney' }, - * { 'user': 'fred' } - * ]; - * - * // The `_.property` iteratee shorthand. - * _.map(users, 'user'); - * // => ['barney', 'fred'] - */ - function map(collection, iteratee) { - var func = isArray(collection) ? arrayMap : baseMap; - return func(collection, getIteratee(iteratee, 3)); - } - - /** - * This method is like `_.sortBy` except that it allows specifying the sort - * orders of the iteratees to sort by. If `orders` is unspecified, all values - * are sorted in ascending order. Otherwise, specify an order of "desc" for - * descending or "asc" for ascending sort order of corresponding values. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Array[]|Function[]|Object[]|string[]} [iteratees=[_.identity]] - * The iteratees to sort by. - * @param {string[]} [orders] The sort orders of `iteratees`. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`. - * @returns {Array} Returns the new sorted array. - * @example - * - * var users = [ - * { 'user': 'fred', 'age': 48 }, - * { 'user': 'barney', 'age': 34 }, - * { 'user': 'fred', 'age': 40 }, - * { 'user': 'barney', 'age': 36 } - * ]; - * - * // Sort by `user` in ascending order and by `age` in descending order. - * _.orderBy(users, ['user', 'age'], ['asc', 'desc']); - * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] - */ - function orderBy(collection, iteratees, orders, guard) { - if (collection == null) { - return []; - } - if (!isArray(iteratees)) { - iteratees = iteratees == null ? [] : [iteratees]; - } - orders = guard ? undefined : orders; - if (!isArray(orders)) { - orders = orders == null ? 
[] : [orders]; - } - return baseOrderBy(collection, iteratees, orders); - } - - /** - * Creates an array of elements split into two groups, the first of which - * contains elements `predicate` returns truthy for, the second of which - * contains elements `predicate` returns falsey for. The predicate is - * invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the array of grouped elements. - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': false }, - * { 'user': 'fred', 'age': 40, 'active': true }, - * { 'user': 'pebbles', 'age': 1, 'active': false } - * ]; - * - * _.partition(users, function(o) { return o.active; }); - * // => objects for [['fred'], ['barney', 'pebbles']] - * - * // The `_.matches` iteratee shorthand. - * _.partition(users, { 'age': 1, 'active': false }); - * // => objects for [['pebbles'], ['barney', 'fred']] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.partition(users, ['active', false]); - * // => objects for [['barney', 'pebbles'], ['fred']] - * - * // The `_.property` iteratee shorthand. - * _.partition(users, 'active'); - * // => objects for [['fred'], ['barney', 'pebbles']] - */ - var partition = createAggregator(function(result, value, key) { - result[key ? 0 : 1].push(value); - }, function() { return [[], []]; }); - - /** - * Reduces `collection` to a value which is the accumulated result of running - * each element in `collection` thru `iteratee`, where each successive - * invocation is supplied the return value of the previous. If `accumulator` - * is not given, the first element of `collection` is used as the initial - * value. The iteratee is invoked with four arguments: - * (accumulator, value, index|key, collection). - * - * Many lodash methods are guarded to work as iteratees for methods like - * `_.reduce`, `_.reduceRight`, and `_.transform`. - * - * The guarded methods are: - * `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`, - * and `sortBy` - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @returns {*} Returns the accumulated value. - * @see _.reduceRight - * @example - * - * _.reduce([1, 2], function(sum, n) { - * return sum + n; - * }, 0); - * // => 3 - * - * _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { - * (result[value] || (result[value] = [])).push(key); - * return result; - * }, {}); - * // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed) - */ - function reduce(collection, iteratee, accumulator) { - var func = isArray(collection) ? arrayReduce : baseReduce, - initAccum = arguments.length < 3; - - return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEach); - } - - /** - * This method is like `_.reduce` except that it iterates over elements of - * `collection` from right to left. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {*} [accumulator] The initial value. 
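As the `_.reduce` doc above notes, omitting `accumulator` seeds the reduction with the first element; a sketch (assuming lodash is loaded as `_`):

```
_.reduce([3, 1, 4, 1, 5], function(max, n) {
  return n > max ? n : max;
});
// => 5 (the first element, 3, is the initial accumulator)

_.reduce([], function(sum, n) { return sum + n; });
// => undefined, so pass an explicit accumulator for possibly-empty input
```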
- * @returns {*} Returns the accumulated value. - * @see _.reduce - * @example - * - * var array = [[0, 1], [2, 3], [4, 5]]; - * - * _.reduceRight(array, function(flattened, other) { - * return flattened.concat(other); - * }, []); - * // => [4, 5, 2, 3, 0, 1] - */ - function reduceRight(collection, iteratee, accumulator) { - var func = isArray(collection) ? arrayReduceRight : baseReduce, - initAccum = arguments.length < 3; - - return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEachRight); - } - - /** - * The opposite of `_.filter`; this method returns the elements of `collection` - * that `predicate` does **not** return truthy for. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - * @see _.filter - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': false }, - * { 'user': 'fred', 'age': 40, 'active': true } - * ]; - * - * _.reject(users, function(o) { return !o.active; }); - * // => objects for ['fred'] - * - * // The `_.matches` iteratee shorthand. - * _.reject(users, { 'age': 40, 'active': true }); - * // => objects for ['barney'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.reject(users, ['active', false]); - * // => objects for ['fred'] - * - * // The `_.property` iteratee shorthand. - * _.reject(users, 'active'); - * // => objects for ['barney'] - */ - function reject(collection, predicate) { - var func = isArray(collection) ? arrayFilter : baseFilter; - return func(collection, negate(getIteratee(predicate, 3))); - } - - /** - * Gets a random element from `collection`. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Collection - * @param {Array|Object} collection The collection to sample. - * @returns {*} Returns the random element. - * @example - * - * _.sample([1, 2, 3, 4]); - * // => 2 - */ - function sample(collection) { - var func = isArray(collection) ? arraySample : baseSample; - return func(collection); - } - - /** - * Gets `n` random elements at unique keys from `collection` up to the - * size of `collection`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to sample. - * @param {number} [n=1] The number of elements to sample. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the random elements. - * @example - * - * _.sampleSize([1, 2, 3], 2); - * // => [3, 1] - * - * _.sampleSize([1, 2, 3], 4); - * // => [2, 3, 1] - */ - function sampleSize(collection, n, guard) { - if ((guard ? isIterateeCall(collection, n, guard) : n === undefined)) { - n = 1; - } else { - n = toInteger(n); - } - var func = isArray(collection) ? arraySampleSize : baseSampleSize; - return func(collection, n); - } - - /** - * Creates an array of shuffled values, using a version of the - * [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to shuffle. - * @returns {Array} Returns the new shuffled array. - * @example - * - * _.shuffle([1, 2, 3, 4]); - * // => [4, 1, 3, 2] - */ - function shuffle(collection) { - var func = isArray(collection) ? 
arrayShuffle : baseShuffle; - return func(collection); - } - - /** - * Gets the size of `collection` by returning its length for array-like - * values or the number of own enumerable string keyed properties for objects. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object|string} collection The collection to inspect. - * @returns {number} Returns the collection size. - * @example - * - * _.size([1, 2, 3]); - * // => 3 - * - * _.size({ 'a': 1, 'b': 2 }); - * // => 2 - * - * _.size('pebbles'); - * // => 7 - */ - function size(collection) { - if (collection == null) { - return 0; - } - if (isArrayLike(collection)) { - return isString(collection) ? stringSize(collection) : collection.length; - } - var tag = getTag(collection); - if (tag == mapTag || tag == setTag) { - return collection.size; - } - return baseKeys(collection).length; - } - - /** - * Checks if `predicate` returns truthy for **any** element of `collection`. - * Iteration is stopped once `predicate` returns truthy. The predicate is - * invoked with three arguments: (value, index|key, collection). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {boolean} Returns `true` if any element passes the predicate check, - * else `false`. - * @example - * - * _.some([null, 0, 'yes', false], Boolean); - * // => true - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false } - * ]; - * - * // The `_.matches` iteratee shorthand. - * _.some(users, { 'user': 'barney', 'active': false }); - * // => false - * - * // The `_.matchesProperty` iteratee shorthand. - * _.some(users, ['active', false]); - * // => true - * - * // The `_.property` iteratee shorthand. - * _.some(users, 'active'); - * // => true - */ - function some(collection, predicate, guard) { - var func = isArray(collection) ? arraySome : baseSome; - if (guard && isIterateeCall(collection, predicate, guard)) { - predicate = undefined; - } - return func(collection, getIteratee(predicate, 3)); - } - - /** - * Creates an array of elements, sorted in ascending order by the results of - * running each element in a collection thru each iteratee. This method - * performs a stable sort, that is, it preserves the original sort order of - * equal elements. The iteratees are invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {...(Function|Function[])} [iteratees=[_.identity]] - * The iteratees to sort by. - * @returns {Array} Returns the new sorted array. 
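Beyond the array, object, and string cases in its example, `_.size` also reads the `size` of ES2015 Maps and Sets, per the `mapTag`/`setTag` branch above (a sketch, assuming lodash is loaded as `_`):

```
_.size(new Map([['a', 1], ['b', 2]]));
// => 2

_.size(new Set([1, 2, 2, 3]));
// => 3 (duplicates collapse in the Set itself)
```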
- * @example - * - * var users = [ - * { 'user': 'fred', 'age': 48 }, - * { 'user': 'barney', 'age': 36 }, - * { 'user': 'fred', 'age': 40 }, - * { 'user': 'barney', 'age': 34 } - * ]; - * - * _.sortBy(users, [function(o) { return o.user; }]); - * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] - * - * _.sortBy(users, ['user', 'age']); - * // => objects for [['barney', 34], ['barney', 36], ['fred', 40], ['fred', 48]] - */ - var sortBy = baseRest(function(collection, iteratees) { - if (collection == null) { - return []; - } - var length = iteratees.length; - if (length > 1 && isIterateeCall(collection, iteratees[0], iteratees[1])) { - iteratees = []; - } else if (length > 2 && isIterateeCall(iteratees[0], iteratees[1], iteratees[2])) { - iteratees = [iteratees[0]]; - } - return baseOrderBy(collection, baseFlatten(iteratees, 1), []); - }); - - /*------------------------------------------------------------------------*/ - - /** - * Gets the timestamp of the number of milliseconds that have elapsed since - * the Unix epoch (1 January 1970 00:00:00 UTC). - * - * @static - * @memberOf _ - * @since 2.4.0 - * @category Date - * @returns {number} Returns the timestamp. - * @example - * - * _.defer(function(stamp) { - * console.log(_.now() - stamp); - * }, _.now()); - * // => Logs the number of milliseconds it took for the deferred invocation. - */ - var now = ctxNow || function() { - return root.Date.now(); - }; - - /*------------------------------------------------------------------------*/ - - /** - * The opposite of `_.before`; this method creates a function that invokes - * `func` once it's called `n` or more times. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {number} n The number of calls before `func` is invoked. - * @param {Function} func The function to restrict. - * @returns {Function} Returns the new restricted function. - * @example - * - * var saves = ['profile', 'settings']; - * - * var done = _.after(saves.length, function() { - * console.log('done saving!'); - * }); - * - * _.forEach(saves, function(type) { - * asyncSave({ 'type': type, 'complete': done }); - * }); - * // => Logs 'done saving!' after the two async saves have completed. - */ - function after(n, func) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - n = toInteger(n); - return function() { - if (--n < 1) { - return func.apply(this, arguments); - } - }; - } - - /** - * Creates a function that invokes `func`, with up to `n` arguments, - * ignoring any additional arguments. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} func The function to cap arguments for. - * @param {number} [n=func.length] The arity cap. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the new capped function. - * @example - * - * _.map(['6', '8', '10'], _.ary(parseInt, 1)); - * // => [6, 8, 10] - */ - function ary(func, n, guard) { - n = guard ? undefined : n; - n = (func && n == null) ? func.length : n; - return createWrap(func, WRAP_ARY_FLAG, undefined, undefined, undefined, undefined, n); - } - - /** - * Creates a function that invokes `func`, with the `this` binding and arguments - * of the created function, while it's called less than `n` times. Subsequent - * calls to the created function return the result of the last `func` invocation. 
- * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {number} n The number of calls at which `func` is no longer invoked. - * @param {Function} func The function to restrict. - * @returns {Function} Returns the new restricted function. - * @example - * - * jQuery(element).on('click', _.before(5, addContactToList)); - * // => Allows adding up to 4 contacts to the list. - */ - function before(n, func) { - var result; - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - n = toInteger(n); - return function() { - if (--n > 0) { - result = func.apply(this, arguments); - } - if (n <= 1) { - func = undefined; - } - return result; - }; - } - - /** - * Creates a function that invokes `func` with the `this` binding of `thisArg` - * and `partials` prepended to the arguments it receives. - * - * The `_.bind.placeholder` value, which defaults to `_` in monolithic builds, - * may be used as a placeholder for partially applied arguments. - * - * **Note:** Unlike native `Function#bind`, this method doesn't set the "length" - * property of bound functions. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to bind. - * @param {*} thisArg The `this` binding of `func`. - * @param {...*} [partials] The arguments to be partially applied. - * @returns {Function} Returns the new bound function. - * @example - * - * function greet(greeting, punctuation) { - * return greeting + ' ' + this.user + punctuation; - * } - * - * var object = { 'user': 'fred' }; - * - * var bound = _.bind(greet, object, 'hi'); - * bound('!'); - * // => 'hi fred!' - * - * // Bound with placeholders. - * var bound = _.bind(greet, object, _, '!'); - * bound('hi'); - * // => 'hi fred!' - */ - var bind = baseRest(function(func, thisArg, partials) { - var bitmask = WRAP_BIND_FLAG; - if (partials.length) { - var holders = replaceHolders(partials, getHolder(bind)); - bitmask |= WRAP_PARTIAL_FLAG; - } - return createWrap(func, bitmask, thisArg, partials, holders); - }); - - /** - * Creates a function that invokes the method at `object[key]` with `partials` - * prepended to the arguments it receives. - * - * This method differs from `_.bind` by allowing bound functions to reference - * methods that may be redefined or don't yet exist. See - * [Peter Michaux's article](http://peter.michaux.ca/articles/lazy-function-definition-pattern) - * for more details. - * - * The `_.bindKey.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for partially applied arguments. - * - * @static - * @memberOf _ - * @since 0.10.0 - * @category Function - * @param {Object} object The object to invoke the method on. - * @param {string} key The key of the method. - * @param {...*} [partials] The arguments to be partially applied. - * @returns {Function} Returns the new bound function. - * @example - * - * var object = { - * 'user': 'fred', - * 'greet': function(greeting, punctuation) { - * return greeting + ' ' + this.user + punctuation; - * } - * }; - * - * var bound = _.bindKey(object, 'greet', 'hi'); - * bound('!'); - * // => 'hi fred!' - * - * object.greet = function(greeting, punctuation) { - * return greeting + 'ya ' + this.user + punctuation; - * }; - * - * bound('!'); - * // => 'hiya fred!' - * - * // Bound with placeholders. - * var bound = _.bindKey(object, 'greet', _, '!'); - * bound('hi'); - * // => 'hiya fred!' 
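- * - * // Editor's note (not in the original docs): unlike `_.bind`, the method - * // is looked up on `object` at call time, which is why the redefined - * // `greet` above takes effect.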
- */ - var bindKey = baseRest(function(object, key, partials) { - var bitmask = WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG; - if (partials.length) { - var holders = replaceHolders(partials, getHolder(bindKey)); - bitmask |= WRAP_PARTIAL_FLAG; - } - return createWrap(key, bitmask, object, partials, holders); - }); - - /** - * Creates a function that accepts arguments of `func` and either invokes - * `func` returning its result, if at least `arity` number of arguments have - * been provided, or returns a function that accepts the remaining `func` - * arguments, and so on. The arity of `func` may be specified if `func.length` - * is not sufficient. - * - * The `_.curry.placeholder` value, which defaults to `_` in monolithic builds, - * may be used as a placeholder for provided arguments. - * - * **Note:** This method doesn't set the "length" property of curried functions. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Function - * @param {Function} func The function to curry. - * @param {number} [arity=func.length] The arity of `func`. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the new curried function. - * @example - * - * var abc = function(a, b, c) { - * return [a, b, c]; - * }; - * - * var curried = _.curry(abc); - * - * curried(1)(2)(3); - * // => [1, 2, 3] - * - * curried(1, 2)(3); - * // => [1, 2, 3] - * - * curried(1, 2, 3); - * // => [1, 2, 3] - * - * // Curried with placeholders. - * curried(1)(_, 3)(2); - * // => [1, 2, 3] - */ - function curry(func, arity, guard) { - arity = guard ? undefined : arity; - var result = createWrap(func, WRAP_CURRY_FLAG, undefined, undefined, undefined, undefined, undefined, arity); - result.placeholder = curry.placeholder; - return result; - } - - /** - * This method is like `_.curry` except that arguments are applied to `func` - * in the manner of `_.partialRight` instead of `_.partial`. - * - * The `_.curryRight.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for provided arguments. - * - * **Note:** This method doesn't set the "length" property of curried functions. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} func The function to curry. - * @param {number} [arity=func.length] The arity of `func`. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the new curried function. - * @example - * - * var abc = function(a, b, c) { - * return [a, b, c]; - * }; - * - * var curried = _.curryRight(abc); - * - * curried(3)(2)(1); - * // => [1, 2, 3] - * - * curried(2, 3)(1); - * // => [1, 2, 3] - * - * curried(1, 2, 3); - * // => [1, 2, 3] - * - * // Curried with placeholders. - * curried(3)(1, _)(2); - * // => [1, 2, 3] - */ - function curryRight(func, arity, guard) { - arity = guard ? undefined : arity; - var result = createWrap(func, WRAP_CURRY_RIGHT_FLAG, undefined, undefined, undefined, undefined, undefined, arity); - result.placeholder = curryRight.placeholder; - return result; - } - - /** - * Creates a debounced function that delays invoking `func` until after `wait` - * milliseconds have elapsed since the last time the debounced function was - * invoked. The debounced function comes with a `cancel` method to cancel - * delayed `func` invocations and a `flush` method to immediately invoke them. 
- * Provide `options` to indicate whether `func` should be invoked on the - * leading and/or trailing edge of the `wait` timeout. The `func` is invoked - * with the last arguments provided to the debounced function. Subsequent - * calls to the debounced function return the result of the last `func` - * invocation. - * - * **Note:** If `leading` and `trailing` options are `true`, `func` is - * invoked on the trailing edge of the timeout only if the debounced function - * is invoked more than once during the `wait` timeout. - * - * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred - * until the next tick, similar to `setTimeout` with a timeout of `0`. - * - * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/) - * for details on the differences between `_.debounce` and `_.throttle`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to debounce. - * @param {number} [wait=0] The number of milliseconds to delay. - * @param {Object} [options={}] The options object. - * @param {boolean} [options.leading=false] - * Specify invoking on the leading edge of the timeout. - * @param {number} [options.maxWait] - * The maximum time `func` is allowed to be delayed before it's invoked. - * @param {boolean} [options.trailing=true] - * Specify invoking on the trailing edge of the timeout. - * @returns {Function} Returns the new debounced function. - * @example - * - * // Avoid costly calculations while the window size is in flux. - * jQuery(window).on('resize', _.debounce(calculateLayout, 150)); - * - * // Invoke `sendMail` when clicked, debouncing subsequent calls. - * jQuery(element).on('click', _.debounce(sendMail, 300, { - * 'leading': true, - * 'trailing': false - * })); - * - * // Ensure `batchLog` is invoked once after 1 second of debounced calls. - * var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 }); - * var source = new EventSource('/stream'); - * jQuery(source).on('message', debounced); - * - * // Cancel the trailing debounced invocation. - * jQuery(window).on('popstate', debounced.cancel); - */ - function debounce(func, wait, options) { - var lastArgs, - lastThis, - maxWait, - result, - timerId, - lastCallTime, - lastInvokeTime = 0, - leading = false, - maxing = false, - trailing = true; - - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - wait = toNumber(wait) || 0; - if (isObject(options)) { - leading = !!options.leading; - maxing = 'maxWait' in options; - maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait; - trailing = 'trailing' in options ? !!options.trailing : trailing; - } - - function invokeFunc(time) { - var args = lastArgs, - thisArg = lastThis; - - lastArgs = lastThis = undefined; - lastInvokeTime = time; - result = func.apply(thisArg, args); - return result; - } - - function leadingEdge(time) { - // Reset any `maxWait` timer. - lastInvokeTime = time; - // Start the timer for the trailing edge. - timerId = setTimeout(timerExpired, wait); - // Invoke the leading edge. - return leading ? invokeFunc(time) : result; - } - - function remainingWait(time) { - var timeSinceLastCall = time - lastCallTime, - timeSinceLastInvoke = time - lastInvokeTime, - timeWaiting = wait - timeSinceLastCall; - - return maxing - ?
nativeMin(timeWaiting, maxWait - timeSinceLastInvoke) - : timeWaiting; - } - - function shouldInvoke(time) { - var timeSinceLastCall = time - lastCallTime, - timeSinceLastInvoke = time - lastInvokeTime; - - // Either this is the first call, activity has stopped and we're at the - // trailing edge, the system time has gone backwards and we're treating - // it as the trailing edge, or we've hit the `maxWait` limit. - return (lastCallTime === undefined || (timeSinceLastCall >= wait) || - (timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait)); - } - - function timerExpired() { - var time = now(); - if (shouldInvoke(time)) { - return trailingEdge(time); - } - // Restart the timer. - timerId = setTimeout(timerExpired, remainingWait(time)); - } - - function trailingEdge(time) { - timerId = undefined; - - // Only invoke if we have `lastArgs` which means `func` has been - // debounced at least once. - if (trailing && lastArgs) { - return invokeFunc(time); - } - lastArgs = lastThis = undefined; - return result; - } - - function cancel() { - if (timerId !== undefined) { - clearTimeout(timerId); - } - lastInvokeTime = 0; - lastArgs = lastCallTime = lastThis = timerId = undefined; - } - - function flush() { - return timerId === undefined ? result : trailingEdge(now()); - } - - function debounced() { - var time = now(), - isInvoking = shouldInvoke(time); - - lastArgs = arguments; - lastThis = this; - lastCallTime = time; - - if (isInvoking) { - if (timerId === undefined) { - return leadingEdge(lastCallTime); - } - if (maxing) { - // Handle invocations in a tight loop. - timerId = setTimeout(timerExpired, wait); - return invokeFunc(lastCallTime); - } - } - if (timerId === undefined) { - timerId = setTimeout(timerExpired, wait); - } - return result; - } - debounced.cancel = cancel; - debounced.flush = flush; - return debounced; - } - - /** - * Defers invoking the `func` until the current call stack has cleared. Any - * additional arguments are provided to `func` when it's invoked. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to defer. - * @param {...*} [args] The arguments to invoke `func` with. - * @returns {number} Returns the timer id. - * @example - * - * _.defer(function(text) { - * console.log(text); - * }, 'deferred'); - * // => Logs 'deferred' after one millisecond. - */ - var defer = baseRest(function(func, args) { - return baseDelay(func, 1, args); - }); - - /** - * Invokes `func` after `wait` milliseconds. Any additional arguments are - * provided to `func` when it's invoked. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to delay. - * @param {number} wait The number of milliseconds to delay invocation. - * @param {...*} [args] The arguments to invoke `func` with. - * @returns {number} Returns the timer id. - * @example - * - * _.delay(function(text) { - * console.log(text); - * }, 1000, 'later'); - * // => Logs 'later' after one second. - */ - var delay = baseRest(function(func, wait, args) { - return baseDelay(func, toNumber(wait) || 0, args); - }); - - /** - * Creates a function that invokes `func` with arguments reversed. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Function - * @param {Function} func The function to flip arguments for. - * @returns {Function} Returns the new flipped function. 
- * @example - * - * var flipped = _.flip(function() { - * return _.toArray(arguments); - * }); - * - * flipped('a', 'b', 'c', 'd'); - * // => ['d', 'c', 'b', 'a'] - */ - function flip(func) { - return createWrap(func, WRAP_FLIP_FLAG); - } - - /** - * Creates a function that memoizes the result of `func`. If `resolver` is - * provided, it determines the cache key for storing the result based on the - * arguments provided to the memoized function. By default, the first argument - * provided to the memoized function is used as the map cache key. The `func` - * is invoked with the `this` binding of the memoized function. - * - * **Note:** The cache is exposed as the `cache` property on the memoized - * function. Its creation may be customized by replacing the `_.memoize.Cache` - * constructor with one whose instances implement the - * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) - * method interface of `clear`, `delete`, `get`, `has`, and `set`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to have its output memoized. - * @param {Function} [resolver] The function to resolve the cache key. - * @returns {Function} Returns the new memoized function. - * @example - * - * var object = { 'a': 1, 'b': 2 }; - * var other = { 'c': 3, 'd': 4 }; - * - * var values = _.memoize(_.values); - * values(object); - * // => [1, 2] - * - * values(other); - * // => [3, 4] - * - * object.a = 2; - * values(object); - * // => [1, 2] - * - * // Modify the result cache. - * values.cache.set(object, ['a', 'b']); - * values(object); - * // => ['a', 'b'] - * - * // Replace `_.memoize.Cache`. - * _.memoize.Cache = WeakMap; - */ - function memoize(func, resolver) { - if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) { - throw new TypeError(FUNC_ERROR_TEXT); - } - var memoized = function() { - var args = arguments, - key = resolver ? resolver.apply(this, args) : args[0], - cache = memoized.cache; - - if (cache.has(key)) { - return cache.get(key); - } - var result = func.apply(this, args); - memoized.cache = cache.set(key, result) || cache; - return result; - }; - memoized.cache = new (memoize.Cache || MapCache); - return memoized; - } - - // Expose `MapCache`. - memoize.Cache = MapCache; - - /** - * Creates a function that negates the result of the predicate `func`. The - * `func` predicate is invoked with the `this` binding and arguments of the - * created function. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} predicate The predicate to negate. - * @returns {Function} Returns the new negated function. - * @example - * - * function isEven(n) { - * return n % 2 == 0; - * } - * - * _.filter([1, 2, 3, 4, 5, 6], _.negate(isEven)); - * // => [1, 3, 5] - */ - function negate(predicate) { - if (typeof predicate != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - return function() { - var args = arguments; - switch (args.length) { - case 0: return !predicate.call(this); - case 1: return !predicate.call(this, args[0]); - case 2: return !predicate.call(this, args[0], args[1]); - case 3: return !predicate.call(this, args[0], args[1], args[2]); - } - return !predicate.apply(this, args); - }; - } - - /** - * Creates a function that is restricted to invoking `func` once. Repeat calls - * to the function return the value of the first invocation. 
The `func` is - * invoked with the `this` binding and arguments of the created function. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to restrict. - * @returns {Function} Returns the new restricted function. - * @example - * - * var initialize = _.once(createApplication); - * initialize(); - * initialize(); - * // => `createApplication` is invoked once - */ - function once(func) { - return before(2, func); - } - - /** - * Creates a function that invokes `func` with its arguments transformed. - * - * @static - * @since 4.0.0 - * @memberOf _ - * @category Function - * @param {Function} func The function to wrap. - * @param {...(Function|Function[])} [transforms=[_.identity]] - * The argument transforms. - * @returns {Function} Returns the new function. - * @example - * - * function doubled(n) { - * return n * 2; - * } - * - * function square(n) { - * return n * n; - * } - * - * var func = _.overArgs(function(x, y) { - * return [x, y]; - * }, [square, doubled]); - * - * func(9, 3); - * // => [81, 6] - * - * func(10, 5); - * // => [100, 10] - */ - var overArgs = castRest(function(func, transforms) { - transforms = (transforms.length == 1 && isArray(transforms[0])) - ? arrayMap(transforms[0], baseUnary(getIteratee())) - : arrayMap(baseFlatten(transforms, 1), baseUnary(getIteratee())); - - var funcsLength = transforms.length; - return baseRest(function(args) { - var index = -1, - length = nativeMin(args.length, funcsLength); - - while (++index < length) { - args[index] = transforms[index].call(this, args[index]); - } - return apply(func, this, args); - }); - }); - - /** - * Creates a function that invokes `func` with `partials` prepended to the - * arguments it receives. This method is like `_.bind` except it does **not** - * alter the `this` binding. - * - * The `_.partial.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for partially applied arguments. - * - * **Note:** This method doesn't set the "length" property of partially - * applied functions. - * - * @static - * @memberOf _ - * @since 0.2.0 - * @category Function - * @param {Function} func The function to partially apply arguments to. - * @param {...*} [partials] The arguments to be partially applied. - * @returns {Function} Returns the new partially applied function. - * @example - * - * function greet(greeting, name) { - * return greeting + ' ' + name; - * } - * - * var sayHelloTo = _.partial(greet, 'hello'); - * sayHelloTo('fred'); - * // => 'hello fred' - * - * // Partially applied with placeholders. - * var greetFred = _.partial(greet, _, 'fred'); - * greetFred('hi'); - * // => 'hi fred' - */ - var partial = baseRest(function(func, partials) { - var holders = replaceHolders(partials, getHolder(partial)); - return createWrap(func, WRAP_PARTIAL_FLAG, undefined, partials, holders); - }); - - /** - * This method is like `_.partial` except that partially applied arguments - * are appended to the arguments it receives. - * - * The `_.partialRight.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for partially applied arguments. - * - * **Note:** This method doesn't set the "length" property of partially - * applied functions. - * - * @static - * @memberOf _ - * @since 1.0.0 - * @category Function - * @param {Function} func The function to partially apply arguments to. - * @param {...*} [partials] The arguments to be partially applied. 
- * @returns {Function} Returns the new partially applied function. - * @example - * - * function greet(greeting, name) { - * return greeting + ' ' + name; - * } - * - * var greetFred = _.partialRight(greet, 'fred'); - * greetFred('hi'); - * // => 'hi fred' - * - * // Partially applied with placeholders. - * var sayHelloTo = _.partialRight(greet, 'hello', _); - * sayHelloTo('fred'); - * // => 'hello fred' - */ - var partialRight = baseRest(function(func, partials) { - var holders = replaceHolders(partials, getHolder(partialRight)); - return createWrap(func, WRAP_PARTIAL_RIGHT_FLAG, undefined, partials, holders); - }); - - /** - * Creates a function that invokes `func` with arguments arranged according - * to the specified `indexes` where the argument value at the first index is - * provided as the first argument, the argument value at the second index is - * provided as the second argument, and so on. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} func The function to rearrange arguments for. - * @param {...(number|number[])} indexes The arranged argument indexes. - * @returns {Function} Returns the new function. - * @example - * - * var rearged = _.rearg(function(a, b, c) { - * return [a, b, c]; - * }, [2, 0, 1]); - * - * rearged('b', 'c', 'a'); - * // => ['a', 'b', 'c'] - */ - var rearg = flatRest(function(func, indexes) { - return createWrap(func, WRAP_REARG_FLAG, undefined, undefined, undefined, indexes); - }); - - /** - * Creates a function that invokes `func` with the `this` binding of the - * created function and arguments from `start` and beyond provided as - * an array. - * - * **Note:** This method is based on the - * [rest parameter](https://mdn.io/rest_parameters). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Function - * @param {Function} func The function to apply a rest parameter to. - * @param {number} [start=func.length-1] The start position of the rest parameter. - * @returns {Function} Returns the new function. - * @example - * - * var say = _.rest(function(what, names) { - * return what + ' ' + _.initial(names).join(', ') + - * (_.size(names) > 1 ? ', & ' : '') + _.last(names); - * }); - * - * say('hello', 'fred', 'barney', 'pebbles'); - * // => 'hello fred, barney, & pebbles' - */ - function rest(func, start) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - start = start === undefined ? start : toInteger(start); - return baseRest(func, start); - } - - /** - * Creates a function that invokes `func` with the `this` binding of the - * created function and an array of arguments much like - * [`Function#apply`](http://www.ecma-international.org/ecma-262/7.0/#sec-function.prototype.apply). - * - * **Note:** This method is based on the - * [spread operator](https://mdn.io/spread_operator). - * - * @static - * @memberOf _ - * @since 3.2.0 - * @category Function - * @param {Function} func The function to spread arguments over. - * @param {number} [start=0] The start position of the spread. - * @returns {Function} Returns the new function.
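- * - * (Editor's sketch, not from the original docs:) with the default `start` of - * `0`, the created function behaves roughly like - * `function(args) { return func.apply(this, args); }`.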
- * @example - * - * var say = _.spread(function(who, what) { - * return who + ' says ' + what; - * }); - * - * say(['fred', 'hello']); - * // => 'fred says hello' - * - * var numbers = Promise.all([ - * Promise.resolve(40), - * Promise.resolve(36) - * ]); - * - * numbers.then(_.spread(function(x, y) { - * return x + y; - * })); - * // => a Promise of 76 - */ - function spread(func, start) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - start = start == null ? 0 : nativeMax(toInteger(start), 0); - return baseRest(function(args) { - var array = args[start], - otherArgs = castSlice(args, 0, start); - - if (array) { - arrayPush(otherArgs, array); - } - return apply(func, this, otherArgs); - }); - } - - /** - * Creates a throttled function that only invokes `func` at most once per - * every `wait` milliseconds. The throttled function comes with a `cancel` - * method to cancel delayed `func` invocations and a `flush` method to - * immediately invoke them. Provide `options` to indicate whether `func` - * should be invoked on the leading and/or trailing edge of the `wait` - * timeout. The `func` is invoked with the last arguments provided to the - * throttled function. Subsequent calls to the throttled function return the - * result of the last `func` invocation. - * - * **Note:** If `leading` and `trailing` options are `true`, `func` is - * invoked on the trailing edge of the timeout only if the throttled function - * is invoked more than once during the `wait` timeout. - * - * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred - * until the next tick, similar to `setTimeout` with a timeout of `0`. - * - * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/) - * for details on the differences between `_.throttle` and `_.debounce`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to throttle. - * @param {number} [wait=0] The number of milliseconds to throttle invocations to. - * @param {Object} [options={}] The options object. - * @param {boolean} [options.leading=true] - * Specify invoking on the leading edge of the timeout. - * @param {boolean} [options.trailing=true] - * Specify invoking on the trailing edge of the timeout. - * @returns {Function} Returns the new throttled function. - * @example - * - * // Avoid excessively updating the position while scrolling. - * jQuery(window).on('scroll', _.throttle(updatePosition, 100)); - * - * // Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes. - * var throttled = _.throttle(renewToken, 300000, { 'trailing': false }); - * jQuery(element).on('click', throttled); - * - * // Cancel the trailing throttled invocation. - * jQuery(window).on('popstate', throttled.cancel); - */ - function throttle(func, wait, options) { - var leading = true, - trailing = true; - - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - if (isObject(options)) { - leading = 'leading' in options ? !!options.leading : leading; - trailing = 'trailing' in options ? !!options.trailing : trailing; - } - return debounce(func, wait, { - 'leading': leading, - 'maxWait': wait, - 'trailing': trailing - }); - } - - /** - * Creates a function that accepts up to one argument, ignoring any - * additional arguments. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Function - * @param {Function} func The function to cap arguments for.
- * @returns {Function} Returns the new capped function. - * @example - * - * _.map(['6', '8', '10'], _.unary(parseInt)); - * // => [6, 8, 10] - */ - function unary(func) { - return ary(func, 1); - } - - /** - * Creates a function that provides `value` to `wrapper` as its first - * argument. Any additional arguments provided to the function are appended - * to those provided to the `wrapper`. The wrapper is invoked with the `this` - * binding of the created function. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {*} value The value to wrap. - * @param {Function} [wrapper=identity] The wrapper function. - * @returns {Function} Returns the new function. - * @example - * - * var p = _.wrap(_.escape, function(func, text) { - * return '
<p>' + func(text) + '</p>'; - * }); - * - * p('fred, barney, & pebbles'); - * // => '<p>fred, barney, &amp; pebbles</p>
' - */ - function wrap(value, wrapper) { - return partial(castFunction(wrapper), value); - } - - /*------------------------------------------------------------------------*/ - - /** - * Casts `value` as an array if it's not one. - * - * @static - * @memberOf _ - * @since 4.4.0 - * @category Lang - * @param {*} value The value to inspect. - * @returns {Array} Returns the cast array. - * @example - * - * _.castArray(1); - * // => [1] - * - * _.castArray({ 'a': 1 }); - * // => [{ 'a': 1 }] - * - * _.castArray('abc'); - * // => ['abc'] - * - * _.castArray(null); - * // => [null] - * - * _.castArray(undefined); - * // => [undefined] - * - * _.castArray(); - * // => [] - * - * var array = [1, 2, 3]; - * console.log(_.castArray(array) === array); - * // => true - */ - function castArray() { - if (!arguments.length) { - return []; - } - var value = arguments[0]; - return isArray(value) ? value : [value]; - } - - /** - * Creates a shallow clone of `value`. - * - * **Note:** This method is loosely based on the - * [structured clone algorithm](https://mdn.io/Structured_clone_algorithm) - * and supports cloning arrays, array buffers, booleans, date objects, maps, - * numbers, `Object` objects, regexes, sets, strings, symbols, and typed - * arrays. The own enumerable properties of `arguments` objects are cloned - * as plain objects. An empty object is returned for uncloneable values such - * as error objects, functions, DOM nodes, and WeakMaps. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to clone. - * @returns {*} Returns the cloned value. - * @see _.cloneDeep - * @example - * - * var objects = [{ 'a': 1 }, { 'b': 2 }]; - * - * var shallow = _.clone(objects); - * console.log(shallow[0] === objects[0]); - * // => true - */ - function clone(value) { - return baseClone(value, CLONE_SYMBOLS_FLAG); - } - - /** - * This method is like `_.clone` except that it accepts `customizer` which - * is invoked to produce the cloned value. If `customizer` returns `undefined`, - * cloning is handled by the method instead. The `customizer` is invoked with - * up to four arguments; (value [, index|key, object, stack]). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to clone. - * @param {Function} [customizer] The function to customize cloning. - * @returns {*} Returns the cloned value. - * @see _.cloneDeepWith - * @example - * - * function customizer(value) { - * if (_.isElement(value)) { - * return value.cloneNode(false); - * } - * } - * - * var el = _.cloneWith(document.body, customizer); - * - * console.log(el === document.body); - * // => false - * console.log(el.nodeName); - * // => 'BODY' - * console.log(el.childNodes.length); - * // => 0 - */ - function cloneWith(value, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return baseClone(value, CLONE_SYMBOLS_FLAG, customizer); - } - - /** - * This method is like `_.clone` except that it recursively clones `value`. - * - * @static - * @memberOf _ - * @since 1.0.0 - * @category Lang - * @param {*} value The value to recursively clone. - * @returns {*} Returns the deep cloned value. 
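- * - * (Editor's illustration, not in the original docs:) cyclic references are - * preserved rather than causing infinite recursion: - * - * var a = {}; - * a.self = a; - * - * var b = _.cloneDeep(a); - * console.log(b.self === b); - * // => true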
- * @see _.clone - * @example - * - * var objects = [{ 'a': 1 }, { 'b': 2 }]; - * - * var deep = _.cloneDeep(objects); - * console.log(deep[0] === objects[0]); - * // => false - */ - function cloneDeep(value) { - return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG); - } - - /** - * This method is like `_.cloneWith` except that it recursively clones `value`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to recursively clone. - * @param {Function} [customizer] The function to customize cloning. - * @returns {*} Returns the deep cloned value. - * @see _.cloneWith - * @example - * - * function customizer(value) { - * if (_.isElement(value)) { - * return value.cloneNode(true); - * } - * } - * - * var el = _.cloneDeepWith(document.body, customizer); - * - * console.log(el === document.body); - * // => false - * console.log(el.nodeName); - * // => 'BODY' - * console.log(el.childNodes.length); - * // => 20 - */ - function cloneDeepWith(value, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG, customizer); - } - - /** - * Checks if `object` conforms to `source` by invoking the predicate - * properties of `source` with the corresponding property values of `object`. - * - * **Note:** This method is equivalent to `_.conforms` when `source` is - * partially applied. - * - * @static - * @memberOf _ - * @since 4.14.0 - * @category Lang - * @param {Object} object The object to inspect. - * @param {Object} source The object of property predicates to conform to. - * @returns {boolean} Returns `true` if `object` conforms, else `false`. - * @example - * - * var object = { 'a': 1, 'b': 2 }; - * - * _.conformsTo(object, { 'b': function(n) { return n > 1; } }); - * // => true - * - * _.conformsTo(object, { 'b': function(n) { return n > 2; } }); - * // => false - */ - function conformsTo(object, source) { - return source == null || baseConformsTo(object, source, keys(source)); - } - - /** - * Performs a - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * comparison between two values to determine if they are equivalent. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - * @example - * - * var object = { 'a': 1 }; - * var other = { 'a': 1 }; - * - * _.eq(object, object); - * // => true - * - * _.eq(object, other); - * // => false - * - * _.eq('a', 'a'); - * // => true - * - * _.eq('a', Object('a')); - * // => false - * - * _.eq(NaN, NaN); - * // => true - */ - function eq(value, other) { - return value === other || (value !== value && other !== other); - } - - /** - * Checks if `value` is greater than `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is greater than `other`, - * else `false`. - * @see _.lt - * @example - * - * _.gt(3, 1); - * // => true - * - * _.gt(3, 3); - * // => false - * - * _.gt(1, 3); - * // => false - */ - var gt = createRelationalOperation(baseGt); - - /** - * Checks if `value` is greater than or equal to `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. 
- * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is greater than or equal to - * `other`, else `false`. - * @see _.lte - * @example - * - * _.gte(3, 1); - * // => true - * - * _.gte(3, 3); - * // => true - * - * _.gte(1, 3); - * // => false - */ - var gte = createRelationalOperation(function(value, other) { - return value >= other; - }); - - /** - * Checks if `value` is likely an `arguments` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an `arguments` object, - * else `false`. - * @example - * - * _.isArguments(function() { return arguments; }()); - * // => true - * - * _.isArguments([1, 2, 3]); - * // => false - */ - var isArguments = baseIsArguments(function() { return arguments; }()) ? baseIsArguments : function(value) { - return isObjectLike(value) && hasOwnProperty.call(value, 'callee') && - !propertyIsEnumerable.call(value, 'callee'); - }; - - /** - * Checks if `value` is classified as an `Array` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array, else `false`. - * @example - * - * _.isArray([1, 2, 3]); - * // => true - * - * _.isArray(document.body.children); - * // => false - * - * _.isArray('abc'); - * // => false - * - * _.isArray(_.noop); - * // => false - */ - var isArray = Array.isArray; - - /** - * Checks if `value` is classified as an `ArrayBuffer` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array buffer, else `false`. - * @example - * - * _.isArrayBuffer(new ArrayBuffer(2)); - * // => true - * - * _.isArrayBuffer(new Array(2)); - * // => false - */ - var isArrayBuffer = nodeIsArrayBuffer ? baseUnary(nodeIsArrayBuffer) : baseIsArrayBuffer; - - /** - * Checks if `value` is array-like. A value is considered array-like if it's - * not a function and has a `value.length` that's an integer greater than or - * equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is array-like, else `false`. - * @example - * - * _.isArrayLike([1, 2, 3]); - * // => true - * - * _.isArrayLike(document.body.children); - * // => true - * - * _.isArrayLike('abc'); - * // => true - * - * _.isArrayLike(_.noop); - * // => false - */ - function isArrayLike(value) { - return value != null && isLength(value.length) && !isFunction(value); - } - - /** - * This method is like `_.isArrayLike` except that it also checks if `value` - * is an object. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array-like object, - * else `false`. - * @example - * - * _.isArrayLikeObject([1, 2, 3]); - * // => true - * - * _.isArrayLikeObject(document.body.children); - * // => true - * - * _.isArrayLikeObject('abc'); - * // => false - * - * _.isArrayLikeObject(_.noop); - * // => false - */ - function isArrayLikeObject(value) { - return isObjectLike(value) && isArrayLike(value); - } - - /** - * Checks if `value` is classified as a boolean primitive or object. 
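- * - * (Editor's illustration, not in the original docs:) boxed booleans are - * classified as booleans too, e.g. `_.isBoolean(new Boolean(false))` - * returns `true`.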
- * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a boolean, else `false`. - * @example - * - * _.isBoolean(false); - * // => true - * - * _.isBoolean(null); - * // => false - */ - function isBoolean(value) { - return value === true || value === false || - (isObjectLike(value) && baseGetTag(value) == boolTag); - } - - /** - * Checks if `value` is a buffer. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a buffer, else `false`. - * @example - * - * _.isBuffer(new Buffer(2)); - * // => true - * - * _.isBuffer(new Uint8Array(2)); - * // => false - */ - var isBuffer = nativeIsBuffer || stubFalse; - - /** - * Checks if `value` is classified as a `Date` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a date object, else `false`. - * @example - * - * _.isDate(new Date); - * // => true - * - * _.isDate('Mon April 23 2012'); - * // => false - */ - var isDate = nodeIsDate ? baseUnary(nodeIsDate) : baseIsDate; - - /** - * Checks if `value` is likely a DOM element. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a DOM element, else `false`. - * @example - * - * _.isElement(document.body); - * // => true - * - * _.isElement(''); - * // => false - */ - function isElement(value) { - return isObjectLike(value) && value.nodeType === 1 && !isPlainObject(value); - } - - /** - * Checks if `value` is an empty object, collection, map, or set. - * - * Objects are considered empty if they have no own enumerable string keyed - * properties. - * - * Array-like values such as `arguments` objects, arrays, buffers, strings, or - * jQuery-like collections are considered empty if they have a `length` of `0`. - * Similarly, maps and sets are considered empty if they have a `size` of `0`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is empty, else `false`. - * @example - * - * _.isEmpty(null); - * // => true - * - * _.isEmpty(true); - * // => true - * - * _.isEmpty(1); - * // => true - * - * _.isEmpty([1, 2, 3]); - * // => false - * - * _.isEmpty({ 'a': 1 }); - * // => false - */ - function isEmpty(value) { - if (value == null) { - return true; - } - if (isArrayLike(value) && - (isArray(value) || typeof value == 'string' || typeof value.splice == 'function' || - isBuffer(value) || isTypedArray(value) || isArguments(value))) { - return !value.length; - } - var tag = getTag(value); - if (tag == mapTag || tag == setTag) { - return !value.size; - } - if (isPrototype(value)) { - return !baseKeys(value).length; - } - for (var key in value) { - if (hasOwnProperty.call(value, key)) { - return false; - } - } - return true; - } - - /** - * Performs a deep comparison between two values to determine if they are - * equivalent. - * - * **Note:** This method supports comparing arrays, array buffers, booleans, - * date objects, error objects, maps, numbers, `Object` objects, regexes, - * sets, strings, symbols, and typed arrays. `Object` objects are compared - * by their own, not inherited, enumerable properties. 
Functions and DOM - * nodes are compared by strict equality, i.e. `===`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - * @example - * - * var object = { 'a': 1 }; - * var other = { 'a': 1 }; - * - * _.isEqual(object, other); - * // => true - * - * object === other; - * // => false - */ - function isEqual(value, other) { - return baseIsEqual(value, other); - } - - /** - * This method is like `_.isEqual` except that it accepts `customizer` which - * is invoked to compare values. If `customizer` returns `undefined`, comparisons - * are handled by the method instead. The `customizer` is invoked with up to - * six arguments: (objValue, othValue [, index|key, object, other, stack]). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @param {Function} [customizer] The function to customize comparisons. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - * @example - * - * function isGreeting(value) { - * return /^h(?:i|ello)$/.test(value); - * } - * - * function customizer(objValue, othValue) { - * if (isGreeting(objValue) && isGreeting(othValue)) { - * return true; - * } - * } - * - * var array = ['hello', 'goodbye']; - * var other = ['hi', 'goodbye']; - * - * _.isEqualWith(array, other, customizer); - * // => true - */ - function isEqualWith(value, other, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - var result = customizer ? customizer(value, other) : undefined; - return result === undefined ? baseIsEqual(value, other, undefined, customizer) : !!result; - } - - /** - * Checks if `value` is an `Error`, `EvalError`, `RangeError`, `ReferenceError`, - * `SyntaxError`, `TypeError`, or `URIError` object. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an error object, else `false`. - * @example - * - * _.isError(new Error); - * // => true - * - * _.isError(Error); - * // => false - */ - function isError(value) { - if (!isObjectLike(value)) { - return false; - } - var tag = baseGetTag(value); - return tag == errorTag || tag == domExcTag || - (typeof value.message == 'string' && typeof value.name == 'string' && !isPlainObject(value)); - } - - /** - * Checks if `value` is a finite primitive number. - * - * **Note:** This method is based on - * [`Number.isFinite`](https://mdn.io/Number/isFinite). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a finite number, else `false`. - * @example - * - * _.isFinite(3); - * // => true - * - * _.isFinite(Number.MIN_VALUE); - * // => true - * - * _.isFinite(Infinity); - * // => false - * - * _.isFinite('3'); - * // => false - */ - function isFinite(value) { - return typeof value == 'number' && nativeIsFinite(value); - } - - /** - * Checks if `value` is classified as a `Function` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a function, else `false`. 
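- * - * (Editor's note, not in the original docs:) the tag check also classifies - * async and generator functions, so `_.isFunction(async function() {})` and - * `_.isFunction(function*() {})` both return `true`.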
- * @example - * - * _.isFunction(_); - * // => true - * - * _.isFunction(/abc/); - * // => false - */ - function isFunction(value) { - if (!isObject(value)) { - return false; - } - // The use of `Object#toString` avoids issues with the `typeof` operator - // in Safari 9 which returns 'object' for typed arrays and other constructors. - var tag = baseGetTag(value); - return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag; - } - - /** - * Checks if `value` is an integer. - * - * **Note:** This method is based on - * [`Number.isInteger`](https://mdn.io/Number/isInteger). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an integer, else `false`. - * @example - * - * _.isInteger(3); - * // => true - * - * _.isInteger(Number.MIN_VALUE); - * // => false - * - * _.isInteger(Infinity); - * // => false - * - * _.isInteger('3'); - * // => false - */ - function isInteger(value) { - return typeof value == 'number' && value == toInteger(value); - } - - /** - * Checks if `value` is a valid array-like length. - * - * **Note:** This method is loosely based on - * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a valid length, else `false`. - * @example - * - * _.isLength(3); - * // => true - * - * _.isLength(Number.MIN_VALUE); - * // => false - * - * _.isLength(Infinity); - * // => false - * - * _.isLength('3'); - * // => false - */ - function isLength(value) { - return typeof value == 'number' && - value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER; - } - - /** - * Checks if `value` is the - * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) - * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an object, else `false`. - * @example - * - * _.isObject({}); - * // => true - * - * _.isObject([1, 2, 3]); - * // => true - * - * _.isObject(_.noop); - * // => true - * - * _.isObject(null); - * // => false - */ - function isObject(value) { - var type = typeof value; - return value != null && (type == 'object' || type == 'function'); - } - - /** - * Checks if `value` is object-like. A value is object-like if it's not `null` - * and has a `typeof` result of "object". - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is object-like, else `false`. - * @example - * - * _.isObjectLike({}); - * // => true - * - * _.isObjectLike([1, 2, 3]); - * // => true - * - * _.isObjectLike(_.noop); - * // => false - * - * _.isObjectLike(null); - * // => false - */ - function isObjectLike(value) { - return value != null && typeof value == 'object'; - } - - /** - * Checks if `value` is classified as a `Map` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a map, else `false`. - * @example - * - * _.isMap(new Map); - * // => true - * - * _.isMap(new WeakMap); - * // => false - */ - var isMap = nodeIsMap ? 
baseUnary(nodeIsMap) : baseIsMap; - - /** - * Performs a partial deep comparison between `object` and `source` to - * determine if `object` contains equivalent property values. - * - * **Note:** This method is equivalent to `_.matches` when `source` is - * partially applied. - * - * Partial comparisons will match empty array and empty object `source` - * values against any array or object value, respectively. See `_.isEqual` - * for a list of supported value comparisons. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {Object} object The object to inspect. - * @param {Object} source The object of property values to match. - * @returns {boolean} Returns `true` if `object` is a match, else `false`. - * @example - * - * var object = { 'a': 1, 'b': 2 }; - * - * _.isMatch(object, { 'b': 2 }); - * // => true - * - * _.isMatch(object, { 'b': 1 }); - * // => false - */ - function isMatch(object, source) { - return object === source || baseIsMatch(object, source, getMatchData(source)); - } - - /** - * This method is like `_.isMatch` except that it accepts `customizer` which - * is invoked to compare values. If `customizer` returns `undefined`, comparisons - * are handled by the method instead. The `customizer` is invoked with five - * arguments: (objValue, srcValue, index|key, object, source). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {Object} object The object to inspect. - * @param {Object} source The object of property values to match. - * @param {Function} [customizer] The function to customize comparisons. - * @returns {boolean} Returns `true` if `object` is a match, else `false`. - * @example - * - * function isGreeting(value) { - * return /^h(?:i|ello)$/.test(value); - * } - * - * function customizer(objValue, srcValue) { - * if (isGreeting(objValue) && isGreeting(srcValue)) { - * return true; - * } - * } - * - * var object = { 'greeting': 'hello' }; - * var source = { 'greeting': 'hi' }; - * - * _.isMatchWith(object, source, customizer); - * // => true - */ - function isMatchWith(object, source, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return baseIsMatch(object, source, getMatchData(source), customizer); - } - - /** - * Checks if `value` is `NaN`. - * - * **Note:** This method is based on - * [`Number.isNaN`](https://mdn.io/Number/isNaN) and is not the same as - * global [`isNaN`](https://mdn.io/isNaN) which returns `true` for - * `undefined` and other non-number values. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `NaN`, else `false`. - * @example - * - * _.isNaN(NaN); - * // => true - * - * _.isNaN(new Number(NaN)); - * // => true - * - * isNaN(undefined); - * // => true - * - * _.isNaN(undefined); - * // => false - */ - function isNaN(value) { - // An `NaN` primitive is the only value that is not equal to itself. - // Perform the `toStringTag` check first to avoid errors with some - // ActiveX objects in IE. - return isNumber(value) && value != +value; - } - - /** - * Checks if `value` is a pristine native function. - * - * **Note:** This method can't reliably detect native functions in the presence - * of the core-js package because core-js circumvents this kind of detection. - * Despite multiple requests, the core-js maintainer has made it clear: any - * attempt to fix the detection will be obstructed. 
As a result, we're left - * with little choice but to throw an error. Unfortunately, this also affects - * packages, like [babel-polyfill](https://www.npmjs.com/package/babel-polyfill), - * which rely on core-js. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a native function, - * else `false`. - * @example - * - * _.isNative(Array.prototype.push); - * // => true - * - * _.isNative(_); - * // => false - */ - function isNative(value) { - if (isMaskable(value)) { - throw new Error(CORE_ERROR_TEXT); - } - return baseIsNative(value); - } - - /** - * Checks if `value` is `null`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `null`, else `false`. - * @example - * - * _.isNull(null); - * // => true - * - * _.isNull(void 0); - * // => false - */ - function isNull(value) { - return value === null; - } - - /** - * Checks if `value` is `null` or `undefined`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is nullish, else `false`. - * @example - * - * _.isNil(null); - * // => true - * - * _.isNil(void 0); - * // => true - * - * _.isNil(NaN); - * // => false - */ - function isNil(value) { - return value == null; - } - - /** - * Checks if `value` is classified as a `Number` primitive or object. - * - * **Note:** To exclude `Infinity`, `-Infinity`, and `NaN`, which are - * classified as numbers, use the `_.isFinite` method. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a number, else `false`. - * @example - * - * _.isNumber(3); - * // => true - * - * _.isNumber(Number.MIN_VALUE); - * // => true - * - * _.isNumber(Infinity); - * // => true - * - * _.isNumber('3'); - * // => false - */ - function isNumber(value) { - return typeof value == 'number' || - (isObjectLike(value) && baseGetTag(value) == numberTag); - } - - /** - * Checks if `value` is a plain object, that is, an object created by the - * `Object` constructor or one with a `[[Prototype]]` of `null`. - * - * @static - * @memberOf _ - * @since 0.8.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a plain object, else `false`. - * @example - * - * function Foo() { - * this.a = 1; - * } - * - * _.isPlainObject(new Foo); - * // => false - * - * _.isPlainObject([1, 2, 3]); - * // => false - * - * _.isPlainObject({ 'x': 0, 'y': 0 }); - * // => true - * - * _.isPlainObject(Object.create(null)); - * // => true - */ - function isPlainObject(value) { - if (!isObjectLike(value) || baseGetTag(value) != objectTag) { - return false; - } - var proto = getPrototype(value); - if (proto === null) { - return true; - } - var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor; - return typeof Ctor == 'function' && Ctor instanceof Ctor && - funcToString.call(Ctor) == objectCtorString; - } - - /** - * Checks if `value` is classified as a `RegExp` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a regexp, else `false`. 
- * @example - * - * _.isRegExp(/abc/); - * // => true - * - * _.isRegExp('/abc/'); - * // => false - */ - var isRegExp = nodeIsRegExp ? baseUnary(nodeIsRegExp) : baseIsRegExp; - - /** - * Checks if `value` is a safe integer. An integer is safe if it's an IEEE-754 - * double precision number which isn't the result of a rounded unsafe integer. - * - * **Note:** This method is based on - * [`Number.isSafeInteger`](https://mdn.io/Number/isSafeInteger). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a safe integer, else `false`. - * @example - * - * _.isSafeInteger(3); - * // => true - * - * _.isSafeInteger(Number.MIN_VALUE); - * // => false - * - * _.isSafeInteger(Infinity); - * // => false - * - * _.isSafeInteger('3'); - * // => false - */ - function isSafeInteger(value) { - return isInteger(value) && value >= -MAX_SAFE_INTEGER && value <= MAX_SAFE_INTEGER; - } - - /** - * Checks if `value` is classified as a `Set` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a set, else `false`. - * @example - * - * _.isSet(new Set); - * // => true - * - * _.isSet(new WeakSet); - * // => false - */ - var isSet = nodeIsSet ? baseUnary(nodeIsSet) : baseIsSet; - - /** - * Checks if `value` is classified as a `String` primitive or object. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a string, else `false`. - * @example - * - * _.isString('abc'); - * // => true - * - * _.isString(1); - * // => false - */ - function isString(value) { - return typeof value == 'string' || - (!isArray(value) && isObjectLike(value) && baseGetTag(value) == stringTag); - } - - /** - * Checks if `value` is classified as a `Symbol` primitive or object. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. - * @example - * - * _.isSymbol(Symbol.iterator); - * // => true - * - * _.isSymbol('abc'); - * // => false - */ - function isSymbol(value) { - return typeof value == 'symbol' || - (isObjectLike(value) && baseGetTag(value) == symbolTag); - } - - /** - * Checks if `value` is classified as a typed array. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. - * @example - * - * _.isTypedArray(new Uint8Array); - * // => true - * - * _.isTypedArray([]); - * // => false - */ - var isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray; - - /** - * Checks if `value` is `undefined`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `undefined`, else `false`. - * @example - * - * _.isUndefined(void 0); - * // => true - * - * _.isUndefined(null); - * // => false - */ - function isUndefined(value) { - return value === undefined; - } - - /** - * Checks if `value` is classified as a `WeakMap` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a weak map, else `false`. 
- * @example - * - * _.isWeakMap(new WeakMap); - * // => true - * - * _.isWeakMap(new Map); - * // => false - */ - function isWeakMap(value) { - return isObjectLike(value) && getTag(value) == weakMapTag; - } - - /** - * Checks if `value` is classified as a `WeakSet` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a weak set, else `false`. - * @example - * - * _.isWeakSet(new WeakSet); - * // => true - * - * _.isWeakSet(new Set); - * // => false - */ - function isWeakSet(value) { - return isObjectLike(value) && baseGetTag(value) == weakSetTag; - } - - /** - * Checks if `value` is less than `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is less than `other`, - * else `false`. - * @see _.gt - * @example - * - * _.lt(1, 3); - * // => true - * - * _.lt(3, 3); - * // => false - * - * _.lt(3, 1); - * // => false - */ - var lt = createRelationalOperation(baseLt); - - /** - * Checks if `value` is less than or equal to `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is less than or equal to - * `other`, else `false`. - * @see _.gte - * @example - * - * _.lte(1, 3); - * // => true - * - * _.lte(3, 3); - * // => true - * - * _.lte(3, 1); - * // => false - */ - var lte = createRelationalOperation(function(value, other) { - return value <= other; - }); - - /** - * Converts `value` to an array. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Lang - * @param {*} value The value to convert. - * @returns {Array} Returns the converted array. - * @example - * - * _.toArray({ 'a': 1, 'b': 2 }); - * // => [1, 2] - * - * _.toArray('abc'); - * // => ['a', 'b', 'c'] - * - * _.toArray(1); - * // => [] - * - * _.toArray(null); - * // => [] - */ - function toArray(value) { - if (!value) { - return []; - } - if (isArrayLike(value)) { - return isString(value) ? stringToArray(value) : copyArray(value); - } - if (symIterator && value[symIterator]) { - return iteratorToArray(value[symIterator]()); - } - var tag = getTag(value), - func = tag == mapTag ? mapToArray : (tag == setTag ? setToArray : values); - - return func(value); - } - - /** - * Converts `value` to a finite number. - * - * @static - * @memberOf _ - * @since 4.12.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted number. - * @example - * - * _.toFinite(3.2); - * // => 3.2 - * - * _.toFinite(Number.MIN_VALUE); - * // => 5e-324 - * - * _.toFinite(Infinity); - * // => 1.7976931348623157e+308 - * - * _.toFinite('3.2'); - * // => 3.2 - */ - function toFinite(value) { - if (!value) { - return value === 0 ? value : 0; - } - value = toNumber(value); - if (value === INFINITY || value === -INFINITY) { - var sign = (value < 0 ? -1 : 1); - return sign * MAX_INTEGER; - } - return value === value ? value : 0; - } - - /** - * Converts `value` to an integer. - * - * **Note:** This method is loosely based on - * [`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted integer. 
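- *
- * Truncation is toward zero (the fractional remainder is subtracted), so
- * negative values are rounded up:
- *
- * _.toInteger(-4.7);
- * // => -4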
- * @example - * - * _.toInteger(3.2); - * // => 3 - * - * _.toInteger(Number.MIN_VALUE); - * // => 0 - * - * _.toInteger(Infinity); - * // => 1.7976931348623157e+308 - * - * _.toInteger('3.2'); - * // => 3 - */ - function toInteger(value) { - var result = toFinite(value), - remainder = result % 1; - - return result === result ? (remainder ? result - remainder : result) : 0; - } - - /** - * Converts `value` to an integer suitable for use as the length of an - * array-like object. - * - * **Note:** This method is based on - * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted integer. - * @example - * - * _.toLength(3.2); - * // => 3 - * - * _.toLength(Number.MIN_VALUE); - * // => 0 - * - * _.toLength(Infinity); - * // => 4294967295 - * - * _.toLength('3.2'); - * // => 3 - */ - function toLength(value) { - return value ? baseClamp(toInteger(value), 0, MAX_ARRAY_LENGTH) : 0; - } - - /** - * Converts `value` to a number. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to process. - * @returns {number} Returns the number. - * @example - * - * _.toNumber(3.2); - * // => 3.2 - * - * _.toNumber(Number.MIN_VALUE); - * // => 5e-324 - * - * _.toNumber(Infinity); - * // => Infinity - * - * _.toNumber('3.2'); - * // => 3.2 - */ - function toNumber(value) { - if (typeof value == 'number') { - return value; - } - if (isSymbol(value)) { - return NAN; - } - if (isObject(value)) { - var other = typeof value.valueOf == 'function' ? value.valueOf() : value; - value = isObject(other) ? (other + '') : other; - } - if (typeof value != 'string') { - return value === 0 ? value : +value; - } - value = value.replace(reTrim, ''); - var isBinary = reIsBinary.test(value); - return (isBinary || reIsOctal.test(value)) - ? freeParseInt(value.slice(2), isBinary ? 2 : 8) - : (reIsBadHex.test(value) ? NAN : +value); - } - - /** - * Converts `value` to a plain object flattening inherited enumerable string - * keyed properties of `value` to own properties of the plain object. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {Object} Returns the converted plain object. - * @example - * - * function Foo() { - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.assign({ 'a': 1 }, new Foo); - * // => { 'a': 1, 'b': 2 } - * - * _.assign({ 'a': 1 }, _.toPlainObject(new Foo)); - * // => { 'a': 1, 'b': 2, 'c': 3 } - */ - function toPlainObject(value) { - return copyObject(value, keysIn(value)); - } - - /** - * Converts `value` to a safe integer. A safe integer can be compared and - * represented correctly. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted integer. - * @example - * - * _.toSafeInteger(3.2); - * // => 3 - * - * _.toSafeInteger(Number.MIN_VALUE); - * // => 0 - * - * _.toSafeInteger(Infinity); - * // => 9007199254740991 - * - * _.toSafeInteger('3.2'); - * // => 3 - */ - function toSafeInteger(value) { - return value - ? baseClamp(toInteger(value), -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER) - : (value === 0 ? value : 0); - } - - /** - * Converts `value` to a string. An empty string is returned for `null` - * and `undefined` values. The sign of `-0` is preserved. 
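- * Native string coercion drops that sign, which is why the guarantee is
- * worth calling out:
- *
- * String(-0);
- * // => '0'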
- * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {string} Returns the converted string. - * @example - * - * _.toString(null); - * // => '' - * - * _.toString(-0); - * // => '-0' - * - * _.toString([1, 2, 3]); - * // => '1,2,3' - */ - function toString(value) { - return value == null ? '' : baseToString(value); - } - - /*------------------------------------------------------------------------*/ - - /** - * Assigns own enumerable string keyed properties of source objects to the - * destination object. Source objects are applied from left to right. - * Subsequent sources overwrite property assignments of previous sources. - * - * **Note:** This method mutates `object` and is loosely based on - * [`Object.assign`](https://mdn.io/Object/assign). - * - * @static - * @memberOf _ - * @since 0.10.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @see _.assignIn - * @example - * - * function Foo() { - * this.a = 1; - * } - * - * function Bar() { - * this.c = 3; - * } - * - * Foo.prototype.b = 2; - * Bar.prototype.d = 4; - * - * _.assign({ 'a': 0 }, new Foo, new Bar); - * // => { 'a': 1, 'c': 3 } - */ - var assign = createAssigner(function(object, source) { - if (isPrototype(source) || isArrayLike(source)) { - copyObject(source, keys(source), object); - return; - } - for (var key in source) { - if (hasOwnProperty.call(source, key)) { - assignValue(object, key, source[key]); - } - } - }); - - /** - * This method is like `_.assign` except that it iterates over own and - * inherited source properties. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias extend - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @see _.assign - * @example - * - * function Foo() { - * this.a = 1; - * } - * - * function Bar() { - * this.c = 3; - * } - * - * Foo.prototype.b = 2; - * Bar.prototype.d = 4; - * - * _.assignIn({ 'a': 0 }, new Foo, new Bar); - * // => { 'a': 1, 'b': 2, 'c': 3, 'd': 4 } - */ - var assignIn = createAssigner(function(object, source) { - copyObject(source, keysIn(source), object); - }); - - /** - * This method is like `_.assignIn` except that it accepts `customizer` - * which is invoked to produce the assigned values. If `customizer` returns - * `undefined`, assignment is handled by the method instead. The `customizer` - * is invoked with five arguments: (objValue, srcValue, key, object, source). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias extendWith - * @category Object - * @param {Object} object The destination object. - * @param {...Object} sources The source objects. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. - * @see _.assignWith - * @example - * - * function customizer(objValue, srcValue) { - * return _.isUndefined(objValue) ? 
srcValue : objValue; - * } - * - * var defaults = _.partialRight(_.assignInWith, customizer); - * - * defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); - * // => { 'a': 1, 'b': 2 } - */ - var assignInWith = createAssigner(function(object, source, srcIndex, customizer) { - copyObject(source, keysIn(source), object, customizer); - }); - - /** - * This method is like `_.assign` except that it accepts `customizer` - * which is invoked to produce the assigned values. If `customizer` returns - * `undefined`, assignment is handled by the method instead. The `customizer` - * is invoked with five arguments: (objValue, srcValue, key, object, source). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} sources The source objects. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. - * @see _.assignInWith - * @example - * - * function customizer(objValue, srcValue) { - * return _.isUndefined(objValue) ? srcValue : objValue; - * } - * - * var defaults = _.partialRight(_.assignWith, customizer); - * - * defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); - * // => { 'a': 1, 'b': 2 } - */ - var assignWith = createAssigner(function(object, source, srcIndex, customizer) { - copyObject(source, keys(source), object, customizer); - }); - - /** - * Creates an array of values corresponding to `paths` of `object`. - * - * @static - * @memberOf _ - * @since 1.0.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {...(string|string[])} [paths] The property paths to pick. - * @returns {Array} Returns the picked values. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }, 4] }; - * - * _.at(object, ['a[0].b.c', 'a[1]']); - * // => [3, 4] - */ - var at = flatRest(baseAt); - - /** - * Creates an object that inherits from the `prototype` object. If a - * `properties` object is given, its own enumerable string keyed properties - * are assigned to the created object. - * - * @static - * @memberOf _ - * @since 2.3.0 - * @category Object - * @param {Object} prototype The object to inherit from. - * @param {Object} [properties] The properties to assign to the object. - * @returns {Object} Returns the new object. - * @example - * - * function Shape() { - * this.x = 0; - * this.y = 0; - * } - * - * function Circle() { - * Shape.call(this); - * } - * - * Circle.prototype = _.create(Shape.prototype, { - * 'constructor': Circle - * }); - * - * var circle = new Circle; - * circle instanceof Circle; - * // => true - * - * circle instanceof Shape; - * // => true - */ - function create(prototype, properties) { - var result = baseCreate(prototype); - return properties == null ? result : baseAssign(result, properties); - } - - /** - * Assigns own and inherited enumerable string keyed properties of source - * objects to the destination object for all destination properties that - * resolve to `undefined`. Source objects are applied from left to right. - * Once a property is set, additional values of the same property are ignored. - * - * **Note:** This method mutates `object`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. 
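- * Because the first value assigned to a key wins, later sources never
- * overwrite it:
- *
- * _.defaults({ 'a': 1 }, { 'a': 2 }, { 'a': 3 });
- * // => { 'a': 1 }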
- * @see _.defaultsDeep - * @example - * - * _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); - * // => { 'a': 1, 'b': 2 } - */ - var defaults = baseRest(function(object, sources) { - object = Object(object); - - var index = -1; - var length = sources.length; - var guard = length > 2 ? sources[2] : undefined; - - if (guard && isIterateeCall(sources[0], sources[1], guard)) { - length = 1; - } - - while (++index < length) { - var source = sources[index]; - var props = keysIn(source); - var propsIndex = -1; - var propsLength = props.length; - - while (++propsIndex < propsLength) { - var key = props[propsIndex]; - var value = object[key]; - - if (value === undefined || - (eq(value, objectProto[key]) && !hasOwnProperty.call(object, key))) { - object[key] = source[key]; - } - } - } - - return object; - }); - - /** - * This method is like `_.defaults` except that it recursively assigns - * default properties. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 3.10.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @see _.defaults - * @example - * - * _.defaultsDeep({ 'a': { 'b': 2 } }, { 'a': { 'b': 1, 'c': 3 } }); - * // => { 'a': { 'b': 2, 'c': 3 } } - */ - var defaultsDeep = baseRest(function(args) { - args.push(undefined, customDefaultsMerge); - return apply(mergeWith, undefined, args); - }); - - /** - * This method is like `_.find` except that it returns the key of the first - * element `predicate` returns truthy for instead of the element itself. - * - * @static - * @memberOf _ - * @since 1.1.0 - * @category Object - * @param {Object} object The object to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {string|undefined} Returns the key of the matched element, - * else `undefined`. - * @example - * - * var users = { - * 'barney': { 'age': 36, 'active': true }, - * 'fred': { 'age': 40, 'active': false }, - * 'pebbles': { 'age': 1, 'active': true } - * }; - * - * _.findKey(users, function(o) { return o.age < 40; }); - * // => 'barney' (iteration order is not guaranteed) - * - * // The `_.matches` iteratee shorthand. - * _.findKey(users, { 'age': 1, 'active': true }); - * // => 'pebbles' - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findKey(users, ['active', false]); - * // => 'fred' - * - * // The `_.property` iteratee shorthand. - * _.findKey(users, 'active'); - * // => 'barney' - */ - function findKey(object, predicate) { - return baseFindKey(object, getIteratee(predicate, 3), baseForOwn); - } - - /** - * This method is like `_.findKey` except that it iterates over elements of - * a collection in the opposite order. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Object - * @param {Object} object The object to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {string|undefined} Returns the key of the matched element, - * else `undefined`. - * @example - * - * var users = { - * 'barney': { 'age': 36, 'active': true }, - * 'fred': { 'age': 40, 'active': false }, - * 'pebbles': { 'age': 1, 'active': true } - * }; - * - * _.findLastKey(users, function(o) { return o.age < 40; }); - * // => returns 'pebbles' assuming `_.findKey` returns 'barney' - * - * // The `_.matches` iteratee shorthand. 
- * _.findLastKey(users, { 'age': 36, 'active': true }); - * // => 'barney' - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findLastKey(users, ['active', false]); - * // => 'fred' - * - * // The `_.property` iteratee shorthand. - * _.findLastKey(users, 'active'); - * // => 'pebbles' - */ - function findLastKey(object, predicate) { - return baseFindKey(object, getIteratee(predicate, 3), baseForOwnRight); - } - - /** - * Iterates over own and inherited enumerable string keyed properties of an - * object and invokes `iteratee` for each property. The iteratee is invoked - * with three arguments: (value, key, object). Iteratee functions may exit - * iteration early by explicitly returning `false`. - * - * @static - * @memberOf _ - * @since 0.3.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. - * @see _.forInRight - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forIn(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'a', 'b', then 'c' (iteration order is not guaranteed). - */ - function forIn(object, iteratee) { - return object == null - ? object - : baseFor(object, getIteratee(iteratee, 3), keysIn); - } - - /** - * This method is like `_.forIn` except that it iterates over properties of - * `object` in the opposite order. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. - * @see _.forIn - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forInRight(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'c', 'b', then 'a' assuming `_.forIn` logs 'a', 'b', then 'c'. - */ - function forInRight(object, iteratee) { - return object == null - ? object - : baseForRight(object, getIteratee(iteratee, 3), keysIn); - } - - /** - * Iterates over own enumerable string keyed properties of an object and - * invokes `iteratee` for each property. The iteratee is invoked with three - * arguments: (value, key, object). Iteratee functions may exit iteration - * early by explicitly returning `false`. - * - * @static - * @memberOf _ - * @since 0.3.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. - * @see _.forOwnRight - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forOwn(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'a' then 'b' (iteration order is not guaranteed). - */ - function forOwn(object, iteratee) { - return object && baseForOwn(object, getIteratee(iteratee, 3)); - } - - /** - * This method is like `_.forOwn` except that it iterates over properties of - * `object` in the opposite order. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. 
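- * As with `_.forOwn`, iteration may be cut short by explicitly returning
- * `false` from the iteratee:
- *
- * _.forOwnRight({ 'a': 1, 'b': 2 }, function(value, key) {
- *   return false;
- * });
- * // => visits only one property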
- * @see _.forOwn - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forOwnRight(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'b' then 'a' assuming `_.forOwn` logs 'a' then 'b'. - */ - function forOwnRight(object, iteratee) { - return object && baseForOwnRight(object, getIteratee(iteratee, 3)); - } - - /** - * Creates an array of function property names from own enumerable properties - * of `object`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to inspect. - * @returns {Array} Returns the function names. - * @see _.functionsIn - * @example - * - * function Foo() { - * this.a = _.constant('a'); - * this.b = _.constant('b'); - * } - * - * Foo.prototype.c = _.constant('c'); - * - * _.functions(new Foo); - * // => ['a', 'b'] - */ - function functions(object) { - return object == null ? [] : baseFunctions(object, keys(object)); - } - - /** - * Creates an array of function property names from own and inherited - * enumerable properties of `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to inspect. - * @returns {Array} Returns the function names. - * @see _.functions - * @example - * - * function Foo() { - * this.a = _.constant('a'); - * this.b = _.constant('b'); - * } - * - * Foo.prototype.c = _.constant('c'); - * - * _.functionsIn(new Foo); - * // => ['a', 'b', 'c'] - */ - function functionsIn(object) { - return object == null ? [] : baseFunctions(object, keysIn(object)); - } - - /** - * Gets the value at `path` of `object`. If the resolved value is - * `undefined`, the `defaultValue` is returned in its place. - * - * @static - * @memberOf _ - * @since 3.7.0 - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path of the property to get. - * @param {*} [defaultValue] The value returned for `undefined` resolved values. - * @returns {*} Returns the resolved value. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }] }; - * - * _.get(object, 'a[0].b.c'); - * // => 3 - * - * _.get(object, ['a', '0', 'b', 'c']); - * // => 3 - * - * _.get(object, 'a.b.c', 'default'); - * // => 'default' - */ - function get(object, path, defaultValue) { - var result = object == null ? undefined : baseGet(object, path); - return result === undefined ? defaultValue : result; - } - - /** - * Checks if `path` is a direct property of `object`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path to check. - * @returns {boolean} Returns `true` if `path` exists, else `false`. - * @example - * - * var object = { 'a': { 'b': 2 } }; - * var other = _.create({ 'a': _.create({ 'b': 2 }) }); - * - * _.has(object, 'a'); - * // => true - * - * _.has(object, 'a.b'); - * // => true - * - * _.has(object, ['a', 'b']); - * // => true - * - * _.has(other, 'a'); - * // => false - */ - function has(object, path) { - return object != null && hasPath(object, path, baseHas); - } - - /** - * Checks if `path` is a direct or inherited property of `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path to check. - * @returns {boolean} Returns `true` if `path` exists, else `false`. 
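- * Because inherited paths count, even built-in prototype members resolve:
- *
- * _.hasIn({}, 'toString');
- * // => true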
- * @example - * - * var object = _.create({ 'a': _.create({ 'b': 2 }) }); - * - * _.hasIn(object, 'a'); - * // => true - * - * _.hasIn(object, 'a.b'); - * // => true - * - * _.hasIn(object, ['a', 'b']); - * // => true - * - * _.hasIn(object, 'b'); - * // => false - */ - function hasIn(object, path) { - return object != null && hasPath(object, path, baseHasIn); - } - - /** - * Creates an object composed of the inverted keys and values of `object`. - * If `object` contains duplicate values, subsequent values overwrite - * property assignments of previous values. - * - * @static - * @memberOf _ - * @since 0.7.0 - * @category Object - * @param {Object} object The object to invert. - * @returns {Object} Returns the new inverted object. - * @example - * - * var object = { 'a': 1, 'b': 2, 'c': 1 }; - * - * _.invert(object); - * // => { '1': 'c', '2': 'b' } - */ - var invert = createInverter(function(result, value, key) { - if (value != null && - typeof value.toString != 'function') { - value = nativeObjectToString.call(value); - } - - result[value] = key; - }, constant(identity)); - - /** - * This method is like `_.invert` except that the inverted object is generated - * from the results of running each element of `object` thru `iteratee`. The - * corresponding inverted value of each inverted key is an array of keys - * responsible for generating the inverted value. The iteratee is invoked - * with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.1.0 - * @category Object - * @param {Object} object The object to invert. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Object} Returns the new inverted object. - * @example - * - * var object = { 'a': 1, 'b': 2, 'c': 1 }; - * - * _.invertBy(object); - * // => { '1': ['a', 'c'], '2': ['b'] } - * - * _.invertBy(object, function(value) { - * return 'group' + value; - * }); - * // => { 'group1': ['a', 'c'], 'group2': ['b'] } - */ - var invertBy = createInverter(function(result, value, key) { - if (value != null && - typeof value.toString != 'function') { - value = nativeObjectToString.call(value); - } - - if (hasOwnProperty.call(result, value)) { - result[value].push(key); - } else { - result[value] = [key]; - } - }, getIteratee); - - /** - * Invokes the method at `path` of `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path of the method to invoke. - * @param {...*} [args] The arguments to invoke the method with. - * @returns {*} Returns the result of the invoked method. - * @example - * - * var object = { 'a': [{ 'b': { 'c': [1, 2, 3, 4] } }] }; - * - * _.invoke(object, 'a[0].b.c.slice', 1, 3); - * // => [2, 3] - */ - var invoke = baseRest(baseInvoke); - - /** - * Creates an array of the own enumerable property names of `object`. - * - * **Note:** Non-object values are coerced to objects. See the - * [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys) - * for more details. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.keys(new Foo); - * // => ['a', 'b'] (iteration order is not guaranteed) - * - * _.keys('hi'); - * // => ['0', '1'] - */ - function keys(object) { - return isArrayLike(object) ? 
arrayLikeKeys(object) : baseKeys(object); - } - - /** - * Creates an array of the own and inherited enumerable property names of `object`. - * - * **Note:** Non-object values are coerced to objects. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.keysIn(new Foo); - * // => ['a', 'b', 'c'] (iteration order is not guaranteed) - */ - function keysIn(object) { - return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object); - } - - /** - * The opposite of `_.mapValues`; this method creates an object with the - * same values as `object` and keys generated by running each own enumerable - * string keyed property of `object` thru `iteratee`. The iteratee is invoked - * with three arguments: (value, key, object). - * - * @static - * @memberOf _ - * @since 3.8.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns the new mapped object. - * @see _.mapValues - * @example - * - * _.mapKeys({ 'a': 1, 'b': 2 }, function(value, key) { - * return key + value; - * }); - * // => { 'a1': 1, 'b2': 2 } - */ - function mapKeys(object, iteratee) { - var result = {}; - iteratee = getIteratee(iteratee, 3); - - baseForOwn(object, function(value, key, object) { - baseAssignValue(result, iteratee(value, key, object), value); - }); - return result; - } - - /** - * Creates an object with the same keys as `object` and values generated - * by running each own enumerable string keyed property of `object` thru - * `iteratee`. The iteratee is invoked with three arguments: - * (value, key, object). - * - * @static - * @memberOf _ - * @since 2.4.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns the new mapped object. - * @see _.mapKeys - * @example - * - * var users = { - * 'fred': { 'user': 'fred', 'age': 40 }, - * 'pebbles': { 'user': 'pebbles', 'age': 1 } - * }; - * - * _.mapValues(users, function(o) { return o.age; }); - * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) - * - * // The `_.property` iteratee shorthand. - * _.mapValues(users, 'age'); - * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) - */ - function mapValues(object, iteratee) { - var result = {}; - iteratee = getIteratee(iteratee, 3); - - baseForOwn(object, function(value, key, object) { - baseAssignValue(result, key, iteratee(value, key, object)); - }); - return result; - } - - /** - * This method is like `_.assign` except that it recursively merges own and - * inherited enumerable string keyed properties of source objects into the - * destination object. Source properties that resolve to `undefined` are - * skipped if a destination value exists. Array and plain object properties - * are merged recursively. Other objects and value types are overridden by - * assignment. Source objects are applied from left to right. Subsequent - * sources overwrite property assignments of previous sources. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 0.5.0 - * @category Object - * @param {Object} object The destination object. 
- * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @example - * - * var object = { - * 'a': [{ 'b': 2 }, { 'd': 4 }] - * }; - * - * var other = { - * 'a': [{ 'c': 3 }, { 'e': 5 }] - * }; - * - * _.merge(object, other); - * // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] } - */ - var merge = createAssigner(function(object, source, srcIndex) { - baseMerge(object, source, srcIndex); - }); - - /** - * This method is like `_.merge` except that it accepts `customizer` which - * is invoked to produce the merged values of the destination and source - * properties. If `customizer` returns `undefined`, merging is handled by the - * method instead. The `customizer` is invoked with six arguments: - * (objValue, srcValue, key, object, source, stack). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} sources The source objects. - * @param {Function} customizer The function to customize assigned values. - * @returns {Object} Returns `object`. - * @example - * - * function customizer(objValue, srcValue) { - * if (_.isArray(objValue)) { - * return objValue.concat(srcValue); - * } - * } - * - * var object = { 'a': [1], 'b': [2] }; - * var other = { 'a': [3], 'b': [4] }; - * - * _.mergeWith(object, other, customizer); - * // => { 'a': [1, 3], 'b': [2, 4] } - */ - var mergeWith = createAssigner(function(object, source, srcIndex, customizer) { - baseMerge(object, source, srcIndex, customizer); - }); - - /** - * The opposite of `_.pick`; this method creates an object composed of the - * own and inherited enumerable property paths of `object` that are not omitted. - * - * **Note:** This method is considerably slower than `_.pick`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The source object. - * @param {...(string|string[])} [paths] The property paths to omit. - * @returns {Object} Returns the new object. - * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.omit(object, ['a', 'c']); - * // => { 'b': '2' } - */ - var omit = flatRest(function(object, paths) { - var result = {}; - if (object == null) { - return result; - } - var isDeep = false; - paths = arrayMap(paths, function(path) { - path = castPath(path, object); - isDeep || (isDeep = path.length > 1); - return path; - }); - copyObject(object, getAllKeysIn(object), result); - if (isDeep) { - result = baseClone(result, CLONE_DEEP_FLAG | CLONE_FLAT_FLAG | CLONE_SYMBOLS_FLAG, customOmitClone); - } - var length = paths.length; - while (length--) { - baseUnset(result, paths[length]); - } - return result; - }); - - /** - * The opposite of `_.pickBy`; this method creates an object composed of - * the own and inherited enumerable string keyed properties of `object` that - * `predicate` doesn't return truthy for. The predicate is invoked with two - * arguments: (value, key). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The source object. - * @param {Function} [predicate=_.identity] The function invoked per property. - * @returns {Object} Returns the new object. 
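- * Under the hood this is just `_.pickBy` with a negated predicate, so the
- * following two calls are equivalent:
- *
- * _.omitBy(object, _.isNumber);
- * _.pickBy(object, _.negate(_.isNumber));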
- * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.omitBy(object, _.isNumber); - * // => { 'b': '2' } - */ - function omitBy(object, predicate) { - return pickBy(object, negate(getIteratee(predicate))); - } - - /** - * Creates an object composed of the picked `object` properties. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The source object. - * @param {...(string|string[])} [paths] The property paths to pick. - * @returns {Object} Returns the new object. - * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.pick(object, ['a', 'c']); - * // => { 'a': 1, 'c': 3 } - */ - var pick = flatRest(function(object, paths) { - return object == null ? {} : basePick(object, paths); - }); - - /** - * Creates an object composed of the `object` properties `predicate` returns - * truthy for. The predicate is invoked with two arguments: (value, key). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The source object. - * @param {Function} [predicate=_.identity] The function invoked per property. - * @returns {Object} Returns the new object. - * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.pickBy(object, _.isNumber); - * // => { 'a': 1, 'c': 3 } - */ - function pickBy(object, predicate) { - if (object == null) { - return {}; - } - var props = arrayMap(getAllKeysIn(object), function(prop) { - return [prop]; - }); - predicate = getIteratee(predicate); - return basePickBy(object, props, function(value, path) { - return predicate(value, path[0]); - }); - } - - /** - * This method is like `_.get` except that if the resolved value is a - * function it's invoked with the `this` binding of its parent object and - * its result is returned. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path of the property to resolve. - * @param {*} [defaultValue] The value returned for `undefined` resolved values. - * @returns {*} Returns the resolved value. - * @example - * - * var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] }; - * - * _.result(object, 'a[0].b.c1'); - * // => 3 - * - * _.result(object, 'a[0].b.c2'); - * // => 4 - * - * _.result(object, 'a[0].b.c3', 'default'); - * // => 'default' - * - * _.result(object, 'a[0].b.c3', _.constant('default')); - * // => 'default' - */ - function result(object, path, defaultValue) { - path = castPath(path, object); - - var index = -1, - length = path.length; - - // Ensure the loop is entered when path is empty. - if (!length) { - length = 1; - object = undefined; - } - while (++index < length) { - var value = object == null ? undefined : object[toKey(path[index])]; - if (value === undefined) { - index = length; - value = defaultValue; - } - object = isFunction(value) ? value.call(object) : value; - } - return object; - } - - /** - * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, - * it's created. Arrays are created for missing index properties while objects - * are created for all other missing properties. Use `_.setWith` to customize - * `path` creation. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 3.7.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @returns {Object} Returns `object`. 
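- * A string path and its array form address the same property, so these are
- * interchangeable:
- *
- * _.set({}, 'a[0].b', 1);
- * _.set({}, ['a', 0, 'b'], 1);
- * // => both produce { 'a': [{ 'b': 1 }] }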
- * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }] }; - * - * _.set(object, 'a[0].b.c', 4); - * console.log(object.a[0].b.c); - * // => 4 - * - * _.set(object, ['x', '0', 'y', 'z'], 5); - * console.log(object.x[0].y.z); - * // => 5 - */ - function set(object, path, value) { - return object == null ? object : baseSet(object, path, value); - } - - /** - * This method is like `_.set` except that it accepts `customizer` which is - * invoked to produce the objects of `path`. If `customizer` returns `undefined` - * path creation is handled by the method instead. The `customizer` is invoked - * with three arguments: (nsValue, key, nsObject). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. - * @example - * - * var object = {}; - * - * _.setWith(object, '[0][1]', 'a', Object); - * // => { '0': { '1': 'a' } } - */ - function setWith(object, path, value, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return object == null ? object : baseSet(object, path, value, customizer); - } - - /** - * Creates an array of own enumerable string keyed-value pairs for `object` - * which can be consumed by `_.fromPairs`. If `object` is a map or set, its - * entries are returned. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias entries - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the key-value pairs. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.toPairs(new Foo); - * // => [['a', 1], ['b', 2]] (iteration order is not guaranteed) - */ - var toPairs = createToPairs(keys); - - /** - * Creates an array of own and inherited enumerable string keyed-value pairs - * for `object` which can be consumed by `_.fromPairs`. If `object` is a map - * or set, its entries are returned. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias entriesIn - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the key-value pairs. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.toPairsIn(new Foo); - * // => [['a', 1], ['b', 2], ['c', 3]] (iteration order is not guaranteed) - */ - var toPairsIn = createToPairs(keysIn); - - /** - * An alternative to `_.reduce`; this method transforms `object` to a new - * `accumulator` object which is the result of running each of its own - * enumerable string keyed properties thru `iteratee`, with each invocation - * potentially mutating the `accumulator` object. If `accumulator` is not - * provided, a new object with the same `[[Prototype]]` will be used. The - * iteratee is invoked with four arguments: (accumulator, value, key, object). - * Iteratee functions may exit iteration early by explicitly returning `false`. - * - * @static - * @memberOf _ - * @since 1.3.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {*} [accumulator] The custom accumulator value. - * @returns {*} Returns the accumulated value. 
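- * When `accumulator` is omitted it is inferred from `object`; an array input
- * yields a fresh array accumulator:
- *
- * _.transform([2, 3, 4], function(result, n) {
- *   result.push(n * n);
- * });
- * // => [4, 9, 16]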
- * @example
- *
- * _.transform([2, 3, 4], function(result, n) {
- *   result.push(n *= n);
- *   return n % 2 == 0;
- * }, []);
- * // => [4, 9]
- *
- * _.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) {
- *   (result[value] || (result[value] = [])).push(key);
- * }, {});
- * // => { '1': ['a', 'c'], '2': ['b'] }
- */
- function transform(object, iteratee, accumulator) {
-   var isArr = isArray(object),
-       isArrLike = isArr || isBuffer(object) || isTypedArray(object);
-
-   iteratee = getIteratee(iteratee, 4);
-   if (accumulator == null) {
-     var Ctor = object && object.constructor;
-     if (isArrLike) {
-       accumulator = isArr ? new Ctor : [];
-     }
-     else if (isObject(object)) {
-       accumulator = isFunction(Ctor) ? baseCreate(getPrototype(object)) : {};
-     }
-     else {
-       accumulator = {};
-     }
-   }
-   (isArrLike ? arrayEach : baseForOwn)(object, function(value, index, object) {
-     return iteratee(accumulator, value, index, object);
-   });
-   return accumulator;
- }
-
- /**
- * Removes the property at `path` of `object`.
- *
- * **Note:** This method mutates `object`.
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Object
- * @param {Object} object The object to modify.
- * @param {Array|string} path The path of the property to unset.
- * @returns {boolean} Returns `true` if the property is deleted, else `false`.
- * @example
- *
- * var object = { 'a': [{ 'b': { 'c': 7 } }] };
- * _.unset(object, 'a[0].b.c');
- * // => true
- *
- * console.log(object);
- * // => { 'a': [{ 'b': {} }] };
- *
- * _.unset(object, ['a', '0', 'b', 'c']);
- * // => true
- *
- * console.log(object);
- * // => { 'a': [{ 'b': {} }] };
- */
- function unset(object, path) {
-   return object == null ? true : baseUnset(object, path);
- }
-
- /**
- * This method is like `_.set` except that it accepts `updater` to produce
- * the value to set. Use `_.updateWith` to customize `path` creation. The
- * `updater` is invoked with one argument: (value).
- *
- * **Note:** This method mutates `object`.
- *
- * @static
- * @memberOf _
- * @since 4.6.0
- * @category Object
- * @param {Object} object The object to modify.
- * @param {Array|string} path The path of the property to set.
- * @param {Function} updater The function to produce the updated value.
- * @returns {Object} Returns `object`.
- * @example
- *
- * var object = { 'a': [{ 'b': { 'c': 3 } }] };
- *
- * _.update(object, 'a[0].b.c', function(n) { return n * n; });
- * console.log(object.a[0].b.c);
- * // => 9
- *
- * _.update(object, 'x[0].y.z', function(n) { return n ? n + 1 : 0; });
- * console.log(object.x[0].y.z);
- * // => 0
- */
- function update(object, path, updater) {
-   return object == null ? object : baseUpdate(object, path, castFunction(updater));
- }
-
- /**
- * This method is like `_.update` except that it accepts `customizer` which is
- * invoked to produce the objects of `path`. If `customizer` returns `undefined`,
- * path creation is handled by the method instead. The `customizer` is invoked
- * with three arguments: (nsValue, key, nsObject).
- *
- * **Note:** This method mutates `object`.
- *
- * @static
- * @memberOf _
- * @since 4.6.0
- * @category Object
- * @param {Object} object The object to modify.
- * @param {Array|string} path The path of the property to set.
- * @param {Function} updater The function to produce the updated value.
- * @param {Function} [customizer] The function to customize assigned values.
- * @returns {Object} Returns `object`.
- * @example - * - * var object = {}; - * - * _.updateWith(object, '[0][1]', _.constant('a'), Object); - * // => { '0': { '1': 'a' } } - */ - function updateWith(object, path, updater, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return object == null ? object : baseUpdate(object, path, castFunction(updater), customizer); - } - - /** - * Creates an array of the own enumerable string keyed property values of `object`. - * - * **Note:** Non-object values are coerced to objects. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property values. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.values(new Foo); - * // => [1, 2] (iteration order is not guaranteed) - * - * _.values('hi'); - * // => ['h', 'i'] - */ - function values(object) { - return object == null ? [] : baseValues(object, keys(object)); - } - - /** - * Creates an array of the own and inherited enumerable string keyed property - * values of `object`. - * - * **Note:** Non-object values are coerced to objects. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property values. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.valuesIn(new Foo); - * // => [1, 2, 3] (iteration order is not guaranteed) - */ - function valuesIn(object) { - return object == null ? [] : baseValues(object, keysIn(object)); - } - - /*------------------------------------------------------------------------*/ - - /** - * Clamps `number` within the inclusive `lower` and `upper` bounds. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Number - * @param {number} number The number to clamp. - * @param {number} [lower] The lower bound. - * @param {number} upper The upper bound. - * @returns {number} Returns the clamped number. - * @example - * - * _.clamp(-10, -5, 5); - * // => -5 - * - * _.clamp(10, -5, 5); - * // => 5 - */ - function clamp(number, lower, upper) { - if (upper === undefined) { - upper = lower; - lower = undefined; - } - if (upper !== undefined) { - upper = toNumber(upper); - upper = upper === upper ? upper : 0; - } - if (lower !== undefined) { - lower = toNumber(lower); - lower = lower === lower ? lower : 0; - } - return baseClamp(toNumber(number), lower, upper); - } - - /** - * Checks if `n` is between `start` and up to, but not including, `end`. If - * `end` is not specified, it's set to `start` with `start` then set to `0`. - * If `start` is greater than `end` the params are swapped to support - * negative ranges. - * - * @static - * @memberOf _ - * @since 3.3.0 - * @category Number - * @param {number} number The number to check. - * @param {number} [start=0] The start of the range. - * @param {number} end The end of the range. - * @returns {boolean} Returns `true` if `number` is in the range, else `false`. 
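- * Because out-of-order bounds are swapped, the negative-range example below
- * (`_.inRange(-3, -2, -6)`) effectively checks whether `-6 <= -3 < -2`.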
- * @see _.range, _.rangeRight - * @example - * - * _.inRange(3, 2, 4); - * // => true - * - * _.inRange(4, 8); - * // => true - * - * _.inRange(4, 2); - * // => false - * - * _.inRange(2, 2); - * // => false - * - * _.inRange(1.2, 2); - * // => true - * - * _.inRange(5.2, 4); - * // => false - * - * _.inRange(-3, -2, -6); - * // => true - */ - function inRange(number, start, end) { - start = toFinite(start); - if (end === undefined) { - end = start; - start = 0; - } else { - end = toFinite(end); - } - number = toNumber(number); - return baseInRange(number, start, end); - } - - /** - * Produces a random number between the inclusive `lower` and `upper` bounds. - * If only one argument is provided a number between `0` and the given number - * is returned. If `floating` is `true`, or either `lower` or `upper` are - * floats, a floating-point number is returned instead of an integer. - * - * **Note:** JavaScript follows the IEEE-754 standard for resolving - * floating-point values which can produce unexpected results. - * - * @static - * @memberOf _ - * @since 0.7.0 - * @category Number - * @param {number} [lower=0] The lower bound. - * @param {number} [upper=1] The upper bound. - * @param {boolean} [floating] Specify returning a floating-point number. - * @returns {number} Returns the random number. - * @example - * - * _.random(0, 5); - * // => an integer between 0 and 5 - * - * _.random(5); - * // => also an integer between 0 and 5 - * - * _.random(5, true); - * // => a floating-point number between 0 and 5 - * - * _.random(1.2, 5.2); - * // => a floating-point number between 1.2 and 5.2 - */ - function random(lower, upper, floating) { - if (floating && typeof floating != 'boolean' && isIterateeCall(lower, upper, floating)) { - upper = floating = undefined; - } - if (floating === undefined) { - if (typeof upper == 'boolean') { - floating = upper; - upper = undefined; - } - else if (typeof lower == 'boolean') { - floating = lower; - lower = undefined; - } - } - if (lower === undefined && upper === undefined) { - lower = 0; - upper = 1; - } - else { - lower = toFinite(lower); - if (upper === undefined) { - upper = lower; - lower = 0; - } else { - upper = toFinite(upper); - } - } - if (lower > upper) { - var temp = lower; - lower = upper; - upper = temp; - } - if (floating || lower % 1 || upper % 1) { - var rand = nativeRandom(); - return nativeMin(lower + (rand * (upper - lower + freeParseFloat('1e-' + ((rand + '').length - 1)))), upper); - } - return baseRandom(lower, upper); - } - - /*------------------------------------------------------------------------*/ - - /** - * Converts `string` to [camel case](https://en.wikipedia.org/wiki/CamelCase). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the camel cased string. - * @example - * - * _.camelCase('Foo Bar'); - * // => 'fooBar' - * - * _.camelCase('--foo-bar--'); - * // => 'fooBar' - * - * _.camelCase('__FOO_BAR__'); - * // => 'fooBar' - */ - var camelCase = createCompounder(function(result, word, index) { - word = word.toLowerCase(); - return result + (index ? capitalize(word) : word); - }); - - /** - * Converts the first character of `string` to upper case and the remaining - * to lower case. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to capitalize. - * @returns {string} Returns the capitalized string. 
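- * Only the first character is upper cased; the rest are lowered first:
- *
- * _.capitalize('fred FLINTSTONE');
- * // => 'Fred flintstone'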
- * @example - * - * _.capitalize('FRED'); - * // => 'Fred' - */ - function capitalize(string) { - return upperFirst(toString(string).toLowerCase()); - } - - /** - * Deburrs `string` by converting - * [Latin-1 Supplement](https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)#Character_table) - * and [Latin Extended-A](https://en.wikipedia.org/wiki/Latin_Extended-A) - * letters to basic Latin letters and removing - * [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to deburr. - * @returns {string} Returns the deburred string. - * @example - * - * _.deburr('déjà vu'); - * // => 'deja vu' - */ - function deburr(string) { - string = toString(string); - return string && string.replace(reLatin, deburrLetter).replace(reComboMark, ''); - } - - /** - * Checks if `string` ends with the given target string. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to inspect. - * @param {string} [target] The string to search for. - * @param {number} [position=string.length] The position to search up to. - * @returns {boolean} Returns `true` if `string` ends with `target`, - * else `false`. - * @example - * - * _.endsWith('abc', 'c'); - * // => true - * - * _.endsWith('abc', 'b'); - * // => false - * - * _.endsWith('abc', 'b', 2); - * // => true - */ - function endsWith(string, target, position) { - string = toString(string); - target = baseToString(target); - - var length = string.length; - position = position === undefined - ? length - : baseClamp(toInteger(position), 0, length); - - var end = position; - position -= target.length; - return position >= 0 && string.slice(position, end) == target; - } - - /** - * Converts the characters "&", "<", ">", '"', and "'" in `string` to their - * corresponding HTML entities. - * - * **Note:** No other characters are escaped. To escape additional - * characters use a third-party library like [_he_](https://mths.be/he). - * - * Though the ">" character is escaped for symmetry, characters like - * ">" and "/" don't need escaping in HTML and have no special meaning - * unless they're part of a tag or unquoted attribute value. See - * [Mathias Bynens's article](https://mathiasbynens.be/notes/ambiguous-ampersands) - * (under "semi-related fun fact") for more details. - * - * When working with HTML you should always - * [quote attribute values](http://wonko.com/post/html-escaping) to reduce - * XSS vectors. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category String - * @param {string} [string=''] The string to escape. - * @returns {string} Returns the escaped string. - * @example - * - * _.escape('fred, barney, & pebbles'); - * // => 'fred, barney, & pebbles' - */ - function escape(string) { - string = toString(string); - return (string && reHasUnescapedHtml.test(string)) - ? string.replace(reUnescapedHtml, escapeHtmlChar) - : string; - } - - /** - * Escapes the `RegExp` special characters "^", "$", "\", ".", "*", "+", - * "?", "(", ")", "[", "]", "{", "}", and "|" in `string`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to escape. - * @returns {string} Returns the escaped string. 
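- * The escaped result is safe to embed in a `RegExp` source string:
- *
- * new RegExp(_.escapeRegExp('a.b')).test('a.b');
- * // => true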
- * @example - * - * _.escapeRegExp('[lodash](https://lodash.com/)'); - * // => '\[lodash\]\(https://lodash\.com/\)' - */ - function escapeRegExp(string) { - string = toString(string); - return (string && reHasRegExpChar.test(string)) - ? string.replace(reRegExpChar, '\\$&') - : string; - } - - /** - * Converts `string` to - * [kebab case](https://en.wikipedia.org/wiki/Letter_case#Special_case_styles). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the kebab cased string. - * @example - * - * _.kebabCase('Foo Bar'); - * // => 'foo-bar' - * - * _.kebabCase('fooBar'); - * // => 'foo-bar' - * - * _.kebabCase('__FOO_BAR__'); - * // => 'foo-bar' - */ - var kebabCase = createCompounder(function(result, word, index) { - return result + (index ? '-' : '') + word.toLowerCase(); - }); - - /** - * Converts `string`, as space separated words, to lower case. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the lower cased string. - * @example - * - * _.lowerCase('--Foo-Bar--'); - * // => 'foo bar' - * - * _.lowerCase('fooBar'); - * // => 'foo bar' - * - * _.lowerCase('__FOO_BAR__'); - * // => 'foo bar' - */ - var lowerCase = createCompounder(function(result, word, index) { - return result + (index ? ' ' : '') + word.toLowerCase(); - }); - - /** - * Converts the first character of `string` to lower case. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the converted string. - * @example - * - * _.lowerFirst('Fred'); - * // => 'fred' - * - * _.lowerFirst('FRED'); - * // => 'fRED' - */ - var lowerFirst = createCaseFirst('toLowerCase'); - - /** - * Pads `string` on the left and right sides if it's shorter than `length`. - * Padding characters are truncated if they can't be evenly divided by `length`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to pad. - * @param {number} [length=0] The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padded string. - * @example - * - * _.pad('abc', 8); - * // => ' abc ' - * - * _.pad('abc', 8, '_-'); - * // => '_-abc_-_' - * - * _.pad('abc', 3); - * // => 'abc' - */ - function pad(string, length, chars) { - string = toString(string); - length = toInteger(length); - - var strLength = length ? stringSize(string) : 0; - if (!length || strLength >= length) { - return string; - } - var mid = (length - strLength) / 2; - return ( - createPadding(nativeFloor(mid), chars) + - string + - createPadding(nativeCeil(mid), chars) - ); - } - - /** - * Pads `string` on the right side if it's shorter than `length`. Padding - * characters are truncated if they exceed `length`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to pad. - * @param {number} [length=0] The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padded string. - * @example - * - * _.padEnd('abc', 6); - * // => 'abc ' - * - * _.padEnd('abc', 6, '_-'); - * // => 'abc_-_' - * - * _.padEnd('abc', 3); - * // => 'abc' - */ - function padEnd(string, length, chars) { - string = toString(string); - length = toInteger(length); - - var strLength = length ? 
stringSize(string) : 0; - return (length && strLength < length) - ? (string + createPadding(length - strLength, chars)) - : string; - } - - /** - * Pads `string` on the left side if it's shorter than `length`. Padding - * characters are truncated if they exceed `length`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to pad. - * @param {number} [length=0] The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padded string. - * @example - * - * _.padStart('abc', 6); - * // => ' abc' - * - * _.padStart('abc', 6, '_-'); - * // => '_-_abc' - * - * _.padStart('abc', 3); - * // => 'abc' - */ - function padStart(string, length, chars) { - string = toString(string); - length = toInteger(length); - - var strLength = length ? stringSize(string) : 0; - return (length && strLength < length) - ? (createPadding(length - strLength, chars) + string) - : string; - } - - /** - * Converts `string` to an integer of the specified radix. If `radix` is - * `undefined` or `0`, a `radix` of `10` is used unless `value` is a - * hexadecimal, in which case a `radix` of `16` is used. - * - * **Note:** This method aligns with the - * [ES5 implementation](https://es5.github.io/#x15.1.2.2) of `parseInt`. - * - * @static - * @memberOf _ - * @since 1.1.0 - * @category String - * @param {string} string The string to convert. - * @param {number} [radix=10] The radix to interpret `value` by. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {number} Returns the converted integer. - * @example - * - * _.parseInt('08'); - * // => 8 - * - * _.map(['6', '08', '10'], _.parseInt); - * // => [6, 8, 10] - */ - function parseInt(string, radix, guard) { - if (guard || radix == null) { - radix = 0; - } else if (radix) { - radix = +radix; - } - return nativeParseInt(toString(string).replace(reTrimStart, ''), radix || 0); - } - - /** - * Repeats the given string `n` times. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to repeat. - * @param {number} [n=1] The number of times to repeat the string. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {string} Returns the repeated string. - * @example - * - * _.repeat('*', 3); - * // => '***' - * - * _.repeat('abc', 2); - * // => 'abcabc' - * - * _.repeat('abc', 0); - * // => '' - */ - function repeat(string, n, guard) { - if ((guard ? isIterateeCall(string, n, guard) : n === undefined)) { - n = 1; - } else { - n = toInteger(n); - } - return baseRepeat(toString(string), n); - } - - /** - * Replaces matches for `pattern` in `string` with `replacement`. - * - * **Note:** This method is based on - * [`String#replace`](https://mdn.io/String/replace). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to modify. - * @param {RegExp|string} pattern The pattern to replace. - * @param {Function|string} replacement The match replacement. - * @returns {string} Returns the modified string. - * @example - * - * _.replace('Hi Fred', 'Fred', 'Barney'); - * // => 'Hi Barney' - */ - function replace() { - var args = arguments, - string = toString(args[0]); - - return args.length < 3 ? string : string.replace(args[1], args[2]); - } - - /** - * Converts `string` to - * [snake case](https://en.wikipedia.org/wiki/Snake_case). 
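The `pad` implementation above splits the leftover width with `nativeFloor`/`nativeCeil`, so an odd remainder lands on the right side, and truncates repeated padding that does not divide evenly. A dependency-free sketch of that logic, using `String#repeat` in place of lodash's internal `createPadding` and plain `.length` in place of the unicode-aware `stringSize`:

```js
// Sketch of _.pad's centering logic; `makePadding` approximates lodash's
// internal createPadding by repeating `chars` and truncating to `length`.
function makePadding(length, chars = ' ') {
  return chars.repeat(Math.ceil(length / chars.length)).slice(0, length);
}

function pad(string, length, chars) {
  string = String(string);
  const strLength = string.length; // lodash counts unicode glyphs via stringSize()
  if (!length || strLength >= length) return string;
  const mid = (length - strLength) / 2;
  // Left side gets the floor, right side the ceil, so 'abc' padded to 8
  // with '_-' yields '_-abc_-_' as in the doc example above.
  return makePadding(Math.floor(mid), chars) + string + makePadding(Math.ceil(mid), chars);
}

console.log(pad('abc', 8, '_-')); // => '_-abc_-_'
```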
- * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the snake cased string. - * @example - * - * _.snakeCase('Foo Bar'); - * // => 'foo_bar' - * - * _.snakeCase('fooBar'); - * // => 'foo_bar' - * - * _.snakeCase('--FOO-BAR--'); - * // => 'foo_bar' - */ - var snakeCase = createCompounder(function(result, word, index) { - return result + (index ? '_' : '') + word.toLowerCase(); - }); - - /** - * Splits `string` by `separator`. - * - * **Note:** This method is based on - * [`String#split`](https://mdn.io/String/split). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to split. - * @param {RegExp|string} separator The separator pattern to split by. - * @param {number} [limit] The length to truncate results to. - * @returns {Array} Returns the string segments. - * @example - * - * _.split('a-b-c', '-', 2); - * // => ['a', 'b'] - */ - function split(string, separator, limit) { - if (limit && typeof limit != 'number' && isIterateeCall(string, separator, limit)) { - separator = limit = undefined; - } - limit = limit === undefined ? MAX_ARRAY_LENGTH : limit >>> 0; - if (!limit) { - return []; - } - string = toString(string); - if (string && ( - typeof separator == 'string' || - (separator != null && !isRegExp(separator)) - )) { - separator = baseToString(separator); - if (!separator && hasUnicode(string)) { - return castSlice(stringToArray(string), 0, limit); - } - } - return string.split(separator, limit); - } - - /** - * Converts `string` to - * [start case](https://en.wikipedia.org/wiki/Letter_case#Stylistic_or_specialised_usage). - * - * @static - * @memberOf _ - * @since 3.1.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the start cased string. - * @example - * - * _.startCase('--foo-bar--'); - * // => 'Foo Bar' - * - * _.startCase('fooBar'); - * // => 'Foo Bar' - * - * _.startCase('__FOO_BAR__'); - * // => 'FOO BAR' - */ - var startCase = createCompounder(function(result, word, index) { - return result + (index ? ' ' : '') + upperFirst(word); - }); - - /** - * Checks if `string` starts with the given target string. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to inspect. - * @param {string} [target] The string to search for. - * @param {number} [position=0] The position to search from. - * @returns {boolean} Returns `true` if `string` starts with `target`, - * else `false`. - * @example - * - * _.startsWith('abc', 'a'); - * // => true - * - * _.startsWith('abc', 'b'); - * // => false - * - * _.startsWith('abc', 'b', 1); - * // => true - */ - function startsWith(string, target, position) { - string = toString(string); - position = position == null - ? 0 - : baseClamp(toInteger(position), 0, string.length); - - target = baseToString(target); - return string.slice(position, position + target.length) == target; - } - - /** - * Creates a compiled template function that can interpolate data properties - * in "interpolate" delimiters, HTML-escape interpolated data properties in - * "escape" delimiters, and execute JavaScript in "evaluate" delimiters. Data - * properties may be accessed as free variables in the template. If a setting - * object is given, it takes precedence over `_.templateSettings` values. 
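The empty-separator branch in `split` above (`castSlice(stringToArray(string), 0, limit)`) exists because a naive `String#split('')` cuts astral characters such as emoji into surrogate halves. A small demonstration, with a hypothetical `splitChars` standing in for lodash's internal code-point-aware path:

```js
const emoji = 'a💩b';

console.log(emoji.split('').length); // => 4 ('💩' is broken into two surrogate code units)
console.log([...emoji].length);      // => 3 (the spread operator iterates by code point)

// Code-point-aware split on the empty separator, honoring a limit:
function splitChars(string, limit = Infinity) {
  return [...string].slice(0, limit);
}
console.log(splitChars(emoji, 2)); // => ['a', '💩']
```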
- * - * **Note:** In the development build `_.template` utilizes - * [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl) - * for easier debugging. - * - * For more information on precompiling templates see - * [lodash's custom builds documentation](https://lodash.com/custom-builds). - * - * For more information on Chrome extension sandboxes see - * [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval). - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category String - * @param {string} [string=''] The template string. - * @param {Object} [options={}] The options object. - * @param {RegExp} [options.escape=_.templateSettings.escape] - * The HTML "escape" delimiter. - * @param {RegExp} [options.evaluate=_.templateSettings.evaluate] - * The "evaluate" delimiter. - * @param {Object} [options.imports=_.templateSettings.imports] - * An object to import into the template as free variables. - * @param {RegExp} [options.interpolate=_.templateSettings.interpolate] - * The "interpolate" delimiter. - * @param {string} [options.sourceURL='lodash.templateSources[n]'] - * The sourceURL of the compiled template. - * @param {string} [options.variable='obj'] - * The data object variable name. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the compiled template function. - * @example - * - * // Use the "interpolate" delimiter to create a compiled template. - * var compiled = _.template('hello <%= user %>!'); - * compiled({ 'user': 'fred' }); - * // => 'hello fred!' - * - * // Use the HTML "escape" delimiter to escape data property values. - * var compiled = _.template('<%- value %>'); - * compiled({ 'value': ' - {{content-for "body-footer"}} - {{content-for "test-body-footer"}} + {{content-for "body-footer"}} {{content-for "test-body-footer"}} diff --git a/ui/tests/integration/components/alert-inline-test.js b/ui/tests/integration/components/alert-inline-test.js index c0aad2443304..a992257ede3f 100644 --- a/ui/tests/integration/components/alert-inline-test.js +++ b/ui/tests/integration/components/alert-inline-test.js @@ -1,6 +1,6 @@ /** * Copyright (c) HashiCorp, Inc. 
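For reference, the three delimiter kinds `_.template` documents above behave as follows; a usage sketch assuming lodash is loaded as `_`:

```js
const _ = require('lodash');

// "interpolate" (<%= %>) inserts the value as-is.
const hello = _.template('hello <%= user %>!');
console.log(hello({ user: 'fred' })); // => 'hello fred!'

// "escape" (<%- %>) HTML-escapes the value first.
const safe = _.template('<b><%- value %></b>');
console.log(safe({ value: '<script>' })); // => '<b>&lt;script&gt;</b>'

// "evaluate" (<% %>) runs arbitrary JavaScript, e.g. a loop.
const list = _.template('<% users.forEach(function(u) { %><li><%- u %></li><% }); %>');
console.log(list({ users: ['fred', 'barney'] })); // => '<li>fred</li><li>barney</li>'
```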
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; @@ -8,71 +8,84 @@ import { setupRenderingTest } from 'ember-qunit'; import { render, settled, find, waitUntil } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; +const SHARED_STYLES = { + success: { + icon: 'check-circle-fill', + class: 'hds-alert--color-success', + }, + warning: { + icon: 'alert-triangle-fill', + class: 'hds-alert--color-warning', + }, +}; module('Integration | Component | alert-inline', function (hooks) { setupRenderingTest(hooks); - hooks.beforeEach(function () { - this.set('message', 'some very important alert'); - this.set('type', 'warning'); - }); + test('it renders alert message for each @color arg', async function (assert) { + const COLORS = { + ...SHARED_STYLES, + neutral: { + icon: 'info-fill', + class: 'hds-alert--color-neutral', + }, + highlight: { + icon: 'info-fill', + class: 'hds-alert--color-highlight', + }, + critical: { + icon: 'alert-diamond-fill', + class: 'hds-alert--color-critical', + }, + }; - test('it renders alert message with correct class args', async function (assert) { - await render(hbs` - - `); + const { neutral } = COLORS; // default color + await render(hbs``); assert.dom('[data-test-inline-error-message]').hasText('some very important alert'); - assert - .dom('[data-test-inline-alert]') - .hasAttribute('class', 'message-inline padding-top is-marginless size-small'); - }); + assert.dom(`[data-test-icon="${neutral.icon}"]`).exists('renders default icon'); + assert.dom('[data-test-inline-alert]').hasClass(neutral.class, 'renders default class'); - test('it yields to block text', async function (assert) { - await render(hbs` - - A much more important alert - - `); - assert.dom('[data-test-inline-error-message]').hasText('A much more important alert'); + // assert deprecated @type arg values map to expected color + for (const type in COLORS) { + this.color = type; + const color = COLORS[type]; + await render(hbs``); + assert.dom(`[data-test-icon="${color.icon}"]`).exists(`@color="${type}" renders icon: ${color.icon}`); + assert + .dom('[data-test-inline-alert]') + .hasClass(color.class, `@color="${type}" renders class: ${color.class}`); + } }); - test('it renders correctly for type=danger', async function (assert) { - this.set('type', 'danger'); - await render(hbs` - - `); - assert - .dom('[data-test-inline-error-message]') - .hasAttribute('class', 'has-text-danger', 'has danger text'); - assert.dom('[data-test-icon="x-square-fill"]').exists('danger icon exists'); - }); - - test('it renders correctly for type=warning', async function (assert) { - await render(hbs` - - `); - assert.dom('[data-test-inline-error-message]').doesNotHaveAttribute('class', 'does not have styled text'); - assert.dom('[data-test-icon="alert-triangle-fill"]').exists('warning icon exists'); + test('it renders alert color for each deprecated @type arg', async function (assert) { + const OLD_TYPES = { + ...SHARED_STYLES, + info: { + icon: 'info-fill', + class: 'hds-alert--color-highlight', + }, + danger: { + icon: 'alert-diamond-fill', + class: 'hds-alert--color-critical', + }, + }; + // assert deprecated @type arg values map to expected color + for (const type in OLD_TYPES) { + this.type = type; + const color = OLD_TYPES[type]; + await render(hbs``); + assert + .dom(`[data-test-icon="${color.icon}"]`) + .exists(`deprecated @type="${type}" renders icon: ${color.icon}`); + assert + .dom('[data-test-inline-alert]') + 
.hasClass(color.class, `deprecated @type="${type}" renders class: ${color.class}`); + } }); test('it mimics loading when message changes', async function (assert) { + this.message = 'some very important alert'; await render(hbs` - + `); assert .dom('[data-test-inline-error-message]') diff --git a/ui/tests/integration/components/alert-popup-test.js b/ui/tests/integration/components/alert-popup-test.js deleted file mode 100644 index 25f476e8d58a..000000000000 --- a/ui/tests/integration/components/alert-popup-test.js +++ /dev/null @@ -1,58 +0,0 @@ -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'vault/tests/helpers'; -import { render } from '@ember/test-helpers'; -import { hbs } from 'ember-cli-htmlbars'; -import { click } from '@ember/test-helpers'; - -module('Integration | Component | alert-popup', function (hooks) { - setupRenderingTest(hooks); - - hooks.beforeEach(function () { - this.set('message', 'some very important alert'); - this.set('type', 'warning'); - this.set('close', () => this.set('closed', true)); - }); - - test('it renders the alert popup input', async function (assert) { - await render(hbs` - - `); - - assert.dom(this.element).hasText('Warning some very important alert'); - }); - - test('it invokes the close action', async function (assert) { - assert.expect(1); - - await render(hbs` - - `); - await click('.close-button'); - - assert.true(this.closed); - }); - - test('it renders the alert popup with different colors based on types', async function (assert) { - await render(hbs` - - `); - - assert.dom('.message').hasClass('is-highlight'); - - this.set('type', 'info'); - - await render(hbs` - - `); - - assert.dom('.message').hasClass('is-info'); - - this.set('type', 'danger'); - - await render(hbs` - - `); - - assert.dom('.message').hasClass('is-danger'); - }); -}); diff --git a/ui/tests/integration/components/app-footer-test.js b/ui/tests/integration/components/app-footer-test.js new file mode 100644 index 000000000000..cc9ee5555101 --- /dev/null +++ b/ui/tests/integration/components/app-footer-test.js @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. 
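The rewritten alert-inline tests above rely on a table-driven pattern: a map of expected icon and class names is iterated, and the component is re-rendered once per entry so a single test covers every variant. A minimal sketch of the same pattern for a hypothetical `MyBadge` component (component, selector, and class names are illustrative):

```js
import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { render } from '@ember/test-helpers';
import { hbs } from 'ember-cli-htmlbars';

module('Integration | Component | my-badge', function (hooks) {
  setupRenderingTest(hooks);

  test('it renders the class for each @color arg', async function (assert) {
    const COLORS = { success: 'badge--success', warning: 'badge--warning' };
    for (const color in COLORS) {
      this.color = color; // read back by the template below on each render
      await render(hbs`<MyBadge @color={{this.color}} data-test-badge />`);
      assert.dom('[data-test-badge]').hasClass(COLORS[color], `@color="${color}" renders its class`);
    }
  });
});
```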
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'vault/tests/helpers';
+import { render } from '@ember/test-helpers';
+import { hbs } from 'ember-cli-htmlbars';
+
+const selectors = {
+  versionDisplay: '[data-test-footer-version]',
+  upgradeLink: '[data-test-footer-upgrade-link]',
+  docsLink: '[data-test-footer-documentation-link]',
+};
+
+module('Integration | Component | app-footer', function (hooks) {
+  setupRenderingTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.versionSvc = this.owner.lookup('service:version');
+  });
+
+  test('it renders a sane default', async function (assert) {
+    await render(hbs``);
+    assert.dom(selectors.versionDisplay).hasText('Vault', 'Vault without version by default');
+    assert.dom(selectors.upgradeLink).hasText('Upgrade to Vault Enterprise', 'upgrade link shows');
+    assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link');
+  });
+
+  test('it renders for community version', async function (assert) {
+    this.versionSvc.version = '1.15.1';
+    this.versionSvc.type = 'community';
+    await render(hbs``);
+    assert.dom(selectors.versionDisplay).hasText('Vault 1.15.1', 'Vault shows version when available');
+    assert.dom(selectors.upgradeLink).hasText('Upgrade to Vault Enterprise', 'upgrade link shows');
+    assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link');
+  });
+  test('it renders for ent version', async function (assert) {
+    this.versionSvc.version = '1.15.1+hsm';
+    this.versionSvc.type = 'enterprise';
+    await render(hbs``);
+    assert.dom(selectors.versionDisplay).hasText('Vault 1.15.1+hsm', 'shows version when available');
+    assert.dom(selectors.upgradeLink).doesNotExist('upgrade link not shown');
+    assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link');
+  });
+});
diff --git a/ui/tests/integration/components/auth-config-form/options-test.js b/ui/tests/integration/components/auth-config-form/options-test.js
index 24ddaf6e95c8..8f51ae81eb61 100644
--- a/ui/tests/integration/components/auth-config-form/options-test.js
+++ b/ui/tests/integration/components/auth-config-form/options-test.js
@@ -1,58 +1,201 @@
 /**
  * Copyright (c) HashiCorp, Inc.
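A condensed sketch of the payload-assertion pattern the tune tests below use repeatedly: stub the endpoint with Mirage, parse `req.requestBody`, and compare it to the expected wire format with `assert.propEqual` (the mount path and payload here are illustrative):

```js
import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { setupMirage } from 'ember-cli-mirage/test-support';

module('Integration | payload assertion pattern', function (hooks) {
  setupRenderingTest(hooks);
  setupMirage(hooks);

  test('it submits the expected tune payload', async function (assert) {
    assert.expect(1);
    this.server.post('sys/mounts/auth/my-path/tune', (schema, req) => {
      // The serialized form data is the request body; assert on the wire format.
      const payload = JSON.parse(req.requestBody);
      assert.propEqual(payload, { default_lease_ttl: '30s' }, 'payload matches expected format');
      return { payload };
    });
    // ...render the form, toggle a 30s default lease TTL, and click save
    // so the component issues the POST stubbed above.
  });
});
```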
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ -import { resolve } from 'rsvp'; -import EmberObject from '@ember/object'; import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, settled } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { click, fillIn, render } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; -import sinon from 'sinon'; -import { create } from 'ember-cli-page-object'; -import authConfigForm from 'vault/tests/pages/components/auth-config-form/options'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { methods } from 'vault/helpers/mountable-auth-methods'; -const component = create(authConfigForm); +const userLockoutSupported = ['approle', 'ldap', 'userpass']; +const userLockoutUnsupported = methods() + .map((m) => m.type) + .filter((m) => !userLockoutSupported.includes(m)); module('Integration | Component | auth-config-form options', function (hooks) { setupRenderingTest(hooks); + setupMirage(hooks); hooks.beforeEach(function () { this.owner.lookup('service:flash-messages').registerTypes(['success']); this.router = this.owner.lookup('service:router'); + this.store = this.owner.lookup('service:store'); + this.createModel = (path, type) => { + this.model = this.store.createRecord('auth-method', { path, type }); + this.model.set('config', this.store.createRecord('mount-config')); + }; + }); + + for (const type of userLockoutSupported) { + test(`it submits data correctly for ${type} method (supports user_lockout_config)`, async function (assert) { + assert.expect(3); + const path = `my-${type}-auth/`; + this.createModel(path, type); + + this.router.reopen({ + transitionTo() { + return { + followRedirects() { + assert.ok(true, `saving ${type} calls transitionTo on save`); + }, + }; + }, + }); + + this.server.post(`sys/mounts/auth/${path}/tune`, (schema, req) => { + const payload = JSON.parse(req.requestBody); + const expected = { + default_lease_ttl: '30s', + listing_visibility: 'unauth', + token_type: 'default-batch', + user_lockout_config: { + lockout_threshold: '7', + lockout_duration: '600s', + lockout_counter_reset: '5s', + lockout_disable: true, + }, + }; + assert.propEqual(payload, expected, `${type} method payload contains tune parameters`); + return { payload }; + }); + await render(hbs``); + + assert.dom('[data-test-user-lockout-section]').hasText('User lockout configuration'); + + await click(GENERAL.inputByAttr('config.listingVisibility')); + await fillIn(GENERAL.inputByAttr('config.tokenType'), 'default-batch'); + + await click(GENERAL.ttl.toggle('Default Lease TTL')); + await fillIn(GENERAL.ttl.input('Default Lease TTL'), '30'); + + await fillIn(GENERAL.inputByAttr('config.lockoutThreshold'), '7'); + + await click(GENERAL.ttl.toggle('Lockout duration')); + await fillIn(GENERAL.ttl.input('Lockout duration'), '10'); + await fillIn( + `${GENERAL.inputByAttr('config.lockoutDuration')} ${GENERAL.selectByAttr('ttl-unit')}`, + 'm' + ); + await click(GENERAL.ttl.toggle('Lockout counter reset')); + await fillIn(GENERAL.ttl.input('Lockout counter reset'), '5'); + + await click(GENERAL.inputByAttr('config.lockoutDisable')); + + await click('[data-test-save-config]'); + }); + } + + for (const type of userLockoutUnsupported) { + if (type === 'token') return; // separate test below because does not include tokenType field + + test(`it submits data correctly for ${type} auth method`, async function 
(assert) { + assert.expect(7); + + const path = `my-${type}-auth/`; + this.createModel(path, type); + + this.router.reopen({ + transitionTo() { + return { + followRedirects() { + assert.ok(true, `saving ${type} calls transitionTo on save`); + }, + }; + }, + }); + + this.server.post(`sys/mounts/auth/${path}/tune`, (schema, req) => { + const payload = JSON.parse(req.requestBody); + const expected = { + default_lease_ttl: '30s', + listing_visibility: 'unauth', + token_type: 'default-batch', + }; + assert.propEqual(payload, expected, `${type} method payload contains tune parameters`); + return { payload }; + }); + await render(hbs``); + + assert + .dom('[data-test-user-lockout-section]') + .doesNotExist(`${type} method does not render user lockout section`); + + await click(GENERAL.inputByAttr('config.listingVisibility')); + await fillIn(GENERAL.inputByAttr('config.tokenType'), 'default-batch'); + + await click(GENERAL.ttl.toggle('Default Lease TTL')); + await fillIn(GENERAL.ttl.input('Default Lease TTL'), '30'); + + assert + .dom(GENERAL.inputByAttr('config.lockoutThreshold')) + .doesNotExist(`${type} method does not render lockout threshold`); + assert + .dom(GENERAL.ttl.toggle('Lockout duration')) + .doesNotExist(`${type} method does not render lockout duration `); + assert + .dom(GENERAL.ttl.toggle('Lockout counter reset')) + .doesNotExist(`${type} method does not render lockout counter reset`); + assert + .dom(GENERAL.inputByAttr('config.lockoutDisable')) + .doesNotExist(`${type} method does not render lockout disable`); + + await click('[data-test-save-config]'); + }); + } + + test('it submits data correctly for token auth method', async function (assert) { + assert.expect(8); + const type = 'token'; + const path = `my-${type}-auth/`; + this.createModel(path, type); + this.router.reopen({ transitionTo() { return { followRedirects() { - return resolve(); + assert.ok(true, `saving token calls transitionTo on save`); }, }; }, - replaceWith() { - return resolve(); - }, }); - }); - test('it submits data correctly', async function (assert) { - assert.expect(1); - const model = EmberObject.create({ - tune() { - return resolve(); - }, - config: { - serialize() { - return {}; - }, - }, - }); - sinon.spy(model.config, 'serialize'); - this.set('model', model); - await render(hbs`{{auth-config-form/options model=this.model}}`); - component.save(); - return settled().then(() => { - assert.ok(model.config.serialize.calledOnce); + this.server.post(`sys/mounts/auth/${path}/tune`, (schema, req) => { + const payload = JSON.parse(req.requestBody); + const expected = { + default_lease_ttl: '30s', + listing_visibility: 'unauth', + }; + assert.propEqual(payload, expected, `${type} method payload contains tune parameters`); + return { payload }; }); + await render(hbs``); + + assert + .dom(GENERAL.inputByAttr('config.tokenType')) + .doesNotExist('does not render tokenType for token auth method'); + + await click(GENERAL.inputByAttr('config.listingVisibility')); + await click(GENERAL.ttl.toggle('Default Lease TTL')); + await fillIn(GENERAL.ttl.input('Default Lease TTL'), '30'); + + assert.dom('[data-test-user-lockout-section]').doesNotExist('token does not render user lockout section'); + assert + .dom(GENERAL.inputByAttr('config.lockoutThreshold')) + .doesNotExist('token method does not render lockout threshold'); + assert + .dom(GENERAL.ttl.toggle('Lockout duration')) + .doesNotExist('token method does not render lockout duration '); + assert + .dom(GENERAL.ttl.toggle('Lockout counter reset')) + 
.doesNotExist('token method does not render lockout counter reset'); + assert + .dom(GENERAL.inputByAttr('config.lockoutDisable')) + .doesNotExist('token method does not render lockout disable'); + + await click('[data-test-save-config]'); }); }); diff --git a/ui/tests/integration/components/auth-form-test.js b/ui/tests/integration/components/auth-form-test.js index b8cd73df34e1..c82ce4acf15d 100644 --- a/ui/tests/integration/components/auth-form-test.js +++ b/ui/tests/integration/components/auth-form-test.js @@ -1,196 +1,123 @@ /** * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { later, _cancelTimers as cancelTimers } from '@ember/runloop'; -import EmberObject from '@ember/object'; -import { resolve } from 'rsvp'; -import Service from '@ember/service'; import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, settled } from '@ember/test-helpers'; +import { click, fillIn, render, settled } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; import sinon from 'sinon'; -import Pretender from 'pretender'; -import { create } from 'ember-cli-page-object'; -import authForm from '../../pages/components/auth-form'; -import { validate } from 'uuid'; - -const component = create(authForm); - -const workingAuthService = Service.extend({ - authenticate() { - return resolve({}); - }, - handleError() {}, - setLastFetch() {}, -}); - -const routerService = Service.extend({ - transitionTo() { - return { - followRedirects() { - return resolve(); - }, - }; - }, -}); +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { Response } from 'miragejs'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { AUTH_FORM } from 'vault/tests/helpers/auth/auth-form-selectors'; module('Integration | Component | auth form', function (hooks) { setupRenderingTest(hooks); + setupMirage(hooks); hooks.beforeEach(function () { - this.owner.register('service:router', routerService); this.router = this.owner.lookup('service:router'); + this.selectedAuth = 'token'; + this.performAuth = sinon.spy(); + this.renderComponent = async () => { + return render(hbs` + `); + }; }); - const CSP_ERR_TEXT = `Error This is a standby Vault node but can't communicate with the active node via request forwarding. 
Sign in at the active node to use the Vault UI.`; - test('it renders error on CSP violation', async function (assert) { - assert.expect(2); - this.set('cluster', EmberObject.create({ standby: true })); - this.set('selectedAuth', 'token'); - await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); - assert.false(component.errorMessagePresent, false); - this.owner.lookup('service:csp-event').events.addObject({ violatedDirective: 'connect-src' }); - await settled(); - assert.strictEqual(component.errorText, CSP_ERR_TEXT); + test('it calls performAuth on submit', async function (assert) { + await this.renderComponent(); + await fillIn(AUTH_FORM.input('token'), '123token'); + await click(AUTH_FORM.login); + const [type, data] = this.performAuth.lastCall.args; + assert.strictEqual(type, 'token', 'performAuth is called with type'); + assert.propEqual(data, { token: '123token' }, 'performAuth is called with data'); }); - test('it renders with vault style errors', async function (assert) { - assert.expect(1); - const server = new Pretender(function () { - this.get('/v1/auth/**', () => { - return [ - 400, - { 'Content-Type': 'application/json' }, - JSON.stringify({ - errors: ['Not allowed'], - }), - ]; - }); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); - }); - - this.set('cluster', EmberObject.create({})); - this.set('selectedAuth', 'token'); - await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); - return component.login().then(() => { - assert.strictEqual(component.errorText, 'Error Authentication failed: Not allowed'); - server.shutdown(); - }); + test('it disables sign in button when authIsRunning', async function (assert) { + this.authIsRunning = true; + await this.renderComponent(); + assert.dom(AUTH_FORM.login).isDisabled('sign in button is disabled'); + assert.dom(`${AUTH_FORM.login} [data-test-icon="loading"]`).exists('sign in button renders loading icon'); }); - test('it renders AdapterError style errors', async function (assert) { - assert.expect(1); - const server = new Pretender(function () { - this.get('/v1/auth/**', () => { - return [400, { 'Content-Type': 'application/json' }]; - }); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); - }); - - this.set('cluster', EmberObject.create({})); - this.set('selectedAuth', 'token'); - await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); - // returns null because test does not return details of failed network request. On the app it will return the details of the error instead of null. - return component.login().then(() => { - assert.strictEqual(component.errorText, 'Error Authentication failed: null'); - server.shutdown(); - }); + test('it renders alert info message when delayIsIdle', async function (assert) { + this.delayIsIdle = true; + this.authIsRunning = true; + await this.renderComponent(); + assert + .dom(GENERAL.inlineAlert) + .hasText( + 'If login takes longer than usual, you may need to check your device for an MFA notification, or contact your administrator if login times out.' 
+ ); }); test('it renders no tabs when no methods are passed', async function (assert) { - const methods = { - 'approle/': { - type: 'approle', - }, - }; - const server = new Pretender(function () { - this.get('/v1/sys/internal/ui/mounts', () => { - return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; - }); + this.server.get('/sys/internal/ui/mounts', () => { + return { + data: { + auth: { + 'approle/': { + type: 'approle', + }, + }, + }, + }; }); - await render(hbs``); + await this.renderComponent(); - assert.strictEqual(component.tabs.length, 0, 'renders a tab for every backend'); - server.shutdown(); + assert.dom(AUTH_FORM.tabs()).doesNotExist(); }); test('it renders all the supported methods and Other tab when methods are present', async function (assert) { - const methods = { - 'foo/': { - type: 'userpass', - }, - 'approle/': { - type: 'approle', - }, - }; - const server = new Pretender(function () { - this.get('/v1/sys/internal/ui/mounts', () => { - return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; - }); + this.server.get('/sys/internal/ui/mounts', () => { + return { + data: { + auth: { + 'foo/': { + type: 'userpass', + }, + 'approle/': { + type: 'approle', + }, + }, + }, + }; }); - this.set('cluster', EmberObject.create({})); - await render(hbs`{{auth-form cluster=this.cluster }}`); + await this.renderComponent(); - assert.strictEqual(component.tabs.length, 2, 'renders a tab for userpass and Other'); - assert.strictEqual(component.tabs.objectAt(0).name, 'foo', 'uses the path in the label'); - assert.strictEqual(component.tabs.objectAt(1).name, 'Other', 'second tab is the Other tab'); - server.shutdown(); + assert.dom(AUTH_FORM.tabs()).exists({ count: 2 }); + assert.dom(AUTH_FORM.tabs('foo')).exists('tab uses the path in the label'); + assert.dom(AUTH_FORM.tabs('other')).exists('second tab is the Other tab'); }); test('it renders the description', async function (assert) { - const methods = { - 'approle/': { - type: 'userpass', - description: 'app description', - }, - }; - const server = new Pretender(function () { - this.get('/v1/sys/internal/ui/mounts', () => { - return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; - }); - }); - this.set('cluster', EmberObject.create({})); - await render(hbs`{{auth-form cluster=this.cluster }}`); - - assert.strictEqual( - component.descriptionText, - 'app description', - 'renders a description for auth methods' - ); - server.shutdown(); - }); - - test('it calls authenticate with the correct path', async function (assert) { - this.owner.unregister('service:auth'); - this.owner.register('service:auth', workingAuthService); - this.auth = this.owner.lookup('service:auth'); - const authSpy = sinon.spy(this.auth, 'authenticate'); - const methods = { - 'foo/': { - type: 'userpass', - }, - }; - const server = new Pretender(function () { - this.get('/v1/sys/internal/ui/mounts', () => { - return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; - }); + this.selectedAuth = null; + this.server.get('/sys/internal/ui/mounts', () => { + return { + data: { + auth: { + 'approle/': { + type: 'userpass', + description: 'app description', + }, + }, + }, + }; }); - - this.set('cluster', EmberObject.create({})); - this.set('selectedAuth', 'foo/'); - await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); - await component.login(); - - await settled(); - 
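The recurring migration in this file: Pretender handlers that returned `[status, headers, body]` tuples become Mirage route handlers that return a plain object on success or a `miragejs` `Response` for error statuses. A side-by-side sketch, written as it would appear inside a test body where `this.server` is the Mirage server:

```js
import { Response } from 'miragejs';

// Before (Pretender): hand-built tuple with a JSON-encoded body.
// this.get('/v1/sys/internal/ui/mounts', () => [
//   200,
//   { 'Content-Type': 'application/json' },
//   JSON.stringify({ data: { auth: {} } }),
// ]);

// After (Mirage): return the response object directly...
this.server.get('/sys/internal/ui/mounts', () => ({ data: { auth: {} } }));

// ...and reach for Response only when a non-2xx status is needed.
this.server.get('/auth/token/lookup-self', () => {
  return new Response(400, { 'Content-Type': 'application/json' }, { errors: ['Not allowed'] });
});
```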
assert.ok(authSpy.calledOnce, 'a call to authenticate was made'); - const { data } = authSpy.getCall(0).args[0]; - assert.strictEqual(data.path, 'foo', 'uses the id for the path'); - authSpy.restore(); - server.shutdown(); + await this.renderComponent(); + assert.dom(AUTH_FORM.description).hasText('app description'); }); test('it renders no tabs when no supported methods are present in passed methods', async function (assert) { @@ -199,106 +126,50 @@ module('Integration | Component | auth form', function (hooks) { type: 'approle', }, }; - const server = new Pretender(function () { - this.get('/v1/sys/internal/ui/mounts', () => { - return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; - }); + this.server.get('/sys/internal/ui/mounts', () => { + return { data: { auth: methods } }; }); - this.set('cluster', EmberObject.create({})); - await render(hbs``); + await this.renderComponent(); - server.shutdown(); - assert.strictEqual(component.tabs.length, 0, 'renders a tab for every backend'); - }); - - test('it makes a request to unwrap if passed a wrappedToken and logs in', async function (assert) { - this.owner.register('service:auth', workingAuthService); - this.auth = this.owner.lookup('service:auth'); - const authSpy = sinon.spy(this.auth, 'authenticate'); - const server = new Pretender(function () { - this.post('/v1/sys/wrapping/unwrap', () => { - return [ - 200, - { 'content-type': 'application/json' }, - JSON.stringify({ - auth: { - client_token: '12345', - }, - }), - ]; - }); - }); - - const wrappedToken = '54321'; - this.set('wrappedToken', wrappedToken); - this.set('cluster', EmberObject.create({})); - await render(hbs``); - later(() => cancelTimers(), 50); - await settled(); - assert.strictEqual( - server.handledRequests[0].url, - '/v1/sys/wrapping/unwrap', - 'makes call to unwrap the token' - ); - assert.strictEqual( - server.handledRequests[0].requestHeaders['X-Vault-Token'], - wrappedToken, - 'uses passed wrapped token for the unwrap' - ); - assert.ok(authSpy.calledOnce, 'a call to authenticate was made'); - server.shutdown(); - authSpy.restore(); + assert.dom(AUTH_FORM.tabs()).doesNotExist(); }); test('it shows an error if unwrap errors', async function (assert) { - const server = new Pretender(function () { - this.post('/v1/sys/wrapping/unwrap', () => { - return [ - 400, - { 'Content-Type': 'application/json' }, - JSON.stringify({ - errors: ['There was an error unwrapping!'], - }), - ]; - }); + assert.expect(1); + this.wrappedToken = '54321'; + this.server.post('/sys/wrapping/unwrap', () => { + return new Response( + 400, + { 'Content-Type': 'application/json' }, + { errors: ['There was an error unwrapping!'] } + ); }); - this.set('wrappedToken', '54321'); - await render(hbs`{{auth-form cluster=this.cluster wrappedToken=this.wrappedToken}}`); + await this.renderComponent(); later(() => cancelTimers(), 50); - await settled(); - assert.strictEqual( - component.errorText, - 'Error Token unwrap failed: There was an error unwrapping!', - 'shows the error' - ); - server.shutdown(); + assert.dom(GENERAL.messageError).hasText('Error Token unwrap failed: There was an error unwrapping!'); }); test('it should retain oidc role when mount path is changed', async function (assert) { - assert.expect(1); + assert.expect(2); const auth_url = 'http://dev-foo-bar.com'; - const server = new Pretender(function () { - this.post('/v1/auth/:path/oidc/auth_url', (req) => { - const { role, redirect_uri } = JSON.parse(req.requestBody); - const goodRequest = - 
req.params.path === 'foo-oidc' && - role === 'foo' && - redirect_uri.includes('/auth/foo-oidc/oidc/callback'); - - return [ - goodRequest ? 200 : 400, - { 'Content-Type': 'application/json' }, - JSON.stringify( - goodRequest ? { data: { auth_url } } : { errors: [`role "${role}" could not be found`] } - ), - ]; - }); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); + this.server.post('/auth/:path/oidc/auth_url', (_, req) => { + const { role, redirect_uri } = JSON.parse(req.requestBody); + const goodRequest = + req.params.path === 'foo-oidc' && + role === 'foo' && + redirect_uri.includes('/auth/foo-oidc/oidc/callback'); + + return new Response( + goodRequest ? 200 : 400, + { 'Content-Type': 'application/json' }, + JSON.stringify( + goodRequest ? { data: { auth_url } } : { errors: [`role "${role}" could not be found`] } + ) + ); }); - window.open = (url) => { assert.strictEqual(url, auth_url, 'auth_url is returned when required params are passed'); }; @@ -309,46 +180,14 @@ module('Integration | Component | auth form', function (hooks) { }, }); - this.set('cluster', EmberObject.create({})); - await render(hbs``); - - await component.selectMethod('oidc'); - await component.oidcRole('foo'); - await component.oidcMoreOptions(); - await component.oidcMountPath('foo-oidc'); - await component.login(); - - server.shutdown(); - }); - - test('it should set nonce value as uuid for okta method type', async function (assert) { - assert.expect(1); - - const server = new Pretender(function () { - this.post('/v1/auth/okta/login/foo', (req) => { - const { nonce } = JSON.parse(req.requestBody); - assert.true(validate(nonce), 'Nonce value passed as uuid for okta login'); - return [ - 200, - { 'content-type': 'application/json' }, - JSON.stringify({ - auth: { - client_token: '12345', - }, - }), - ]; - }); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); - }); - - this.set('cluster', EmberObject.create({})); - await render(hbs``); - - await component.selectMethod('okta'); - await component.username('foo'); - await component.password('bar'); - await component.login(); + await this.renderComponent(); - server.shutdown(); + await fillIn(GENERAL.selectByAttr('auth-method'), 'oidc'); + await fillIn(AUTH_FORM.input('role'), 'foo'); + await click(AUTH_FORM.moreOptions); + await fillIn(AUTH_FORM.input('role'), 'foo'); + await fillIn(AUTH_FORM.mountPathInput, 'foo-oidc'); + assert.dom(AUTH_FORM.input('role')).hasValue('foo', 'role is retained when mount path is changed'); + await click(AUTH_FORM.login); }); }); diff --git a/ui/tests/integration/components/auth-jwt-test.js b/ui/tests/integration/components/auth-jwt-test.js index 6c114d907712..028fa2b926f6 100644 --- a/ui/tests/integration/components/auth-jwt-test.js +++ b/ui/tests/integration/components/auth-jwt-test.js @@ -1,20 +1,22 @@ /** * Copyright (c) HashiCorp, Inc. 
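The OIDC tests above intercept the popup by swapping out `window.open` before triggering login, so the assertion runs against the exact `auth_url` the component passes. A minimal sketch of that interception (the URL and surrounding test steps are illustrative):

```js
// Swap window.open for a recorder, then restore it when done.
const realOpen = window.open;
try {
  window.open = (url) => {
    console.assert(url === 'http://example.com', 'component opened the returned auth_url');
    return { close() {} }; // minimal stand-in for the popup window handle
  };
  // ...render the auth form, fill in an OIDC role, and click sign in here...
} finally {
  window.open = realOpen;
}
```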
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { _cancelTimers as cancelTimers } from '@ember/runloop'; import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, settled, waitUntil } from '@ember/test-helpers'; +import { fillIn, render, settled, waitUntil } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; import sinon from 'sinon'; -import Pretender from 'pretender'; import { resolve } from 'rsvp'; import { create } from 'ember-cli-page-object'; import form from '../../pages/components/auth-jwt'; import { ERROR_WINDOW_CLOSED, ERROR_MISSING_PARAMS, ERROR_JWT_LOGIN } from 'vault/components/auth-jwt'; -import { fakeWindow, buildMessage } from '../../helpers/oidc-window-stub'; +import { fakeWindow, buildMessage } from 'vault/tests/helpers/oidc-window-stub'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { overrideResponse } from 'vault/tests/helpers/stubs'; +import { AUTH_FORM } from 'vault/tests/helpers/auth/auth-form-selectors'; const component = create(form); const windows = []; @@ -32,12 +34,6 @@ fakeWindow.reopen({ }, }); -const OIDC_AUTH_RESPONSE = { - auth: { - client_token: 'token', - }, -}; - const renderIt = async (context, path = 'jwt') => { const handler = (data, e) => { if (e && e.preventDefault) e.preventDefault(); @@ -64,6 +60,7 @@ const renderIt = async (context, path = 'jwt') => { }; module('Integration | Component | auth jwt', function (hooks) { setupRenderingTest(hooks); + setupMirage(hooks); hooks.beforeEach(function () { this.openSpy = sinon.spy(fakeWindow.proto(), 'open'); @@ -72,25 +69,19 @@ module('Integration | Component | auth jwt', function (hooks) { return 'http://example.com'; }, }); - this.server = new Pretender(function () { - this.get('/v1/auth/:path/oidc/callback', function () { - return [200, { 'Content-Type': 'application/json' }, JSON.stringify(OIDC_AUTH_RESPONSE)]; - }); - this.post('/v1/auth/:path/oidc/auth_url', (request) => { - const { role } = JSON.parse(request.requestBody); - if (['test', 'okta', 'bar'].includes(role)) { - const auth_url = role === 'test' ? 'http://example.com' : role === 'okta' ? 'http://okta.com' : ''; - return [ - 200, - { 'Content-Type': 'application/json' }, - JSON.stringify({ - data: { auth_url }, - }), - ]; - } - const errors = role === 'foo' ? ['role "foo" could not be found'] : [ERROR_JWT_LOGIN]; - return [400, { 'Content-Type': 'application/json' }, JSON.stringify({ errors })]; - }); + this.server.get('/auth/:path/oidc/callback', function () { + return { auth: { client_token: 'token' } }; + }); + this.server.post('/auth/:path/oidc/auth_url', (_, request) => { + const { role } = JSON.parse(request.requestBody); + if (['okta', 'test', 'bar'].includes(role)) { + const auth_url = role === 'test' ? 'http://example.com' : role === 'okta' ? 'http://okta.com' : ''; + return { + data: { auth_url }, + }; + } + const errors = role === 'foo' ? 
['role "foo" could not be found'] : [ERROR_JWT_LOGIN]; + return overrideResponse(400, { errors }); }); }); @@ -105,20 +96,23 @@ module('Integration | Component | auth jwt', function (hooks) { }); test('jwt: it renders and makes auth_url requests', async function (assert) { + let postCount = 0; + this.server.post('/auth/:path/oidc/auth_url', (_, request) => { + postCount++; + const { path } = request.params; + const expectedUrl = `/v1/auth/${path}/oidc/auth_url`; + assert.strictEqual(request.url, expectedUrl); + return overrideResponse(400, { errors: [ERROR_JWT_LOGIN] }); + }); await renderIt(this); await settled(); + assert.strictEqual(postCount, 1, 'request to the default path is made'); assert.ok(component.jwtPresent, 'renders jwt field'); assert.ok(component.rolePresent, 'renders jwt field'); - assert.strictEqual(this.server.handledRequests.length, 1, 'request to the default path is made'); - assert.strictEqual(this.server.handledRequests[0].url, '/v1/auth/jwt/oidc/auth_url'); + this.set('selectedAuthPath', 'foo'); await settled(); - assert.strictEqual(this.server.handledRequests.length, 2, 'a second request was made'); - assert.strictEqual( - this.server.handledRequests[1].url, - '/v1/auth/foo/oidc/auth_url', - 'requests when path is set' - ); + assert.strictEqual(postCount, 2, 'a second request was made'); }); test('jwt: it calls passed action on login', async function (assert) { @@ -128,22 +122,46 @@ module('Integration | Component | auth jwt', function (hooks) { }); test('oidc: test role: it renders', async function (assert) { + // setting the path also fires off a request to auth_url but this happens inconsistently in tests + // setting here so it doesn't affect the postCount because it's not relevant to what's being tested + this.set('selectedAuthPath', 'foo'); + let postCount = 0; + this.server.post('/auth/:path/oidc/auth_url', (_, request) => { + postCount++; + const { role } = JSON.parse(request.requestBody); + const auth_url = role === 'test' ? 'http://example.com' : role === 'okta' ? 
'http://okta.com' : ''; + return { + data: { auth_url }, + }; + }); await renderIt(this); await settled(); + await fillIn(AUTH_FORM.roleInput, 'test'); + assert + .dom(AUTH_FORM.input('jwt')) + .doesNotExist('does not show jwt token input if role matches OIDC login url'); + assert.dom(AUTH_FORM.login).hasText('Sign in with OIDC Provider'); + await fillIn(AUTH_FORM.roleInput, 'okta'); + // 1 for initial render, 1 for each time role changed = 3 + assert.strictEqual(postCount, 3, 'fetches the auth_url when the role changes'); + assert.dom(AUTH_FORM.login).hasText('Sign in with Okta', 'recognizes auth methods with certain urls'); + }); + + test('oidc: it fetches auth_url when path changes', async function (assert) { + assert.expect(2); + this.set('selectedAuthPath', 'foo'); + await renderIt(this); + // auth_url is requested on initial render so stubbing after rendering the component + // to test auth_url is called when the :path changes + this.server.post('/auth/:path/oidc/auth_url', (_, request) => { + assert.true(true, 'request is made to auth_url'); + assert.strictEqual(request?.params?.path, 'foo', 'request params are { path: foo }'); + return { + data: { auth_url: '' }, + }; + }); this.set('selectedAuthPath', 'foo'); - await component.role('test'); await settled(); - assert.notOk(component.jwtPresent, 'does not show jwt input for OIDC type login'); - assert.strictEqual(component.loginButtonText, 'Sign in with OIDC Provider'); - - await component.role('okta'); - // 1 for initial render, 1 for each time role changed = 3 - assert.strictEqual(this.server.handledRequests.length, 4, 'fetches the auth_url when the path changes'); - assert.strictEqual( - component.loginButtonText, - 'Sign in with Okta', - 'recognizes auth methods with certain urls' - ); }); test('oidc: it calls window.open popup window on login', async function (assert) { @@ -154,7 +172,10 @@ module('Integration | Component | auth jwt', function (hooks) { await waitUntil(() => { return this.openSpy.calledOnce; }); + cancelTimers(); + await settled(); + const call = this.openSpy.getCall(0); assert.deepEqual( call.args, @@ -189,6 +210,8 @@ module('Integration | Component | auth jwt', function (hooks) { buildMessage({ data: { source: 'oidc-callback', state: 'state', foo: 'bar' } }) ); cancelTimers(); + await settled(); + assert.strictEqual(this.error, ERROR_MISSING_PARAMS, 'calls onError with params missing error'); }); @@ -214,9 +237,11 @@ module('Integration | Component | auth jwt', function (hooks) { return this.openSpy.calledOnce; }); this.window.trigger('message', buildMessage({ origin: 'http://hackerz.com' })); + cancelTimers(); await settled(); - assert.notOk(this.handler.called, 'should not call the submit handler'); + + assert.false(this.handler.called, 'should not call the submit handler'); }); test('oidc: fails silently when event is not trusted', async function (assert) { @@ -230,7 +255,8 @@ module('Integration | Component | auth jwt', function (hooks) { this.window.trigger('message', buildMessage({ isTrusted: false })); cancelTimers(); await settled(); - assert.notOk(this.handler.called, 'should not call the submit handler'); + + assert.false(this.handler.called, 'should not call the submit handler'); }); test('oidc: it should trigger error callback when role is not found', async function (assert) { diff --git a/ui/tests/integration/components/auth/page-test.js b/ui/tests/integration/components/auth/page-test.js new file mode 100644 index 000000000000..45628cd3915a --- /dev/null +++ 
b/ui/tests/integration/components/auth/page-test.js @@ -0,0 +1,205 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { later, _cancelTimers as cancelTimers } from '@ember/runloop'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { click, fillIn, render, settled } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import { validate } from 'uuid'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { Response } from 'miragejs'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { AUTH_FORM } from 'vault/tests/helpers/auth/auth-form-selectors'; + +module('Integration | Component | auth | page ', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.router = this.owner.lookup('service:router'); + this.auth = this.owner.lookup('service:auth'); + this.cluster = { id: '1' }; + this.selectedAuth = 'token'; + this.onSuccess = sinon.spy(); + + this.renderComponent = async () => { + return render(hbs` + + `); + }; + }); + const CSP_ERR_TEXT = `Error This is a standby Vault node but can't communicate with the active node via request forwarding. Sign in at the active node to use the Vault UI.`; + test('it renders error on CSP violation', async function (assert) { + assert.expect(2); + this.cluster.standby = true; + await this.renderComponent(); + assert.dom(GENERAL.messageError).doesNotExist(); + this.owner.lookup('service:csp-event').handleEvent({ violatedDirective: 'connect-src' }); + await settled(); + assert.dom(GENERAL.messageError).hasText(CSP_ERR_TEXT); + }); + + test('it renders with vault style errors', async function (assert) { + assert.expect(1); + this.server.get('/auth/token/lookup-self', () => { + return new Response(400, { 'Content-Type': 'application/json' }, { errors: ['Not allowed'] }); + }); + + await this.renderComponent(); + await click(AUTH_FORM.login); + assert.dom(GENERAL.messageError).hasText('Error Authentication failed: Not allowed'); + }); + + test('it renders AdapterError style errors', async function (assert) { + assert.expect(1); + this.server.get('/auth/token/lookup-self', () => { + return new Response(400, { 'Content-Type': 'application/json' }, { errors: ['API Error here'] }); + }); + + await this.renderComponent(); + await click(AUTH_FORM.login); + assert + .dom(GENERAL.messageError) + .hasText('Error Authentication failed: API Error here', 'shows the error from the API'); + }); + + test('it calls auth service authenticate method with expected args', async function (assert) { + assert.expect(1); + const authenticateStub = sinon.stub(this.auth, 'authenticate'); + this.selectedAuth = 'foo/'; // set to a non-default path + this.server.get('/sys/internal/ui/mounts', () => { + return { + data: { + auth: { + 'foo/': { + type: 'userpass', + }, + }, + }, + }; + }); + + await this.renderComponent(); + await fillIn(AUTH_FORM.input('username'), 'sandy'); + await fillIn(AUTH_FORM.input('password'), '1234'); + await click(AUTH_FORM.login); + const [actual] = authenticateStub.lastCall.args; + const expectedArgs = { + backend: 'userpass', + clusterId: '1', + data: { + username: 'sandy', + password: '1234', + path: 'foo', + }, + selectedAuth: 'foo/', + }; + assert.propEqual( + actual, + expectedArgs, + `it calls auth service authenticate method with expected args: ${JSON.stringify(actual)} ` + ); + }); + + test('it calls onSuccess with 
expected args', async function (assert) { + assert.expect(3); + this.server.get(`auth/token/lookup-self`, () => { + return { + data: { + policies: ['default'], + }, + }; + }); + + await this.renderComponent(); + await fillIn(AUTH_FORM.input('token'), 'mytoken'); + await click(AUTH_FORM.login); + const [authResponse, backendType, data] = this.onSuccess.lastCall.args; + const expected = { isRoot: false, namespace: '', token: 'vault-token☃1' }; + + assert.propEqual( + authResponse, + expected, + `it calls onSuccess with response: ${JSON.stringify(authResponse)} ` + ); + assert.strictEqual(backendType, 'token', `it calls onSuccess with backend type: ${backendType}`); + assert.propEqual(data, { token: 'mytoken' }, `it calls onSuccess with data: ${JSON.stringify(data)}`); + }); + + test('it makes a request to unwrap if passed a wrappedToken and logs in', async function (assert) { + assert.expect(3); + const authenticateStub = sinon.stub(this.auth, 'authenticate'); + this.wrappedToken = '54321'; + + this.server.post('/sys/wrapping/unwrap', (_, req) => { + assert.strictEqual(req.url, '/v1/sys/wrapping/unwrap', 'makes call to unwrap the token'); + assert.strictEqual( + req.requestHeaders['X-Vault-Token'], + this.wrappedToken, + 'uses passed wrapped token for the unwrap' + ); + return { + auth: { + client_token: '12345', + }, + }; + }); + + await this.renderComponent(); + later(() => cancelTimers(), 50); + await settled(); + const [actual] = authenticateStub.lastCall.args; + assert.propEqual( + actual, + { + backend: 'token', + clusterId: '1', + data: { + token: '12345', + }, + selectedAuth: 'token', + }, + `it calls auth service authenticate method with correct args: ${JSON.stringify(actual)} ` + ); + }); + + test('it should set nonce value as uuid for okta method type', async function (assert) { + assert.expect(4); + this.server.post('/auth/okta/login/foo', (_, req) => { + const { nonce } = JSON.parse(req.requestBody); + assert.true(validate(nonce), 'Nonce value passed as uuid for okta login'); + return { + auth: { + client_token: '12345', + policies: ['default'], + }, + }; + }); + + await this.renderComponent(); + + await fillIn(GENERAL.selectByAttr('auth-method'), 'okta'); + await fillIn(AUTH_FORM.input('username'), 'foo'); + await fillIn(AUTH_FORM.input('password'), 'bar'); + await click(AUTH_FORM.login); + assert + .dom('[data-test-okta-number-challenge]') + .hasText( + 'To finish signing in, you will need to complete an additional MFA step. Please wait... Back to login', + 'renders okta number challenge on submit' + ); + await click('[data-test-back-button]'); + assert.dom(AUTH_FORM.form).exists('renders auth form on return to login'); + assert.dom(GENERAL.selectByAttr('auth-method')).hasValue('okta', 'preserves method type on back'); + }); +}); diff --git a/ui/tests/integration/components/autocomplete-input-test.js b/ui/tests/integration/components/autocomplete-input-test.js index 051dd1732706..ab24c5467dd7 100644 --- a/ui/tests/integration/components/autocomplete-input-test.js +++ b/ui/tests/integration/components/autocomplete-input-test.js @@ -1,17 +1,24 @@ /** * Copyright (c) HashiCorp, Inc. 
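The okta test above checks that the login payload carries an RFC 4122 nonce using the `uuid` package's `validate` helper. A standalone sketch of that check:

```js
import { v4 as uuidv4, validate } from 'uuid';

const nonce = uuidv4();          // what the component generates per login attempt
console.log(validate(nonce));    // => true
console.log(validate('abc123')); // => false, not a RFC 4122 UUID
```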
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { click, fillIn, triggerEvent, typeIn, render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; module('Integration | Component | autocomplete-input', function (hooks) { setupRenderingTest(hooks); test('it should render label', async function (assert) { + // TODO: make the input accessible when no label provided + setRunOptions({ + rules: { + label: { enabled: false }, + }, + }); await render( hbs` `); - - assert.dom(this.element).hasText('An Option', 'shows the display name of the option'); - assert.dom('.tooltip').doesNotExist('tooltip does not exist when disabled is false'); - await click('[data-test-mount-type="aws"]'); - assert.ok(spy.calledOnce, 'calls the radio change function when option clicked'); - }); - - test('it renders correctly when disabled', async function (assert) { - const spy = sinon.spy(); - this.set('onRadioChange', spy); - await render(hbs``); - - assert.dom(this.element).hasText('An Option', 'shows the display name of the option'); - assert.dom('.ember-basic-dropdown-trigger').exists('tooltip exists'); - await click('[data-test-mount-type="aws"]'); - assert.ok(spy.notCalled, 'does not call the radio change function when option is clicked'); - }); -}); diff --git a/ui/tests/integration/components/calendar-widget-test.js b/ui/tests/integration/components/calendar-widget-test.js deleted file mode 100644 index 8869feeb4341..000000000000 --- a/ui/tests/integration/components/calendar-widget-test.js +++ /dev/null @@ -1,326 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 - */ - -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render, click, findAll, find } from '@ember/test-helpers'; -import sinon from 'sinon'; -import hbs from 'htmlbars-inline-precompile'; -import calendarDropdown from 'vault/tests/pages/components/calendar-widget'; -import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; -import { subMonths, subYears } from 'date-fns'; -import format from 'date-fns/format'; - -module('Integration | Component | calendar-widget', function (hooks) { - setupRenderingTest(hooks); - - const isDisplayingSameYear = (comparisonDate, calendarYear) => { - return comparisonDate.getFullYear() === parseInt(calendarYear); - }; - - hooks.beforeEach(function () { - const CURRENT_DATE = new Date(); - this.set('currentDate', CURRENT_DATE); - this.set('calendarStartDate', subMonths(CURRENT_DATE, 12)); - this.set('calendarEndDate', CURRENT_DATE); - this.set('startTimestamp', subMonths(CURRENT_DATE, 12).toISOString()); - this.set('endTimestamp', CURRENT_DATE.toISOString()); - this.set('handleClientActivityQuery', sinon.spy()); - }); - - test('it renders and disables correct months when start date is 12 months ago', async function (assert) { - assert.expect(14); - await render(hbs` - - `); - - assert.dom(calendarDropdown.dateRangeTrigger).hasText( - `${format(this.calendarStartDate, 'MMM yyyy')} - - ${format(this.calendarEndDate, 'MMM yyyy')}`, - 'renders and formats start and end dates' - ); - await calendarDropdown.openCalendar(); - assert.ok(calendarDropdown.showsCalendar, 'renders the calendar component'); - - // assert months in current year are disabled/enabled correctly - const monthButtons = findAll('[data-test-calendar-month]'); - const 
enabledMonths = [], - disabledMonths = []; - for (let monthIdx = 0; monthIdx < 12; monthIdx++) { - if (monthIdx > this.calendarEndDate.getMonth()) { - disabledMonths.push(monthButtons[monthIdx]); - } else { - enabledMonths.push(monthButtons[monthIdx]); - } - } - enabledMonths.forEach((btn) => { - assert - .dom(btn) - .doesNotHaveClass( - 'is-readOnly', - `${ARRAY_OF_MONTHS[btn.id] + this.calendarEndDate.getFullYear()} is enabled` - ); - }); - disabledMonths.forEach((btn) => { - assert - .dom(btn) - .hasClass( - 'is-readOnly', - `${ARRAY_OF_MONTHS[btn.id] + this.calendarEndDate.getFullYear()} is read only` - ); - }); - }); - - test('it renders and disables months before start timestamp', async function (assert) { - await render(hbs` - - `); - - await calendarDropdown.openCalendar(); - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - await calendarDropdown.clickPreviousYear(); - assert - .dom('[data-test-display-year]') - .hasText(`${subYears(this.currentDate, 1).getFullYear()}`, 'shows the previous year'); - assert.dom('[data-test-previous-year]').isDisabled('disables previous year'); - - // assert months in previous year are disabled/enabled correctly - const monthButtons = findAll('[data-test-calendar-month]'); - const enabledMonths = [], - disabledMonths = []; - for (let monthIdx = 0; monthIdx < 12; monthIdx++) { - if (monthIdx < this.calendarStartDate.getMonth()) { - disabledMonths.push(monthButtons[monthIdx]); - } else { - enabledMonths.push(monthButtons[monthIdx]); - } - } - disabledMonths.forEach((btn) => { - assert - .dom(btn) - .hasClass( - 'is-readOnly', - `${ARRAY_OF_MONTHS[btn.id] + this.calendarEndDate.getFullYear()} is read only` - ); - }); - enabledMonths.forEach((btn) => { - assert - .dom(btn) - .doesNotHaveClass( - 'is-readOnly', - `${ARRAY_OF_MONTHS[btn.id] + this.calendarEndDate.getFullYear()} is enabled` - ); - }); - }); - - test('it calls parent callback with correct arg when clicking "Current billing period"', async function (assert) { - await render(hbs` - - `); - await calendarDropdown.menuToggle(); - await calendarDropdown.clickCurrentBillingPeriod(); - assert.propEqual( - this.handleClientActivityQuery.args[0][0], - { dateType: 'reset' }, - 'it calls parent function with reset dateType' - ); - }); - - test('it calls parent callback with correct arg when clicking "Current month"', async function (assert) { - await render(hbs` - - `); - await calendarDropdown.menuToggle(); - await calendarDropdown.clickCurrentMonth(); - assert.propEqual( - this.handleClientActivityQuery.args[0][0], - { dateType: 'currentMonth' }, - 'it calls parent function with currentMoth dateType' - ); - }); - - test('it calls parent callback with correct arg when selecting a month', async function (assert) { - await render(hbs` - - `); - await calendarDropdown.openCalendar(); - await click(`[data-test-calendar-month="${ARRAY_OF_MONTHS[this.calendarEndDate.getMonth()]}"]`); - assert.propEqual( - this.handleClientActivityQuery.lastCall.lastArg, - { - dateType: 'endDate', - monthIdx: this.currentDate.getMonth(), - monthName: ARRAY_OF_MONTHS[this.currentDate.getMonth()], - year: this.currentDate.getFullYear(), - }, - 'it calls parent function with end date (current) month/year' - ); - - await calendarDropdown.openCalendar(); - await calendarDropdown.clickPreviousYear(); - await click(`[data-test-calendar-month="${ARRAY_OF_MONTHS[this.calendarStartDate.getMonth()]}"]`); - assert.propEqual( - this.handleClientActivityQuery.lastCall.lastArg, - { - dateType: 'endDate', - 
monthIdx: this.currentDate.getMonth(), - monthName: ARRAY_OF_MONTHS[this.currentDate.getMonth()], - year: this.currentDate.getFullYear() - 1, - }, - 'it calls parent function with start date month/year' - ); - }); - - test('it disables correct months when start date 6 months ago', async function (assert) { - this.set('calendarStartDate', subMonths(this.currentDate, 6)); - this.set('startTimestamp', subMonths(this.currentDate, 6).toISOString()); - await render(hbs` - - `); - - await calendarDropdown.openCalendar(); - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - - const displayYear = find('[data-test-display-year]').innerText; - const isRangeSameYear = isDisplayingSameYear(this.calendarStartDate, displayYear); - - // only click previous year if 6 months ago was last year - if (!isRangeSameYear) { - await calendarDropdown.clickPreviousYear(); - } - assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); - - // DOM calendar is viewing start date year - findAll('[data-test-calendar-month]').forEach((m) => { - // months before start month should always be disabled - if (m.id < this.calendarStartDate.getMonth()) { - assert.dom(m).hasClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is read only`); - } - // if start/end dates are in the same year, DOM is also showing end date - if (isRangeSameYear) { - // months after end date should be disabled - if (m.id > this.calendarEndDate.getMonth()) { - assert.dom(m).hasClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is read only`); - } - // months between including start/end month should be enabled - if (m.id >= this.calendarStartDate.getMonth() && m.id <= this.calendarEndDate.getMonth()) { - assert.dom(m).doesNotHaveClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is enabled`); - } - } - }); - - // click back to current year if duration spans multiple years - if (!isRangeSameYear) { - await click('[data-test-next-year]'); - findAll('[data-test-calendar-month]').forEach((m) => { - // DOM is no longer showing start month, all months before current date should be enabled - if (m.id <= this.currentDate.getMonth()) { - assert.dom(m).doesNotHaveClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is enabled`); - } - // future months should be disabled - if (m.id > this.currentDate.getMonth()) { - assert.dom(m).hasClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is read only`); - } - }); - } - }); - - test('it disables correct months when start date 36 months ago', async function (assert) { - this.set('calendarStartDate', subMonths(this.currentDate, 36)); - this.set('startTimestamp', subMonths(this.currentDate, 36).toISOString()); - await render(hbs` - - `); - - await calendarDropdown.openCalendar(); - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - - let displayYear = find('[data-test-display-year]').innerText; - - while (!isDisplayingSameYear(this.calendarStartDate, displayYear)) { - await calendarDropdown.clickPreviousYear(); - displayYear = find('[data-test-display-year]').innerText; - } - - assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); - assert.dom('[data-test-next-year]').isEnabled('next year is enabled'); - - // DOM calendar is viewing start date year (3 years ago) - findAll('[data-test-calendar-month]').forEach((m) => { - // months before start month should always be disabled - if (m.id < this.calendarStartDate.getMonth()) { - assert.dom(m).hasClass('is-readOnly', 
`${ARRAY_OF_MONTHS[m.id] + displayYear} is read only`); - } - if (m.id >= this.calendarStartDate.getMonth()) { - assert.dom(m).doesNotHaveClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is enabled`); - } - }); - - await click('[data-test-next-year]'); - displayYear = await find('[data-test-display-year]').innerText; - - if (!isDisplayingSameYear(this.currentDate, displayYear)) { - await findAll('[data-test-calendar-month]').forEach((m) => { - // years between should have all months enabled - assert.dom(m).doesNotHaveClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is enabled`); - }); - } - - await click('[data-test-next-year]'); - displayYear = await find('[data-test-display-year]').innerText; - - if (!isDisplayingSameYear(this.currentDate, displayYear)) { - await findAll('[data-test-calendar-month]').forEach((m) => { - // years between should have all months enabled - assert.dom(m).doesNotHaveClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is enabled`); - }); - } - - await click('[data-test-next-year]'); - displayYear = await find('[data-test-display-year]').innerText; - // now DOM is showing current year - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - if (isDisplayingSameYear(this.currentDate, displayYear)) { - findAll('[data-test-calendar-month]').forEach((m) => { - // all months before current month should be enabled - if (m.id <= this.currentDate.getMonth()) { - assert.dom(m).doesNotHaveClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is enabled`); - } - // future months should be disabled - if (m.id > this.currentDate.getMonth()) { - assert.dom(m).hasClass('is-readOnly', `${ARRAY_OF_MONTHS[m.id] + displayYear} is read only`); - } - }); - } - }); -}); diff --git a/ui/tests/integration/components/certificate-card-test.js b/ui/tests/integration/components/certificate-card-test.js new file mode 100644 index 000000000000..e1942a1c84e8 --- /dev/null +++ b/ui/tests/integration/components/certificate-card-test.js @@ -0,0 +1,87 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { CERTIFICATES } from 'vault/tests/helpers/pki/pki-helpers'; + +const SELECTORS = { + label: '[data-test-certificate-label]', + value: '[data-test-certificate-value]', + icon: '[data-test-certificate-icon]', + copyButton: '[data-test-copy-button]', + copyIcon: '[data-test-icon="clipboard-copy"]', +}; +const { rootPem, rootDer } = CERTIFICATES; + +module('Integration | Component | certificate-card', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + await render(hbs``); + + assert.dom(SELECTORS.label).hasNoText('There is no label because there is no value'); + assert.dom(SELECTORS.value).hasNoText('There is no value because none was provided'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyIcon).exists('The copy icon renders'); + }); + + test('it renders with an example PEM Certificate', async function (assert) { + this.certificate = rootPem; + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(this.certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + assert + .dom(SELECTORS.copyButton) + .hasAttribute('data-test-copy-button', this.certificate, 'copy value is the same as data'); + }); + + test('it renders with an example DER Certificate', async function (assert) { + this.certificate = rootDer; + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('DER Format', 'The label text is DER Format'); + assert.dom(SELECTORS.value).hasText(this.certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + assert + .dom(SELECTORS.copyButton) + .hasAttribute('data-test-copy-button', this.certificate, 'copy value is the same as data'); + }); + + test('it renders with the PEM Format label regardless of the value provided when @isPem is true', async function (assert) { + this.certificate = 'example-certificate-text'; + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(this.certificate, 'The data rendered is correct'); + }); + + test('it renders with an example CA Chain', async function (assert) { + this.caChain = [ + '-----BEGIN CERTIFICATE-----\nMIIDIDCCA...\n-----END CERTIFICATE-----\n', + '-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBA...\n-----END RSA PRIVATE KEY-----\n', + ]; + + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(this.caChain.join(','), 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + assert + .dom(SELECTORS.copyButton) + .hasAttribute( + 'data-test-copy-button', + this.caChain.join('\n'), + 'copy value is array converted to a string' + ); + }); +}); diff --git a/ui/tests/integration/components/charts/vertical-bar-basic-test.js b/ui/tests/integration/components/charts/vertical-bar-basic-test.js 
new file mode 100644 index 000000000000..97b9352d391f --- /dev/null +++ b/ui/tests/integration/components/charts/vertical-bar-basic-test.js @@ -0,0 +1,120 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render, triggerEvent } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +const EXAMPLE = [ + { + month: '7/22', + timestamp: '2022-07-01T00:00:00-07:00', + clients: null, + entity_clients: null, + non_entity_clients: null, + secret_syncs: null, + }, + { + month: '8/22', + timestamp: '2022-08-01T00:00:00-07:00', + clients: 6440, + entity_clients: 1471, + non_entity_clients: 4389, + secret_syncs: 4207, + }, + { + month: '9/22', + timestamp: '2022-09-01T00:00:00-07:00', + clients: 9583, + entity_clients: 149, + non_entity_clients: 20, + secret_syncs: 5802, + }, +]; + +module('Integration | Component | clients/charts/vertical-bar-basic', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.data = EXAMPLE; + }); + + test('it renders when some months have no data', async function (assert) { + await render( + hbs`` + ); + assert.dom('[data-test-chart="My chart"]').exists('renders chart container'); + assert.dom('[data-test-vertical-bar]').exists({ count: 3 }, 'renders 3 vertical bars'); + + // Tooltips + assert.dom('[data-test-interactive-area="9/22"]').exists('interactive area exists'); + await triggerEvent('[data-test-interactive-area="9/22"]', 'mouseover'); + assert.dom('[data-test-tooltip]').exists({ count: 1 }, 'renders tooltip on mouseover'); + assert.dom('[data-test-tooltip-count]').hasText('5,802 secret syncs', 'tooltip has exact count'); + assert.dom('[data-test-tooltip-month]').hasText('September 2022', 'tooltip has humanized month and year'); + await triggerEvent('[data-test-interactive-area="9/22"]', 'mouseout'); + assert.dom('[data-test-tooltip]').doesNotExist('removes tooltip on mouseout'); + await triggerEvent('[data-test-interactive-area="7/22"]', 'mouseover'); + assert + .dom('[data-test-tooltip-count]') + .hasText('No data', 'renders tooltip with no data message when no data is available'); + // Axis + assert.dom('[data-test-x-axis]').hasText('7/22 8/22 9/22', 'renders x-axis labels'); + assert.dom('[data-test-y-axis]').hasText('0 2k 4k', 'renders y-axis labels'); + // Table + assert.dom('[data-test-underlying-data]').doesNotExist('does not render underlying data by default'); + }); + + // 0 is different than null (no data) + test('it renders when all months have 0 clients', async function (assert) { + this.data = [ + { + month: '6/22', + timestamp: '2022-06-01T00:00:00-07:00', + clients: 0, + entity_clients: 0, + non_entity_clients: 0, + secret_syncs: 0, + }, + { + month: '7/22', + timestamp: '2022-07-01T00:00:00-07:00', + clients: 0, + entity_clients: 0, + non_entity_clients: 0, + secret_syncs: 0, + }, + ]; + await render( + hbs`` + ); + + assert.dom('[data-test-chart="My chart"]').exists('renders chart container'); + assert.dom('[data-test-vertical-bar]').exists({ count: 2 }, 'renders 2 vertical bars'); + assert.dom('[data-test-vertical-bar]').hasAttribute('height', '0', 'rectangles have 0 height'); + // Tooltips + await triggerEvent('[data-test-interactive-area="6/22"]', 'mouseover'); + assert.dom('[data-test-tooltip]').exists({ count: 1 }, 'renders tooltip on mouseover'); + assert.dom('[data-test-tooltip-count]').hasText('0 secret syncs', 'tooltip has exact count'); + 
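+      // The interactive hover areas render even when every bar has 0 height,
+      // so tooltip content for zero-count months can still be asserted.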
assert.dom('[data-test-tooltip-month]').hasText('June 2022', 'tooltip has humanized month and year'); + await triggerEvent('[data-test-interactive-area="6/22"]', 'mouseout'); + assert.dom('[data-test-tooltip]').doesNotExist('removes tooltip on mouseout'); + // Axis + assert.dom('[data-test-x-axis]').hasText('6/22 7/22', 'renders x-axis labels'); + assert.dom('[data-test-y-axis]').hasText('0 1 2 3 4', 'renders y-axis labels'); + }); + + test('it renders underlying data', async function (assert) { + await render( + hbs`` + ); + assert.dom('[data-test-chart="My chart"]').exists('renders chart container'); + assert.dom('[data-test-underlying-data]').exists('renders underlying data when showTable=true'); + assert + .dom('[data-test-underlying-data] thead') + .hasText('Month Secret syncs Count', 'renders correct table headers'); + }); +}); diff --git a/ui/tests/integration/components/charts/vertical-bar-stacked-test.js b/ui/tests/integration/components/charts/vertical-bar-stacked-test.js new file mode 100644 index 000000000000..9107141b004a --- /dev/null +++ b/ui/tests/integration/components/charts/vertical-bar-stacked-test.js @@ -0,0 +1,144 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { findAll, render, triggerEvent } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { CHARTS } from 'vault/tests/helpers/clients/client-count-selectors'; + +const EXAMPLE = [ + { + timestamp: '2022-09-01T00:00:00', + total: null, + fuji_apples: null, + gala_apples: null, + red_delicious: null, + }, + { + timestamp: '2022-10-01T00:00:00', + total: 6440, + fuji_apples: 1471, + gala_apples: 4389, + red_delicious: 4207, + }, + { + timestamp: '2022-11-01T00:00:00', + total: 9583, + fuji_apples: 149, + gala_apples: 20, + red_delicious: 5802, + }, +]; + +module('Integration | Component | clients/charts/vertical-bar-stacked', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.data = EXAMPLE; + this.legend = [ + { key: 'fuji_apples', label: 'Fuji counts' }, + { key: 'gala_apples', label: 'Gala counts' }, + ]; + }); + + test('it renders when some months have no data', async function (assert) { + assert.expect(10); + await render( + hbs`` + ); + + assert.dom(CHARTS.chart('My chart')).exists('renders chart container'); + + const visibleBars = findAll(CHARTS.verticalBar).filter((e) => e.getAttribute('height') !== '0'); + const count = this.data.filter((d) => d.total !== null).length * 2; + assert.strictEqual(visibleBars.length, count, `renders ${count} vertical bars`); + + // Tooltips + await triggerEvent(CHARTS.hover('2022-09-01T00:00:00'), 'mouseover'); + assert.dom(CHARTS.tooltip).isVisible('renders tooltip on mouseover'); + assert + .dom(CHARTS.tooltip) + .hasText('September 2022 No data', 'renders formatted timestamp with no data message'); + await triggerEvent(CHARTS.hover('2022-09-01T00:00:00'), 'mouseout'); + assert.dom(CHARTS.tooltip).doesNotExist('removes tooltip on mouseout'); + + await triggerEvent(CHARTS.hover('2022-10-01T00:00:00'), 'mouseover'); + assert + .dom(CHARTS.tooltip) + .hasText('October 2022 1,471 Fuji counts 4,389 Gala counts', 'October tooltip has exact count'); + await triggerEvent(CHARTS.hover('2022-10-01T00:00:00'), 'mouseout'); + + await triggerEvent(CHARTS.hover('2022-11-01T00:00:00'), 'mouseover'); + assert + .dom(CHARTS.tooltip) + .hasText('November 2022 149 Fuji counts 20 Gala 
counts', 'November tooltip has exact count');
+    await triggerEvent(CHARTS.hover('2022-11-01T00:00:00'), 'mouseout');
+
+    // Axis
+    assert.dom(CHARTS.xAxis).hasText('9/22 10/22 11/22', 'renders x-axis labels');
+    assert.dom(CHARTS.yAxis).hasText('0 2k 4k', 'renders y-axis labels');
+    // Table
+    assert.dom(CHARTS.table).doesNotExist('does not render underlying data by default');
+  });
+
+  // 0 is different than null (no data)
+  test('it renders when all months have 0 clients', async function (assert) {
+    assert.expect(14);
+
+    this.data = [
+      {
+        month: '10/22',
+        timestamp: '2022-10-01T00:00:00',
+        total: 40,
+        fuji_apples: 0,
+        gala_apples: 0,
+        red_delicious: 40,
+      },
+      {
+        month: '11/22',
+        timestamp: '2022-11-01T00:00:00',
+        total: 180,
+        fuji_apples: 0,
+        gala_apples: 0,
+        red_delicious: 180,
+      },
+    ];
+    await render(
+      hbs``
+    );
+
+    assert.dom(CHARTS.chart('My chart')).exists('renders chart container');
+    findAll(CHARTS.verticalBar).forEach((b, idx) =>
+      assert.dom(b).isNotVisible(`bar: ${idx} is not visible`)
+    );
+    findAll(CHARTS.verticalBar).forEach((b, idx) =>
+      assert.dom(b).hasAttribute('height', '0', `rectangle: ${idx} has 0 height`)
+    );
+
+    // Tooltips
+    await triggerEvent(CHARTS.hover('2022-10-01T00:00:00'), 'mouseover');
+    assert.dom(CHARTS.tooltip).isVisible('renders tooltip on mouseover');
+    assert.dom(CHARTS.tooltip).hasText('October 2022 0 Fuji counts 0 Gala counts', 'tooltip has 0 counts');
+    await triggerEvent(CHARTS.hover('2022-10-01T00:00:00'), 'mouseout');
+    assert.dom(CHARTS.tooltip).isNotVisible('removes tooltip on mouseout');
+
+    // Axis
+    assert.dom(CHARTS.xAxis).hasText('10/22 11/22', 'renders x-axis labels');
+    assert.dom(CHARTS.yAxis).hasText('0 1 2 3 4', 'renders y-axis labels');
+  });
+
+  test('it renders underlying data', async function (assert) {
+    assert.expect(3);
+    await render(
+      hbs``
+    );
+    assert.dom(CHARTS.chart('My chart')).exists('renders chart container');
+    assert.dom(CHARTS.table).exists('renders underlying data when showTable=true');
+    assert
+      .dom(`${CHARTS.table} thead`)
+      .hasText('Timestamp Fuji apples Gala apples', 'renders correct table headers');
+  });
+});
diff --git a/ui/tests/integration/components/checkbox-grid-test.js b/ui/tests/integration/components/checkbox-grid-test.js
index d31b7eb95448..f887c29370e8 100644
--- a/ui/tests/integration/components/checkbox-grid-test.js
+++ b/ui/tests/integration/components/checkbox-grid-test.js
@@ -1,6 +1,6 @@
 /**
  * Copyright (c) HashiCorp, Inc.
- * SPDX-License-Identifier: MPL-2.0
+ * SPDX-License-Identifier: BUSL-1.1
  */

 import { module, test } from 'qunit';
diff --git a/ui/tests/integration/components/chevron-test.js b/ui/tests/integration/components/chevron-test.js
index ec8f096b8f55..fc07e184431e 100644
--- a/ui/tests/integration/components/chevron-test.js
+++ b/ui/tests/integration/components/chevron-test.js
@@ -1,6 +1,6 @@
 /**
  * Copyright (c) HashiCorp, Inc.
- * SPDX-License-Identifier: MPL-2.0
+ * SPDX-License-Identifier: BUSL-1.1
  */

 import { module, test } from 'qunit';
diff --git a/ui/tests/integration/components/choose-pgp-key-form-test.js b/ui/tests/integration/components/choose-pgp-key-form-test.js
new file mode 100644
index 000000000000..23838386ae08
--- /dev/null
+++ b/ui/tests/integration/components/choose-pgp-key-form-test.js
@@ -0,0 +1,94 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import sinon from 'sinon'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { click, fillIn, render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +const CHOOSE_PGP = { + begin: '[data-test-choose-pgp-key-form="begin"]', + description: '[data-test-choose-pgp-key-description]', + toggle: '[data-test-text-toggle]', + useKeyButton: '[data-test-use-pgp-key-button]', + pgpTextArea: '[data-test-pgp-file-textarea]', + confirm: '[data-test-pgp-key-confirm]', + base64Output: '[data-test-pgp-key-copy]', + submit: '[data-test-confirm-pgp-key-submit]', + cancel: '[data-test-use-pgp-key-cancel]', +}; +module('Integration | Component | choose-pgp-key-form', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.set('onCancel', () => {}); + this.set('onSubmit', () => {}); + }); + + test('it renders correctly', async function (assert) { + await render( + hbs`` + ); + + assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists'); + assert.dom(CHOOSE_PGP.description).hasText('my custom form text', 'uses custom form text'); + await click(CHOOSE_PGP.toggle); + assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled'); + await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key'); + assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled'); + await click(CHOOSE_PGP.useKeyButton); + assert + .dom(CHOOSE_PGP.confirm) + .hasText( + 'Below is the base-64 encoded PGP Key that will be used. Click the "Do it" button to proceed.', + 'Incorporates button text in confirmation' + ); + assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents'); + assert.dom(CHOOSE_PGP.submit).hasText('Do it', 'uses passed buttonText'); + await click(CHOOSE_PGP.submit); + }); + + test('it calls onSubmit correctly', async function (assert) { + const submitSpy = sinon.spy(); + this.set('onSubmit', submitSpy); + await render( + hbs`` + ); + + assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists'); + assert + .dom(CHOOSE_PGP.description) + .hasText('Choose a PGP Key from your computer or paste the contents of one in the form below.'); + await click(CHOOSE_PGP.toggle); + assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled'); + await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key'); + assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled'); + await click(CHOOSE_PGP.useKeyButton); + assert + .dom(CHOOSE_PGP.confirm) + .hasText( + 'Below is the base-64 encoded PGP Key that will be used. 
Click the "Submit" button to proceed.', + 'Confirmation text has buttonText' + ); + assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents'); + assert.dom(CHOOSE_PGP.submit).hasText('Submit', 'uses passed buttonText'); + await click(CHOOSE_PGP.submit); + assert.ok(submitSpy.calledOnceWith('base64-pgp-key')); + }); + + test('it calls cancel on cancel', async function (assert) { + const cancelSpy = sinon.spy(); + this.set('onCancel', cancelSpy); + await render( + hbs`` + ); + + await click(CHOOSE_PGP.toggle); + await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key'); + await click(CHOOSE_PGP.cancel); + assert.ok(cancelSpy.calledOnce); + }); +}); diff --git a/ui/tests/integration/components/clients/attribution-test.js b/ui/tests/integration/components/clients/attribution-test.js index 40cde95851aa..fdd3bd96ca5b 100644 --- a/ui/tests/integration/components/clients/attribution-test.js +++ b/ui/tests/integration/components/clients/attribution-test.js @@ -1,231 +1,188 @@ /** * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; +import sinon from 'sinon'; import { setupRenderingTest } from 'ember-qunit'; import { render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; -import { endOfMonth, formatRFC3339 } from 'date-fns'; -import { click } from '@ember/test-helpers'; +import { formatRFC3339 } from 'date-fns'; import subMonths from 'date-fns/subMonths'; +import timestamp from 'core/utils/timestamp'; +import { SERIALIZED_ACTIVITY_RESPONSE } from 'vault/tests/helpers/clients/client-count-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CLIENT_TYPES } from 'core/utils/client-count-utils'; +const CLIENTS_ATTRIBUTION = { + title: '[data-test-attribution-title]', + description: '[data-test-attribution-description]', + subtext: '[data-test-attribution-subtext]', + timestamp: '[data-test-attribution-timestamp]', + chart: '[data-test-horizontal-bar-chart]', + topItem: '[data-test-top-attribution]', + topItemCount: '[data-test-attribution-clients]', + yLabel: '[data-test-group="y-labels"]', + yLabels: '[data-test-group="y-labels"] text', +}; module('Integration | Component | clients/attribution', function (hooks) { setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + this.timestampStub = sinon.replace(timestamp, 'now', sinon.fake.returns(new Date('2018-04-03T14:15:30'))); + }); hooks.beforeEach(function () { - this.set('startTimestamp', formatRFC3339(subMonths(new Date(), 6))); - this.set('timestamp', formatRFC3339(new Date())); - this.set('selectedNamespace', null); - this.set('chartLegend', [ - { label: 'entity clients', key: 'entity_clients' }, - { label: 'non-entity clients', key: 'non_entity_clients' }, - ]); - this.set('totalUsageCounts', { clients: 15, entity_clients: 10, non_entity_clients: 5 }); - this.set('totalClientAttribution', [ - { label: 'second', clients: 10, entity_clients: 7, non_entity_clients: 3 }, - { label: 'first', clients: 5, entity_clients: 3, non_entity_clients: 2 }, - ]); - this.set('totalMountsData', { clients: 5, entity_clients: 3, non_entity_clients: 2 }); - this.set('namespaceMountsData', [ - { label: 'auth1/', clients: 3, entity_clients: 2, non_entity_clients: 1 }, - { label: 'auth2/', clients: 2, entity_clients: 1, non_entity_clients: 1 }, - ]); + const mockNow = this.timestampStub(); + this.mockNow = 
mockNow; + this.startTimestamp = formatRFC3339(subMonths(mockNow, 6)); + this.timestamp = formatRFC3339(mockNow); + this.selectedNamespace = null; + this.namespaceAttribution = SERIALIZED_ACTIVITY_RESPONSE.by_namespace; + this.authMountAttribution = SERIALIZED_ACTIVITY_RESPONSE.by_namespace.find( + (ns) => ns.label === 'ns1' + ).mounts; }); test('it renders empty state with no data', async function (assert) { await render(hbs` - - + `); - assert.dom('[data-test-component="empty-state"]').exists(); - assert.dom('[data-test-empty-state-title]').hasText('No data found'); - assert.dom('[data-test-attribution-description]').hasText('There is a problem gathering data'); - assert.dom('[data-test-attribution-export-button]').doesNotExist(); - assert.dom('[data-test-attribution-timestamp]').doesNotHaveTextContaining('Updated'); + assert.dom(GENERAL.emptyStateTitle).hasText('No data found'); + assert.dom(CLIENTS_ATTRIBUTION.title).hasText('Namespace attribution', 'uses default noun'); + assert.dom(CLIENTS_ATTRIBUTION.timestamp).hasNoText(); }); - test('it renders with data for namespaces', async function (assert) { + test('it updates language based on noun', async function (assert) { + this.noun = ''; await render(hbs` - - `); + assert.dom(CLIENTS_ATTRIBUTION.timestamp).includesText('Updated Apr 3'); - assert.dom('[data-test-component="empty-state"]').doesNotExist(); - assert.dom('[data-test-horizontal-bar-chart]').exists('chart displays'); - assert.dom('[data-test-attribution-export-button]').exists(); + // when noun is blank, uses default + assert.dom(CLIENTS_ATTRIBUTION.title).hasText('Namespace attribution'); assert - .dom('[data-test-attribution-description]') + .dom(CLIENTS_ATTRIBUTION.description) .hasText( - 'This data shows the top ten namespaces by client count and can be used to understand where clients are originating. Namespaces are identified by path. To see all namespaces, export this data.' + 'This data shows the top ten namespaces by total clients and can be used to understand where clients are originating. Namespaces are identified by path.' ); assert - .dom('[data-test-attribution-subtext]') - .hasText( - 'The total clients in the namespace for this date range. This number is useful for identifying overall usage volume.' - ); - assert.dom('[data-test-top-attribution]').includesText('namespace').includesText('second'); - assert.dom('[data-test-attribution-clients]').includesText('namespace').includesText('10'); - }); + .dom(CLIENTS_ATTRIBUTION.subtext) + .hasText('This data shows the top ten namespaces by total clients for the date range selected.'); - test('it renders two charts and correct text for single, historical month', async function (assert) { - this.start = formatRFC3339(subMonths(new Date(), 1)); - this.end = formatRFC3339(subMonths(endOfMonth(new Date()), 1)); - await render(hbs` - - - `); + // when noun is mount + this.set('noun', 'mount'); + assert.dom(CLIENTS_ATTRIBUTION.title).hasText('Mount attribution'); assert - .dom('[data-test-attribution-description]') - .includesText( - 'This data shows the top ten namespaces by client count and can be used to understand where clients are originating. Namespaces are identified by path. To see all namespaces, export this data.', - 'renders correct auth attribution description' - ); - assert - .dom('[data-test-chart-container="total-clients"] .chart-description') - .includesText( - 'The total clients in the namespace for this month. 
This number is useful for identifying overall usage volume.', - 'renders total monthly namespace text' + .dom(CLIENTS_ATTRIBUTION.description) + .hasText( + 'This data shows the top ten mounts by client count within this namespace, and can be used to understand where clients are originating. Mounts are organized by path.' ); assert - .dom('[data-test-chart-container="new-clients"] .chart-description') - .includesText( - 'The new clients in the namespace for this month. This aids in understanding which namespaces create and use new clients.', - 'renders new monthly namespace text' + .dom(CLIENTS_ATTRIBUTION.subtext) + .hasText( + 'The total clients used by the mounts for this date range. This number is useful for identifying overall usage volume.' ); - this.set('selectedNamespace', 'second'); + // when noun is namespace + this.set('noun', 'namespace'); + assert.dom(CLIENTS_ATTRIBUTION.title).hasText('Namespace attribution'); assert - .dom('[data-test-attribution-description]') - .includesText( - 'This data shows the top ten authentication methods by client count within this namespace, and can be used to understand where clients are originating. Authentication methods are organized by path.', - 'renders correct auth attribution description' - ); - assert - .dom('[data-test-chart-container="total-clients"] .chart-description') - .includesText( - 'The total clients used by the auth method for this month. This number is useful for identifying overall usage volume.', - 'renders total monthly auth method text' + .dom(CLIENTS_ATTRIBUTION.description) + .hasText( + 'This data shows the top ten namespaces by total clients and can be used to understand where clients are originating. Namespaces are identified by path.' ); assert - .dom('[data-test-chart-container="new-clients"] .chart-description') - .includesText( - 'The new clients used by the auth method for this month. 
This aids in understanding which auth methods create and use new clients.', - 'renders new monthly auth method text' - ); + .dom(CLIENTS_ATTRIBUTION.subtext) + .hasText('This data shows the top ten namespaces by total clients for the date range selected.'); }); - test('it renders single chart for current month', async function (assert) { + test('it renders with data for namespaces', async function (assert) { await render(hbs` - - `); + + assert.dom(GENERAL.emptyStateTitle).doesNotExist(); + assert.dom(CLIENTS_ATTRIBUTION.chart).exists(); + assert.dom(CLIENTS_ATTRIBUTION.topItem).includesText('namespace').includesText('ns1'); + assert.dom(CLIENTS_ATTRIBUTION.topItemCount).includesText('namespace').includesText('18,903'); assert - .dom('[data-test-chart-container="single-chart"]') - .exists('renders single chart with total clients'); - assert - .dom('[data-test-attribution-subtext]') - .hasTextContaining('this month', 'renders total monthly namespace text'); + .dom(CLIENTS_ATTRIBUTION.yLabels) + .exists({ count: 2 }, 'bars reflect number of namespaces in single month'); + assert.dom(CLIENTS_ATTRIBUTION.yLabel).hasText('ns1root'); }); - test('it renders single chart and correct text for for date range', async function (assert) { + test('it renders with data for mounts', async function (assert) { await render(hbs` - - `); + assert.dom(GENERAL.emptyStateTitle).doesNotExist(); + assert.dom(CLIENTS_ATTRIBUTION.chart).exists(); + assert.dom(CLIENTS_ATTRIBUTION.topItem).includesText('mount').includesText('auth/authid/0'); + assert.dom(CLIENTS_ATTRIBUTION.topItemCount).includesText('mount').includesText('8,394'); assert - .dom('[data-test-chart-container="single-chart"]') - .exists('renders single chart with total clients'); - assert - .dom('[data-test-attribution-subtext]') - .hasTextContaining('date range', 'renders total monthly namespace text'); + .dom(CLIENTS_ATTRIBUTION.yLabels) + .exists({ count: 3 }, 'bars reflect number of mounts in single month'); + assert.dom(CLIENTS_ATTRIBUTION.yLabel).hasText('auth/authid/0pki-engine-0kvv2-engine-0'); }); - test('it renders with data for selected namespace auth methods for a date range', async function (assert) { - this.set('selectedNamespace', 'second'); + test('it shows secret syncs when flag is on', async function (assert) { + this.isSecretsSyncActivated = true; await render(hbs` - - `); - assert.dom('[data-test-component="empty-state"]').doesNotExist(); - assert.dom('[data-test-horizontal-bar-chart]').exists('chart displays'); - assert.dom('[data-test-attribution-export-button]').exists(); - assert - .dom('[data-test-attribution-description]') - .hasText( - 'This data shows the top ten authentication methods by client count within this namespace, and can be used to understand where clients are originating. Authentication methods are organized by path.' - ); - assert - .dom('[data-test-attribution-subtext]') - .hasText( - 'The total clients used by the auth method for this date range. This number is useful for identifying overall usage volume.' 
- );
-    assert.dom('[data-test-top-attribution]').includesText('auth method').includesText('auth1/');
-    assert.dom('[data-test-attribution-clients]').includesText('auth method').includesText('3');
+    assert.dom('[data-test-group="secret_syncs"] rect').exists({ count: 2 });
   });

-  test('it renders modal', async function (assert) {
+  test('it hides secret syncs when flag is off or missing', async function (assert) {
+    this.isSecretsSyncActivated = false;
     await render(hbs`
-
-
     `);
-    await click('[data-test-attribution-export-button]');
-    assert.dom('.modal.is-active .title').hasText('Export attribution data', 'modal appears to export csv');
-    assert.dom('.modal.is-active').includesText('June 2022 - December 2022');
+
+    assert.dom('[data-test-group="secret_syncs"]').doesNotExist();
+  });
+
+  test('it sorts and limits before rendering bars', async function (assert) {
+    this.tooManyAttributions = Array(15)
+      .fill(null)
+      .map((_, idx) => {
+        const attr = { label: `ns${idx}` };
+        CLIENT_TYPES.forEach((type) => {
+          attr[type] = 10 + idx;
+        });
+        return attr;
+      });
+    await render(hbs`
+
+    `);
+    assert.dom(CLIENTS_ATTRIBUTION.yLabels).exists({ count: 10 }, 'only 10 bars are shown');
+    assert.dom(CLIENTS_ATTRIBUTION.topItem).includesText('ns14');
+  });
 });
diff --git a/ui/tests/integration/components/clients/charts/vertical-bar-grouped-test.js b/ui/tests/integration/components/clients/charts/vertical-bar-grouped-test.js
new file mode 100644
index 000000000000..230147ff846d
--- /dev/null
+++ b/ui/tests/integration/components/clients/charts/vertical-bar-grouped-test.js
@@ -0,0 +1,249 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'vault/tests/helpers';
+import { render, triggerEvent } from '@ember/test-helpers';
+import { hbs } from 'ember-cli-htmlbars';
+import { GENERAL } from 'vault/tests/helpers/general-selectors';
+import { CHARTS } from 'vault/tests/helpers/clients/client-count-selectors';
+import { assertBarChart } from 'vault/tests/helpers/clients/client-count-helpers';
+
+module('Integration | Component | clients/charts/vertical-bar-grouped', function (hooks) {
+  setupRenderingTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.legend = [
+      { key: 'clients', label: 'Total clients' },
+      { key: 'foo', label: 'Foo' },
+    ];
+    this.data = [
+      {
+        timestamp: '2018-04-03T14:15:30',
+        clients: 14,
+        foo: 4,
+        month: '4/18',
+      },
+      {
+        timestamp: '2018-05-03T14:15:30',
+        clients: 18,
+        foo: 8,
+        month: '5/18',
+      },
+      {
+        timestamp: '2018-06-03T14:15:30',
+        clients: 114,
+        foo: 14,
+        month: '6/18',
+      },
+      {
+        timestamp: '2018-07-03T14:15:30',
+        clients: 110,
+        foo: 10,
+        month: '7/18',
+      },
+    ];
+    this.renderComponent = async () => {
+      await render(
+        hbs`
+ +
` + ); + }; + }); + + test('it renders empty state when no data', async function (assert) { + this.data = []; + await this.renderComponent(); + assert.dom(CHARTS.chart('grouped vertical bar chart')).doesNotExist(); + assert.dom(GENERAL.emptyStateSubtitle).hasText('No data to display'); + }); + + test('it renders chart with data as grouped bars', async function (assert) { + await this.renderComponent(); + assert.dom(CHARTS.chart('grouped vertical bar chart')).exists(); + const barCount = this.data.length * this.legend.length; + // bars are what we expect + assert.dom(CHARTS.verticalBar).exists({ count: barCount }); + assert.dom(`.custom-bar-clients`).exists({ count: 4 }, 'clients bars have correct class'); + assert.dom(`.custom-bar-foo`).exists({ count: 4 }, 'foo bars have correct class'); + assertBarChart(assert, 'grouped vertical bar chart', this.data, true); + }); + + test('it renders chart with tooltips when some is missing', async function (assert) { + assert.expect(13); + this.data = [ + { + timestamp: '2018-04-03T14:15:30', + month: '4/18', + expectedTooltip: 'April 2018 No data', + }, + { + timestamp: '2018-05-03T14:15:30', + month: '5/18', + clients: 0, + foo: 0, + }, + { + timestamp: '2018-06-03T14:15:30', + month: '6/18', + clients: 14, + foo: 4, + expectedTooltip: 'June 2018 14 Total clients 4 Foo', + }, + ]; + await this.renderComponent(); + assert.dom(CHARTS.chart('grouped vertical bar chart')).exists(); + const barCount = this.data.length * this.legend.length; + assert.dom(CHARTS.verticalBar).exists({ count: barCount }); + assertBarChart(assert, 'grouped vertical bar chart', this.data, true); + + // TOOLTIPS - NO DATA + await triggerEvent(CHARTS.hover(this.data[0].timestamp), 'mouseover'); + assert.dom(CHARTS.tooltip).isVisible(`renders tooltip on mouseover`); + assert + .dom(CHARTS.tooltip) + .hasText(this.data[0].expectedTooltip, 'renders formatted timestamp with no data message'); + await triggerEvent(CHARTS.hover(this.data[2].timestamp), 'mouseout'); + assert.dom(CHARTS.tooltip).doesNotExist('removes tooltip on mouseout'); + + // TOOLTIPS - WITH DATA + await triggerEvent(CHARTS.hover(this.data[2].timestamp), 'mouseover'); + assert.dom(CHARTS.tooltip).isVisible(`renders tooltip on mouseover`); + assert.dom(CHARTS.tooltip).hasText(this.data[2].expectedTooltip, 'renders formatted timestamp with data'); + await triggerEvent(CHARTS.hover(this.data[2].timestamp), 'mouseout'); + assert.dom(CHARTS.tooltip).doesNotExist('removes tooltip on mouseout'); + }); + + test('it renders upgrade data', async function (assert) { + this.upgradeData = [ + { + version: '1.10.1', + previousVersion: '1.9.2', + timestampInstalled: '2018-05-03T14:15:30', + }, + ]; + await this.renderComponent(); + assert.dom(CHARTS.chart('grouped vertical bar chart')).exists(); + const barCount = this.data.length * this.legend.length; + // bars are what we expect + assert.dom(CHARTS.verticalBar).exists({ count: barCount }); + assert.dom(`.custom-bar-clients`).exists({ count: 4 }, 'clients bars have correct class'); + assert.dom(`.custom-bar-foo`).exists({ count: 4 }, 'foo bars have correct class'); + assertBarChart(assert, 'grouped vertical bar chart', this.data, true); + + // TOOLTIP + await triggerEvent(CHARTS.hover('2018-05-03T14:15:30'), 'mouseover'); + assert.dom(CHARTS.tooltip).isVisible(`renders tooltip on mouseover`); + assert + .dom(CHARTS.tooltip) + .hasText( + 'May 2018 18 Total clients 8 Foo Vault was upgraded from 1.9.2 to 1.10.1', + 'renders formatted timestamp with data' + ); + await 
+    await triggerEvent(CHARTS.hover('2018-05-03T14:15:30'), 'mouseout');
+    assert.dom(CHARTS.tooltip).doesNotExist('removes tooltip on mouseout');
+  });
+
+  test('it updates axis when dataset updates', async function (assert) {
+    const datasets = {
+      small: [
+        {
+          timestamp: '2020-04-01',
+          bar: 4,
+          month: '4/20',
+        },
+        {
+          timestamp: '2020-05-01',
+          bar: 8,
+          month: '5/20',
+        },
+        {
+          timestamp: '2020-06-01',
+          bar: 1,
+        },
+        {
+          timestamp: '2020-07-01',
+          bar: 10,
+        },
+      ],
+      large: [
+        {
+          timestamp: '2020-08-01',
+          bar: 4586,
+          month: '8/20',
+        },
+        {
+          timestamp: '2020-09-01',
+          bar: 8928,
+          month: '9/20',
+        },
+        {
+          timestamp: '2020-10-01',
+          bar: 11948,
+          month: '10/20',
+        },
+        {
+          timestamp: '2020-11-01',
+          bar: 16943,
+          month: '11/20',
+        },
+      ],
+      broken: [
+        {
+          timestamp: '2020-01-01',
+          bar: null,
+          month: '1/20',
+        },
+        {
+          timestamp: '2020-02-01',
+          bar: 0,
+          month: '2/20',
+        },
+        {
+          timestamp: '2020-03-01',
+          bar: 22,
+          month: '3/20',
+        },
+        {
+          timestamp: '2020-04-01',
+          bar: null,
+          month: '4/20',
+        },
+        {
+          timestamp: '2020-05-01',
+          bar: 70,
+          month: '5/20',
+        },
+        {
+          timestamp: '2020-06-01',
+          bar: 50,
+          month: '6/20',
+        },
+      ],
+    };
+    this.legend = [{ key: 'bar', label: 'Some thing' }];
+    this.set('data', datasets.small);
+    await this.renderComponent();
+    assert.dom('[data-test-y-axis]').hasText('0 2 4 6 8 10', 'y-axis renders correctly for small values');
+    assert
+      .dom('[data-test-x-axis]')
+      .hasText('4/20 5/20 6/20 7/20', 'x-axis renders correctly for small values');

+    // Update to large dataset
+    this.set('data', datasets.large);
+    assert.dom('[data-test-y-axis]').hasText('0 5k 10k 15k', 'y-axis renders correctly for new large values');
+    assert
+      .dom('[data-test-x-axis]')
+      .hasText('8/20 9/20 10/20 11/20', 'x-axis renders correctly for new large values');

+    // Update to broken dataset
+    this.set('data', datasets.broken);
+    assert.dom('[data-test-y-axis]').hasText('0 20 40 60', 'y-axis renders correctly for new broken values');
+    assert
+      .dom('[data-test-x-axis]')
+      .hasText('1/20 2/20 3/20 4/20 5/20 6/20', 'x-axis renders correctly for new broken values');
+  });
+});
diff --git a/ui/tests/integration/components/clients/config-test.js b/ui/tests/integration/components/clients/config-test.js
index 9f69f38045a7..f5bd1fa318d9 100644
--- a/ui/tests/integration/components/clients/config-test.js
+++ b/ui/tests/integration/components/clients/config-test.js
@@ -1,137 +1,169 @@
 /**
  * Copyright (c) HashiCorp, Inc.
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, find, click } from '@ember/test-helpers'; -import { resolve } from 'rsvp'; +import { render, find, click, fillIn } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; module('Integration | Component | client count config', function (hooks) { setupRenderingTest(hooks); - - const createAttr = (name, type, options) => { - return { - name, - type, - options, - }; - }; - - const generateModel = (overrides) => { - return { - enabled: 'On', - retentionMonths: 24, - defaultReportMonths: 12, - configAttrs: [ - createAttr('enabled', 'string', { editType: 'boolean' }), - createAttr('retentionMonths', 'number'), - ], - changedAttributes: () => ({}), - save: () => {}, - ...overrides, - }; - }; + setupMirage(hooks); hooks.beforeEach(function () { this.router = this.owner.lookup('service:router'); - this.router.reopen({ - transitionTo() { - return { - followRedirects() { - return resolve(); - }, - }; - }, - }); - const model = generateModel(); - this.model = model; + this.transitionStub = sinon.stub(this.router, 'transitionTo'); + const store = this.owner.lookup('service:store'); + this.createModel = (enabled = 'enable', reporting_enabled = false, minimum_retention_months = 48) => { + store.pushPayload('clients/config', { + modelName: 'clients/config', + id: 'foo', + data: { + enabled, + reporting_enabled, + minimum_retention_months, + retention_months: 49, + }, + }); + this.model = store.peekRecord('clients/config', 'foo'); + }; }); test('it shows the table with the correct rows by default', async function (assert) { + this.createModel(); + await render(hbs``); - assert.dom('[data-test-pricing-metrics-config-table]').exists('Pricing metrics config table exists'); + assert.dom('[data-test-clients-config-table]').exists('Clients config table exists'); const rows = document.querySelectorAll('.info-table-row'); - assert.strictEqual(rows.length, 2, 'renders 2 infotable rows'); + assert.strictEqual(rows.length, 2, 'renders 2 info table rows'); assert.ok( find('[data-test-row-value="Usage data collection"]').textContent.includes('On'), 'Enabled value matches model' ); assert.ok( - find('[data-test-row-value="Retention period"]').textContent.includes('24'), + find('[data-test-row-value="Retention period"]').textContent.includes('49'), 'Retention period value matches model' ); }); - test('TODO: it shows the config edit form when mode = edit', async function (assert) { - await render(hbs` - - - `); + test('it should function in edit mode when reporting is disabled', async function (assert) { + assert.expect(13); + const retentionMonths = 60; + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: retentionMonths }; + assert.deepEqual({ enabled, retention_months }, expected, 'Correct data sent in PUT request (1)'); + return {}; + }); - assert.dom('[data-test-pricing-metrics-config-form]').exists('Pricing metrics config form exists'); - const fields = document.querySelectorAll('[data-test-field]'); - assert.strictEqual(fields.length, 2, 'renders 2 fields'); - }); + this.createModel('disable'); - test('it shows a modal with correct messaging when disabling', async function (assert) { - 
// Simulates the model when enabled value has been changed from On to Off - const simModel = generateModel({ - enabled: 'Off', - changedAttributes: () => ({ enabled: ['On', 'Off'] }), - }); - this.set('model', simModel); await render(hbs` - `); - await click('[data-test-edit-metrics-config-save]'); - assert.dom('.modal.is-active').exists('Modal appears'); + assert.dom('[data-test-input="enabled"]').isNotChecked('Data collection checkbox is not checked'); + assert + .dom('label[for="enabled"]') + .hasText('Data collection is off', 'Correct label renders when data collection is off'); + assert.dom('[data-test-input="retentionMonths"]').hasValue('49', 'Retention months render'); + + await click('[data-test-input="enabled"]'); + await fillIn('[data-test-input="retentionMonths"]', 20); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'Retention period must be greater than or equal to 48.', + 'Validation error shows for min retention period' + ); + await fillIn('[data-test-input="retentionMonths"]', 90); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'Retention period must be less than or equal to 60.', + 'Validation error shows for max retention period' + ); + + await fillIn('[data-test-input="retentionMonths"]', retentionMonths); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-clients-config-modal="title"]') + .hasText('Turn usage tracking on?', 'Correct modal title renders'); + assert.dom('[data-test-clients-config-modal="on"]').exists('Correct modal description block renders'); + + await click('[data-test-clients-config-modal="continue"]'); assert.ok( - find('[data-test-modal-title]').textContent.includes('Turn usage tracking off?'), - 'Modal confirming turn tracking off' + this.transitionStub.calledWith('vault.cluster.clients.config'), + 'Route transitions correctly on save success' ); - await click('[data-test-metrics-config-cancel]'); - assert.dom('.modal.is-active').doesNotExist('Modal goes away'); + + await click('[data-test-input="enabled"]'); + await click('[data-test-clients-config-save]'); + assert.dom('[data-test-clients-config-modal]').exists('Modal renders'); + assert + .dom('[data-test-clients-config-modal="title"]') + .hasText('Turn usage tracking off?', 'Correct modal title renders'); + assert.dom('[data-test-clients-config-modal="off"]').exists('Correct modal description block renders'); + + await click('[data-test-clients-config-modal="cancel"]'); + assert.dom('[data-test-clients-config-modal]').doesNotExist('Modal is hidden on cancel'); }); - test('it shows a modal with correct messaging when enabling', async function (assert) { - // Simulates the model when enabled value has been changed from On to Off - const simModel = generateModel({ - changedAttributes: () => ({ enabled: ['Off', 'On'] }), + test('it should be hidden in edit mode when reporting is enabled', async function (assert) { + assert.expect(4); + + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 48 }; + assert.deepEqual({ enabled, retention_months }, expected, 'Correct data sent in PUT request (2)'); + return {}; }); - this.set('model', simModel); + + this.createModel('enable', true, 24); + await render(hbs` - `); - await click('[data-test-edit-metrics-config-save]'); - 
assert.dom('.modal.is-active').exists('Modal appears');
-    assert.ok(
-      find('[data-test-modal-title]').textContent.includes('Turn usage tracking on?'),
-      'Modal confirming turn tracking on'
-    );
-    await click('[data-test-metrics-config-cancel]');
-    assert.dom('.modal.is-active').doesNotExist('Modal goes away');
+    assert.dom('[data-test-input="enabled"]').doesNotExist('Data collection input not shown');
+    assert.dom('[data-test-input="retentionMonths"]').hasValue('49', 'Retention months render');
+
+    await fillIn('[data-test-input="retentionMonths"]', 5);
+    await click('[data-test-clients-config-save]');
+    assert
+      .dom('[data-test-inline-error-message]')
+      .hasText(
+        'Retention period must be greater than or equal to 24.',
+        'Validation error shows for incorrect retention period'
+      );
+
+    await fillIn('[data-test-input="retentionMonths"]', 48);
+    await click('[data-test-clients-config-save]');
   });

-  test('it does not show a modal on save if enable left unchanged', async function (assert) {
-    // Simulates the model when something other than enabled changed
-    const simModel = generateModel({
-      changedAttributes: () => ({ retentionMonths: [24, '48'] }),
+  test('it should not show modal when data collection is not changed', async function (assert) {
+    assert.expect(1);
+
+    this.server.put('/sys/internal/counters/config', (schema, req) => {
+      const { enabled, retention_months } = JSON.parse(req.requestBody);
+      const expected = { enabled: 'enable', retention_months: 48 };
+      assert.deepEqual({ enabled, retention_months }, expected, 'Correct data sent in PUT request (3)');
+      return {};
     });
-    this.set('model', simModel);
+
+    this.createModel();
+
     await render(hbs`
-
     `);
-
-    await click('[data-test-edit-metrics-config-save]');
-    assert.dom('.modal.is-active').doesNotExist('No modal appears');
+    await fillIn('[data-test-input="retentionMonths"]', 48);
+    await click('[data-test-clients-config-save]');
   });
 });
diff --git a/ui/tests/integration/components/clients/counts/nav-bar-test.js b/ui/tests/integration/components/clients/counts/nav-bar-test.js
new file mode 100644
index 000000000000..a8dcaf979838
--- /dev/null
+++ b/ui/tests/integration/components/clients/counts/nav-bar-test.js
@@ -0,0 +1,46 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; + +module('Integration | Component | clients/counts/nav-bar', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.showSecretsSyncClientCounts = false; + + this.renderComponent = async () => { + await render( + hbs`` + ); + }; + }); + + test('it renders default tabs', async function (assert) { + await this.renderComponent(); + + assert.dom(GENERAL.tab('overview')).hasText('Overview'); + assert.dom(GENERAL.tab('token')).hasText('Entity/Non-entity clients'); + assert.dom(GENERAL.tab('acme')).hasText('ACME clients'); + }); + + test('it shows secrets sync tab if showSecretsSyncClientCounts is true', async function (assert) { + this.showSecretsSyncClientCounts = true; + await this.renderComponent(); + + assert.dom(GENERAL.tab('sync')).exists(); + }); + + test('it should not show secrets sync tab if showSecretsSyncClientCounts is false', async function (assert) { + this.showSecretsSyncClientCounts = false; + await this.renderComponent(); + + assert.dom(GENERAL.tab('sync')).doesNotExist(); + }); +}); diff --git a/ui/tests/integration/components/clients/date-range-test.js b/ui/tests/integration/components/clients/date-range-test.js new file mode 100644 index 000000000000..cfe40de3f8be --- /dev/null +++ b/ui/tests/integration/components/clients/date-range-test.js @@ -0,0 +1,131 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { click, fillIn, render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import Sinon from 'sinon'; +import timestamp from 'core/utils/timestamp'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; + +const DATE_RANGE = CLIENT_COUNT.dateRange; +module('Integration | Component | clients/date-range', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + Sinon.replace(timestamp, 'now', Sinon.fake.returns(new Date('2018-04-03T14:15:30'))); + this.now = timestamp.now(); + this.startTime = '2018-01-01T14:15:30'; + this.endTime = '2019-01-31T14:15:30'; + this.onChange = Sinon.spy(); + this.renderComponent = async () => { + await render( + hbs`` + ); + }; + }); + + test('it renders prompt to set dates if no start time', async function (assert) { + this.startTime = undefined; + await this.renderComponent(); + + assert.dom(DATE_RANGE.dateDisplay('start')).doesNotExist(); + assert.dom(DATE_RANGE.dateDisplay('end')).doesNotExist(); + assert.dom(DATE_RANGE.edit).hasText('Set date range'); + + await click(DATE_RANGE.edit); + assert.dom(DATE_RANGE.editModal).exists(); + assert.dom(DATE_RANGE.editDate('start')).hasValue(''); + await fillIn(DATE_RANGE.editDate('start'), '2018-01'); + await fillIn(DATE_RANGE.editDate('end'), '2019-01'); + await click(GENERAL.saveButton); + assert.deepEqual(this.onChange.args[0], [ + { + end_time: 1548892800, + start_time: 1514764800, + }, + ]); + assert.dom(DATE_RANGE.editModal).doesNotExist('closes modal'); + }); + + test('it renders the date range passed and can reset it (ent)', async function (assert) { + 
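+      // Reset is exercised here with the version service stubbed to 'enterprise';
+      // the community test below asserts the reset control is absent.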
this.owner.lookup('service:version').type = 'enterprise'; + await this.renderComponent(); + + assert.dom(DATE_RANGE.dateDisplay('start')).hasText('January 2018'); + assert.dom(DATE_RANGE.dateDisplay('end')).hasText('January 2019'); + assert.dom(DATE_RANGE.edit).hasText('Edit'); + + await click(DATE_RANGE.edit); + assert.dom(DATE_RANGE.editModal).exists(); + assert.dom(DATE_RANGE.editDate('start')).hasValue('2018-01'); + assert.dom(DATE_RANGE.editDate('end')).hasValue('2019-01'); + assert.dom(DATE_RANGE.defaultRangeAlert).doesNotExist(); + + await click(DATE_RANGE.editDate('reset')); + assert.dom(DATE_RANGE.editDate('start')).hasValue(''); + assert.dom(DATE_RANGE.editDate('end')).hasValue(''); + assert.dom(DATE_RANGE.defaultRangeAlert).exists(); + await click(GENERAL.saveButton); + assert.deepEqual(this.onChange.args[0], [{ start_time: undefined, end_time: undefined }]); + }); + + test('it renders the date range passed and cannot reset it when community', async function (assert) { + this.owner.lookup('service:version').type = 'community'; + await this.renderComponent(); + + assert.dom(DATE_RANGE.dateDisplay('start')).hasText('January 2018'); + assert.dom(DATE_RANGE.dateDisplay('end')).hasText('January 2019'); + assert.dom(DATE_RANGE.edit).hasText('Edit'); + + await click(DATE_RANGE.edit); + assert.dom(DATE_RANGE.editModal).exists(); + assert.dom(DATE_RANGE.editDate('reset')).doesNotExist(); + assert.dom(DATE_RANGE.editDate('start')).hasValue('2018-01'); + assert.dom(DATE_RANGE.editDate('end')).hasValue('2019-01'); + assert.dom(DATE_RANGE.defaultRangeAlert).doesNotExist(); + + await fillIn(DATE_RANGE.editDate('start'), ''); + assert.dom(DATE_RANGE.validation).hasText('You must supply both start and end dates.'); + await click(GENERAL.saveButton); + assert.false(this.onChange.called); + }); + + test('it does not trigger onChange if date range invalid', async function (assert) { + this.owner.lookup('service:version').type = 'enterprise'; + await this.renderComponent(); + + await click(DATE_RANGE.edit); + await click(DATE_RANGE.editDate('reset')); + await fillIn(DATE_RANGE.editDate('end'), '2017-05'); + assert.dom(DATE_RANGE.validation).hasText('You must supply both start and end dates.'); + await click(GENERAL.saveButton); + assert.false(this.onChange.called); + + await fillIn(DATE_RANGE.editDate('start'), '2018-01'); + assert.dom(DATE_RANGE.validation).hasText('Start date must be before end date.'); + await click(GENERAL.saveButton); + assert.false(this.onChange.called); + + await click(GENERAL.cancelButton); + assert.false(this.onChange.called); + assert.dom(DATE_RANGE.editModal).doesNotExist(); + }); + + test('it resets the tracked values on close', async function (assert) { + await this.renderComponent(); + + await click(DATE_RANGE.edit); + await fillIn(DATE_RANGE.editDate('start'), '2017-04'); + await fillIn(DATE_RANGE.editDate('end'), '2018-05'); + await click(GENERAL.cancelButton); + + await click(DATE_RANGE.edit); + assert.dom(DATE_RANGE.editDate('start')).hasValue('2018-01'); + assert.dom(DATE_RANGE.editDate('end')).hasValue('2019-01'); + }); +}); diff --git a/ui/tests/integration/components/clients/horizontal-bar-chart-test.js b/ui/tests/integration/components/clients/horizontal-bar-chart-test.js index 31f1c4da7439..bea40d8c8c85 100644 --- a/ui/tests/integration/components/clients/horizontal-bar-chart-test.js +++ b/ui/tests/integration/components/clients/horizontal-bar-chart-test.js @@ -1,11 +1,11 @@ /** * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { findAll, render, triggerEvent } from '@ember/test-helpers'; +import { findAll, render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; module('Integration | Component | clients/horizontal-bar-chart', function (hooks) { @@ -48,12 +48,14 @@ module('Integration | Component | clients/horizontal-bar-chart', function (hooks textTotals.forEach((label, index) => { assert.dom(label).hasText(`${dataArray[index].clients}`, 'total value renders correct number'); }); - for (const [i, bar] of actionBars.entries()) { - const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); - } + + // FLAKY after adding a11y testing, skip for now + // for (const [i, bar] of actionBars.entries()) { + // const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); + // } }); test('it renders data with a large range', async function (assert) { @@ -79,11 +81,12 @@ module('Integration | Component | clients/horizontal-bar-chart', function (hooks assert.strictEqual(actionBars.length, dataArray.length, 'renders correct number of hover bars'); assert.strictEqual(dataBars.length, dataArray.length * 2, 'renders correct number of data bars'); - for (const [i, bar] of actionBars.entries()) { - const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); - } + // FLAKY after adding a11y testing, skip for now + // for (const [i, bar] of actionBars.entries()) { + // const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); + // } }); }); diff --git a/ui/tests/integration/components/clients/line-chart-test.js b/ui/tests/integration/components/clients/line-chart-test.js deleted file mode 100644 index f72ace1dcdeb..000000000000 --- a/ui/tests/integration/components/clients/line-chart-test.js +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 - */ - -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { find, render, findAll, triggerEvent } from '@ember/test-helpers'; -import { hbs } from 'ember-cli-htmlbars'; -import { format, formatRFC3339, subMonths } from 'date-fns'; -import { formatChartDate } from 'core/utils/date-formatters'; -module('Integration | Component | clients/line-chart', function (hooks) { - setupRenderingTest(hooks); - const CURRENT_DATE = new Date(); - hooks.beforeEach(function () { - this.set('xKey', 'foo'); - this.set('yKey', 'bar'); - this.set('dataset', [ - { - foo: 1, - bar: 4, - }, - { - foo: 2, - bar: 8, - }, - { - foo: 3, - bar: 14, - }, - { - foo: 4, - bar: 10, - }, - ]); - }); - - test('it renders', async function (assert) { - await render(hbs` -
- -
- `); - - assert.dom('[data-test-line-chart]').exists('Chart is rendered'); - assert - .dom('[data-test-line-chart="plot-point"]') - .exists({ count: this.dataset.length }, `renders ${this.dataset.length} plot points`); - - findAll('[data-test-line-chart="x-axis-labels"] text').forEach((e, i) => { - assert - .dom(e) - .hasText(`${this.dataset[i][this.xKey]}`, `renders x-axis label: ${this.dataset[i][this.xKey]}`); - }); - assert.dom(find('[data-test-line-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); - }); - - test('it renders upgrade data', async function (assert) { - this.set('dataset', [ - { - foo: format(subMonths(CURRENT_DATE, 4), 'M/yy'), - bar: 4, - }, - { - foo: format(subMonths(CURRENT_DATE, 3), 'M/yy'), - bar: 8, - }, - { - foo: format(subMonths(CURRENT_DATE, 2), 'M/yy'), - bar: 14, - }, - { - foo: format(subMonths(CURRENT_DATE, 1), 'M/yy'), - bar: 10, - }, - ]); - this.set('upgradeData', [ - { - id: '1.10.1', - previousVersion: '1.9.2', - timestampInstalled: formatRFC3339(subMonths(CURRENT_DATE, 2)), - }, - ]); - await render(hbs` -
- -
- `); - assert.dom('[data-test-line-chart]').exists('Chart is rendered'); - assert - .dom('[data-test-line-chart="plot-point"]') - .exists({ count: this.dataset.length }, `renders ${this.dataset.length} plot points`); - assert - .dom(find(`[data-test-line-chart="upgrade-${this.dataset[2][this.xKey]}"]`)) - .hasStyle({ opacity: '1' }, `upgrade data point ${this.dataset[2][this.xKey]} has yellow highlight`); - }); - - test('it renders tooltip', async function (assert) { - const tooltipData = [ - { - month: format(subMonths(CURRENT_DATE, 4), 'M/yy'), - clients: 4, - new_clients: { - clients: 0, - }, - }, - { - month: format(subMonths(CURRENT_DATE, 3), 'M/yy'), - clients: 8, - new_clients: { - clients: 4, - }, - }, - { - month: format(subMonths(CURRENT_DATE, 2), 'M/yy'), - clients: 14, - new_clients: { - clients: 6, - }, - }, - { - month: format(subMonths(CURRENT_DATE, 1), 'M/yy'), - clients: 20, - new_clients: { - clients: 4, - }, - }, - ]; - this.set('dataset', tooltipData); - this.set('upgradeData', [ - { - id: '1.10.1', - previousVersion: '1.9.2', - timestampInstalled: formatRFC3339(subMonths(CURRENT_DATE, 2)), - }, - ]); - await render(hbs` -
- -
- `); - - const tooltipHoverCircles = findAll('[data-test-line-chart] circle.hover-circle'); - for (const [i, bar] of tooltipHoverCircles.entries()) { - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - const { month, clients, new_clients } = tooltipData[i]; - assert - .dom(tooltip) - .includesText( - `${formatChartDate(month)} ${clients} total clients ${new_clients.clients} new clients`, - `tooltip text is correct for ${month}` - ); - } - }); - - test('it fails gracefully when upgradeData is an object', async function (assert) { - this.set('upgradeData', { some: 'object' }); - await render(hbs` -
- -
- `); - - assert - .dom('[data-test-line-chart="plot-point"]') - .exists({ count: this.dataset.length }, 'chart still renders when upgradeData is not an array'); - }); - - test('it fails gracefully when upgradeData has incorrect key names', async function (assert) { - this.set('upgradeData', [{ incorrect: 'key names' }]); - await render(hbs` -
- -
- `); - - assert - .dom('[data-test-line-chart="plot-point"]') - .exists({ count: this.dataset.length }, 'chart still renders when upgradeData has incorrect keys'); - }); - - test('it renders empty state when no dataset', async function (assert) { - await render(hbs` -
- -
- `); - - assert.dom('[data-test-component="empty-state"]').exists('renders empty state when no data'); - assert - .dom('[data-test-empty-state-subtext]') - .hasText( - `this is a custom message to explain why you're not seeing a line chart`, - 'custom message renders' - ); - }); -}); diff --git a/ui/tests/integration/components/clients/monthly-usage-test.js b/ui/tests/integration/components/clients/monthly-usage-test.js deleted file mode 100644 index 8d5c52a12ae8..000000000000 --- a/ui/tests/integration/components/clients/monthly-usage-test.js +++ /dev/null @@ -1,1479 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 - */ - -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render } from '@ember/test-helpers'; -import { hbs } from 'ember-cli-htmlbars'; -import { formatRFC3339 } from 'date-fns'; -import { findAll } from '@ember/test-helpers'; -import { calculateAverage } from 'vault/utils/chart-helpers'; -import { formatNumber } from 'core/helpers/format-number'; - -module('Integration | Component | clients/monthly-usage', function (hooks) { - setupRenderingTest(hooks); - const DATASET = [ - { - month: '8/21', - timestamp: '2021-08-01T00:00:00Z', - counts: null, - namespaces: [], - new_clients: { - month: '8/21', - namespaces: [], - }, - namespaces_by_key: {}, - }, - { - month: '9/21', - clients: 19251, - entity_clients: 10713, - non_entity_clients: 8538, - namespaces: [ - { - label: 'root', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - }, - { - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - }, - { - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - }, - { - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - }, - { - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - }, - { - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 2698, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - }, - { - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - }, - { - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - }, - { - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - }, - { - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - 
}, - { - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - mounts: [ - { - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - }, - { - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - }, - { - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - }, - { - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '9/21', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - new_clients: { - month: '9/21', - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - }, - }, - }, - 'test-ns-2/': { - month: '9/21', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - new_clients: { - month: '9/21', - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - non_entity_clients: 507, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - }, - }, - }, - 'test-ns-1/': { - month: '9/21', - clients: 4569, - 
entity_clients: 1871, - non_entity_clients: 2698, - new_clients: { - month: '9/21', - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - new_clients: { - month: '9/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - new_clients: { - month: '9/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - }, - mounts_by_key: { - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - new_clients: { 
- month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - }, - }, - }, - }, - new_clients: { - month: '9/21', - clients: 9421, - entity_clients: 3947, - non_entity_clients: 5474, - namespaces: [ - { - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - { - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - { - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - { - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - ], - }, - { - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - { - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - { - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - { - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - mounts: [ - { - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - { - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - { - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - { - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - non_entity_clients: 507, - }, - { - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - { - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - { - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - mounts: [ - { - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - { - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - { - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - { - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - 
non_entity_clients: 27, - }, - ], - }, - ], - }, - }, - { - month: '10/21', - clients: 19417, - entity_clients: 10105, - non_entity_clients: 9312, - namespaces: [ - { - label: 'root', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - }, - { - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - }, - { - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - }, - { - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - }, - { - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - }, - { - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - }, - { - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - }, - { - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - }, - { - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - mounts: [ - { - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - }, - { - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - }, - { - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - mounts: [ - { - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - }, - { - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - }, - { - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '10/21', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - new_clients: { - month: '10/21', - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - }, - 
'path-1': { - month: '10/21', - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - }, - }, - }, - 'test-ns-2/': { - month: '10/21', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - new_clients: { - month: '10/21', - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - new_clients: { - month: '10/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - new_clients: { - month: 
'10/21', - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - new_clients: { - month: '10/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - }, - }, - }, - 'test-ns-1/': { - month: '10/21', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - new_clients: { - month: '10/21', - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - }, - }, - }, - }, - new_clients: { - month: '10/21', - clients: 7659, - entity_clients: 3555, - non_entity_clients: 4104, - namespaces: [ - { - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - mounts: [ - { - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - { - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - { - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 
433, - }, - { - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - ], - }, - { - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - { - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - { - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - { - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - { - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - { - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - { - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - { - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - { - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - { - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - mounts: [ - { - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - { - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - { - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - { - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - ], - }, - ], - }, - }, - ]; - hooks.beforeEach(function () { - this.set('timestamp', formatRFC3339(new Date())); - this.set('isDateRange', true); - this.set('chartLegend', [ - { label: 'entity clients', key: 'entity_clients' }, - { label: 'non-entity clients', key: 'non_entity_clients' }, - ]); - this.set('byMonthActivityData', DATASET); - }); - - test('it renders empty state with no data', async function (assert) { - await render(hbs` - - - `); - assert.dom('[data-test-monthly-usage]').exists('monthly usage component renders'); - assert.dom('[data-test-component="empty-state"]').exists(); - assert.dom('[data-test-empty-state-subtext]').hasText('No data to display'); - assert.dom('[data-test-monthly-usage-average-total] p.data-details').hasText('0', 'average total is 0'); - assert.dom('[data-test-monthly-usage-average-new] p.data-details').hasText('0', 'average new is 0'); - assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); - assert.dom('[data-test-monthly-usage-legend]').doesNotExist('legend does not exist'); - assert.dom('[data-test-monthly-usage-timestamp]').exists('renders timestamp'); - }); - - test('it renders with month over month activity data', async function 
(assert) { - const expectedTotal = formatNumber([calculateAverage(DATASET, 'clients')]); - const expectedNew = formatNumber([ - calculateAverage( - DATASET?.map((d) => d.new_clients), - 'clients' - ), - ]); - await render(hbs` - - `); - assert.dom('[data-test-monthly-usage]').exists('monthly usage component renders'); - assert.dom('[data-test-component="empty-state"]').doesNotExist(); - assert.dom('[data-test-vertical-bar-chart]').exists('vertical bar chart displays'); - assert.dom('[data-test-monthly-usage-legend]').exists('renders vertical bar chart legend'); - assert.dom('[data-test-monthly-usage-timestamp]').exists('renders timestamp'); - - findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { - assert.dom(e).hasText(`${DATASET[i].month}`, `renders x-axis label: ${DATASET[i].month}`); - }); - assert - .dom('[data-test-vertical-chart="data-bar"]') - .exists( - { count: DATASET.filter((m) => m.counts !== null).length * 2 }, - 'renders correct number of data bars' - ); - assert - .dom('[data-test-monthly-usage-average-total] p.data-details') - .hasText(`${expectedTotal}`, `renders correct total average ${expectedTotal}`); - assert - .dom('[data-test-monthly-usage-average-new] p.data-details') - .hasText(`${expectedNew}`, `renders correct new average ${expectedNew}`); - }); -}); diff --git a/ui/tests/integration/components/clients/no-data-test.js b/ui/tests/integration/components/clients/no-data-test.js new file mode 100644 index 000000000000..7605d24d6a85 --- /dev/null +++ b/ui/tests/integration/components/clients/no-data-test.js @@ -0,0 +1,93 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs'; + +module('Integration | Component | clients/no-data', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + this.server.post('/sys/capabilities-self', allowAllCapabilitiesStub()); + this.store = this.owner.lookup('service:store'); + this.setConfig = async (data) => { + // the clients/config model does some funky serializing for the "enabled" param + // so stubbing the request here instead of just the model for additional coverage + this.server.get('sys/internal/counters/config', () => { + return { + request_id: '25a94b99-b49a-c4ac-cb7b-5ba0eb390a25', + data, + }; + }); + return this.store.queryRecord('clients/config', {}); + }; + this.renderComponent = async () => { + return render(hbs``); + }; + }); + + test('it renders empty state when enabled is "on"', async function (assert) { + assert.expect(2); + const data = { + enabled: 'default-enabled', + reporting_enabled: false, + }; + this.config = await this.setConfig(data); + await this.renderComponent(); + assert.dom(GENERAL.emptyStateTitle).hasText('No data received'); + assert + .dom(GENERAL.emptyStateMessage) + .hasText('Tracking is turned on and Vault is gathering data. 
It should appear here within 30 minutes.'); + }); + + test('it renders empty state when reporting_enabled is true', async function (assert) { + assert.expect(2); + const data = { + enabled: 'default-disabled', + reporting_enabled: true, + }; + this.config = await this.setConfig(data); + await this.renderComponent(); + assert.dom(GENERAL.emptyStateTitle).hasText('No data received'); + assert + .dom(GENERAL.emptyStateMessage) + .hasText('Tracking is turned on and Vault is gathering data. It should appear here within 30 minutes.'); + }); + + test('it renders empty state when reporting is fully disabled', async function (assert) { + assert.expect(2); + const data = { + enabled: 'default-disabled', + reporting_enabled: false, + }; + this.config = await this.setConfig(data); + await this.renderComponent(); + assert.dom(GENERAL.emptyStateTitle).hasText('Data tracking is disabled'); + assert + .dom(GENERAL.emptyStateMessage) + .hasText( + 'Tracking is disabled, and no data is being collected. To turn it on, edit the configuration.' + ); + }); + + test('it renders empty state when config data is not available', async function (assert) { + assert.expect(2); + this.config = null; + await this.renderComponent(); + assert.dom(GENERAL.emptyStateTitle).hasText('Activity configuration data is unavailable'); + assert + .dom(GENERAL.emptyStateMessage) + .hasText( + 'Reporting status is unknown and could be enabled or disabled. Check the Vault logs for more information.' + ); + }); +}); diff --git a/ui/tests/integration/components/clients/page-header-test.js b/ui/tests/integration/components/clients/page-header-test.js new file mode 100644 index 000000000000..7be13f5a8495 --- /dev/null +++ b/ui/tests/integration/components/clients/page-header-test.js @@ -0,0 +1,301 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { click, fillIn, render, waitFor, waitUntil } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { Response } from 'miragejs'; +import Sinon from 'sinon'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { capabilitiesStub, overrideResponse } from 'vault/tests/helpers/stubs'; +import { CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; + +// this test coverage is mostly around the export button functionality +// since everything else is static +module('Integration | Component | clients/page-header', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.downloadStub = Sinon.stub(this.owner.lookup('service:download'), 'download'); + this.startTimestamp = '2022-06-01T23:00:11.050Z'; + this.endTimestamp = '2022-12-01T23:00:11.050Z'; + this.selectedNamespace = undefined; + this.upgradesDuringActivity = []; + this.noData = undefined; + this.server.post('/sys/capabilities-self', () => + capabilitiesStub('sys/internal/counters/activity/export', ['sudo']) + ); + + this.renderComponent = async () => { + return render(hbs` + `); + }; + }); + + test('it shows the export button if user has SUDO capabilities', async function (assert) { + await this.renderComponent(); + assert.dom(CLIENT_COUNT.exportButton).exists(); + }); + + test('it hides the export button if user has SUDO capabilities but there is no data', async function (assert) { + this.noData = true; + await this.renderComponent(); + assert.dom(CLIENT_COUNT.exportButton).doesNotExist(); + }); + + test('it hides the export button if user does not have SUDO capabilities', async function (assert) { + this.server.post('/sys/capabilities-self', () => + capabilitiesStub('sys/internal/counters/activity/export', ['read']) + ); + + await this.renderComponent(); + assert.dom(CLIENT_COUNT.exportButton).doesNotExist(); + }); + + test('defaults to showing the export button if capabilities cannot be read', async function (assert) { + this.server.post('/sys/capabilities-self', () => overrideResponse(403)); + + await this.renderComponent(); + assert.dom(CLIENT_COUNT.exportButton).exists(); + }); + + test('it shows the export API error on the modal', async function (assert) { + this.server.get('/sys/internal/counters/activity/export', function () { + return overrideResponse(403); + }); + + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitFor('[data-test-export-error]'); + assert.dom('[data-test-export-error]').hasText('permission denied'); + }); + + test('it exports when json format', async function (assert) { + assert.expect(2); + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'json', + start_time: '2022-06-01T23:00:11.050Z', + end_time: '2022-12-01T23:00:11.050Z', + }); + return new Response(200, { 'Content-Type': 'application/json' }, { example: 'data' }); + }); + + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await fillIn('[data-test-download-format]', 'jsonl'); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const extension = this.downloadStub.lastCall.args[2]; + 
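// Aside: taken together, the capability tests above imply a simple gate for the export button:
// render it when the token reports 'sudo' on sys/internal/counters/activity/export, hide it
// otherwise, and fall back to showing it when /sys/capabilities-self itself fails (e.g. 403).
// A rough sketch of that assumed check (showExportButton is a hypothetical name, not the
// component's actual implementation):
const showExportButton = (capabilities) => {
  if (!Array.isArray(capabilities)) return true; // capabilities could not be read -> show by default
  return capabilities.includes('sudo');
};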
assert.strictEqual(extension, 'jsonl'); + }); + + test('it exports when csv format', async function (assert) { + assert.expect(2); + + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'csv', + start_time: '2022-06-01T23:00:11.050Z', + end_time: '2022-12-01T23:00:11.050Z', + }); + return new Response(200, { 'Content-Type': 'text/csv' }, 'example,data'); + }); + + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await fillIn('[data-test-download-format]', 'csv'); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const extension = this.downloadStub.lastCall.args[2]; + assert.strictEqual(extension, 'csv'); + }); + + test('it sends the current namespace in export request', async function (assert) { + assert.expect(2); + const namespaceSvc = this.owner.lookup('service:namespace'); + namespaceSvc.path = 'foo'; + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.strictEqual(req.requestHeaders['X-Vault-Namespace'], 'foo'); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + + await this.renderComponent(); + + assert.dom(CLIENT_COUNT.exportButton).exists(); + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + }); + test('it sends the selected namespace in export request', async function (assert) { + assert.expect(2); + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.strictEqual(req.requestHeaders['X-Vault-Namespace'], 'foobar'); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + this.selectedNamespace = 'foobar/'; + + await this.renderComponent(); + assert.dom(CLIENT_COUNT.exportButton).exists(); + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + }); + + test('it sends the current + selected namespace in export request', async function (assert) { + assert.expect(2); + const namespaceSvc = this.owner.lookup('service:namespace'); + namespaceSvc.path = 'foo'; + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.strictEqual(req.requestHeaders['X-Vault-Namespace'], 'foo/bar'); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + this.selectedNamespace = 'bar/'; + + await this.renderComponent(); + + assert.dom(CLIENT_COUNT.exportButton).exists(); + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + }); + + test('it shows a no data message if export returns 204', async function (assert) { + this.server.get('/sys/internal/counters/activity/export', () => overrideResponse(204)); + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitFor('[data-test-export-error]'); + assert.dom('[data-test-export-error]').hasText('no data to export in provided time range.'); + }); + + test('it shows upgrade data in export modal', async function (assert) { + this.upgradesDuringActivity = [ + { version: '1.10.1', previousVersion: '1.9.9', timestampInstalled: '2021-11-18T10:23:16Z' }, + ]; + await this.renderComponent(); + await click(CLIENT_COUNT.exportButton); + await waitFor('[data-test-export-upgrade-warning]'); + assert.dom('[data-test-export-upgrade-warning]').includesText('1.10.1 (Nov 18, 2021)'); + }); + + module('download naming', function () { + test('is correct for date range', async function (assert) { + assert.expect(2); + 
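// Aside: the tests in this 'download naming' module pin down how the export filename appears to
// be assembled: a fixed 'clients_export' prefix, an optional namespace with its trailing slash
// stripped, then either a single month or a 'start-end' month range. A sketch of that assumed
// scheme (exportFilename is a hypothetical helper, not the component's API):
const exportFilename = (startIso, endIso, namespace) => {
  const fmt = (iso) =>
    new Date(iso).toLocaleString('en-US', { month: 'long', year: 'numeric', timeZone: 'UTC' });
  let name = 'clients_export';
  if (namespace) name += `_${namespace.replace(/\/$/, '')}`; // 'bar/' -> 'bar'
  if (startIso && endIso) {
    const [start, end] = [fmt(startIso), fmt(endIso)];
    name += start === end ? `_${start}` : `_${start}-${end}`; // single month vs. range
  }
  return name;
};
// e.g. exportFilename('2022-06-01T23:00:11.050Z', '2022-12-01T23:00:11.050Z')
//      -> 'clients_export_June 2022-December 2022'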
this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'csv', + start_time: '2022-06-01T23:00:11.050Z', + end_time: '2022-12-01T23:00:11.050Z', + }); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + + await this.renderComponent(); + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const args = this.downloadStub.lastCall.args; + const [filename] = args; + assert.strictEqual(filename, 'clients_export_June 2022-December 2022', 'csv has expected filename'); + }); + + test('is correct for a single month', async function (assert) { + assert.expect(2); + this.endTimestamp = '2022-06-21T23:00:11.050Z'; + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'csv', + start_time: '2022-06-01T23:00:11.050Z', + end_time: '2022-06-21T23:00:11.050Z', + }); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const [filename] = this.downloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_export_June 2022', 'csv has single month in filename'); + }); + test('omits date if no start/end timestamp', async function (assert) { + assert.expect(2); + this.startTimestamp = undefined; + this.endTimestamp = undefined; + + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'csv', + }); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const [filename] = this.downloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_export'); + }); + + test('includes current namespace', async function (assert) { + assert.expect(2); + this.startTimestamp = undefined; + this.endTimestamp = undefined; + const namespace = this.owner.lookup('service:namespace'); + namespace.path = 'bar/'; + + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'csv', + }); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const [filename] = this.downloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_export_bar'); + }); + + test('includes selectedNamespace', async function (assert) { + assert.expect(2); + this.startTimestamp = undefined; + this.endTimestamp = undefined; + this.selectedNamespace = 'foo/'; + + this.server.get('/sys/internal/counters/activity/export', function (_, req) { + assert.deepEqual(req.queryParams, { + format: 'csv', + }); + return new Response(200, { 'Content-Type': 'text/csv' }, ''); + }); + + await this.renderComponent(); + + await click(CLIENT_COUNT.exportButton); + await click(GENERAL.confirmButton); + await waitUntil(() => this.downloadStub.calledOnce); + const [filename] = this.downloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_export_foo'); + }); + }); +}); diff --git 
a/ui/tests/integration/components/clients/page/acme-test.js b/ui/tests/integration/components/clients/page/acme-test.js new file mode 100644 index 000000000000..7df2e1a44826 --- /dev/null +++ b/ui/tests/integration/components/clients/page/acme-test.js @@ -0,0 +1,197 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; +import { render, findAll } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import { getUnixTime } from 'date-fns'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CLIENT_COUNT, CHARTS } from 'vault/tests/helpers/clients/client-count-selectors'; +import { formatNumber } from 'core/helpers/format-number'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { dateFormat } from 'core/helpers/date-format'; +import { assertBarChart } from 'vault/tests/helpers/clients/client-count-helpers'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); +const { statText, usageStats } = CLIENT_COUNT; + +module('Integration | Component | clients | Clients::Page::Acme', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + clientsHandler(this.server); + this.store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.startTimestamp = START_TIME; + this.endTimestamp = END_TIME; + + this.renderComponent = () => + render(hbs` + + `); + // Fails on #ember-testing-container + setRunOptions({ + rules: { + 'scrollable-region-focusable': { enabled: false }, + }, + }); + }); + + test('it should render with full month activity data charts', async function (assert) { + const monthCount = this.activity.byMonth.length; + assert.expect(7 + monthCount * 2); + const expectedTotal = formatNumber([this.activity.total.acme_clients]); + const expectedNewAvg = formatNumber([ + calculateAverage( + this.activity.byMonth.map((m) => m?.new_clients), + 'acme_clients' + ), + ]); + await this.renderComponent(); + assert + .dom(statText('Total ACME clients')) + .hasText( + `Total ACME clients The total number of ACME requests made to Vault during this time period. 
${expectedTotal}`, + `renders correct total acme stat ${expectedTotal}` + ); + assert.dom(statText('Average new ACME clients per month')).hasTextContaining(`${expectedNewAvg}`); + + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert.dom(CHARTS.timestamp).hasText(`Updated ${formattedTimestamp}`, 'renders response timestamp'); + + assertBarChart(assert, 'ACME usage', this.activity.byMonth); + assertBarChart(assert, 'Monthly new', this.activity.byMonth); + }); + + test('it should render stats without chart for a single month', async function (assert) { + assert.expect(4); + const activityQuery = { start_time: { timestamp: END_TIME }, end_time: { timestamp: END_TIME } }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + const expectedTotal = formatNumber([this.activity.total.acme_clients]); + await this.renderComponent(); + + assert.dom(CHARTS.chart('ACME usage')).doesNotExist('total usage chart does not render'); + assert.dom(CHARTS.container('Monthly new')).doesNotExist('monthly new chart does not render'); + assert.dom(statText('Average new ACME clients per month')).doesNotExist(); + assert + .dom(usageStats('ACME usage')) + .hasText( + `ACME usage Usage metrics tutorial This data can be used to understand how many ACME clients have been used for the queried month. Each ACME request is counted as one client. Total ACME clients ${expectedTotal}`, + 'it renders usage stats with single month copy' + ); + }); + + // EMPTY STATES + test('it should render empty state when ACME data does not exist for a date range', async function (assert) { + assert.expect(7); + // this happens when a user queries historical data that predates the monthly breakdown (added in 1.11) + // only entity + non-entity clients existed then, so we show an empty state for ACME clients + // because the activity response just returns { acme_clients: 0 } which isn't very clear + this.activity.byMonth = []; + + await this.renderComponent(); + + assert.dom(GENERAL.emptyStateTitle).hasText('No ACME clients'); + assert + .dom(GENERAL.emptyStateMessage) + .hasText('There is no ACME client data available for this date range.'); + + assert.dom(CHARTS.chart('ACME usage')).doesNotExist('vertical bar chart does not render'); + assert.dom(CHARTS.container('Monthly new')).doesNotExist('monthly new chart does not render'); + assert.dom(statText('Total ACME clients')).doesNotExist(); + assert.dom(statText('Average new ACME clients per month')).doesNotExist(); + assert.dom(usageStats('ACME usage')).doesNotExist(); + }); + + test('it should render empty state when ACME data does not exist for a single month', async function (assert) { + assert.expect(1); + const activityQuery = { start_time: { timestamp: START_TIME }, end_time: { timestamp: START_TIME } }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.activity.byMonth = []; + + await this.renderComponent(); + + assert.dom(GENERAL.emptyStateMessage).hasText('There is no ACME client data available for this month.'); + }); + + test('it should render empty total usage chart when monthly counts are null or 0', async function (assert) { + assert.expect(8); + // manually stub because mirage isn't setup to handle mixed data yet + const counts = { + acme_clients: 0, + clients: 19, + entity_clients: 0, + non_entity_clients: 19, + secret_syncs: 0, + }; + this.activity.byMonth = [ + { + month: '3/24', + timestamp: '2024-03-01T00:00:00Z', + 
namespaces: [], + new_clients: { + month: '3/24', + timestamp: '2024-03-01T00:00:00Z', + namespaces: [], + }, + }, + { + month: '4/24', + timestamp: '2024-04-01T00:00:00Z', + ...counts, + namespaces: [], + new_clients: { + month: '4/24', + timestamp: '2024-04-01T00:00:00Z', + namespaces: [], + }, + }, + ]; + this.activity.total = counts; + + await this.renderComponent(); + + assert.dom(CHARTS.chart('ACME usage')).exists('renders empty ACME usage chart'); + assert + .dom(statText('Total ACME clients')) + .hasTextContaining('The total number of ACME requests made to Vault during this time period. 0'); + findAll(`${CHARTS.chart('ACME usage')} ${CHARTS.xAxisLabel}`).forEach((e, i) => { + assert + .dom(e) + .hasText( + `${this.activity.byMonth[i].month}`, + `renders x-axis labels for empty bar chart: ${this.activity.byMonth[i].month}` + ); + }); + findAll(`${CHARTS.chart('ACME usage')} ${CHARTS.verticalBar}`).forEach((e, i) => { + assert.dom(e).isNotVisible(`does not render data bar for: ${this.activity.byMonth[i].month}`); + }); + + assert + .dom(CHARTS.container('Monthly new')) + .doesNotExist('empty monthly new chart does not render at all'); + assert.dom(statText('Average new ACME clients per month')).doesNotExist(); + }); +}); diff --git a/ui/tests/integration/components/clients/page/counts-test.js b/ui/tests/integration/components/clients/page/counts-test.js new file mode 100644 index 000000000000..9d42a6e291d0 --- /dev/null +++ b/ui/tests/integration/components/clients/page/counts-test.js @@ -0,0 +1,288 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { render, click, findAll, fillIn } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import { fromUnixTime, getUnixTime } from 'date-fns'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; +import { selectChoose } from 'ember-power-select/test-support'; +import timestamp from 'core/utils/timestamp'; +import sinon from 'sinon'; +import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); +const START_ISO = LICENSE_START.toISOString(); +const END_ISO = STATIC_NOW.toISOString(); + +module('Integration | Component | clients | Page::Counts', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + sinon.replace(timestamp, 'now', sinon.fake.returns(STATIC_NOW)); + clientsHandler(this.server); + this.server.post('/sys/capabilities-self', allowAllCapabilitiesStub()); + this.store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.config = await this.store.queryRecord('clients/config', {}); + this.startTimestamp = START_ISO; + this.endTimestamp = END_ISO; + this.versionHistory = []; + this.renderComponent = () => + render(hbs` + +
<Clients::Page::Counts @activity={{this.activity}} @activityError={{this.activityError}} @config={{this.config}} @startTimestamp={{this.startTimestamp}} @endTimestamp={{this.endTimestamp}} @namespace={{this.namespace}} @mountPath={{this.mountPath}} @versionHistory={{this.versionHistory}} @onFilterChange={{this.onFilterChange}}>
+ Yield block
+ </Clients::Page::Counts>
+ `); + }); + + test('it should populate start and end month displays', async function (assert) { + await this.renderComponent(); + + assert.dom(CLIENT_COUNT.dateRange.dateDisplay('start')).hasText('July 2023', 'Start month renders'); + assert.dom(CLIENT_COUNT.dateRange.dateDisplay('end')).hasText('January 2024', 'End month renders'); + }); + + test('it should render no data empty state', async function (assert) { + this.activity = { id: 'no-data' }; + + await this.renderComponent(); + + assert + .dom(GENERAL.emptyStateTitle) + .hasText('No data received from July 2023 to January 2024', 'No data empty state renders'); + }); + + test('it should render activity error', async function (assert) { + this.activity = null; + this.activityError = { httpStatus: 403 }; + + await this.renderComponent(); + + assert + .dom(GENERAL.emptyStateTitle) + .hasText('You are not authorized', 'Activity error empty state renders'); + }); + + test('it should render config disabled alert', async function (assert) { + this.config.enabled = 'Off'; + + await this.renderComponent(); + + assert + .dom(CLIENT_COUNT.counts.configDisabled) + .hasText('Tracking is disabled', 'Config disabled alert renders'); + }); + + const jan23start = getUnixTime(new Date('2023-01-01T00:00:00Z')); + // license start is July 2, 2023; on date change, start is recalculated to the beginning of that month + const july23start = getUnixTime(new Date('2023-07-01T00:00:00Z')); + const dec23end = getUnixTime(new Date('2023-12-31T00:00:00Z')); + const jan24end = getUnixTime(new Date('2024-01-31T00:00:00Z'));
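+ // For illustration, the onFilterChange mock in the block below converts these unix-time
+ // query params back to ISO strings with date-fns, for example:
+ //   fromUnixTime(jan23start).toISOString(); // => '2023-01-01T00:00:00.000Z'
+ //   fromUnixTime(jan24end).toISOString(); // => '2024-01-31T00:00:00.000Z'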
+ [ + { + scenario: 'changing start only', + expected: { start_time: jan23start, end_time: jan24end }, + editStart: '2023-01', + expectedStart: 'January 2023', + expectedEnd: 'January 2024', + }, + { + scenario: 'changing end only', + expected: { start_time: july23start, end_time: dec23end }, + editEnd: '2023-12', + expectedStart: 'July 2023', + expectedEnd: 'December 2023', + }, + { + scenario: 'changing both', + expected: { start_time: jan23start, end_time: dec23end }, + editStart: '2023-01', + editEnd: '2023-12', + expectedStart: 'January 2023', + expectedEnd: 'December 2023', + }, + { + scenario: 'reset', + expected: { start_time: undefined, end_time: undefined }, + reset: true, + expectedStart: 'July 2023', + expectedEnd: 'January 2024', + }, + ].forEach((testCase) => { + test(`it should send correct millis value on filter change when ${testCase.scenario}`, async function (assert) { + assert.expect(5); + // set to enterprise so reset will save correctly + this.owner.lookup('service:version').type = 'enterprise'; + this.onFilterChange = (params) => { + assert.deepEqual(params, testCase.expected, 'Correct values sent on filter change'); + // in the app, the timestamp choices trigger a qp refresh as millis from epoch, + // but in the model they are translated from millis to ISO timestamps before being + // passed to this component. Mock that behavior here. + this.set( + 'startTimestamp', + params?.start_time ? fromUnixTime(params.start_time).toISOString() : START_ISO + ); + this.set('endTimestamp', params?.end_time ? fromUnixTime(params.end_time).toISOString() : END_ISO); + }; + await this.renderComponent(); + await click(CLIENT_COUNT.dateRange.edit); + + // page starts with default billing dates, which are July 2023 - January 2024 + assert.dom(CLIENT_COUNT.dateRange.editDate('start')).hasValue('2023-07'); + assert.dom(CLIENT_COUNT.dateRange.editDate('end')).hasValue('2024-01'); + + if (testCase.editStart) { + await fillIn(CLIENT_COUNT.dateRange.editDate('start'), testCase.editStart); + } + if (testCase.editEnd) { + await fillIn(CLIENT_COUNT.dateRange.editDate('end'), testCase.editEnd); + } + if (testCase.reset) { + await click(CLIENT_COUNT.dateRange.reset); + } + await click(GENERAL.saveButton); + assert.dom(CLIENT_COUNT.dateRange.dateDisplay('start')).hasText(testCase.expectedStart); + assert.dom(CLIENT_COUNT.dateRange.dateDisplay('end')).hasText(testCase.expectedEnd); + }); + }); + + test('it should render namespace and auth mount filters', async function (assert) { + assert.expect(5); + + this.namespace = 'root'; + this.mountPath = 'auth/authid0'; + + let assertion = (params) => + assert.deepEqual(params, { ns: undefined, mountPath: undefined }, 'Auth mount cleared with namespace'); + this.onFilterChange = (params) => { + if (assertion) { + assertion(params); + } + const keys = Object.keys(params); + this.namespace = keys.includes('ns') ? params.ns : this.namespace; + this.mountPath = keys.includes('mountPath') ? params.mountPath : this.mountPath; + }; + + await this.renderComponent(); + + assert.dom(CLIENT_COUNT.counts.namespaces).includesText(this.namespace, 'Selected namespace renders'); + assert.dom(CLIENT_COUNT.counts.mountPaths).includesText(this.mountPath, 'Selected auth mount renders'); + + await click(`${CLIENT_COUNT.counts.namespaces} button`); + // this is only necessary in tests since SearchSelect does not respond to initialValue changes + // in the app the component is re-rendered on query param change + assertion = null; + await click(`${CLIENT_COUNT.counts.mountPaths} button`); + assertion = (params) => assert.true(params.ns.includes('ns'), 'Namespace value sent on change'); + await selectChoose(CLIENT_COUNT.counts.namespaces, '.ember-power-select-option', 0); + + assertion = (params) => + assert.true(params.mountPath.includes('auth/'), 'Auth mount value sent on change'); + await selectChoose(CLIENT_COUNT.counts.mountPaths, 'auth/authid0'); + }); + + test('it should render start time discrepancy alert', async function (assert) { + this.startTimestamp = new Date('2022-06-01T00:00:00Z').toISOString(); + + await this.renderComponent(); + + assert + .dom(CLIENT_COUNT.counts.startDiscrepancy) + .hasText( + 'You requested data from June 2022. We only have data from July 2023, and that is what is being shown here.', + 'Start discrepancy alert renders' + ); + }); + + test('it renders alert if upgrade happened within queried activity', async function (assert) { + assert.expect(5); + this.versionHistory = await this.store.findAll('clients/version-history').then((resp) => { + return resp.map(({ version, previousVersion, timestampInstalled }) => { + return { + version, + previousVersion, + timestampInstalled, + }; + }); + }); + + await this.renderComponent(); + + assert + .dom(CLIENT_COUNT.upgradeWarning) + .hasTextContaining( + `Client count data contains 3 upgrades Vault was upgraded during this time period. Keep this in mind while looking at the data.
Visit our Client count FAQ for more information.`, + 'it renders title and subtext' + ); + + const [first, second, third] = findAll(`${CLIENT_COUNT.upgradeWarning} li`); + assert + .dom(first) + .hasText( + `1.9.1 (upgraded on Aug 2, 2023) - We introduced changes to non-entity token and local auth mount logic for client counting in 1.9.`, + 'alert includes 1.9.1 upgrade' + ); + assert + .dom(second) + .hasTextContaining( + `1.10.1 (upgraded on Sep 2, 2023) - We added monthly breakdowns and mount level attribution starting in 1.10.`, + 'alert includes 1.10.1 upgrade' + ); + assert + .dom(third) + .hasTextContaining( + `1.17.0 (upgraded on Dec 2, 2023) - We separated ACME clients from non-entity clients starting in 1.17.`, + 'alert includes 1.17.0 upgrade' + ); + assert + .dom(`${CLIENT_COUNT.upgradeWarning} ul`) + .doesNotHaveTextContaining( + '1.10.3', + 'Warning does not include subsequent patch releases (e.g. 1.10.3) of the same notable upgrade.' + ); + }); + + test('it should render empty state for no start when CE', async function (assert) { + this.owner.lookup('service:version').type = 'community'; + this.startTimestamp = null; + this.activity = {}; + + await this.renderComponent(); + + assert.dom(GENERAL.emptyStateTitle).hasText('No start date found', 'Empty state renders'); + assert.dom(CLIENT_COUNT.dateRange.edit).hasText('Set date range'); + }); + + test('it should render catch all empty state', async function (assert) { + this.activity.total = null; + + await this.renderComponent(); + + assert + .dom(GENERAL.emptyStateTitle) + .hasText('No data received from July 2023 to January 2024', 'Empty state renders'); + }); +}); diff --git a/ui/tests/integration/components/clients/page/overview-test.js b/ui/tests/integration/components/clients/page/overview-test.js new file mode 100644 index 000000000000..554478299431 --- /dev/null +++ b/ui/tests/integration/components/clients/page/overview-test.js @@ -0,0 +1,110 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { ACTIVITY_RESPONSE_STUB } from 'vault/tests/helpers/clients/client-count-helpers'; +import { filterActivityResponse } from 'vault/mirage/handlers/clients'; +import { CHARTS, CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; + +module('Integration | Component | clients/page/overview', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + this.server.get('sys/internal/counters/activity', (_, req) => { + const namespace = req.requestHeaders['X-Vault-Namespace']; + if (namespace === 'no-data') { + return { + request_id: 'some-activity-id', + data: { + by_namespace: [], + end_time: '2024-08-31T23:59:59Z', + months: [], + start_time: '2024-01-01T00:00:00Z', + total: { + distinct_entities: 0, + entity_clients: 0, + non_entity_tokens: 0, + non_entity_clients: 0, + clients: 0, + secret_syncs: 0, + }, + }, + }; + } + return { + request_id: 'some-activity-id', + data: filterActivityResponse(ACTIVITY_RESPONSE_STUB, namespace), + }; + }); + this.store = this.owner.lookup('service:store'); + this.mountPath = ''; + this.namespace = ''; + this.versionHistory = ''; + }); + + test('it hides attribution data when mount filter applied', async function (assert) { + this.mountPath = ''; + this.activity = await this.store.queryRecord('clients/activity', { + namespace: 'ns1', + }); + await render( + hbs`` + ); + + assert.dom(CHARTS.container('Vault client counts')).exists('shows running totals'); + assert.dom(CLIENT_COUNT.attributionBlock('namespace')).exists(); + assert.dom(CLIENT_COUNT.attributionBlock('mount')).exists(); + + this.set('mountPath', 'auth/authid/0'); + assert.dom(CHARTS.container('Vault client counts')).exists('shows running totals'); + assert.dom(CLIENT_COUNT.attributionBlock('namespace')).doesNotExist(); + assert.dom(CLIENT_COUNT.attributionBlock('mount')).doesNotExist(); + }); + + test('it hides attribution data when no data returned', async function (assert) { + this.mountPath = ''; + this.activity = await this.store.queryRecord('clients/activity', { + namespace: 'no-data', + }); + await render(hbs``); + assert.dom(CLIENT_COUNT.usageStats('Total usage')).exists(); + assert.dom(CHARTS.container('Vault client counts')).doesNotExist('usage stats instead of running totals'); + assert.dom(CLIENT_COUNT.attributionBlock('namespace')).doesNotExist(); + assert.dom(CLIENT_COUNT.attributionBlock('mount')).doesNotExist(); + }); + + test('it shows the correct mount attributions', async function (assert) { + this.nsService = this.owner.lookup('service:namespace'); + const rootActivity = await this.store.queryRecord('clients/activity', {}); + this.activity = rootActivity; + await render(hbs``); + // start at "root" namespace + let expectedMounts = rootActivity.byNamespace.find((ns) => ns.label === 'root').mounts; + assert + .dom(`${CLIENT_COUNT.attributionBlock('mount')} [data-test-group="y-labels"] text`) + .exists({ count: expectedMounts.length }); + assert + .dom(`${CLIENT_COUNT.attributionBlock('mount')} [data-test-group="y-labels"]`) + .includesText(expectedMounts[0].label); + + // now pretend we're querying within a child namespace + this.nsService.path = 'ns1'; + this.activity = await 
this.store.queryRecord('clients/activity', { + namespace: 'ns1', + }); + expectedMounts = rootActivity.byNamespace.find((ns) => ns.label === 'ns1').mounts; + assert + .dom(`${CLIENT_COUNT.attributionBlock('mount')} [data-test-group="y-labels"] text`) + .exists({ count: expectedMounts.length }); + assert + .dom(`${CLIENT_COUNT.attributionBlock('mount')} [data-test-group="y-labels"]`) + .includesText(expectedMounts[0].label); + }); +}); diff --git a/ui/tests/integration/components/clients/page/sync-test.js b/ui/tests/integration/components/clients/page/sync-test.js new file mode 100644 index 000000000000..604cde102d65 --- /dev/null +++ b/ui/tests/integration/components/clients/page/sync-test.js @@ -0,0 +1,207 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { render, findAll } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import syncHandler from 'vault/mirage/handlers/sync'; +import { getUnixTime } from 'date-fns'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CLIENT_COUNT, CHARTS } from 'vault/tests/helpers/clients/client-count-selectors'; +import { formatNumber } from 'core/helpers/format-number'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { dateFormat } from 'core/helpers/date-format'; +import { assertBarChart } from 'vault/tests/helpers/clients/client-count-helpers'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); +const { statText, usageStats } = CLIENT_COUNT; + +module('Integration | Component | clients | Clients::Page::Sync', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + this.renderComponent = () => + render(hbs` + + `); + }); + + module('with secrets sync not activated', function () { + test('it should render an empty state', async function (assert) { + await this.renderComponent(); + + assert.dom(GENERAL.emptyStateTitle).hasText('No Secrets Sync clients'); + assert + .dom(GENERAL.emptyStateMessage) + .hasText('No data is available because Secrets Sync has not been activated.'); + assert.dom(GENERAL.emptyStateActions).hasText('Activate Secrets Sync'); + + assert.dom(CHARTS.chart('Secrets sync usage')).doesNotExist(); + assert.dom(statText('Total sync clients')).doesNotExist(); + }); + }); + + module('with secrets sync activated', function (hooks) { + hooks.beforeEach(async function () { + syncHandler(this.server); + this.owner.lookup('service:flags').activatedFlags = ['secrets-sync']; + + this.store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + // set this to 0 + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.startTimestamp = START_TIME; + this.endTimestamp = END_TIME; + }); + + test('it should render with full month activity data', async function (assert) { + const monthCount = this.activity.byMonth.length; + assert.expect(7 + monthCount * 2); + const expectedTotal = formatNumber([this.activity.total.secret_syncs]); + const expectedNewAvg = formatNumber([ + calculateAverage( + this.activity.byMonth.map((m) => m?.new_clients), + 'secret_syncs' + ), + ]); + await 
this.renderComponent(); + + assert + .dom(statText('Total sync clients')) + .hasText( + `Total sync clients The total number of secrets synced from Vault to other destinations during this date range. ${expectedTotal}`, + `renders correct total sync stat ${expectedTotal}` + ); + assert.dom(statText('Average new sync clients per month')).hasTextContaining(`${expectedNewAvg}`); + + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert.dom(CHARTS.timestamp).hasText(`Updated ${formattedTimestamp}`, 'renders response timestamp'); + + assertBarChart(assert, 'Secrets sync usage', this.activity.byMonth); + assertBarChart(assert, 'Monthly new', this.activity.byMonth); + }); + + test('it should render stats without chart for a single month', async function (assert) { + const activityQuery = { start_time: { timestamp: END_TIME }, end_time: { timestamp: END_TIME } }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + const expectedTotal = formatNumber([this.activity.total.secret_syncs]); + await this.renderComponent(); + + assert.dom(CHARTS.chart('Secrets sync usage')).doesNotExist('total usage chart does not render'); + assert.dom(CHARTS.container('Monthly new')).doesNotExist('monthly new chart does not render'); + assert.dom(statText('Average new sync clients per month')).doesNotExist(); + assert + .dom(usageStats('Secrets sync usage')) + .hasText( + `Secrets sync usage Usage metrics tutorial This data can be used to understand how many secrets sync clients have been used for this date range. Each Vault secret that is synced to at least one destination counts as one Vault client. Total sync clients ${expectedTotal}`, + 'it renders usage stats with single month copy' + ); + }); + + // EMPTY STATES + test('it should render empty state when sync data does not exist for a date range', async function (assert) { + assert.expect(7); + // this happens when a user queries historical data that predates the monthly breakdown (added in 1.11) + // only entity + non-entity clients existed then, so we show an empty state for sync clients + // because the activity response just returns { secret_syncs: 0 } which isn't very clear + this.activity.byMonth = []; + + await this.renderComponent(); + + assert.dom(GENERAL.emptyStateTitle).hasText('No secrets sync clients'); + assert.dom(GENERAL.emptyStateMessage).hasText('There is no sync data available for this date range.'); + + assert.dom(CHARTS.chart('Secrets sync usage')).doesNotExist('vertical bar chart does not render'); + assert.dom(CHARTS.container('Monthly new')).doesNotExist('monthly new chart does not render'); + assert.dom(statText('Total sync clients')).doesNotExist(); + assert.dom(statText('Average new sync clients per month')).doesNotExist(); + assert.dom(usageStats('Secrets sync usage')).doesNotExist(); + }); + + test('it should render empty state when sync data does not exist for a single month', async function (assert) { + assert.expect(1); + const activityQuery = { start_time: { timestamp: START_TIME }, end_time: { timestamp: START_TIME } }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.activity.byMonth = []; + await this.renderComponent(); + + assert.dom(GENERAL.emptyStateMessage).hasText('There is no sync data available for this month.'); + }); + + test('it should render an empty total usage chart if secrets sync is activated but monthly syncs are null or 0', async function (assert) { + // 
manually stub because mirage isn't setup to handle mixed data yet + const counts = { + clients: 10, + entity_clients: 4, + non_entity_clients: 6, + secret_syncs: 0, + }; + const monthData = { + month: '1/24', + timestamp: '2024-01-01T00:00:00-08:00', + ...counts, + namespaces: [ + { + label: 'root', + ...counts, + mounts: [], + }, + ], + }; + this.activity.byMonth = [ + { + ...monthData, + new_clients: { + ...monthData, + }, + }, + ]; + this.activity.total = counts; + + assert.expect(6); + await this.renderComponent(); + + assert.dom(CHARTS.chart('Secrets sync usage')).exists('renders empty sync usage chart'); + assert + .dom(statText('Total sync clients')) + .hasText( + 'Total sync clients The total number of secrets synced from Vault to other destinations during this date range. 0' + ); + findAll(`${CHARTS.chart('Secrets sync usage')} ${CHARTS.xAxisLabel}`).forEach((e, i) => { + assert + .dom(e) + .hasText( + `${this.activity.byMonth[i].month}`, + `renders x-axis labels for empty bar chart: ${this.activity.byMonth[i].month}` + ); + }); + findAll(`${CHARTS.chart('Secrets sync usage')} ${CHARTS.verticalBar}`).forEach((e, i) => { + assert.dom(e).isNotVisible(`does not render data bar for: ${this.activity.byMonth[i].month}`); + }); + + assert + .dom(CHARTS.container('Monthly new')) + .doesNotExist('empty monthly new chart does not render at all'); + assert.dom(statText('Average new sync clients per month')).doesNotExist(); + }); + }); +}); diff --git a/ui/tests/integration/components/clients/page/token-test.js b/ui/tests/integration/components/clients/page/token-test.js new file mode 100644 index 000000000000..9528ae324d17 --- /dev/null +++ b/ui/tests/integration/components/clients/page/token-test.js @@ -0,0 +1,171 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import { getUnixTime } from 'date-fns'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { formatNumber } from 'core/helpers/format-number'; +import { dateFormat } from 'core/helpers/date-format'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CHARTS, CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; +import { assertBarChart } from 'vault/tests/helpers/clients/client-count-helpers'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); + +module('Integration | Component | clients | Clients::Page::Token', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + clientsHandler(this.server); + const store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + this.activity = await store.queryRecord('clients/activity', activityQuery); + this.newActivity = this.activity.byMonth.map((d) => d.new_clients); + this.versionHistory = await store + .findAll('clients/version-history') + .then((response) => { + return response.map(({ version, previousVersion, timestampInstalled }) => { + return { + version, + previousVersion, + timestampInstalled, + }; + }); + }) + .catch(() => []); + this.startTimestamp = START_TIME; + this.endTimestamp = END_TIME; + this.renderComponent = () => + render(hbs` + + `); + // Fails on #ember-testing-container + setRunOptions({ + rules: { + 'scrollable-region-focusable': { enabled: false }, + }, + }); + }); + + test('it should render monthly total chart', async function (assert) { + const count = this.activity.byMonth.length; + const { entity_clients, non_entity_clients } = this.activity.total; + assert.expect(count + 7); + + const expectedTotal = formatNumber([entity_clients + non_entity_clients]); + const chart = CHARTS.container('Entity/Non-entity clients usage'); + await this.renderComponent(); + + assert + .dom(CLIENT_COUNT.statTextValue('Total clients')) + .hasText(expectedTotal, 'renders correct total clients'); + + // assert bar chart is correct + assert.dom(`${chart} ${CHARTS.xAxis}`).hasText('7/23 8/23 9/23 10/23 11/23 12/23 1/24'); + assertBarChart(assert, 'Entity/Non-entity clients usage', this.activity.byMonth, true); + + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert.dom(`${chart} ${CHARTS.timestamp}`).hasText(`Updated ${formattedTimestamp}`, 'renders timestamp'); + assert.dom(`${chart} ${CHARTS.legendLabel(1)}`).hasText('Entity clients', 'Legend label renders'); + assert.dom(`${chart} ${CHARTS.legendLabel(2)}`).hasText('Non-entity clients', 'Legend label renders'); + }); + + test('it should render monthly new chart', async function (assert) { + const count = this.newActivity.length; + assert.expect(count + 8); + const expectedNewEntity = formatNumber([calculateAverage(this.newActivity, 'entity_clients')]); + const expectedNewNonEntity = 
formatNumber([calculateAverage(this.newActivity, 'non_entity_clients')]); + const chart = CHARTS.container('Monthly new'); + + await this.renderComponent(); + + assert + .dom(CLIENT_COUNT.statTextValue('Average new entity clients per month')) + .hasText(expectedNewEntity, 'renders correct new entity clients'); + assert + .dom(CLIENT_COUNT.statTextValue('Average new non-entity clients per month')) + .hasText(expectedNewNonEntity, 'renders correct new nonentity clients'); + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert.dom(`${chart} ${CHARTS.timestamp}`).hasText(`Updated ${formattedTimestamp}`, 'renders timestamp'); + assert.dom(`${chart} ${CHARTS.legendLabel(1)}`).hasText('Entity clients', 'Legend label renders'); + assert.dom(`${chart} ${CHARTS.legendLabel(2)}`).hasText('Non-entity clients', 'Legend label renders'); + + // assert bar chart is correct + assert.dom(`${chart} ${CHARTS.xAxis}`).hasText('7/23 8/23 9/23 10/23 11/23 12/23 1/24'); + assertBarChart(assert, 'Monthly new', this.newActivity, true); + }); + + test('it should render empty state for no new monthly data', async function (assert) { + this.activity.byMonth = this.activity.byMonth.map((d) => ({ + ...d, + new_clients: { month: d.month }, + })); + const chart = CHARTS.container('Monthly new'); + + await this.renderComponent(); + + assert.dom(`${chart} ${CHARTS.verticalBar}`).doesNotExist('Chart does not render'); + assert.dom(`${chart} ${CHARTS.legend}`).doesNotExist('Legend does not render'); + assert.dom(GENERAL.emptyStateTitle).hasText('No new clients'); + assert + .dom(CLIENT_COUNT.statText('Average new entity clients per month')) + .doesNotExist('New client counts does not exist'); + assert + .dom(CLIENT_COUNT.statText('Average new non-entity clients per month')) + .doesNotExist('Average new client counts does not exist'); + }); + + test('it should render usage stats', async function (assert) { + assert.expect(6); + + this.activity.endTime = this.activity.startTime; + const { + total: { entity_clients, non_entity_clients }, + } = this.activity; + + const checkUsage = () => { + assert + .dom(CLIENT_COUNT.statTextValue('Total clients')) + .hasText(formatNumber([entity_clients + non_entity_clients]), 'Total clients value renders'); + assert + .dom(CLIENT_COUNT.statTextValue('Entity')) + .hasText(formatNumber([entity_clients]), 'Entity value renders'); + assert + .dom(CLIENT_COUNT.statTextValue('Non-entity')) + .hasText(formatNumber([non_entity_clients]), 'Non-entity value renders'); + }; + + // total usage should display for single month query + await this.renderComponent(); + checkUsage(); + + // total usage should display when there is no monthly data + this.activity.byMonth = null; + await this.renderComponent(); + checkUsage(); + }); +}); diff --git a/ui/tests/integration/components/clients/running-total-test.js b/ui/tests/integration/components/clients/running-total-test.js index e2845ecf2182..e3963529b687 100644 --- a/ui/tests/integration/components/clients/running-total-test.js +++ b/ui/tests/integration/components/clients/running-total-test.js @@ -1,1588 +1,183 @@ /** * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; import { render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; -import { formatRFC3339 } from 'date-fns'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import sinon from 'sinon'; +import { formatRFC3339, getUnixTime } from 'date-fns'; import { findAll } from '@ember/test-helpers'; -import { calculateAverage } from 'vault/utils/chart-helpers'; import { formatNumber } from 'core/helpers/format-number'; +import timestamp from 'core/utils/timestamp'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; +import { CLIENT_COUNT, CHARTS } from 'vault/tests/helpers/clients/client-count-selectors'; + +const START_TIME = getUnixTime(LICENSE_START); module('Integration | Component | clients/running-total', function (hooks) { setupRenderingTest(hooks); - const MONTHLY_ACTIVITY = [ - { - month: '8/21', - timestamp: '2021-08-01T00:00:00Z', - counts: null, - namespaces: [], - new_clients: { - month: '8/21', - namespaces: [], - }, - namespaces_by_key: {}, - }, - { - month: '9/21', - clients: 19251, - entity_clients: 10713, - non_entity_clients: 8538, - namespaces: [ - { - label: 'root', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - }, - { - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - }, - { - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - }, - { - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - }, - { - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - }, - { - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 2698, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - }, - { - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - }, - { - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - }, - { - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - }, - { - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - }, - { - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - 
non_entity_clients: 31, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - mounts: [ - { - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - }, - { - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - }, - { - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - }, - { - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '9/21', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - new_clients: { - month: '9/21', - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - }, - }, - }, - 'test-ns-2/': { - month: '9/21', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - new_clients: { - month: '9/21', - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - non_entity_clients: 507, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - }, - }, - }, - 'test-ns-1/': { - month: '9/21', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 2698, - new_clients: { - month: '9/21', - label: 
'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - new_clients: { - month: '9/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - new_clients: { - month: '9/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - }, - mounts_by_key: { - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 
89, - non_entity_clients: 37, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - }, - }, - }, - }, - new_clients: { - month: '9/21', - clients: 9421, - entity_clients: 3947, - non_entity_clients: 5474, - namespaces: [ - { - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - { - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - { - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - { - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - ], - }, - { - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - { - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - { - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - { - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - mounts: [ - { - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - { - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - { - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - { - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - non_entity_clients: 507, - }, - { - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - { - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - { - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - mounts: [ - { - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - { - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - { - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - { - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - ], - }, - ], - }, - }, - { - month: '10/21', - clients: 19417, - 
entity_clients: 10105, - non_entity_clients: 9312, - namespaces: [ - { - label: 'root', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - }, - { - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - }, - { - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - }, - { - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - }, - { - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - }, - { - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - }, - { - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - }, - { - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - }, - { - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - mounts: [ - { - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - }, - { - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - }, - { - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - mounts: [ - { - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - }, - { - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - }, - { - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '10/21', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - new_clients: { - month: '10/21', - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1501, - entity_clients: 663, - 
non_entity_clients: 838, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - }, - }, - }, - 'test-ns-2/': { - month: '10/21', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - new_clients: { - month: '10/21', - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - new_clients: { - month: '10/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - 
non_entity_clients: 9, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - new_clients: { - month: '10/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - }, - }, - }, - 'test-ns-1/': { - month: '10/21', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - new_clients: { - month: '10/21', - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - }, - }, - }, - }, - new_clients: { - month: '10/21', - clients: 7659, - entity_clients: 3555, - non_entity_clients: 4104, - namespaces: [ - { - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - mounts: [ - { - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - { - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - { - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - { - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 
274, - }, - ], - }, - { - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - { - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - { - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - { - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - { - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - { - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - { - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - { - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - { - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - { - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - mounts: [ - { - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - { - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - { - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - { - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - ], - }, - ], - }, - }, - ]; - const NEW_ACTIVITY = MONTHLY_ACTIVITY.map((d) => d.new_clients); - const TOTAL_USAGE_COUNTS = { - clients: 38668, - entity_clients: 20818, - non_entity_clients: 17850, - }; - hooks.beforeEach(function () { - this.set('timestamp', formatRFC3339(new Date())); + setupMirage(hooks); + + hooks.beforeEach(async function () { + sinon.replace(timestamp, 'now', sinon.fake.returns(STATIC_NOW)); + clientsHandler(this.server); + const store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: getUnixTime(timestamp.now()) }, + }; + const activity = await store.queryRecord('clients/activity', activityQuery); + this.byMonthActivity = activity.byMonth; + this.newActivity = this.byMonthActivity.map((d) => d.new_clients); + this.totalUsageCounts = activity.total; + this.set('timestamp', formatRFC3339(timestamp.now())); this.set('chartLegend', [ { label: 'entity clients', key: 'entity_clients' }, { label: 'non-entity clients', key: 'non_entity_clients' }, ]); - }); - - test('it renders with full monthly activity data', async function (assert) { - this.set('byMonthActivityData', MONTHLY_ACTIVITY); - this.set('totalUsageCounts', TOTAL_USAGE_COUNTS); - const expectedTotalEntity = 
formatNumber([TOTAL_USAGE_COUNTS.entity_clients]); - const expectedTotalNonEntity = formatNumber([TOTAL_USAGE_COUNTS.non_entity_clients]); - const expectedNewEntity = formatNumber([calculateAverage(NEW_ACTIVITY, 'entity_clients')]); - const expectedNewNonEntity = formatNumber([calculateAverage(NEW_ACTIVITY, 'non_entity_clients')]); + this.isSecretsSyncActivated = true; + this.isHistoricalMonth = false; - await render(hbs` - + this.renderComponent = async () => { + await render(hbs` + <Clients::RunningTotal + @isSecretsSyncActivated={{this.isSecretsSyncActivated}} + @byMonthActivityData={{this.byMonthActivity}} + @runningTotals={{this.totalUsageCounts}} + @upgradeData={{this.upgradesDuringActivity}} + @responseTimestamp={{this.timestamp}} + @isHistoricalMonth={{this.isHistoricalMonth}} + /> `); + }; + // Fails on #ember-testing-container + setRunOptions({ + rules: { + 'scrollable-region-focusable': { enabled: false }, + }, + }); + }); - assert.dom('[data-test-running-total]').exists('running total component renders'); - assert.dom('[data-test-line-chart]').exists('line chart renders'); - assert.dom('[data-test-vertical-bar-chart]').exists('vertical bar chart renders'); - assert.dom('[data-test-running-total-legend]').exists('legend renders'); - assert.dom('[data-test-running-total-timestamp]').exists('renders timestamp'); - assert - .dom('[data-test-running-total-entity] p.data-details') - .hasText(`${expectedTotalEntity}`, `renders correct total average ${expectedTotalEntity}`); - assert - .dom('[data-test-running-total-nonentity] p.data-details') - .hasText(`${expectedTotalNonEntity}`, `renders correct new average ${expectedTotalNonEntity}`); - assert - .dom('[data-test-running-new-entity] p.data-details') - .hasText(`${expectedNewEntity}`, `renders correct total average ${expectedNewEntity}`); - assert - .dom('[data-test-running-new-nonentity] p.data-details') - .hasText(`${expectedNewNonEntity}`, `renders correct new average ${expectedNewNonEntity}`); + test('it renders with full monthly activity data', async function (assert) { + await this.renderComponent(); - // assert line chart is correct - findAll('[data-test-line-chart="x-axis-labels"] text').forEach((e, i) => { + assert.dom(CHARTS.container('Vault client counts')).exists('running total component renders'); + assert.dom(CHARTS.chart('Vault client counts')).exists('bar chart renders'); + + const expectedValues = { + 'Running client total': formatNumber([this.totalUsageCounts.clients]), + Entity: formatNumber([this.totalUsageCounts.entity_clients]), + 'Non-entity': formatNumber([this.totalUsageCounts.non_entity_clients]), + ACME: formatNumber([this.totalUsageCounts.acme_clients]), + 'Secret sync': formatNumber([this.totalUsageCounts.secret_syncs]), + }; + for (const label in expectedValues) { assert - .dom(e) + .dom(CLIENT_COUNT.statTextValue(label)) .hasText( - `${MONTHLY_ACTIVITY[i].month}`, - `renders x-axis labels for line chart: ${MONTHLY_ACTIVITY[i].month}` + `${expectedValues[label]}`, + `stat label: ${label} renders correct total: ${expectedValues[label]}` ); - }); - assert - .dom('[data-test-line-chart="plot-point"]') - .exists( - { count: MONTHLY_ACTIVITY.filter((m) => m.counts !== null).length }, - 'renders correct number of plot points' - ); + } - // assert bar chart is correct - findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { + // assert grouped bar chart is correct + findAll(CHARTS.xAxisLabel).forEach((e, i) => { assert .dom(e) .hasText( - `${MONTHLY_ACTIVITY[i].month}`, - `renders x-axis labels for bar chart: 
${MONTHLY_ACTIVITY[i].month}` + `${this.byMonthActivity[i].month}`, + `renders x-axis labels for bar chart: ${this.byMonthActivity[i].month}` ); }); assert - .dom('[data-test-vertical-chart="data-bar"]') - .exists( - { count: MONTHLY_ACTIVITY.filter((m) => m.counts !== null).length * 2 }, - 'renders correct number of data bars' - ); + .dom(CHARTS.verticalBar) + .exists({ count: this.byMonthActivity.length * 2 }, 'renders correct number of bars '); }); test('it renders with no new monthly data', async function (assert) { - const monthlyWithoutNew = MONTHLY_ACTIVITY.map((d) => ({ ...d, new_clients: { month: d.month } })); - this.set('byMonthActivityData', monthlyWithoutNew); - this.set('totalUsageCounts', TOTAL_USAGE_COUNTS); - const expectedTotalEntity = formatNumber([TOTAL_USAGE_COUNTS.entity_clients]); - const expectedTotalNonEntity = formatNumber([TOTAL_USAGE_COUNTS.non_entity_clients]); + this.byMonthActivity = this.byMonthActivity.map((d) => ({ + ...d, + new_clients: { month: d.month }, + })); - await render(hbs` - - - `); - assert.dom('[data-test-running-total]').exists('running total component renders'); - assert.dom('[data-test-line-chart]').exists('line chart renders'); - assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); - assert.dom('[data-test-running-total-legend]').doesNotExist('legend does not render'); - assert.dom('[data-test-component="empty-state"]').exists('renders empty state'); - assert.dom('[data-test-empty-state-title]').hasText('No new clients'); - assert.dom('[data-test-running-total-timestamp]').exists('renders timestamp'); - assert - .dom('[data-test-running-total-entity] p.data-details') - .hasText(`${expectedTotalEntity}`, `renders correct total average ${expectedTotalEntity}`); - assert - .dom('[data-test-running-total-nonentity] p.data-details') - .hasText(`${expectedTotalNonEntity}`, `renders correct new average ${expectedTotalNonEntity}`); - assert - .dom('[data-test-running-new-entity] p.data-details') - .doesNotExist('new client counts does not exist'); - assert - .dom('[data-test-running-new-nonentity] p.data-details') - .doesNotExist('average new client counts does not exist'); + await this.renderComponent(); + + assert.dom(CHARTS.container('Vault client counts')).exists('running total component renders'); + assert.dom(CHARTS.chart('Vault client counts')).exists('bar chart renders'); + + const expectedValues = { + Entity: formatNumber([this.totalUsageCounts.entity_clients]), + 'Non-entity': formatNumber([this.totalUsageCounts.non_entity_clients]), + ACME: formatNumber([this.totalUsageCounts.acme_clients]), + 'Secret sync': formatNumber([this.totalUsageCounts.secret_syncs]), + }; + for (const label in expectedValues) { + assert + .dom(CLIENT_COUNT.statTextValue(label)) + .hasText( + `${expectedValues[label]}`, + `stat label: ${label} renders correct total: ${expectedValues[label]}` + ); + } }); test('it renders with single historical month data', async function (assert) { - const singleMonth = MONTHLY_ACTIVITY[MONTHLY_ACTIVITY.length - 1]; - const singleMonthNew = NEW_ACTIVITY[NEW_ACTIVITY.length - 1]; - this.set('singleMonth', [singleMonth]); - const expectedTotalClients = formatNumber([singleMonth.clients]); - const expectedTotalEntity = formatNumber([singleMonth.entity_clients]); - const expectedTotalNonEntity = formatNumber([singleMonth.non_entity_clients]); - const expectedNewClients = formatNumber([singleMonthNew.clients]); - const expectedNewEntity = formatNumber([singleMonthNew.entity_clients]); - 
const expectedNewNonEntity = formatNumber([singleMonthNew.non_entity_clients]); + const singleMonth = this.byMonthActivity[this.byMonthActivity.length - 1]; + const singleMonthNew = this.newActivity[this.newActivity.length - 1]; + this.byMonthActivity = [singleMonth]; + this.isHistoricalMonth = true; - await render(hbs` - - - `); - assert.dom('[data-test-running-total]').exists('running total component renders'); - assert.dom('[data-test-line-chart]').doesNotExist('line chart does not render'); - assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); - assert.dom('[data-test-running-total-legend]').doesNotExist('legend does not render'); - assert.dom('[data-test-running-total-timestamp]').doesNotExist('renders timestamp'); - assert.dom('[data-test-stat-text-container]').exists({ count: 6 }, 'renders stat text containers'); - assert - .dom('[data-test-new] [data-test-stat-text-container="New clients"] div.stat-value') - .hasText(`${expectedNewClients}`, `renders correct total new clients: ${expectedNewClients}`); - assert - .dom('[data-test-new] [data-test-stat-text-container="Entity clients"] div.stat-value') - .hasText(`${expectedNewEntity}`, `renders correct total new entity: ${expectedNewEntity}`); - assert - .dom('[data-test-new] [data-test-stat-text-container="Non-entity clients"] div.stat-value') - .hasText(`${expectedNewNonEntity}`, `renders correct total new non-entity: ${expectedNewNonEntity}`); - assert - .dom('[data-test-total] [data-test-stat-text-container="Total monthly clients"] div.stat-value') - .hasText(`${expectedTotalClients}`, `renders correct total clients: ${expectedTotalClients}`); - assert - .dom('[data-test-total] [data-test-stat-text-container="Entity clients"] div.stat-value') - .hasText(`${expectedTotalEntity}`, `renders correct total entity: ${expectedTotalEntity}`); - assert - .dom('[data-test-total] [data-test-stat-text-container="Non-entity clients"] div.stat-value') - .hasText(`${expectedTotalNonEntity}`, `renders correct total non-entity: ${expectedTotalNonEntity}`); + await this.renderComponent(); + + let expectedStats = { + 'Total monthly clients': formatNumber([singleMonth.clients]), + Entity: formatNumber([singleMonth.entity_clients]), + 'Non-entity': formatNumber([singleMonth.non_entity_clients]), + ACME: formatNumber([singleMonth.acme_clients]), + 'Secret sync': formatNumber([singleMonth.secret_syncs]), + }; + for (const label in expectedStats) { + assert + .dom(`[data-test-total] ${CLIENT_COUNT.statTextValue(label)}`) + .hasText( + `${expectedStats[label]}`, + `stat label: ${label} renders single month total: ${expectedStats[label]}` + ); + } + + expectedStats = { + 'New clients': formatNumber([singleMonthNew.clients]), + Entity: formatNumber([singleMonthNew.entity_clients]), + 'Non-entity': formatNumber([singleMonthNew.non_entity_clients]), + ACME: formatNumber([singleMonthNew.acme_clients]), + 'Secret sync': formatNumber([singleMonthNew.secret_syncs]), + }; + for (const label in expectedStats) { + assert + .dom(`[data-test-new] ${CLIENT_COUNT.statTextValue(label)}`) + .hasText( + `${expectedStats[label]}`, + `stat label: ${label} renders single month new clients: ${expectedStats[label]}` + ); + } + assert.dom(CHARTS.chart('Vault client counts')).doesNotExist('bar chart does not render'); + assert.dom(CLIENT_COUNT.statTextValue()).exists({ count: 10 }, 'renders 10 stat text containers'); + }); + + test('it hides secret sync totals when feature is not activated', async function (assert) { + 
this.isSecretsSyncActivated = false; + + await this.renderComponent(); + + assert.dom(CHARTS.container('Vault client counts')).exists('running total component renders'); + assert.dom(CHARTS.chart('Vault client counts')).exists('bar chart renders'); + assert.dom(CLIENT_COUNT.statTextValue('Entity')).exists(); + assert.dom(CLIENT_COUNT.statTextValue('Non-entity')).exists(); + assert.dom(CLIENT_COUNT.statTextValue('Secret sync')).doesNotExist('does not render secret syncs'); }); }); diff --git a/ui/tests/integration/components/clients/usage-stats-test.js b/ui/tests/integration/components/clients/usage-stats-test.js index f77697523bff..c06166acf65b 100644 --- a/ui/tests/integration/components/clients/usage-stats-test.js +++ b/ui/tests/integration/components/clients/usage-stats-test.js @@ -1,6 +1,6 @@ /** * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; @@ -11,45 +11,22 @@ import { hbs } from 'ember-cli-htmlbars'; module('Integration | Component | clients/usage-stats', function (hooks) { setupRenderingTest(hooks); - test('it renders defaults', async function (assert) { - await render(hbs``); + test('it renders', async function (assert) { + await render( + hbs` + <Clients::UsageStats @title="My stats" @description="a very important description"> + yielded content! + </Clients::UsageStats> + ` + ); - assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts even with no data passed'); - assert.dom('[data-test-stat-text="total-clients"]').exists('Total clients exists'); - assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('-', 'renders dash when no data'); - assert.dom('[data-test-stat-text="entity-clients"]').exists('Entity clients exists'); + assert.dom('[data-test-usage-stats="My stats"]').exists(); + assert.dom('h3').hasText('My stats', 'title renders in h3 tag'); + assert.dom('p').hasText('a very important description', 'description renders in p tag'); assert - .dom('[data-test-stat-text="entity-clients"] .stat-value') - .hasText('-', 'renders dash when no data'); - assert.dom('[data-test-stat-text="non-entity-clients"]').exists('Non entity clients exists'); - assert - .dom('[data-test-stat-text="non-entity-clients"] .stat-value') - .hasText('-', 'renders dash when no data'); + .dom('[data-test-usage-stats="My stats"]') + .hasTextContaining('yielded content!', 'it renders yielded content'); assert .dom('a') .hasAttribute('href', 'https://developer.hashicorp.com/vault/tutorials/monitoring/usage-metrics'); }); - - test('it renders with data', async function (assert) { - this.set('counts', { - clients: 17, - entity_clients: 7, - non_entity_clients: 10, - }); - await render(hbs``); - - assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts even with no data passed'); - assert.dom('[data-test-stat-text="total-clients"]').exists('Total clients exists'); - assert - .dom('[data-test-stat-text="total-clients"] .stat-value') - .hasText('17', 'Total clients shows passed value'); - assert.dom('[data-test-stat-text="entity-clients"]').exists('Entity clients exists'); - assert - .dom('[data-test-stat-text="entity-clients"] .stat-value') - .hasText('7', 'entity clients shows passed value'); - assert.dom('[data-test-stat-text="non-entity-clients"]').exists('Non entity clients exists'); - assert - .dom('[data-test-stat-text="non-entity-clients"] .stat-value') - .hasText('10', 'non entity clients shows passed value'); - }); }); diff --git a/ui/tests/integration/components/clients/vertical-bar-chart-test.js 
b/ui/tests/integration/components/clients/vertical-bar-chart-test.js deleted file mode 100644 index 2300b91b1708..000000000000 --- a/ui/tests/integration/components/clients/vertical-bar-chart-test.js +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: MPL-2.0 - */ - -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render, findAll, find, triggerEvent } from '@ember/test-helpers'; -import { hbs } from 'ember-cli-htmlbars'; - -module('Integration | Component | clients/vertical-bar-chart', function (hooks) { - setupRenderingTest(hooks); - hooks.beforeEach(function () { - this.set('chartLegend', [ - { label: 'entity clients', key: 'entity_clients' }, - { label: 'non-entity clients', key: 'non_entity_clients' }, - ]); - }); - - test('it renders chart and tooltip for total clients', async function (assert) { - const barChartData = [ - { month: 'january', clients: 141, entity_clients: 91, non_entity_clients: 50, new_clients: 5 }, - { month: 'february', clients: 251, entity_clients: 101, non_entity_clients: 150, new_clients: 5 }, - ]; - this.set('barChartData', barChartData); - - await render(hbs` -
- <Clients::VerticalBarChart @dataset={{barChartData}} @chartLegend={{chartLegend}} />
- `); - const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); - assert.dom('[data-test-vertical-bar-chart]').exists('renders chart'); - assert - .dom('[data-test-vertical-chart="data-bar"]') - .exists({ count: barChartData.length * 2 }, 'renders correct number of bars'); // multiply length by 2 because bars are stacked - - assert.dom(find('[data-test-vertical-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); - findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { - assert.dom(e).hasText(`${barChartData[i].month}`, `renders x-axis label: ${barChartData[i].month}`); - }); - - for (const [i, bar] of tooltipHoverBars.entries()) { - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert - .dom(tooltip) - .includesText( - `${barChartData[i].clients} total clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, - 'tooltip text is correct' - ); - } - }); - - test('it renders chart and tooltip for new clients', async function (assert) { - const barChartData = [ - { month: 'january', entity_clients: 91, non_entity_clients: 50, clients: 0 }, - { month: 'february', entity_clients: 101, non_entity_clients: 150, clients: 110 }, - ]; - this.set('barChartData', barChartData); - - await render(hbs` -
- <Clients::VerticalBarChart @dataset={{barChartData}} @chartLegend={{chartLegend}} />
- `); - - const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); - assert.dom('[data-test-vertical-bar-chart]').exists('renders chart'); - assert - .dom('[data-test-vertical-chart="data-bar"]') - .exists({ count: barChartData.length * 2 }, 'renders correct number of bars'); // multiply length by 2 because bars are stacked - - assert.dom(find('[data-test-vertical-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); - findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { - assert.dom(e).hasText(`${barChartData[i].month}`, `renders x-axis label: ${barChartData[i].month}`); - }); - - for (const [i, bar] of tooltipHoverBars.entries()) { - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert - .dom(tooltip) - .includesText( - `${barChartData[i].clients} new clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, - 'tooltip text is correct' - ); - } - }); - - test('it renders empty state when no dataset', async function (assert) { - await render(hbs` -
- <Clients::VerticalBarChart @noDataMessage="this is a custom message to explain why you're not seeing a vertical bar chart" />
- `); - - assert.dom('[data-test-component="empty-state"]').exists('renders empty state when no data'); - assert - .dom('[data-test-empty-state-subtext]') - .hasText( - `this is a custom message to explain why you're not seeing a vertical bar chart`, - 'custom message renders' - ); - }); -}); diff --git a/ui/tests/integration/components/config-ui/messages/page/create-and-edit-test.js b/ui/tests/integration/components/config-ui/messages/page/create-and-edit-test.js new file mode 100644 index 000000000000..e08430ed6285 --- /dev/null +++ b/ui/tests/integration/components/config-ui/messages/page/create-and-edit-test.js @@ -0,0 +1,267 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setupEngine } from 'ember-engines/test-support'; +import { render, click, fillIn } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { datetimeLocalStringFormat } from 'core/utils/date-formatters'; +import { format, addDays, startOfDay } from 'date-fns'; +import { CUSTOM_MESSAGES } from 'vault/tests/helpers/config-ui/message-selectors'; +import timestamp from 'core/utils/timestamp'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; + +module('Integration | Component | messages/page/create-and-edit', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'config-ui'); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.context = { owner: this.engine }; + this.store = this.owner.lookup('service:store'); + this.message = this.store.createRecord('config-ui/message'); + }); + + test('it should display all the create form fields and default radio button values', async function (assert) { + assert.expect(17); + + await render(hbs``, { + owner: this.engine, + }); + + assert.dom(GENERAL.title).hasText('Create message'); + assert.dom(CUSTOM_MESSAGES.radio('authenticated')).exists(); + assert.dom(CUSTOM_MESSAGES.radio('unauthenticated')).exists(); + assert.dom(CUSTOM_MESSAGES.radio('authenticated')).isChecked(); + assert.dom(CUSTOM_MESSAGES.radio('unauthenticated')).isNotChecked(); + assert.dom(CUSTOM_MESSAGES.radio('banner')).exists(); + assert.dom(CUSTOM_MESSAGES.radio('modal')).exists(); + assert.dom(CUSTOM_MESSAGES.radio('banner')).isChecked(); + assert.dom(CUSTOM_MESSAGES.radio('modal')).isNotChecked(); + assert.dom(CUSTOM_MESSAGES.field('title')).exists(); + assert.dom(CUSTOM_MESSAGES.field('message')).exists(); + assert.dom('[data-test-kv-key="0"]').exists(); + assert.dom('[data-test-kv-value="0"]').exists(); + assert.dom(CUSTOM_MESSAGES.input('startTime')).exists(); + assert + .dom(CUSTOM_MESSAGES.input('startTime')) + .hasValue(format(addDays(startOfDay(timestamp.now()), 1), datetimeLocalStringFormat)); + assert.dom(CUSTOM_MESSAGES.input('endTime')).exists(); + assert.dom(CUSTOM_MESSAGES.input('endTime')).hasValue(''); + }); + + test('it should display validation errors for invalid form fields', async function (assert) { + assert.expect(8); + await render(hbs``, { + owner: this.engine, + }); + + await fillIn(CUSTOM_MESSAGES.input('startTime'), '2024-01-20T00:00'); + await fillIn(CUSTOM_MESSAGES.input('endTime'), '2024-01-01T00:00'); + await click(CUSTOM_MESSAGES.button('create-message')); + assert.dom(CUSTOM_MESSAGES.input('title')).hasClass('has-error-border'); + assert + .dom(`${CUSTOM_MESSAGES.fieldValidation('title')} 
${CUSTOM_MESSAGES.inlineErrorMessage}`) + .hasText('Title is required.'); + assert.dom(CUSTOM_MESSAGES.input('message')).hasClass('has-error-border'); + assert + .dom(`${CUSTOM_MESSAGES.fieldValidation('message')} ${CUSTOM_MESSAGES.inlineErrorMessage}`) + .hasText('Message is required.'); + assert.dom(CUSTOM_MESSAGES.input('startTime')).hasClass('has-error-border'); + assert + .dom(`${CUSTOM_MESSAGES.fieldValidation('startTime')} ${CUSTOM_MESSAGES.inlineErrorMessage}`) + .hasText('Start time is after end time.'); + assert.dom(CUSTOM_MESSAGES.input('endTime')).hasClass('has-error-border'); + assert + .dom(`${CUSTOM_MESSAGES.fieldValidation('endTime')} ${CUSTOM_MESSAGES.inlineErrorMessage}`) + .hasText('End time is before start time.'); + }); + + test('it should create new message', async function (assert) { + assert.expect(1); + + this.server.post('/sys/config/ui/custom-messages', () => { + assert.ok(true, 'POST request made to create message'); + }); + + await render(hbs``, { + owner: this.engine, + }); + await fillIn(CUSTOM_MESSAGES.input('title'), 'Awesome custom message title'); + await fillIn( + CUSTOM_MESSAGES.input('message'), + 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.' + ); + await fillIn( + CUSTOM_MESSAGES.input('startTime'), + format(addDays(startOfDay(new Date('2023-12-12')), 1), datetimeLocalStringFormat) + ); + await click('#specificDate'); + await fillIn( + CUSTOM_MESSAGES.input('endTime'), + format(addDays(startOfDay(new Date('2023-12-12')), 10), datetimeLocalStringFormat) + ); + await fillIn('[data-test-kv-key="0"]', 'Learn more'); + await fillIn('[data-test-kv-value="0"]', 'www.learn.com'); + await click(CUSTOM_MESSAGES.button('create-message')); + }); + + test('it should have form validations', async function (assert) { + assert.expect(4); + await render(hbs``, { + owner: this.engine, + }); + await click(CUSTOM_MESSAGES.button('create-message')); + assert + .dom(CUSTOM_MESSAGES.input('title')) + .hasClass('has-error-border', 'show error border for title field'); + assert + .dom(`${CUSTOM_MESSAGES.fieldValidation('title')} ${CUSTOM_MESSAGES.inlineErrorMessage}`) + .hasText('Title is required.'); + assert + .dom(CUSTOM_MESSAGES.input('message')) + .hasClass('has-error-border', 'show error border for message field'); + assert + .dom(`${CUSTOM_MESSAGES.fieldValidation('message')} ${CUSTOM_MESSAGES.inlineErrorMessage}`) + .hasText('Message is required.'); + }); + + test('it should prepopulate form if form is in edit mode', async function (assert) { + assert.expect(13); + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: 'hhhhh-iiii-lllll-dddd', + type: 'modal', + authenticated: false, + title: 'Hello world', + message: 'Blah blah blah. 
Some super long message.', + start_time: '2023-12-12T08:00:00.000Z', + end_time: '2023-12-21T08:00:00.000Z', + link: { 'Learn more': 'www.learnmore.com' }, + }); + this.message = this.store.peekRecord('config-ui/message', 'hhhhh-iiii-lllll-dddd'); + await render(hbs``, { + owner: this.engine, + }); + + assert.dom(GENERAL.title).hasText('Edit message'); + assert.dom(CUSTOM_MESSAGES.radio('authenticated')).exists(); + assert.dom(CUSTOM_MESSAGES.radio('unauthenticated')).isChecked(); + assert.dom(CUSTOM_MESSAGES.radio('modal')).exists(); + assert.dom(CUSTOM_MESSAGES.radio('modal')).isChecked(); + assert.dom(CUSTOM_MESSAGES.input('title')).hasValue('Hello world'); + assert.dom(CUSTOM_MESSAGES.input('message')).hasValue('Blah blah blah. Some super long message.'); + assert.dom('[data-test-kv-key="0"]').exists(); + assert.dom('[data-test-kv-key="0"]').hasValue('Learn more'); + assert.dom('[data-test-kv-value="0"]').exists(); + assert.dom('[data-test-kv-value="0"]').hasValue('www.learnmore.com'); + await click('#specificDate'); + assert + .dom(CUSTOM_MESSAGES.input('startTime')) + .hasValue(format(new Date(this.message.startTime), datetimeLocalStringFormat)); + assert + .dom(CUSTOM_MESSAGES.input('endTime')) + .hasValue(format(new Date(this.message.endTime), datetimeLocalStringFormat)); + }); + + test('it should show a preview image modal when preview is clicked', async function (assert) { + assert.expect(6); + await render(hbs``, { + owner: this.engine, + }); + await fillIn(CUSTOM_MESSAGES.input('title'), 'Awesome custom message title'); + await fillIn( + CUSTOM_MESSAGES.input('message'), + 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.' + ); + await click(CUSTOM_MESSAGES.button('preview')); + assert.dom(CUSTOM_MESSAGES.modal('preview modal')).doesNotExist(); + assert.dom(CUSTOM_MESSAGES.modal('preview image')).exists(); + assert + .dom(CUSTOM_MESSAGES.alertTitle('Awesome custom message title')) + .hasText('Awesome custom message title'); + assert + .dom(CUSTOM_MESSAGES.alertDescription('Awesome custom message title')) + .hasText( + 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.' 
+ );
 + assert.dom('img').hasAttribute('src', '/ui/images/custom-messages-dashboard.png'); + await click(CUSTOM_MESSAGES.modalButton('Close')); + await click('#unauthenticated'); + await click(CUSTOM_MESSAGES.button('preview')); + assert.dom('img').hasAttribute('src', '/ui/images/custom-messages-login.png'); + }); + + test('it should show a preview modal when preview is clicked', async function (assert) { + assert.expect(4); + await render(hbs``, { + owner: this.engine, + }); + await click(CUSTOM_MESSAGES.radio('modal')); + await fillIn(CUSTOM_MESSAGES.input('title'), 'Preview modal title'); + await fillIn(CUSTOM_MESSAGES.input('message'), 'Some preview modal message that's super long.'); + await click(CUSTOM_MESSAGES.button('preview')); + assert.dom(CUSTOM_MESSAGES.modal('preview modal')).exists(); + assert.dom(CUSTOM_MESSAGES.modal('preview image')).doesNotExist(); + assert.dom(CUSTOM_MESSAGES.modalTitle('Preview modal title')).hasText('Preview modal title'); + assert + .dom(CUSTOM_MESSAGES.modalBody('Preview modal title')) + .hasText('Some preview modal message that's super long.'); + }); + + test('it should show multiple modal message', async function (assert) { + assert.expect(2); + + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '01234567-89ab-cdef-0123-456789abcdef', + active: true, + type: 'modal', + authenticated: true, + title: 'Message title 1', + message: 'Some long long long message', + link: { here: 'www.example.com' }, + startTime: '2021-08-01T00:00:00Z', + endTime: '', + }); + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '01234567-89ab-vvvv-0123-456789abcdef', + active: true, + type: 'modal', + authenticated: false, + title: 'Message title 2', + message: 'Some long long long message', + link: { here: 'www.example.com' }, + startTime: '2021-08-01T00:00:00Z', + endTime: '2090-08-01T00:00:00Z', + }); + + this.messages = this.store.peekAll('config-ui/message'); + + await render( + hbs``, + { + owner: this.engine, + } + ); + await fillIn(CUSTOM_MESSAGES.input('title'), 'Awesome custom message title'); + await fillIn( + CUSTOM_MESSAGES.input('message'), + 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.' + ); + await click(CUSTOM_MESSAGES.radio('modal')); + await click(CUSTOM_MESSAGES.button('create-message')); + assert.dom(CUSTOM_MESSAGES.modalTitle('Warning: more than one modal')).exists(); + assert + .dom(CUSTOM_MESSAGES.modalBody('Warning: more than one modal')) + .hasText( + 'You have an active modal configured after the user logs in and are trying to create another one. It is recommended to avoid having more than one modal at once as it can be intrusive for users. Would you like to continue creating your message? Click “Confirm” to continue.' + ); + await click(CUSTOM_MESSAGES.modalButton('confirm')); + }); +}); diff --git a/ui/tests/integration/components/config-ui/messages/page/details-test.js b/ui/tests/integration/components/config-ui/messages/page/details-test.js new file mode 100644 index 000000000000..84473f1dc6c7 --- /dev/null +++ b/ui/tests/integration/components/config-ui/messages/page/details-test.js @@ -0,0 +1,92 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setupEngine } from 'ember-engines/test-support'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { dateFormat } from 'core/helpers/date-format'; + +const allFields = [ + { label: 'Active', key: 'active' }, + { label: 'Type', key: 'type' }, + { label: 'Authenticated', key: 'authenticated' }, + { label: 'Title', key: 'title' }, + { label: 'Message', key: 'message' }, + { label: 'Start time', key: 'startTime' }, + { label: 'End time', key: 'endTime' }, + { label: 'Link', key: 'link' }, +]; + +module('Integration | Component | messages/page/details', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'config-ui'); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.context = { owner: this.engine }; + this.store = this.owner.lookup('service:store'); + + this.server.post('/sys/capabilities-self', () => ({ + data: { + capabilities: ['root'], + }, + })); + + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '01234567-89ab-cdef-0123-456789abcdef', + active: true, + type: 'banner', + authenticated: true, + title: 'Message title 1', + message: 'Some long long long message', + link: { here: 'www.example.com' }, + start_time: '2021-08-01T00:00:00Z', + end_time: '', + canDeleteCustomMessages: true, + canEditCustomMessages: true, + }); + }); + + test('it should show the message details', async function (assert) { + this.message = await this.store.peekRecord('config-ui/message', '01234567-89ab-cdef-0123-456789abcdef'); + + await render(hbs``, { + owner: this.engine, + }); + assert.dom('[data-test-page-title]').hasText('Message title 1'); + assert + .dom('[data-test-component="info-table-row"]') + .exists({ count: allFields.length }, 'Correct number of filtered fields render'); + + allFields.forEach((field) => { + assert + .dom(`[data-test-row-label="${field.label}"]`) + .hasText(field.label, `${field.label} label renders`); + if (field.key === 'startTime' || field.key === 'endTime') { + const formattedDate = dateFormat([this.message[field.key], 'MMM d, yyyy hh:mm aaa'], { + withTimeZone: true, + }); + assert + .dom(`[data-test-row-value="${field.label}"]`) + .hasText(formattedDate || 'Never', `${field.label} value renders`); + } else if (field.key === 'authenticated' || field.key === 'active') { + assert + .dom(`[data-test-value-div="${field.label}"]`) + .hasText(this.message[field.key] ? 'Yes' : 'No', `${field.label} value renders`); + } else if (field.key === 'link') { + assert.dom('[data-test-value-div="Link"]').exists(); + assert.dom('[data-test-value-div="Link"] [data-test-link="message link"]').hasText('here'); + } else { + assert + .dom(`[data-test-row-value="${field.label}"]`) + .hasText(this.message[field.key], `${field.label} value renders`); + } + }); + }); +}); diff --git a/ui/tests/integration/components/config-ui/messages/page/list-test.js b/ui/tests/integration/components/config-ui/messages/page/list-test.js new file mode 100644 index 000000000000..bd95a13d27b8 --- /dev/null +++ b/ui/tests/integration/components/config-ui/messages/page/list-test.js @@ -0,0 +1,146 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setupEngine } from 'ember-engines/test-support'; +import { render, click } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { CUSTOM_MESSAGES } from 'vault/tests/helpers/config-ui/message-selectors'; +import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs'; + +const META = { + currentPage: 1, + lastPage: 1, + nextPage: 1, + prevPage: 1, + total: 3, + pageSize: 15, +}; + +module('Integration | Component | messages/page/list', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'config-ui'); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.server.post('/sys/capabilities-self', allowAllCapabilitiesStub()); + this.store = this.owner.lookup('service:store'); + + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '0', + active: true, + type: 'banner', + authenticated: true, + title: 'Message title 1', + message: 'Some long long long message', + link: { title: 'here', href: 'www.example.com' }, + start_time: '2021-08-01T00:00:00Z', + end_time: '', + }); + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '1', + active: false, + type: 'modal', + authenticated: true, + title: 'Message title 2', + message: 'Some long long long message blah blah blah', + link: { title: 'here', href: 'www.example2.com' }, + start_time: '2023-07-01T00:00:00Z', + end_time: '2023-08-01T00:00:00Z', + }); + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '2', + active: false, + type: 'banner', + authenticated: false, + title: 'Message title 3', + message: 'Some long long long message', + link: { title: 'here', href: 'www.example.com' }, + }); + }); + + test('it should show the messages empty state', async function (assert) { + this.messages = []; + + await render(hbs``, { + owner: this.engine, + }); + + assert.dom('[data-test-empty-state-title]').hasText('No messages yet'); + assert + .dom('[data-test-empty-state-message]') + .hasText( + 'Add a custom message for all users after they log into Vault. Create message to get started.' 
+ ); + }); + + test('it should show the list of custom messages', async function (assert) { + this.messages = this.store.peekAll('config-ui/message', {}); + this.messages.meta = META; + await render(hbs``, { + owner: this.engine, + }); + assert.dom('[data-test-icon="message-circle"]').exists(); + for (const message of this.messages) { + assert.dom(CUSTOM_MESSAGES.listItem('Message title 1')).exists(); + assert.dom(`[data-linked-block-title="${message.id}"]`).hasText(message.title); + } + }); + + test('it should show max message warning modal', async function (assert) { + for (let i = 0; i < 97; i++) { + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: `${i}-a`, + active: true, + type: 'banner', + authenticated: false, + title: `Message title ${i}`, + message: 'Some long long long message', + link: { title: 'here', href: 'www.example.com' }, + start_time: '2021-08-01T00:00:00Z', + }); + } + + this.messages = this.store.peekAll('config-ui/message', {}); + this.messages.meta = { + currentPage: 1, + lastPage: 1, + nextPage: 1, + prevPage: 1, + total: this.messages.length, + pageSize: 100, + }; + await render(hbs``, { + owner: this.engine, + }); + await click(CUSTOM_MESSAGES.button('create message')); + assert + .dom(CUSTOM_MESSAGES.modalTitle('maximum-message-modal')) + .hasText('Maximum number of messages reached'); + assert + .dom(CUSTOM_MESSAGES.modalBody('maximum-message-modal')) + .hasText( + 'Vault can only store up to 100 messages. To create a message, delete one of your messages to clear up space.' + ); + await click(CUSTOM_MESSAGES.modalButton('maximum-message-modal')); + }); + + test('it should show the correct badge colors based on badge status', async function (assert) { + this.messages = this.store.peekAll('config-ui/message', {}); + this.messages.meta = META; + await render(hbs``, { + owner: this.engine, + }); + assert.dom(CUSTOM_MESSAGES.badge('0')).hasClass('hds-badge--color-success'); + assert.dom(CUSTOM_MESSAGES.badge('1')).hasClass('hds-badge--color-neutral'); + assert.dom(CUSTOM_MESSAGES.badge('2')).hasClass('hds-badge--color-highlight'); + }); +}); diff --git a/ui/tests/integration/components/confirm-action-test.js b/ui/tests/integration/components/confirm-action-test.js index 9b30c33266f2..0883b8e15333 100644 --- a/ui/tests/integration/components/confirm-action-test.js +++ b/ui/tests/integration/components/confirm-action-test.js @@ -1,6 +1,6 @@ /** * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: MPL-2.0 + * SPDX-License-Identifier: BUSL-1.1 */ import { module, test } from 'qunit'; @@ -8,51 +8,136 @@ import { setupRenderingTest } from 'ember-qunit'; import { render, click } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; import sinon from 'sinon'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; +const SELECTORS = { + modalToggle: '[data-test-confirm-action-trigger]', + title: '[data-test-confirm-action-title]', + message: '[data-test-confirm-action-message]', + confirm: '[data-test-confirm-button]', + cancel: '[data-test-confirm-cancel-button]', +}; module('Integration | Component | confirm-action', function (hooks) { setupRenderingTest(hooks); - test('it renders and on click shows the correct icon', async function (assert) { - const confirmAction = sinon.spy(); - this.set('onConfirm', confirmAction); + hooks.beforeEach(function () { + this.onConfirm = sinon.spy(); + }); + + test('it renders defaults and calls onConfirmAction', async function (assert) { await render(hbs` - <ConfirmAction @onConfirmAction={{this.onConfirm}}> - DELETE - </ConfirmAction> + <ConfirmAction + @buttonText="DELETE" + @onConfirmAction={{this.onConfirm}} + /> `); - assert.dom('[data-test-icon="chevron-down"]').exists('Icon is pointing down'); - await click('[data-test-confirm-action-trigger="true"]'); - assert.dom('[data-test-icon="chevron-up"]').exists('Icon is now pointing up'); - assert.dom('[data-test-confirm-action-title]').hasText('Delete this?'); + + assert.dom(SELECTORS.modalToggle).hasText('DELETE', 'renders button text'); + await click(SELECTORS.modalToggle); + assert + .dom('#confirm-action-modal') + .hasClass('hds-modal--color-critical', 'renders critical modal color by default'); + assert.dom(SELECTORS.confirm).hasClass('hds-button--color-critical', 'renders critical confirm button'); + assert.dom(SELECTORS.title).hasText('Are you sure?', 'renders default title'); + assert + .dom(SELECTORS.message) + .hasText('You will not be able to recover it later.', 'renders default body text'); + await click(SELECTORS.cancel); + assert.false(this.onConfirm.called, 'does not call the action when Cancel is clicked'); + await click(SELECTORS.modalToggle); + await click(SELECTORS.confirm); + assert.true(this.onConfirm.called, 'calls the action when Confirm is clicked'); + assert.dom(SELECTORS.title).doesNotExist('modal closes after confirm is clicked'); }); - test('it closes the confirmation modal on successful delete', async function (assert) { - const confirmAction = sinon.spy(); - this.set('onConfirm', confirmAction); + test('it renders isInDropdown defaults and calls onConfirmAction', async function (assert) { + setRunOptions({ + rules: { + // this component breaks this rule because it expects to be rendered within